From 2631659551d530cefabced743c548cea979305dd Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Tue, 15 Mar 2022 07:42:43 -0500 Subject: [PATCH 1/2] ci: swap ci parallelization for unconstrained gomaxprocs --- .circleci/config.yml | 3 +- acl/acl_test.go | 19 ++ acl/policy_test.go | 5 + ci/slow.go | 25 ++ client/acl_test.go | 11 +- client/agent_endpoint_test.go | 20 +- client/alloc_endpoint_test.go | 39 ++- client/alloc_watcher_e2e_test.go | 3 +- client/allocdir/alloc_dir_test.go | 20 ++ client/allocdir/fs_linux_test.go | 5 + client/allocdir/task_dir_test.go | 9 +- client/allochealth/tracker_test.go | 15 +- client/allocrunner/alloc_runner_test.go | 39 ++- client/allocrunner/alloc_runner_unix_test.go | 7 +- .../allocrunner/consul_grpc_sock_hook_test.go | 7 +- .../allocrunner/consul_http_sock_hook_test.go | 5 +- client/allocrunner/csi_hook_test.go | 2 + client/allocrunner/groupservice_hook_test.go | 11 +- client/allocrunner/health_hook_test.go | 17 +- client/allocrunner/network_hook_test.go | 3 + .../allocrunner/network_manager_linux_test.go | 3 + client/allocrunner/networking_cni_test.go | 5 + .../allocrunner/task_hook_coordinator_test.go | 22 +- .../taskrunner/artifact_hook_test.go | 5 +- .../taskrunner/connect_native_hook_test.go | 27 +- .../taskrunner/device_hook_test.go | 5 +- .../taskrunner/dispatch_hook_test.go | 7 +- .../taskrunner/envoy_bootstrap_hook_test.go | 25 +- .../taskrunner/envoy_version_hook_test.go | 21 +- client/allocrunner/taskrunner/errors_test.go | 5 +- .../taskrunner/logmon_hook_test.go | 5 +- .../taskrunner/logmon_hook_unix_test.go | 5 +- .../taskrunner/restarts/restarts_test.go | 24 +- .../taskrunner/script_check_hook_test.go | 13 +- .../taskrunner/service_hook_test.go | 2 + .../allocrunner/taskrunner/sids_hook_test.go | 25 +- .../allocrunner/taskrunner/stats_hook_test.go | 9 +- .../taskrunner/task_runner_test.go | 83 ++--- client/allocrunner/taskrunner/tasklet_test.go | 9 +- .../taskrunner/template/template_test.go | 57 ++-- 
.../taskrunner/validate_hook_test.go | 5 +- .../taskrunner/volume_hook_test.go | 5 + client/allocwatcher/alloc_watcher_test.go | 12 +- .../allocwatcher/alloc_watcher_unix_test.go | 4 +- .../allocwatcher/group_alloc_watcher_test.go | 6 +- client/client_stats_endpoint_test.go | 6 +- client/client_test.go | 70 ++-- client/config/config_test.go | 29 ++ client/consul/identities_test.go | 5 + client/csi_endpoint_test.go | 21 +- client/devicemanager/manager_test.go | 11 +- client/driver_manager_test.go | 7 +- client/dynamicplugins/registry_test.go | 22 +- client/fingerprint/arch_test.go | 3 + client/fingerprint/bridge_linux_test.go | 5 + client/fingerprint/cgroup_test.go | 3 + client/fingerprint/cni_test.go | 3 + client/fingerprint/consul_test.go | 23 +- client/fingerprint/cpu_test.go | 5 + client/fingerprint/env_aws_test.go | 19 ++ client/fingerprint/env_azure_test.go | 7 + client/fingerprint/env_digitalocean_test.go | 5 + client/fingerprint/env_gce_test.go | 7 + client/fingerprint/host_test.go | 3 + client/fingerprint/memory_test.go | 5 + client/fingerprint/network_test.go | 17 + client/fingerprint/nomad_test.go | 3 + client/fingerprint/signal_test.go | 3 + client/fingerprint/storage_test.go | 3 + client/fingerprint/vault_test.go | 3 + client/fingerprint_manager_test.go | 17 +- client/fs_endpoint_test.go | 47 +-- client/gc_test.go | 33 +- client/heartbeatstop_test.go | 3 +- client/logmon/logmon_test.go | 9 +- .../pluginmanager/csimanager/volume_test.go | 13 +- .../drivermanager/manager_test.go | 11 +- client/pluginmanager/group_test.go | 9 +- client/rpc_test.go | 5 +- client/servers/manager_internal_test.go | 7 + client/servers/manager_test.go | 13 + client/state/db_test.go | 15 +- client/state/upgrade_int_test.go | 3 +- client/state/upgrade_test.go | 11 +- client/stats/cpu_test.go | 5 + client/structs/broadcaster_test.go | 11 +- client/taskenv/env_test.go | 43 ++- client/taskenv/network_test.go | 3 + client/taskenv/services_test.go | 11 +- client/taskenv/util_test.go | 
17 +- client/util_test.go | 78 ----- client/vaultclient/vaultclient_test.go | 20 +- command/acl_bootstrap_test.go | 7 +- command/acl_policy_apply_test.go | 3 +- command/acl_policy_delete_test.go | 3 +- command/acl_policy_info_test.go | 3 +- command/acl_policy_list_test.go | 3 +- command/acl_token_create_test.go | 3 +- command/acl_token_delete_test.go | 3 +- command/acl_token_info_test.go | 3 +- command/acl_token_list_test.go | 3 +- command/acl_token_self_test.go | 3 +- command/acl_token_update_test.go | 4 +- command/agent/acl_endpoint_test.go | 23 +- command/agent/agent_endpoint_test.go | 55 +-- command/agent/agent_test.go | 57 ++-- command/agent/alloc_endpoint_test.go | 35 +- command/agent/command_test.go | 12 +- command/agent/config_parse_test.go | 15 +- command/agent/config_test.go | 45 ++- command/agent/consul/check_watcher_test.go | 17 +- command/agent/consul/connect_proxies_test.go | 3 + command/agent/consul/connect_test.go | 23 +- command/agent/consul/group_test.go | 3 + command/agent/consul/int_test.go | 3 + .../agent/consul/namespaces_client_test.go | 7 +- command/agent/consul/self_test.go | 5 +- command/agent/consul/service_client_test.go | 21 +- command/agent/consul/unit_test.go | 61 +++- command/agent/consul/version_checker_test.go | 5 +- command/agent/csi_endpoint_test.go | 13 +- command/agent/deployment_endpoint_test.go | 17 +- command/agent/eval_endpoint_test.go | 12 +- command/agent/event_endpoint_test.go | 8 +- command/agent/fs_endpoint_test.go | 39 +-- command/agent/helpers_test.go | 5 +- command/agent/host/host_test.go | 3 + command/agent/http_stdlog_test.go | 5 + command/agent/http_test.go | 67 ++-- command/agent/job_endpoint_test.go | 108 +++--- command/agent/keyring_test.go | 5 +- command/agent/log_file_test.go | 13 +- command/agent/log_levels_test.go | 4 +- command/agent/metrics_endpoint_test.go | 11 +- command/agent/monitor/monitor_test.go | 5 +- command/agent/namespace_endpoint_test.go | 11 +- command/agent/node_endpoint_test.go | 17 +- 
command/agent/operator_endpoint_test.go | 24 +- command/agent/pprof/pprof_test.go | 9 + command/agent/region_endpoint_test.go | 4 +- command/agent/retry_join_test.go | 13 +- command/agent/scaling_endpoint_test.go | 10 +- command/agent/search_endpoint_test.go | 41 +-- command/agent/stats_endpoint_test.go | 5 +- command/agent/status_endpoint_test.go | 6 +- command/agent/syslog_test.go | 3 +- command/agent/system_endpoint_test.go | 6 +- command/agent_info_test.go | 11 +- command/agent_monitor_test.go | 5 +- command/alloc_exec_test.go | 7 +- command/alloc_fs_test.go | 7 +- command/alloc_logs_test.go | 7 +- command/alloc_restart_test.go | 7 + command/alloc_signal_test.go | 9 +- command/alloc_status_test.go | 19 +- command/alloc_stop_test.go | 5 +- command/check_test.go | 4 +- command/config_validate_test.go | 9 +- command/data_format_test.go | 6 +- command/deployment_fail_test.go | 7 +- command/deployment_list_test.go | 5 +- command/deployment_pause_test.go | 7 +- command/deployment_promote_test.go | 7 +- command/deployment_resume_test.go | 7 +- command/deployment_status_test.go | 7 +- command/deployment_unblock_test.go | 7 +- command/eval_list_test.go | 2 + command/eval_status_test.go | 7 +- command/event_test.go | 3 +- command/helper_devices_test.go | 12 + command/helpers_test.go | 25 +- command/integration_test.go | 6 +- command/job_allocs_test.go | 14 +- command/job_deployments_test.go | 15 +- command/job_dispatch_test.go | 10 +- command/job_eval_test.go | 12 +- command/job_history_test.go | 10 +- command/job_init_test.go | 9 +- command/job_inspect_test.go | 7 +- command/job_periodic_force_test.go | 13 +- command/job_plan_test.go | 11 +- command/job_promote_test.go | 7 +- command/job_revert_test.go | 7 +- command/job_run_test.go | 11 +- command/job_scale_test.go | 5 +- command/job_scaling_events_test.go | 3 +- command/job_status_test.go | 13 +- command/job_stop_test.go | 7 +- command/job_validate_test.go | 11 +- command/license_get_test.go | 5 +- 
command/meta_test.go | 5 +- command/metrics_test.go | 3 +- command/monitor_test.go | 11 +- command/namespace_apply_test.go | 7 +- command/namespace_delete_test.go | 9 +- command/namespace_inspect_test.go | 11 +- command/namespace_list_test.go | 5 +- command/namespace_status_test.go | 13 +- command/node_config_test.go | 7 +- command/node_drain_test.go | 13 +- command/node_eligibility_test.go | 7 +- command/node_status_test.go | 13 +- command/operator_api_test.go | 5 + command/operator_autopilot_get_test.go | 5 +- command/operator_autopilot_set_test.go | 5 +- command/operator_autopilot_test.go | 3 +- command/operator_debug_test.go | 39 ++- command/operator_keygen_test.go | 4 +- command/operator_raft_list_test.go | 5 +- command/operator_raft_remove_test.go | 9 +- command/operator_raft_test.go | 3 +- command/operator_snapshot_inspect_test.go | 9 +- command/operator_snapshot_restore_test.go | 5 +- command/operator_snapshot_save_test.go | 5 +- command/operator_test.go | 3 +- command/plugin_status_test.go | 7 +- command/quota_apply_test.go | 5 +- command/quota_delete_test.go | 9 +- command/quota_init_test.go | 7 +- command/quota_inspect_test.go | 9 +- command/quota_list_test.go | 7 +- command/quota_status_test.go | 9 +- command/recommendation_apply_test.go | 10 +- command/recommendation_dismiss_test.go | 5 +- command/recommendation_info_test.go | 5 +- command/recommendation_list_test.go | 8 +- command/scaling_policy_info_test.go | 3 +- command/scaling_policy_list_test.go | 8 +- command/scaling_policy_test.go | 3 + command/sentinel_apply_test.go | 3 +- command/sentinel_delete_test.go | 3 +- command/sentinel_list_test.go | 3 +- command/sentinel_read_test.go | 3 +- command/server_force_leave_test.go | 3 +- command/server_join_test.go | 3 +- command/server_members_test.go | 9 +- command/status_test.go | 19 +- command/system_gc_test.go | 5 +- command/system_reconcile_summaries_test.go | 5 +- command/system_reconcile_test.go | 3 +- command/system_test.go | 3 +- 
command/ui_test.go | 3 +- command/version_test.go | 3 +- command/volume_register_test.go | 5 +- command/volume_status_test.go | 7 +- contributing/testing.md | 24 ++ drivers/docker/config_test.go | 19 ++ drivers/docker/coordinator_test.go | 10 +- drivers/docker/docklog/docker_logger_test.go | 11 +- drivers/docker/driver_linux_test.go | 12 +- drivers/docker/driver_test.go | 212 ++++-------- drivers/docker/driver_unix_test.go | 44 ++- drivers/docker/fingerprint_test.go | 6 +- drivers/docker/network_test.go | 4 +- drivers/docker/ports_test.go | 6 +- drivers/docker/progress_test.go | 2 + drivers/docker/reconciler_test.go | 7 + drivers/docker/stats_test.go | 7 +- drivers/docker/utils_test.go | 3 + drivers/docker/utils_unix_test.go | 5 + drivers/exec/driver_test.go | 31 +- drivers/exec/driver_unix_test.go | 17 +- drivers/java/driver_test.go | 31 +- drivers/java/utils_test.go | 6 + drivers/mock/utils_test.go | 3 + drivers/qemu/driver_test.go | 32 +- drivers/rawexec/driver_test.go | 19 +- drivers/rawexec/driver_unix_test.go | 24 +- drivers/shared/capabilities/defaults_test.go | 9 + drivers/shared/capabilities/set_test.go | 19 +- drivers/shared/eventer/eventer_test.go | 5 +- .../shared/executor/executor_linux_test.go | 24 +- drivers/shared/executor/executor_test.go | 62 ++-- drivers/shared/executor/pid_collector_test.go | 5 +- helper/boltdd/boltdd_test.go | 11 +- helper/envoy/envoy_test.go | 3 +- helper/flags/autopilot_flags_test.go | 5 +- helper/flags/flag_test.go | 7 +- helper/freeport/freeport_test.go | 2 +- helper/pluginutils/hclspecutils/dec_test.go | 21 +- helper/pluginutils/hclutils/testing.go | 7 +- helper/pluginutils/loader/loader_test.go | 45 +-- .../pluginutils/singleton/singleton_test.go | 11 +- helper/raftutil/msgpack_test.go | 5 + helper/raftutil/state_test.go | 3 +- helper/tlsutil/config_test.go | 70 ++++ helper/tlsutil/generate_test.go | 11 +- internal/testing/apitests/jobs_test.go | 5 +- internal/testing/apitests/nodes_test.go | 5 +- 
.../apitests/operator_autopilot_test.go | 10 +- internal/testing/apitests/operator_test.go | 5 +- .../testing/apitests/streamingsync_test.go | 5 + internal/testing/apitests/structsync_test.go | 9 + internal/testing/apitests/tasks_test.go | 3 + jobspec/parse_test.go | 9 + jobspec/utils_test.go | 3 +- jobspec2/parse_test.go | 38 +++ lib/circbufwriter/writer_test.go | 7 + lib/cpuset/cpuset_test.go | 21 ++ lib/delayheap/delay_heap_test.go | 5 + lib/kheap/score_heap_test.go | 3 + nomad/acl_endpoint_test.go | 55 +-- nomad/acl_test.go | 7 +- nomad/alloc_endpoint_test.go | 36 +- nomad/autopilot_test.go | 14 +- nomad/blocked_evals_stats_test.go | 3 +- nomad/blocked_evals_test.go | 47 +-- nomad/client_agent_endpoint_test.go | 21 +- nomad/client_alloc_endpoint_test.go | 35 +- nomad/client_csi_endpoint_test.go | 39 +-- nomad/client_fs_endpoint_test.go | 41 +-- nomad/client_rpc_test.go | 19 +- nomad/client_stats_endpoint_test.go | 11 +- nomad/consul_oss_test.go | 4 +- nomad/consul_policy_oss_test.go | 3 +- nomad/consul_policy_test.go | 11 +- nomad/consul_test.go | 13 +- nomad/core_sched_test.go | 57 ++-- nomad/csi_endpoint_test.go | 39 ++- nomad/deployment_endpoint_test.go | 47 +-- .../deployments_watcher_test.go | 49 +-- nomad/drainer/drain_heap_test.go | 13 +- nomad/drainer/drainer_util_test.go | 5 +- nomad/drainer/draining_node_test.go | 5 +- nomad/drainer/watch_jobs_test.go | 14 +- nomad/drainer/watch_nodes_test.go | 11 +- nomad/drainer_int_test.go | 17 +- nomad/eval_broker_test.go | 45 +-- nomad/eval_endpoint_test.go | 51 +-- nomad/event_endpoint_test.go | 11 +- nomad/fsm_test.go | 152 ++++----- nomad/heartbeat_test.go | 19 +- nomad/job_endpoint_hook_connect_test.go | 38 ++- nomad/job_endpoint_hook_expose_check_test.go | 23 +- nomad/job_endpoint_oss_test.go | 3 +- nomad/job_endpoint_test.go | 220 ++++++------ nomad/job_endpoint_validators_test.go | 7 +- nomad/leader_test.go | 59 +++- nomad/namespace_endpoint_test.go | 33 +- nomad/node_endpoint_test.go | 112 ++++--- 
nomad/operator_endpoint_test.go | 29 +- nomad/periodic_endpoint_test.go | 7 +- nomad/periodic_test.go | 47 +-- nomad/plan_apply_pool_test.go | 5 +- nomad/plan_apply_test.go | 33 +- nomad/plan_endpoint_test.go | 5 +- nomad/plan_normalization_test.go | 3 + nomad/plan_queue_test.go | 11 +- nomad/regions_endpoint_test.go | 3 +- nomad/rpc_test.go | 35 +- nomad/scaling_endpoint_test.go | 17 +- nomad/search_endpoint_test.go | 79 ++--- nomad/serf_test.go | 15 +- nomad/server_test.go | 31 +- nomad/state/autopilot_test.go | 5 + nomad/state/deployment_events_test.go | 3 +- nomad/state/events_test.go | 33 +- nomad/state/paginator/filter_test.go | 5 +- nomad/state/paginator/paginator_test.go | 6 +- nomad/state/paginator/tokenizer_test.go | 3 + nomad/state/schema_test.go | 7 + nomad/state/state_store_restore_test.go | 37 ++- nomad/state/state_store_test.go | 312 +++++++++--------- nomad/stats_fetcher_test.go | 3 +- nomad/status_endpoint_test.go | 13 +- nomad/stream/event_broker_test.go | 11 + nomad/stream/event_buffer_test.go | 14 +- nomad/stream/ndjson_test.go | 7 +- nomad/stream/subscription_test.go | 22 +- nomad/structs/batch_future_test.go | 4 +- nomad/structs/bitmap_test.go | 4 + nomad/structs/config/audit_test.go | 3 + nomad/structs/config/autopilot_test.go | 4 + nomad/structs/config/consul_test.go | 19 +- nomad/structs/config/limits_test.go | 7 +- nomad/structs/config/plugins_test.go | 5 +- nomad/structs/config/tls_test.go | 13 + nomad/structs/config/ui_test.go | 4 +- nomad/structs/config/vault_test.go | 5 + nomad/structs/connect_test.go | 3 + nomad/structs/consul_oss_test.go | 3 +- nomad/structs/consul_test.go | 7 + nomad/structs/csi_test.go | 31 ++ nomad/structs/devices_test.go | 15 + nomad/structs/diff_test.go | 9 + nomad/structs/errors_test.go | 3 + nomad/structs/funcs_test.go | 33 ++ nomad/structs/network_test.go | 25 ++ nomad/structs/node_class_test.go | 13 + nomad/structs/node_test.go | 3 + nomad/structs/services_test.go | 73 ++-- 
nomad/structs/structs_periodic_test.go | 5 + nomad/structs/structs_test.go | 209 +++++++++++- nomad/system_endpoint_test.go | 9 +- nomad/timetable_test.go | 7 +- nomad/util_test.go | 11 +- nomad/vault_test.go | 71 ++-- nomad/volumewatcher/volume_watcher_test.go | 4 +- nomad/volumewatcher/volumes_watcher_test.go | 9 +- nomad/worker_test.go | 37 ++- plugins/base/plugin_test.go | 7 +- plugins/csi/client_test.go | 30 ++ plugins/device/plugin_test.go | 21 +- plugins/drivers/testutils/testing_test.go | 15 +- scheduler/annotate_test.go | 11 + scheduler/context_test.go | 19 +- scheduler/device_test.go | 11 + scheduler/feasible_test.go | 71 +++- scheduler/generic_sched_test.go | 137 +++++++- scheduler/preemption_test.go | 10 +- scheduler/reconcile_test.go | 152 ++++++++- scheduler/reconcile_util_test.go | 5 + scheduler/scheduler_sysbatch_test.go | 45 +++ scheduler/scheduler_system_test.go | 55 +++ scheduler/select_test.go | 7 + scheduler/spread_test.go | 19 +- scheduler/stack_test.go | 33 ++ scheduler/util_test.go | 59 +++- testutil/slow.go | 15 - 419 files changed, 4816 insertions(+), 2697 deletions(-) create mode 100644 ci/slow.go delete mode 100644 client/util_test.go create mode 100644 contributing/testing.md delete mode 100644 testutil/slow.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 047772df1..c97299c45 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,6 @@ references: # common references common_envs: &common_envs - GOMAXPROCS: 1 NOMAD_SLOW_TEST: 1 GOTESTSUM_JUNITFILE: /tmp/test-reports/results.xml GOTESTSUM_JSONFILE: /tmp/test-reports/testjsonfile.json @@ -520,7 +519,7 @@ executors: working_directory: ~/go/src/github.com/hashicorp/nomad machine: image: *go_machine_image - resource_class: medium + resource_class: large environment: &machine_env <<: *common_envs GOLANG_VERSION: 1.17.5 diff --git a/acl/acl_test.go b/acl/acl_test.go index b819bc8af..2ac22f702 100644 --- a/acl/acl_test.go +++ b/acl/acl_test.go @@ -3,10 +3,13 @@ 
package acl import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" ) func TestCapabilitySet(t *testing.T) { + ci.Parallel(t) + var cs capabilitySet = make(map[string]struct{}) // Check no capabilities by default @@ -28,6 +31,8 @@ func TestCapabilitySet(t *testing.T) { } func TestMaxPrivilege(t *testing.T) { + ci.Parallel(t) + type tcase struct { Privilege string PrecedenceOver []string @@ -60,6 +65,8 @@ func TestMaxPrivilege(t *testing.T) { } func TestACLManagement(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) // Create management ACL @@ -88,6 +95,8 @@ func TestACLManagement(t *testing.T) { } func TestACLMerge(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) // Merge read + write policy @@ -222,6 +231,8 @@ quota { ` func TestAllowNamespace(t *testing.T) { + ci.Parallel(t) + tests := []struct { Policy string Allow bool @@ -264,6 +275,8 @@ func TestAllowNamespace(t *testing.T) { } func TestWildcardNamespaceMatching(t *testing.T) { + ci.Parallel(t) + tests := []struct { Policy string Allow bool @@ -315,6 +328,8 @@ func TestWildcardNamespaceMatching(t *testing.T) { } func TestWildcardHostVolumeMatching(t *testing.T) { + ci.Parallel(t) + tests := []struct { Policy string Allow bool @@ -365,6 +380,8 @@ func TestWildcardHostVolumeMatching(t *testing.T) { } } func TestACL_matchingCapabilitySet_returnsAllMatches(t *testing.T) { + ci.Parallel(t) + tests := []struct { Policy string NS string @@ -411,6 +428,8 @@ func TestACL_matchingCapabilitySet_returnsAllMatches(t *testing.T) { } func TestACL_matchingCapabilitySet_difference(t *testing.T) { + ci.Parallel(t) + tests := []struct { Policy string NS string diff --git a/acl/policy_test.go b/acl/policy_test.go index 9060147d0..e3a8afad6 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -5,10 +5,13 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" ) func TestParse(t *testing.T) { + ci.Parallel(t) + type tcase 
struct { Raw string ErrStr string @@ -333,6 +336,8 @@ func TestParse(t *testing.T) { } func TestParse_BadInput(t *testing.T) { + ci.Parallel(t) + inputs := []string{ `namespace "\500" {}`, } diff --git a/ci/slow.go b/ci/slow.go new file mode 100644 index 000000000..5becb90b7 --- /dev/null +++ b/ci/slow.go @@ -0,0 +1,25 @@ +package ci + +import ( + "os" + "strconv" + "testing" +) + +// SkipSlow skips a slow test unless NOMAD_SLOW_TEST is set to a true value. +func SkipSlow(t *testing.T, reason string) { + value := os.Getenv("NOMAD_SLOW_TEST") + run, err := strconv.ParseBool(value) + if !run || err != nil { + t.Skipf("Skipping slow test: %s", reason) + } +} + +// Parallel runs t in parallel, unless CI is set to a true value. +func Parallel(t *testing.T) { + value := os.Getenv("CI") + isCI, err := strconv.ParseBool(value) + if !isCI || err != nil { + t.Parallel() + } +} diff --git a/client/acl_test.go b/client/acl_test.go index f076f0aa9..b1cc0a315 100644 --- a/client/acl_test.go +++ b/client/acl_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -13,6 +14,8 @@ import ( ) func TestClient_ACL_resolveTokenValue(t *testing.T) { + ci.Parallel(t) + s1, _, _, cleanupS1 := testACLServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) @@ -62,6 +65,8 @@ func TestClient_ACL_resolveTokenValue(t *testing.T) { } func TestClient_ACL_resolvePolicies(t *testing.T) { + ci.Parallel(t) + s1, _, root, cleanupS1 := testACLServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) @@ -102,6 +107,8 @@ func TestClient_ACL_resolvePolicies(t *testing.T) { } func TestClient_ACL_ResolveToken_Disabled(t *testing.T) { + ci.Parallel(t) + s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) @@ -118,6 +125,8 @@ func TestClient_ACL_ResolveToken_Disabled(t 
*testing.T) { } func TestClient_ACL_ResolveToken(t *testing.T) { + ci.Parallel(t) + s1, _, _, cleanupS1 := testACLServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) @@ -167,7 +176,7 @@ func TestClient_ACL_ResolveToken(t *testing.T) { } func TestClient_ACL_ResolveSecretToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, _, cleanupS1 := testACLServer(t, nil) defer cleanupS1() diff --git a/client/agent_endpoint_test.go b/client/agent_endpoint_test.go index 0c10b4ebb..501f80dd7 100644 --- a/client/agent_endpoint_test.go +++ b/client/agent_endpoint_test.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" sframer "github.com/hashicorp/nomad/client/lib/streamframer" cstructs "github.com/hashicorp/nomad/client/structs" @@ -24,7 +25,8 @@ import ( ) func TestMonitor_Monitor(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) // start server and client @@ -105,7 +107,8 @@ OUTER: } func TestMonitor_Monitor_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) // start server @@ -217,7 +220,8 @@ func TestMonitor_Monitor_ACL(t *testing.T) { // Test that by default with no acl, endpoint is disabled func TestAgentProfile_DefaultDisabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) // start server and client @@ -243,7 +247,8 @@ func TestAgentProfile_DefaultDisabled(t *testing.T) { } func TestAgentProfile(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) // start server and client @@ -290,7 +295,8 @@ func TestAgentProfile(t *testing.T) { } func TestAgentProfile_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) // start server @@ -355,7 +361,7 @@ func TestAgentProfile_ACL(t *testing.T) { } func TestAgentHost(t *testing.T) { - t.Parallel() + ci.Parallel(t) // start server and client s1, cleanup := 
nomad.TestServer(t, nil) @@ -380,7 +386,7 @@ func TestAgentHost(t *testing.T) { } func TestAgentHost_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, root, cleanupS := nomad.TestACLServer(t, nil) defer cleanupS() diff --git a/client/alloc_endpoint_test.go b/client/alloc_endpoint_test.go index 97ceff391..2e51a3702 100644 --- a/client/alloc_endpoint_test.go +++ b/client/alloc_endpoint_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/pluginutils/catalog" @@ -27,7 +28,8 @@ import ( ) func TestAllocations_Restart(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) client, cleanup := TestClient(t, nil) defer cleanup() @@ -66,7 +68,7 @@ func TestAllocations_Restart(t *testing.T) { } func TestAllocations_Restart_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, addr, root, cleanupS := testACLServer(t, nil) @@ -142,8 +144,9 @@ func TestAllocations_Restart_ACL(t *testing.T) { } func TestAllocations_GarbageCollectAll(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + client, cleanup := TestClient(t, nil) defer cleanup() @@ -153,7 +156,7 @@ func TestAllocations_GarbageCollectAll(t *testing.T) { } func TestAllocations_GarbageCollectAll_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, addr, root, cleanupS := testACLServer(t, nil) @@ -206,8 +209,9 @@ func TestAllocations_GarbageCollectAll_ACL(t *testing.T) { } func TestAllocations_GarbageCollect(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + client, cleanup := TestClient(t, func(c *config.Config) { c.GCDiskUsageThreshold = 100.0 }) @@ -249,7 +253,7 @@ func TestAllocations_GarbageCollect(t *testing.T) { } func TestAllocations_GarbageCollect_ACL(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, addr, root, cleanupS := testACLServer(t, nil) @@ -322,7 +326,7 @@ func TestAllocations_GarbageCollect_ACL(t *testing.T) { } func TestAllocations_Signal(t *testing.T) { - t.Parallel() + ci.Parallel(t) client, cleanup := TestClient(t, nil) defer cleanup() @@ -348,7 +352,7 @@ func TestAllocations_Signal(t *testing.T) { } func TestAllocations_Signal_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, addr, root, cleanupS := testACLServer(t, nil) @@ -420,8 +424,9 @@ func TestAllocations_Signal_ACL(t *testing.T) { } func TestAllocations_Stats(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + client, cleanup := TestClient(t, nil) defer cleanup() @@ -453,7 +458,7 @@ func TestAllocations_Stats(t *testing.T) { } func TestAllocations_Stats_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, addr, root, cleanupS := testACLServer(t, nil) @@ -525,7 +530,7 @@ func TestAllocations_Stats_ACL(t *testing.T) { } func TestAlloc_ExecStreaming(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -629,7 +634,7 @@ OUTER: } func TestAlloc_ExecStreaming_NoAllocation(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -684,7 +689,7 @@ func TestAlloc_ExecStreaming_NoAllocation(t *testing.T) { } func TestAlloc_ExecStreaming_DisableRemoteExec(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -740,7 +745,7 @@ func TestAlloc_ExecStreaming_DisableRemoteExec(t *testing.T) { } func TestAlloc_ExecStreaming_ACL_Basic(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server and client s, root, cleanupS := nomad.TestACLServer(t, nil) @@ -843,7 +848,7 @@ func TestAlloc_ExecStreaming_ACL_Basic(t *testing.T) { // TestAlloc_ExecStreaming_ACL_WithIsolation_Image asserts 
that token only needs // alloc-exec acl policy when image isolation is used func TestAlloc_ExecStreaming_ACL_WithIsolation_Image(t *testing.T) { - t.Parallel() + ci.Parallel(t) isolation := drivers.FSIsolationImage // Start a server and client @@ -987,7 +992,7 @@ func TestAlloc_ExecStreaming_ACL_WithIsolation_Image(t *testing.T) { // TestAlloc_ExecStreaming_ACL_WithIsolation_Chroot asserts that token only needs // alloc-exec acl policy when chroot isolation is used func TestAlloc_ExecStreaming_ACL_WithIsolation_Chroot(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS != "linux" || unix.Geteuid() != 0 { t.Skip("chroot isolation requires linux root") @@ -1136,7 +1141,7 @@ func TestAlloc_ExecStreaming_ACL_WithIsolation_Chroot(t *testing.T) { // TestAlloc_ExecStreaming_ACL_WithIsolation_None asserts that token needs // alloc-node-exec acl policy as well when no isolation is used func TestAlloc_ExecStreaming_ACL_WithIsolation_None(t *testing.T) { - t.Parallel() + ci.Parallel(t) isolation := drivers.FSIsolationNone // Start a server and client diff --git a/client/alloc_watcher_e2e_test.go b/client/alloc_watcher_e2e_test.go index 9cdc6fab0..c36afd7a8 100644 --- a/client/alloc_watcher_e2e_test.go +++ b/client/alloc_watcher_e2e_test.go @@ -7,6 +7,7 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad" "github.com/hashicorp/nomad/nomad/mock" @@ -26,7 +27,7 @@ func TestPrevAlloc_StreamAllocDir_TLS(t *testing.T) { clientCertFn = "../helper/tlsutil/testdata/global-client.pem" clientKeyFn = "../helper/tlsutil/testdata/global-client-key.pem" ) - t.Parallel() + ci.Parallel(t) require := require.New(t) server, cleanupS := nomad.TestServer(t, func(c *nomad.Config) { diff --git a/client/allocdir/alloc_dir_test.go b/client/allocdir/alloc_dir_test.go index 4a876c57b..f0764f26e 100644 --- a/client/allocdir/alloc_dir_test.go +++ b/client/allocdir/alloc_dir_test.go @@ -15,6 +15,7 
@@ import ( "syscall" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" @@ -48,6 +49,8 @@ var ( // Test that AllocDir.Build builds just the alloc directory. func TestAllocDir_BuildAlloc(t *testing.T) { + ci.Parallel(t) + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -97,7 +100,9 @@ func MountCompatible(t *testing.T) { } func TestAllocDir_MountSharedAlloc(t *testing.T) { + ci.Parallel(t) MountCompatible(t) + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -143,6 +148,8 @@ func TestAllocDir_MountSharedAlloc(t *testing.T) { } func TestAllocDir_Snapshot(t *testing.T) { + ci.Parallel(t) + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -223,6 +230,8 @@ func TestAllocDir_Snapshot(t *testing.T) { } func TestAllocDir_Move(t *testing.T) { + ci.Parallel(t) + tmp1, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -291,6 +300,8 @@ func TestAllocDir_Move(t *testing.T) { } func TestAllocDir_EscapeChecking(t *testing.T) { + ci.Parallel(t) + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -332,6 +343,7 @@ func TestAllocDir_EscapeChecking(t *testing.T) { // Test that `nomad fs` can't read secrets func TestAllocDir_ReadAt_SecretDir(t *testing.T) { + ci.Parallel(t) tmp := t.TempDir() d := NewAllocDir(testlog.HCLogger(t), tmp, "test") @@ -359,6 +371,8 @@ func TestAllocDir_ReadAt_SecretDir(t *testing.T) { } func TestAllocDir_SplitPath(t *testing.T) { + ci.Parallel(t) + dir, err := ioutil.TempDir("", "tmpdirtest") if err != nil { log.Fatal(err) @@ -382,6 +396,7 @@ func TestAllocDir_SplitPath(t *testing.T) { } func TestAllocDir_CreateDir(t *testing.T) { + ci.Parallel(t) if 
syscall.Geteuid() != 0 { t.Skip("Must be root to run test") } @@ -423,6 +438,8 @@ func TestAllocDir_CreateDir(t *testing.T) { } func TestPathFuncs(t *testing.T) { + ci.Parallel(t) + dir, err := ioutil.TempDir("", "nomadtest-pathfuncs") if err != nil { t.Fatalf("error creating temp dir: %v", err) @@ -458,7 +475,9 @@ func TestPathFuncs(t *testing.T) { } func TestAllocDir_DetectContentType(t *testing.T) { + ci.Parallel(t) require := require.New(t) + inputPath := "input/" var testFiles []string err := filepath.Walk(inputPath, func(path string, info os.FileInfo, err error) error { @@ -494,6 +513,7 @@ func TestAllocDir_DetectContentType(t *testing.T) { // Warning: If this test fails it may fill your disk before failing, so be // careful and/or confident. func TestAllocDir_SkipAllocDir(t *testing.T) { + ci.Parallel(t) MountCompatible(t) // Create root, alloc, and other dirs diff --git a/client/allocdir/fs_linux_test.go b/client/allocdir/fs_linux_test.go index c79dcb948..e8087086e 100644 --- a/client/allocdir/fs_linux_test.go +++ b/client/allocdir/fs_linux_test.go @@ -10,6 +10,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "golang.org/x/sys/unix" ) @@ -49,9 +50,11 @@ func isMount(path string) error { // TestLinuxRootSecretDir asserts secret dir creation and removal are // idempotent. func TestLinuxRootSecretDir(t *testing.T) { + ci.Parallel(t) if unix.Geteuid() != 0 { t.Skip("Must be run as root") } + tmpdir, err := ioutil.TempDir("", "nomadtest-rootsecretdir") if err != nil { t.Fatalf("unable to create tempdir for test: %v", err) @@ -109,9 +112,11 @@ func TestLinuxRootSecretDir(t *testing.T) { // TestLinuxUnprivilegedSecretDir asserts secret dir creation and removal are // idempotent. 
func TestLinuxUnprivilegedSecretDir(t *testing.T) { + ci.Parallel(t) if unix.Geteuid() == 0 { t.Skip("Must not be run as root") } + tmpdir, err := ioutil.TempDir("", "nomadtest-secretdir") if err != nil { t.Fatalf("unable to create tempdir for test: %s", err) diff --git a/client/allocdir/task_dir_test.go b/client/allocdir/task_dir_test.go index 61aa3b302..5ae12404b 100644 --- a/client/allocdir/task_dir_test.go +++ b/client/allocdir/task_dir_test.go @@ -6,11 +6,14 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" ) // Test that building a chroot will skip nonexistent directories. func TestTaskDir_EmbedNonexistent(t *testing.T) { + ci.Parallel(t) + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -33,6 +36,8 @@ func TestTaskDir_EmbedNonexistent(t *testing.T) { // Test that building a chroot copies files from the host into the task dir. func TestTaskDir_EmbedDirs(t *testing.T) { + ci.Parallel(t) + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -87,6 +92,7 @@ func TestTaskDir_EmbedDirs(t *testing.T) { // Test that task dirs for image based isolation don't require root. func TestTaskDir_NonRoot_Image(t *testing.T) { + ci.Parallel(t) if os.Geteuid() == 0 { t.Skip("test should be run as non-root user") } @@ -110,9 +116,11 @@ func TestTaskDir_NonRoot_Image(t *testing.T) { // Test that task dirs with no isolation don't require root. 
func TestTaskDir_NonRoot(t *testing.T) { + ci.Parallel(t) if os.Geteuid() == 0 { t.Skip("test should be run as non-root user") } + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -134,5 +142,4 @@ func TestTaskDir_NonRoot(t *testing.T) { if _, err = os.Stat(td.SharedTaskDir); !os.IsNotExist(err) { t.Fatalf("Expected a NotExist error for shared alloc dir in task dir: %q", td.SharedTaskDir) } - } diff --git a/client/allochealth/tracker_test.go b/client/allochealth/tracker_test.go index f4aec166d..6e9e6dd8c 100644 --- a/client/allochealth/tracker_test.go +++ b/client/allochealth/tracker_test.go @@ -8,6 +8,7 @@ import ( "time" consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/consul" cstructs "github.com/hashicorp/nomad/client/structs" agentconsul "github.com/hashicorp/nomad/command/agent/consul" @@ -19,7 +20,7 @@ import ( ) func TestTracker_Checks_Healthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = 1 // let's speed things up @@ -90,7 +91,7 @@ func TestTracker_Checks_Healthy(t *testing.T) { } func TestTracker_Checks_PendingPostStop_Healthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.LifecycleAllocWithPoststopDeploy() alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = 1 // let's speed things up @@ -130,7 +131,7 @@ func TestTracker_Checks_PendingPostStop_Healthy(t *testing.T) { } func TestTracker_Succeeded_PostStart_Healthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.LifecycleAllocWithPoststartDeploy() alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = time.Millisecond * 1 @@ -171,7 +172,7 @@ func TestTracker_Succeeded_PostStart_Healthy(t *testing.T) { } func TestTracker_Checks_Unhealthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = 1 // let's speed things up @@ -261,7 
+262,7 @@ func TestTracker_Checks_Unhealthy(t *testing.T) { } func TestTracker_Healthy_IfBothTasksAndConsulChecksAreHealthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() logger := testlog.HCLogger(t) @@ -312,7 +313,7 @@ func TestTracker_Healthy_IfBothTasksAndConsulChecksAreHealthy(t *testing.T) { // TestTracker_Checks_Healthy_Before_TaskHealth asserts that we mark an alloc // healthy, if the checks pass before task health pass func TestTracker_Checks_Healthy_Before_TaskHealth(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = 1 // let's speed things up @@ -419,7 +420,7 @@ func TestTracker_Checks_Healthy_Before_TaskHealth(t *testing.T) { } func TestTracker_Checks_OnUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { desc string diff --git a/client/allocrunner/alloc_runner_test.go b/client/allocrunner/alloc_runner_test.go index 749f72078..0b61a7c43 100644 --- a/client/allocrunner/alloc_runner_test.go +++ b/client/allocrunner/alloc_runner_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allochealth" "github.com/hashicorp/nomad/client/allocwatcher" cconsul "github.com/hashicorp/nomad/client/consul" @@ -30,7 +31,7 @@ func destroy(ar *allocRunner) { // TestAllocRunner_AllocState_Initialized asserts that getting TaskStates via // AllocState() are initialized even before the AllocRunner has run. func TestAllocRunner_AllocState_Initialized(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver" @@ -49,7 +50,7 @@ func TestAllocRunner_AllocState_Initialized(t *testing.T) { // TestAllocRunner_TaskLeader_KillTG asserts that when a leader task dies the // entire task group is killed. 
func TestAllocRunner_TaskLeader_KillTG(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] @@ -239,7 +240,7 @@ func TestAllocRunner_Lifecycle_Poststart(t *testing.T) { // TestAllocRunner_TaskMain_KillTG asserts that when main tasks die the // entire task group is killed. func TestAllocRunner_TaskMain_KillTG(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] @@ -398,6 +399,8 @@ func TestAllocRunner_TaskMain_KillTG(t *testing.T) { // postop lifecycle hook starts all 3 tasks, only // the ephemeral one finishes, and the other 2 exit when the alloc is stopped. func TestAllocRunner_Lifecycle_Poststop(t *testing.T) { + ci.Parallel(t) + alloc := mock.LifecycleAlloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] @@ -478,7 +481,7 @@ func TestAllocRunner_Lifecycle_Poststop(t *testing.T) { } func TestAllocRunner_TaskGroup_ShutdownDelay(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] @@ -608,7 +611,7 @@ func TestAllocRunner_TaskGroup_ShutdownDelay(t *testing.T) { // TestAllocRunner_TaskLeader_StopTG asserts that when stopping an alloc with a // leader the leader is stopped before other tasks. func TestAllocRunner_TaskLeader_StopTG(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] @@ -707,7 +710,7 @@ func TestAllocRunner_TaskLeader_StopTG(t *testing.T) { // not stopped as it does not exist. 
// See https://github.com/hashicorp/nomad/issues/3420#issuecomment-341666932 func TestAllocRunner_TaskLeader_StopRestoredTG(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] @@ -785,7 +788,7 @@ func TestAllocRunner_TaskLeader_StopRestoredTG(t *testing.T) { } func TestAllocRunner_Restore_LifecycleHooks(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.LifecycleAlloc() @@ -823,7 +826,7 @@ func TestAllocRunner_Restore_LifecycleHooks(t *testing.T) { } func TestAllocRunner_Update_Semantics(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) updatedAlloc := func(a *structs.Allocation) *structs.Allocation { @@ -876,7 +879,7 @@ func TestAllocRunner_Update_Semantics(t *testing.T) { // TestAllocRunner_DeploymentHealth_Healthy_Migration asserts that health is // reported for services that got migrated; not just part of deployments. func TestAllocRunner_DeploymentHealth_Healthy_Migration(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() @@ -924,7 +927,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_Migration(t *testing.T) { // TestAllocRunner_DeploymentHealth_Healthy_NoChecks asserts that the health // watcher will mark the allocation as healthy based on task states alone. func TestAllocRunner_DeploymentHealth_Healthy_NoChecks(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() @@ -987,7 +990,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_NoChecks(t *testing.T) { // TestAllocRunner_DeploymentHealth_Unhealthy_Checks asserts that the health // watcher will mark the allocation as unhealthy with failing checks. 
func TestAllocRunner_DeploymentHealth_Unhealthy_Checks(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -1082,7 +1085,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_Checks(t *testing.T) { // TestAllocRunner_Destroy asserts that Destroy kills and cleans up a running // alloc. func TestAllocRunner_Destroy(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Ensure task takes some time alloc := mock.BatchAlloc() @@ -1144,7 +1147,7 @@ func TestAllocRunner_Destroy(t *testing.T) { } func TestAllocRunner_SimpleRun(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() @@ -1179,7 +1182,7 @@ func TestAllocRunner_SimpleRun(t *testing.T) { // TestAllocRunner_MoveAllocDir asserts that a rescheduled // allocation copies ephemeral disk content from previous alloc run func TestAllocRunner_MoveAllocDir(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Step 1: start and run a task alloc := mock.BatchAlloc() @@ -1236,7 +1239,7 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) { // retrying fetching an artifact, other tasks in the group should be able // to proceed. 
func TestAllocRunner_HandlesArtifactFailure(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() rp := &structs.RestartPolicy{ @@ -1296,6 +1299,8 @@ func TestAllocRunner_HandlesArtifactFailure(t *testing.T) { // Test that alloc runner kills tasks in task group when another task fails func TestAllocRunner_TaskFailed_KillTG(t *testing.T) { + ci.Parallel(t) + alloc := mock.Alloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] alloc.Job.TaskGroups[0].RestartPolicy.Attempts = 0 @@ -1425,7 +1430,7 @@ func TestAllocRunner_TaskFailed_KillTG(t *testing.T) { // Test that alloc becoming terminal should destroy the alloc runner func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] alloc.Job.TaskGroups[0].RestartPolicy.Attempts = 0 @@ -1513,7 +1518,7 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) { // TestAllocRunner_PersistState_Destroyed asserts that destroyed allocs don't persist anymore func TestAllocRunner_PersistState_Destroyed(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() taskName := alloc.Job.LookupTaskGroup(alloc.TaskGroup).Tasks[0].Name diff --git a/client/allocrunner/alloc_runner_unix_test.go b/client/allocrunner/alloc_runner_unix_test.go index 41b5dabc8..c8bda921a 100644 --- a/client/allocrunner/alloc_runner_unix_test.go +++ b/client/allocrunner/alloc_runner_unix_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/consul" "github.com/hashicorp/nomad/client/state" "github.com/hashicorp/nomad/nomad/mock" @@ -25,7 +26,7 @@ import ( // DesiredStatus=Stop, persisting the update, but crashing before terminating // the task. func TestAllocRunner_Restore_RunningTerminal(t *testing.T) { - t.Parallel() + ci.Parallel(t) // 1. Run task // 2. 
Shutdown alloc runner @@ -143,7 +144,7 @@ func TestAllocRunner_Restore_RunningTerminal(t *testing.T) { // TestAllocRunner_Restore_CompletedBatch asserts that restoring a completed // batch alloc doesn't run it again func TestAllocRunner_Restore_CompletedBatch(t *testing.T) { - t.Parallel() + ci.Parallel(t) // 1. Run task and wait for it to complete // 2. Start new alloc runner @@ -228,7 +229,7 @@ func TestAllocRunner_Restore_CompletedBatch(t *testing.T) { // prestart hooks failed, then the alloc and subsequent tasks transition // to failed state func TestAllocRunner_PreStartFailuresLeadToFailed(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.Type = structs.JobTypeBatch diff --git a/client/allocrunner/consul_grpc_sock_hook_test.go b/client/allocrunner/consul_grpc_sock_hook_test.go index 2730ac627..d7e961db5 100644 --- a/client/allocrunner/consul_grpc_sock_hook_test.go +++ b/client/allocrunner/consul_grpc_sock_hook_test.go @@ -11,6 +11,7 @@ import ( "sync" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/helper/testlog" @@ -24,7 +25,7 @@ import ( // Consul unix socket hook's Prerun method is called and stopped with the // Postrun method is called. func TestConsulGRPCSocketHook_PrerunPostrun_Ok(t *testing.T) { - t.Parallel() + ci.Parallel(t) // As of Consul 1.6.0 the test server does not support the gRPC // endpoint so we have to fake it. @@ -101,7 +102,7 @@ func TestConsulGRPCSocketHook_PrerunPostrun_Ok(t *testing.T) { // TestConsulGRPCSocketHook_Prerun_Error asserts that invalid Consul addresses cause // Prerun to return an error if the alloc requires a grpc proxy. 
func TestConsulGRPCSocketHook_Prerun_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) @@ -153,7 +154,7 @@ func TestConsulGRPCSocketHook_Prerun_Error(t *testing.T) { // TestConsulGRPCSocketHook_proxy_Unix asserts that the destination can be a unix // socket path. func TestConsulGRPCSocketHook_proxy_Unix(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir, err := ioutil.TempDir("", "nomadtest_proxy_Unix") require.NoError(t, err) diff --git a/client/allocrunner/consul_http_sock_hook_test.go b/client/allocrunner/consul_http_sock_hook_test.go index 3d5a97ec5..9a03a9579 100644 --- a/client/allocrunner/consul_http_sock_hook_test.go +++ b/client/allocrunner/consul_http_sock_hook_test.go @@ -6,6 +6,7 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" @@ -14,7 +15,7 @@ import ( ) func TestConsulSocketHook_PrerunPostrun_Ok(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeConsul, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) @@ -89,7 +90,7 @@ func TestConsulSocketHook_PrerunPostrun_Ok(t *testing.T) { } func TestConsulHTTPSocketHook_Prerun_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) diff --git a/client/allocrunner/csi_hook_test.go b/client/allocrunner/csi_hook_test.go index 6ed9270d5..ea5c35d3a 100644 --- a/client/allocrunner/csi_hook_test.go +++ b/client/allocrunner/csi_hook_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/pkg/errors" "github.com/stretchr/testify/require" @@ -29,6 +30,7 @@ var _ interfaces.RunnerPostrunHook = (*csiHook)(nil) // var _ interfaces.RunnerUpdateHook = (*csiHook)(nil) func TestCSIHook(t *testing.T) { + ci.Parallel(t) alloc := mock.Alloc() logger := testlog.HCLogger(t) diff --git a/client/allocrunner/groupservice_hook_test.go 
b/client/allocrunner/groupservice_hook_test.go index 61d9a38b4..dbf3c5483 100644 --- a/client/allocrunner/groupservice_hook_test.go +++ b/client/allocrunner/groupservice_hook_test.go @@ -7,6 +7,7 @@ import ( consulapi "github.com/hashicorp/consul/api" ctestutil "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/consul" "github.com/hashicorp/nomad/client/taskenv" @@ -27,7 +28,7 @@ var _ interfaces.RunnerTaskRestartHook = (*groupServiceHook)(nil) // TestGroupServiceHook_NoGroupServices asserts calling group service hooks // without group services does not error. func TestGroupServiceHook_NoGroupServices(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.TaskGroups[0].Services = []*structs.Service{{ @@ -65,7 +66,7 @@ func TestGroupServiceHook_NoGroupServices(t *testing.T) { // TestGroupServiceHook_ShutdownDelayUpdate asserts calling group service hooks // update updates the hooks delay value. func TestGroupServiceHook_ShutdownDelayUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.TaskGroups[0].ShutdownDelay = helper.TimeToPtr(10 * time.Second) @@ -102,7 +103,7 @@ func TestGroupServiceHook_ShutdownDelayUpdate(t *testing.T) { // TestGroupServiceHook_GroupServices asserts group service hooks with group // services does not error. func TestGroupServiceHook_GroupServices(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.ConnectAlloc() logger := testlog.HCLogger(t) @@ -136,7 +137,7 @@ func TestGroupServiceHook_GroupServices(t *testing.T) { // TestGroupServiceHook_Error asserts group service hooks with group // services but no group network is handled gracefully. 
func TestGroupServiceHook_NoNetwork(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.TaskGroups[0].Networks = []*structs.NetworkResource{} @@ -180,7 +181,7 @@ func TestGroupServiceHook_NoNetwork(t *testing.T) { } func TestGroupServiceHook_getWorkloadServices(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.TaskGroups[0].Networks = []*structs.NetworkResource{} diff --git a/client/allocrunner/health_hook_test.go b/client/allocrunner/health_hook_test.go index 62c148925..56de9926b 100644 --- a/client/allocrunner/health_hook_test.go +++ b/client/allocrunner/health_hook_test.go @@ -6,6 +6,7 @@ import ( "time" consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/consul" cstructs "github.com/hashicorp/nomad/client/structs" @@ -84,7 +85,7 @@ func (m *mockHealthSetter) HasHealth() bool { // TestHealthHook_PrerunPostrun asserts a health hook does not error if it is // run and postrunned. func TestHealthHook_PrerunPostrun(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) logger := testlog.HCLogger(t) @@ -121,7 +122,7 @@ func TestHealthHook_PrerunPostrun(t *testing.T) { // TestHealthHook_PrerunUpdatePostrun asserts Updates may be applied concurrently. func TestHealthHook_PrerunUpdatePostrun(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.Alloc() @@ -160,7 +161,7 @@ func TestHealthHook_PrerunUpdatePostrun(t *testing.T) { // TestHealthHook_UpdatePrerunPostrun asserts that a hook may have Update // called before Prerun. func TestHealthHook_UpdatePrerunPostrun(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.Alloc() @@ -203,7 +204,7 @@ func TestHealthHook_UpdatePrerunPostrun(t *testing.T) { // TestHealthHook_Postrun asserts that a hook may have only Postrun called. 
func TestHealthHook_Postrun(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) logger := testlog.HCLogger(t) @@ -222,7 +223,7 @@ func TestHealthHook_Postrun(t *testing.T) { // TestHealthHook_SetHealth_healthy asserts SetHealth is called when health status is // set. Uses task state and health checks. func TestHealthHook_SetHealth_healthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.Alloc() @@ -302,7 +303,7 @@ func TestHealthHook_SetHealth_healthy(t *testing.T) { // TestHealthHook_SetHealth_unhealthy asserts SetHealth notices unhealthy allocs func TestHealthHook_SetHealth_unhealthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.Alloc() @@ -386,7 +387,7 @@ func TestHealthHook_SetHealth_unhealthy(t *testing.T) { // TestHealthHook_SystemNoop asserts that system jobs return the noop tracker. func TestHealthHook_SystemNoop(t *testing.T) { - t.Parallel() + ci.Parallel(t) h := newAllocHealthWatcherHook(testlog.HCLogger(t), mock.SystemAlloc(), nil, nil, nil) @@ -407,7 +408,7 @@ func TestHealthHook_SystemNoop(t *testing.T) { // TestHealthHook_BatchNoop asserts that batch jobs return the noop tracker. 
func TestHealthHook_BatchNoop(t *testing.T) { - t.Parallel() + ci.Parallel(t) h := newAllocHealthWatcherHook(testlog.HCLogger(t), mock.BatchAlloc(), nil, nil, nil) diff --git a/client/allocrunner/network_hook_test.go b/client/allocrunner/network_hook_test.go index c5dee542c..5041c2864 100644 --- a/client/allocrunner/network_hook_test.go +++ b/client/allocrunner/network_hook_test.go @@ -3,6 +3,7 @@ package allocrunner import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/helper/testlog" @@ -42,6 +43,8 @@ func (m *mockNetworkStatusSetter) SetNetworkStatus(status *structs.AllocNetworkS // Test that the prerun and postrun hooks call the setter with the expected spec when // the network mode is not host func TestNetworkHook_Prerun_Postrun(t *testing.T) { + ci.Parallel(t) + alloc := mock.Alloc() alloc.Job.TaskGroups[0].Networks = []*structs.NetworkResource{ { diff --git a/client/allocrunner/network_manager_linux_test.go b/client/allocrunner/network_manager_linux_test.go index ac2f97c8f..7d598d74c 100644 --- a/client/allocrunner/network_manager_linux_test.go +++ b/client/allocrunner/network_manager_linux_test.go @@ -3,6 +3,7 @@ package allocrunner import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/pluginmanager" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" @@ -63,6 +64,8 @@ func (m *mockDriverManager) Dispense(driver string) (drivers.DriverPlugin, error } func TestNewNetworkManager(t *testing.T) { + ci.Parallel(t) + for _, tc := range []struct { name string alloc *structs.Allocation diff --git a/client/allocrunner/networking_cni_test.go b/client/allocrunner/networking_cni_test.go index c4d761d4d..bc759272f 100644 --- a/client/allocrunner/networking_cni_test.go +++ b/client/allocrunner/networking_cni_test.go @@ -8,6 +8,7 @@ import ( "testing" cni 
"github.com/containerd/go-cni" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -16,6 +17,8 @@ import ( // TestCNI_cniToAllocNet_Fallback asserts if a CNI plugin result lacks an IP on // its sandbox interface, the first IP found is used. func TestCNI_cniToAllocNet_Fallback(t *testing.T) { + ci.Parallel(t) + // Calico's CNI plugin v3.12.3 has been observed to return the // following: cniResult := &cni.CNIResult{ @@ -47,6 +50,8 @@ func TestCNI_cniToAllocNet_Fallback(t *testing.T) { // result lacks any IP addresses. This has not been observed, but Nomad still // must guard against invalid results from external plugins. func TestCNI_cniToAllocNet_Invalid(t *testing.T) { + ci.Parallel(t) + cniResult := &cni.CNIResult{ Interfaces: map[string]*cni.Config{ "eth0": {}, diff --git a/client/allocrunner/task_hook_coordinator_test.go b/client/allocrunner/task_hook_coordinator_test.go index e4343d915..7399acdab 100644 --- a/client/allocrunner/task_hook_coordinator_test.go +++ b/client/allocrunner/task_hook_coordinator_test.go @@ -5,16 +5,17 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/taskrunner" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/stretchr/testify/require" ) func TestTaskHookCoordinator_OnlyMainApp(t *testing.T) { + ci.Parallel(t) + alloc := mock.Alloc() tasks := alloc.Job.TaskGroups[0].Tasks task := tasks[0] @@ -28,6 +29,8 @@ func TestTaskHookCoordinator_OnlyMainApp(t *testing.T) { } func TestTaskHookCoordinator_PrestartRunsBeforeMain(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) alloc := mock.LifecycleAlloc() @@ -48,6 +51,8 @@ func TestTaskHookCoordinator_PrestartRunsBeforeMain(t *testing.T) { } 
func TestTaskHookCoordinator_MainRunsAfterPrestart(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) alloc := mock.LifecycleAlloc() @@ -92,6 +97,8 @@ func TestTaskHookCoordinator_MainRunsAfterPrestart(t *testing.T) { } func TestTaskHookCoordinator_MainRunsAfterManyInitTasks(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) alloc := mock.LifecycleAlloc() @@ -137,6 +144,8 @@ func TestTaskHookCoordinator_MainRunsAfterManyInitTasks(t *testing.T) { } func TestTaskHookCoordinator_FailedInitTask(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) alloc := mock.LifecycleAlloc() @@ -182,6 +191,8 @@ func TestTaskHookCoordinator_FailedInitTask(t *testing.T) { } func TestTaskHookCoordinator_SidecarNeverStarts(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) alloc := mock.LifecycleAlloc() @@ -225,6 +236,8 @@ func TestTaskHookCoordinator_SidecarNeverStarts(t *testing.T) { } func TestTaskHookCoordinator_PoststartStartsAfterMain(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) alloc := mock.LifecycleAlloc() @@ -280,6 +293,7 @@ func isChannelClosed(ch <-chan struct{}) bool { } func TestHasSidecarTasks(t *testing.T) { + ci.Parallel(t) falseV, trueV := false, true diff --git a/client/allocrunner/taskrunner/artifact_hook_test.go b/client/allocrunner/taskrunner/artifact_hook_test.go index 121370867..0a3f21e50 100644 --- a/client/allocrunner/taskrunner/artifact_hook_test.go +++ b/client/allocrunner/taskrunner/artifact_hook_test.go @@ -10,6 +10,7 @@ import ( "sort" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/taskenv" @@ -33,7 +34,7 @@ func (m *mockEmitter) EmitEvent(ev *structs.TaskEvent) { // TestTaskRunner_ArtifactHook_Recoverable asserts that failures to download // artifacts are a recoverable error. 
func TestTaskRunner_ArtifactHook_Recoverable(t *testing.T) { - t.Parallel() + ci.Parallel(t) me := &mockEmitter{} artifactHook := newArtifactHook(me, testlog.HCLogger(t)) @@ -66,7 +67,7 @@ func TestTaskRunner_ArtifactHook_Recoverable(t *testing.T) { // already downloaded artifacts when subsequent artifacts fail and cause a // restart. func TestTaskRunner_ArtifactHook_PartialDone(t *testing.T) { - t.Parallel() + ci.Parallel(t) me := &mockEmitter{} artifactHook := newArtifactHook(me, testlog.HCLogger(t)) diff --git a/client/allocrunner/taskrunner/connect_native_hook_test.go b/client/allocrunner/taskrunner/connect_native_hook_test.go index a9c43d210..5684e77ed 100644 --- a/client/allocrunner/taskrunner/connect_native_hook_test.go +++ b/client/allocrunner/taskrunner/connect_native_hook_test.go @@ -9,6 +9,7 @@ import ( consulapi "github.com/hashicorp/consul/api" consultest "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/taskenv" @@ -35,7 +36,7 @@ func getTestConsul(t *testing.T) *consultest.TestServer { } func TestConnectNativeHook_Name(t *testing.T) { - t.Parallel() + ci.Parallel(t) name := new(connectNativeHook).Name() require.Equal(t, "connect_native", name) } @@ -61,7 +62,7 @@ func cleanupCertDirs(t *testing.T, original, secrets string) { } func TestConnectNativeHook_copyCertificate(t *testing.T) { - t.Parallel() + ci.Parallel(t) f, d := setupCertDirs(t) defer cleanupCertDirs(t, f, d) @@ -81,7 +82,7 @@ func TestConnectNativeHook_copyCertificate(t *testing.T) { } func TestConnectNativeHook_copyCertificates(t *testing.T) { - t.Parallel() + ci.Parallel(t) f, d := setupCertDirs(t) defer cleanupCertDirs(t, f, d) @@ -109,7 +110,7 @@ func TestConnectNativeHook_copyCertificates(t *testing.T) { } func TestConnectNativeHook_tlsEnv(t *testing.T) { - t.Parallel() + ci.Parallel(t) // the hook config comes from 
client config emptyHook := new(connectNativeHook) @@ -163,7 +164,7 @@ func TestConnectNativeHook_tlsEnv(t *testing.T) { } func TestConnectNativeHook_bridgeEnv_bridge(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("without tls", func(t *testing.T) { hook := new(connectNativeHook) @@ -208,7 +209,7 @@ func TestConnectNativeHook_bridgeEnv_bridge(t *testing.T) { } func TestConnectNativeHook_bridgeEnv_host(t *testing.T) { - t.Parallel() + ci.Parallel(t) hook := new(connectNativeHook) hook.alloc = mock.ConnectNativeAlloc("host") @@ -227,7 +228,7 @@ func TestConnectNativeHook_bridgeEnv_host(t *testing.T) { } func TestConnectNativeHook_hostEnv_host(t *testing.T) { - t.Parallel() + ci.Parallel(t) hook := new(connectNativeHook) hook.alloc = mock.ConnectNativeAlloc("host") @@ -249,7 +250,7 @@ func TestConnectNativeHook_hostEnv_host(t *testing.T) { } func TestConnectNativeHook_hostEnv_bridge(t *testing.T) { - t.Parallel() + ci.Parallel(t) hook := new(connectNativeHook) hook.alloc = mock.ConnectNativeAlloc("bridge") @@ -269,7 +270,7 @@ func TestConnectNativeHook_hostEnv_bridge(t *testing.T) { } func TestTaskRunner_ConnectNativeHook_Noop(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) alloc := mock.Alloc() @@ -307,7 +308,7 @@ func TestTaskRunner_ConnectNativeHook_Noop(t *testing.T) { } func TestTaskRunner_ConnectNativeHook_Ok(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.RequireConsul(t) testConsul := getTestConsul(t) @@ -372,7 +373,7 @@ func TestTaskRunner_ConnectNativeHook_Ok(t *testing.T) { } func TestTaskRunner_ConnectNativeHook_with_SI_token(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.RequireConsul(t) testConsul := getTestConsul(t) @@ -445,7 +446,7 @@ func TestTaskRunner_ConnectNativeHook_with_SI_token(t *testing.T) { } func TestTaskRunner_ConnectNativeHook_shareTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.RequireConsul(t) try := func(t *testing.T, shareSSL *bool) { @@ -566,7 +567,7 @@ func 
checkFilesInDir(t *testing.T, dir string, includes, excludes []string) { } func TestTaskRunner_ConnectNativeHook_shareTLS_override(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.RequireConsul(t) fakeCert, fakeCertDir := setupCertDirs(t) diff --git a/client/allocrunner/taskrunner/device_hook_test.go b/client/allocrunner/taskrunner/device_hook_test.go index 9d9d6d7b3..9723f0de5 100644 --- a/client/allocrunner/taskrunner/device_hook_test.go +++ b/client/allocrunner/taskrunner/device_hook_test.go @@ -5,6 +5,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/devicemanager" "github.com/hashicorp/nomad/helper/testlog" @@ -15,7 +16,7 @@ import ( ) func TestDeviceHook_CorrectDevice(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) dm := devicemanager.NoopMockManager() @@ -97,7 +98,7 @@ func TestDeviceHook_CorrectDevice(t *testing.T) { } func TestDeviceHook_IncorrectDevice(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) dm := devicemanager.NoopMockManager() diff --git a/client/allocrunner/taskrunner/dispatch_hook_test.go b/client/allocrunner/taskrunner/dispatch_hook_test.go index 9f56fe0fd..6d7577612 100644 --- a/client/allocrunner/taskrunner/dispatch_hook_test.go +++ b/client/allocrunner/taskrunner/dispatch_hook_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/golang/snappy" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/helper/testlog" @@ -21,7 +22,7 @@ var _ interfaces.TaskPrestartHook = (*dispatchHook)(nil) // TestTaskRunner_DispatchHook_NoPayload asserts that the hook is a noop and is // marked as done if there is no dispatch payload. 
func TestTaskRunner_DispatchHook_NoPayload(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctx := context.Background() @@ -57,7 +58,7 @@ func TestTaskRunner_DispatchHook_NoPayload(t *testing.T) { // TestTaskRunner_DispatchHook_Ok asserts that dispatch payloads are written to // a file in the task dir. func TestTaskRunner_DispatchHook_Ok(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctx := context.Background() @@ -101,7 +102,7 @@ func TestTaskRunner_DispatchHook_Ok(t *testing.T) { // TestTaskRunner_DispatchHook_Error asserts that on an error dispatch payloads // are not written and Done=false. func TestTaskRunner_DispatchHook_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctx := context.Background() diff --git a/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go b/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go index a1a9857cb..b1337c288 100644 --- a/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go +++ b/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go @@ -17,6 +17,7 @@ import ( "time" consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/taskenv" @@ -53,7 +54,7 @@ func writeTmp(t *testing.T, s string, fm os.FileMode) string { } func TestEnvoyBootstrapHook_maybeLoadSIToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) // This test fails when running as root because the test case for checking // the error condition when the file is unreadable fails (root can read the @@ -94,7 +95,7 @@ func TestEnvoyBootstrapHook_maybeLoadSIToken(t *testing.T) { } func TestEnvoyBootstrapHook_decodeTriState(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Equal(t, "", decodeTriState(nil)) require.Equal(t, "true", decodeTriState(helper.BoolToPtr(true))) @@ -118,7 +119,7 @@ var ( ) func 
TestEnvoyBootstrapHook_envoyBootstrapArgs(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("excluding SI token", func(t *testing.T) { ebArgs := envoyBootstrapArgs{ @@ -227,7 +228,7 @@ func TestEnvoyBootstrapHook_envoyBootstrapArgs(t *testing.T) { } func TestEnvoyBootstrapHook_envoyBootstrapEnv(t *testing.T) { - t.Parallel() + ci.Parallel(t) environment := []string{"foo=bar", "baz=1"} @@ -291,7 +292,7 @@ type envoyConfig struct { // TestEnvoyBootstrapHook_with_SI_token asserts the bootstrap file written for // Envoy contains a Consul SI token. func TestEnvoyBootstrapHook_with_SI_token(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.RequireConsul(t) testConsul := getTestConsul(t) @@ -392,7 +393,7 @@ func TestEnvoyBootstrapHook_with_SI_token(t *testing.T) { // creates Envoy's bootstrap.json configuration based on Connect proxy sidecars // registered for the task. func TestTaskRunner_EnvoyBootstrapHook_sidecar_ok(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.RequireConsul(t) testConsul := getTestConsul(t) @@ -487,7 +488,7 @@ func TestTaskRunner_EnvoyBootstrapHook_sidecar_ok(t *testing.T) { } func TestTaskRunner_EnvoyBootstrapHook_gateway_ok(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) testConsul := getTestConsul(t) @@ -570,7 +571,7 @@ func TestTaskRunner_EnvoyBootstrapHook_gateway_ok(t *testing.T) { // TestTaskRunner_EnvoyBootstrapHook_Noop asserts that the Envoy bootstrap hook // is a noop for non-Connect proxy sidecar / gateway tasks. func TestTaskRunner_EnvoyBootstrapHook_Noop(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) alloc := mock.Alloc() @@ -607,7 +608,7 @@ func TestTaskRunner_EnvoyBootstrapHook_Noop(t *testing.T) { // bootstrap hook returns a Recoverable error if the bootstrap command runs but // fails. 
func TestTaskRunner_EnvoyBootstrapHook_RecoverableError(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.RequireConsul(t) testConsul := getTestConsul(t) @@ -685,7 +686,7 @@ func TestTaskRunner_EnvoyBootstrapHook_RecoverableError(t *testing.T) { } func TestTaskRunner_EnvoyBootstrapHook_retryTimeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) testConsul := getTestConsul(t) @@ -812,6 +813,8 @@ func TestTaskRunner_EnvoyBootstrapHook_extractNameAndKind(t *testing.T) { } func TestTaskRunner_EnvoyBootstrapHook_grpcAddress(t *testing.T) { + ci.Parallel(t) + bridgeH := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig( mock.ConnectIngressGatewayAlloc("bridge"), new(config.ConsulConfig), @@ -841,6 +844,8 @@ func TestTaskRunner_EnvoyBootstrapHook_grpcAddress(t *testing.T) { } func TestTaskRunner_EnvoyBootstrapHook_isConnectKind(t *testing.T) { + ci.Parallel(t) + require.True(t, isConnectKind(structs.ConnectProxyPrefix)) require.True(t, isConnectKind(structs.ConnectIngressPrefix)) require.True(t, isConnectKind(structs.ConnectTerminatingPrefix)) diff --git a/client/allocrunner/taskrunner/envoy_version_hook_test.go b/client/allocrunner/taskrunner/envoy_version_hook_test.go index 225c8d6e6..43247284a 100644 --- a/client/allocrunner/taskrunner/envoy_version_hook_test.go +++ b/client/allocrunner/taskrunner/envoy_version_hook_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" ifs "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/taskenv" @@ -24,7 +25,7 @@ var ( ) func TestEnvoyVersionHook_semver(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("with v", func(t *testing.T) { result, err := semver("v1.2.3") @@ -45,7 +46,7 @@ func TestEnvoyVersionHook_semver(t *testing.T) { } func TestEnvoyVersionHook_taskImage(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("absent", func(t *testing.T) { result := 
(*envoyVersionHook)(nil).taskImage(map[string]interface{}{ @@ -70,7 +71,7 @@ func TestEnvoyVersionHook_taskImage(t *testing.T) { } func TestEnvoyVersionHook_tweakImage(t *testing.T) { - t.Parallel() + ci.Parallel(t) image := envoy.ImageFormat @@ -106,7 +107,7 @@ func TestEnvoyVersionHook_tweakImage(t *testing.T) { } func TestEnvoyVersionHook_interpolateImage(t *testing.T) { - t.Parallel() + ci.Parallel(t) hook := (*envoyVersionHook)(nil) @@ -156,7 +157,7 @@ func TestEnvoyVersionHook_interpolateImage(t *testing.T) { } func TestEnvoyVersionHook_skip(t *testing.T) { - t.Parallel() + ci.Parallel(t) h := new(envoyVersionHook) @@ -221,7 +222,7 @@ func TestEnvoyVersionHook_skip(t *testing.T) { } func TestTaskRunner_EnvoyVersionHook_Prestart_standard(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) @@ -264,7 +265,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_standard(t *testing.T) { } func TestTaskRunner_EnvoyVersionHook_Prestart_custom(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) @@ -308,7 +309,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_custom(t *testing.T) { } func TestTaskRunner_EnvoyVersionHook_Prestart_skip(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) @@ -355,7 +356,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_skip(t *testing.T) { } func TestTaskRunner_EnvoyVersionHook_Prestart_fallback(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) @@ -396,7 +397,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_fallback(t *testing.T) { } func TestTaskRunner_EnvoyVersionHook_Prestart_error(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) diff --git a/client/allocrunner/taskrunner/errors_test.go b/client/allocrunner/taskrunner/errors_test.go index 9b32e9cdf..15ad61990 100644 --- a/client/allocrunner/taskrunner/errors_test.go +++ b/client/allocrunner/taskrunner/errors_test.go @@ -4,6 +4,7 @@ import ( "errors" 
"testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) @@ -14,7 +15,7 @@ var _ structs.Recoverable = (*hookError)(nil) // TestHookError_Recoverable asserts that a NewHookError is recoverable if // passed a recoverable error. func TestHookError_Recoverable(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create root error root := errors.New("test error") @@ -36,7 +37,7 @@ func TestHookError_Recoverable(t *testing.T) { // TestHookError_Unrecoverable asserts that a NewHookError is not recoverable // unless it is passed a recoverable error. func TestHookError_Unrecoverable(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create error err := errors.New("test error") diff --git a/client/allocrunner/taskrunner/logmon_hook_test.go b/client/allocrunner/taskrunner/logmon_hook_test.go index b3a087995..8d17b7b66 100644 --- a/client/allocrunner/taskrunner/logmon_hook_test.go +++ b/client/allocrunner/taskrunner/logmon_hook_test.go @@ -9,6 +9,7 @@ import ( "testing" plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" @@ -24,7 +25,7 @@ var _ interfaces.TaskStopHook = (*logmonHook)(nil) // TestTaskRunner_LogmonHook_LoadReattach unit tests loading logmon reattach // config from persisted hook state. func TestTaskRunner_LogmonHook_LoadReattach(t *testing.T) { - t.Parallel() + ci.Parallel(t) // No hook data should return nothing cfg, err := reattachConfigFromHookData(nil) @@ -60,7 +61,7 @@ func TestTaskRunner_LogmonHook_LoadReattach(t *testing.T) { // first time Prestart is called, reattached to on subsequent restarts, and // killed on Stop. 
func TestTaskRunner_LogmonHook_StartStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] diff --git a/client/allocrunner/taskrunner/logmon_hook_unix_test.go b/client/allocrunner/taskrunner/logmon_hook_unix_test.go index df85c054e..03ab80ea1 100644 --- a/client/allocrunner/taskrunner/logmon_hook_unix_test.go +++ b/client/allocrunner/taskrunner/logmon_hook_unix_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" @@ -25,7 +26,7 @@ import ( // Nomad client is restarting and asserts failing to reattach to logmon causes // nomad to spawn a new logmon. func TestTaskRunner_LogmonHook_StartCrashStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -94,7 +95,7 @@ func TestTaskRunner_LogmonHook_StartCrashStop(t *testing.T) { // TestTaskRunner_LogmonHook_ShutdownMidStart simulates logmon crashing while the // Nomad client is calling Start() and asserts that we recover and spawn a new logmon. 
func TestTaskRunner_LogmonHook_ShutdownMidStart(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] diff --git a/client/allocrunner/taskrunner/restarts/restarts_test.go b/client/allocrunner/taskrunner/restarts/restarts_test.go index 48afe4915..f679e69f9 100644 --- a/client/allocrunner/taskrunner/restarts/restarts_test.go +++ b/client/allocrunner/taskrunner/restarts/restarts_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" + "github.com/stretchr/testify/require" ) func testPolicy(success bool, mode string) *structs.RestartPolicy { @@ -34,7 +34,7 @@ func testExitResult(exit int) *drivers.ExitResult { } func TestClient_RestartTracker_ModeDelay(t *testing.T) { - t.Parallel() + ci.Parallel(t) p := testPolicy(true, structs.RestartPolicyModeDelay) rt := NewRestartTracker(p, structs.JobTypeService, nil) for i := 0; i < p.Attempts; i++ { @@ -60,7 +60,7 @@ func TestClient_RestartTracker_ModeDelay(t *testing.T) { } func TestClient_RestartTracker_ModeFail(t *testing.T) { - t.Parallel() + ci.Parallel(t) p := testPolicy(true, structs.RestartPolicyModeFail) rt := NewRestartTracker(p, structs.JobTypeSystem, nil) for i := 0; i < p.Attempts; i++ { @@ -80,7 +80,7 @@ func TestClient_RestartTracker_ModeFail(t *testing.T) { } func TestClient_RestartTracker_NoRestartOnSuccess(t *testing.T) { - t.Parallel() + ci.Parallel(t) p := testPolicy(false, structs.RestartPolicyModeDelay) rt := NewRestartTracker(p, structs.JobTypeBatch, nil) if state, _ := rt.SetExitResult(testExitResult(0)).GetState(); state != structs.TaskTerminated { @@ -89,7 +89,7 @@ func TestClient_RestartTracker_NoRestartOnSuccess(t *testing.T) { } func TestClient_RestartTracker_ZeroAttempts(t *testing.T) { - t.Parallel() + ci.Parallel(t) p := testPolicy(true, structs.RestartPolicyModeFail) 
p.Attempts = 0 @@ -122,7 +122,7 @@ func TestClient_RestartTracker_ZeroAttempts(t *testing.T) { } func TestClient_RestartTracker_TaskKilled(t *testing.T) { - t.Parallel() + ci.Parallel(t) p := testPolicy(true, structs.RestartPolicyModeFail) p.Attempts = 0 rt := NewRestartTracker(p, structs.JobTypeService, nil) @@ -132,7 +132,7 @@ func TestClient_RestartTracker_TaskKilled(t *testing.T) { } func TestClient_RestartTracker_RestartTriggered(t *testing.T) { - t.Parallel() + ci.Parallel(t) p := testPolicy(true, structs.RestartPolicyModeFail) p.Attempts = 0 rt := NewRestartTracker(p, structs.JobTypeService, nil) @@ -142,7 +142,7 @@ func TestClient_RestartTracker_RestartTriggered(t *testing.T) { } func TestClient_RestartTracker_RestartTriggered_Failure(t *testing.T) { - t.Parallel() + ci.Parallel(t) p := testPolicy(true, structs.RestartPolicyModeFail) p.Attempts = 1 rt := NewRestartTracker(p, structs.JobTypeService, nil) @@ -155,7 +155,7 @@ func TestClient_RestartTracker_RestartTriggered_Failure(t *testing.T) { } func TestClient_RestartTracker_StartError_Recoverable_Fail(t *testing.T) { - t.Parallel() + ci.Parallel(t) p := testPolicy(true, structs.RestartPolicyModeFail) rt := NewRestartTracker(p, structs.JobTypeSystem, nil) recErr := structs.NewRecoverableError(fmt.Errorf("foo"), true) @@ -176,7 +176,7 @@ func TestClient_RestartTracker_StartError_Recoverable_Fail(t *testing.T) { } func TestClient_RestartTracker_StartError_Recoverable_Delay(t *testing.T) { - t.Parallel() + ci.Parallel(t) p := testPolicy(true, structs.RestartPolicyModeDelay) rt := NewRestartTracker(p, structs.JobTypeSystem, nil) recErr := structs.NewRecoverableError(fmt.Errorf("foo"), true) @@ -201,7 +201,7 @@ func TestClient_RestartTracker_StartError_Recoverable_Delay(t *testing.T) { } func TestClient_RestartTracker_Lifecycle(t *testing.T) { - t.Parallel() + ci.Parallel(t) testCase := []struct { name string diff --git a/client/allocrunner/taskrunner/script_check_hook_test.go 
b/client/allocrunner/taskrunner/script_check_hook_test.go index 0d50c4fc0..eecdc6722 100644 --- a/client/allocrunner/taskrunner/script_check_hook_test.go +++ b/client/allocrunner/taskrunner/script_check_hook_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/consul/api" hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces" "github.com/hashicorp/nomad/client/consul" "github.com/hashicorp/nomad/client/taskenv" @@ -63,6 +64,8 @@ type heartbeat struct { // TestScript_Exec_Cancel asserts cancelling a script check shortcircuits // any running scripts. func TestScript_Exec_Cancel(t *testing.T) { + ci.Parallel(t) + exec, cancel := newBlockingScriptExec() defer cancel() @@ -89,7 +92,7 @@ func TestScript_Exec_Cancel(t *testing.T) { // TestScript_Exec_TimeoutBasic asserts a script will be killed when the // timeout is reached. func TestScript_Exec_TimeoutBasic(t *testing.T) { - t.Parallel() + ci.Parallel(t) exec, cancel := newBlockingScriptExec() defer cancel() @@ -130,7 +133,7 @@ func TestScript_Exec_TimeoutBasic(t *testing.T) { // the timeout is reached and always set a critical status regardless of what // Exec returns. func TestScript_Exec_TimeoutCritical(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) hb := newFakeHeartbeater() script := newScriptMock(hb, sleeperExec{}, logger, time.Hour, time.Nanosecond) @@ -151,6 +154,8 @@ func TestScript_Exec_TimeoutCritical(t *testing.T) { // TestScript_Exec_Shutdown asserts a script will be executed once more // when told to shutdown. func TestScript_Exec_Shutdown(t *testing.T) { + ci.Parallel(t) + shutdown := make(chan struct{}) exec := newSimpleExec(0, nil) logger := testlog.HCLogger(t) @@ -180,6 +185,7 @@ func TestScript_Exec_Shutdown(t *testing.T) { // TestScript_Exec_Codes asserts script exit codes are translated to their // corresponding Consul health check status. 
func TestScript_Exec_Codes(t *testing.T) { + ci.Parallel(t) exec := newScriptedExec([]execResult{ {[]byte("output"), 1, nil}, @@ -224,6 +230,7 @@ func TestScript_Exec_Codes(t *testing.T) { // TestScript_TaskEnvInterpolation asserts that script check hooks are // interpolated in the same way that services are func TestScript_TaskEnvInterpolation(t *testing.T) { + ci.Parallel(t) logger := testlog.HCLogger(t) consulClient := consul.NewMockConsulServiceClient(t, logger) @@ -288,6 +295,8 @@ func TestScript_TaskEnvInterpolation(t *testing.T) { } func TestScript_associated(t *testing.T) { + ci.Parallel(t) + t.Run("neither set", func(t *testing.T) { require.False(t, new(scriptCheckHook).associated("task1", "", "")) }) diff --git a/client/allocrunner/taskrunner/service_hook_test.go b/client/allocrunner/taskrunner/service_hook_test.go index bdae6bfd0..efcf14f3e 100644 --- a/client/allocrunner/taskrunner/service_hook_test.go +++ b/client/allocrunner/taskrunner/service_hook_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/consul" "github.com/hashicorp/nomad/helper/testlog" @@ -53,6 +54,7 @@ func TestUpdate_beforePoststart(t *testing.T) { } func Test_serviceHook_multipleDeRegisterCall(t *testing.T) { + ci.Parallel(t) alloc := mock.Alloc() logger := testlog.HCLogger(t) diff --git a/client/allocrunner/taskrunner/sids_hook_test.go b/client/allocrunner/taskrunner/sids_hook_test.go index c0adcb0e9..f475c6a2f 100644 --- a/client/allocrunner/taskrunner/sids_hook_test.go +++ b/client/allocrunner/taskrunner/sids_hook_test.go @@ -14,6 +14,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/interfaces" consulapi "github.com/hashicorp/nomad/client/consul" "github.com/hashicorp/nomad/helper" @@ -46,7 +47,7 @@ func sidecar(task string) (string, structs.TaskKind) { } func TestSIDSHook_recoverToken(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) secrets := tmpDir(t) @@ -71,7 +72,7 @@ func TestSIDSHook_recoverToken(t *testing.T) { } func TestSIDSHook_recoverToken_empty(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) secrets := tmpDir(t) @@ -92,6 +93,7 @@ func TestSIDSHook_recoverToken_empty(t *testing.T) { } func TestSIDSHook_recoverToken_unReadable(t *testing.T) { + ci.Parallel(t) // This test fails when running as root because the test case for checking // the error condition when the file is unreadable fails (root can read the // file even though the permissions are set to 0200). @@ -99,7 +101,6 @@ func TestSIDSHook_recoverToken_unReadable(t *testing.T) { t.Skip("test only works as non-root") } - t.Parallel() r := require.New(t) secrets := tmpDir(t) @@ -122,7 +123,7 @@ func TestSIDSHook_recoverToken_unReadable(t *testing.T) { } func TestSIDSHook_writeToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) secrets := tmpDir(t) @@ -139,6 +140,7 @@ func TestSIDSHook_writeToken(t *testing.T) { } func TestSIDSHook_writeToken_unWritable(t *testing.T) { + ci.Parallel(t) // This test fails when running as root because the test case for checking // the error condition when the file is unreadable fails (root can read the // file even though the permissions are set to 0200). 
@@ -146,7 +148,6 @@ func TestSIDSHook_writeToken_unWritable(t *testing.T) { t.Skip("test only works as non-root") } - t.Parallel() r := require.New(t) secrets := tmpDir(t) @@ -162,7 +163,7 @@ func TestSIDSHook_writeToken_unWritable(t *testing.T) { } func Test_SIDSHook_writeToken_nonExistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) base := tmpDir(t) @@ -176,7 +177,7 @@ func Test_SIDSHook_writeToken_nonExistent(t *testing.T) { } func TestSIDSHook_deriveSIToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) taskName, taskKind := sidecar("task1") @@ -197,7 +198,7 @@ func TestSIDSHook_deriveSIToken(t *testing.T) { } func TestSIDSHook_deriveSIToken_timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) siClient := consulapi.NewMockServiceIdentitiesClient() @@ -227,7 +228,7 @@ func TestSIDSHook_deriveSIToken_timeout(t *testing.T) { } func TestSIDSHook_computeBackoff(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(i int, exp time.Duration) { result := computeBackoff(i) @@ -243,7 +244,7 @@ func TestSIDSHook_computeBackoff(t *testing.T) { } func TestSIDSHook_backoff(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) ctx := context.Background() @@ -252,7 +253,7 @@ func TestSIDSHook_backoff(t *testing.T) { } func TestSIDSHook_backoffKilled(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) ctx, cancel := context.WithTimeout(context.Background(), 1) @@ -263,6 +264,7 @@ func TestSIDSHook_backoffKilled(t *testing.T) { } func TestTaskRunner_DeriveSIToken_UnWritableTokenFile(t *testing.T) { + ci.Parallel(t) // Normally this test would live in test_runner_test.go, but since it requires // root and the check for root doesn't like Windows, we put this file in here // for now. 
@@ -274,7 +276,6 @@ func TestTaskRunner_DeriveSIToken_UnWritableTokenFile(t *testing.T) { t.Skip("test only works as non-root") } - t.Parallel() r := require.New(t) alloc := mock.BatchConnectAlloc() diff --git a/client/allocrunner/taskrunner/stats_hook_test.go b/client/allocrunner/taskrunner/stats_hook_test.go index 3294c9a05..2ab9f6569 100644 --- a/client/allocrunner/taskrunner/stats_hook_test.go +++ b/client/allocrunner/taskrunner/stats_hook_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/interfaces" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/testlog" @@ -82,7 +83,7 @@ func (m *mockDriverStats) Called() int { // TestTaskRunner_StatsHook_PoststartExited asserts the stats hook starts and // stops. func TestTaskRunner_StatsHook_PoststartExited(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) logger := testlog.HCLogger(t) @@ -114,7 +115,7 @@ func TestTaskRunner_StatsHook_PoststartExited(t *testing.T) { // TestTaskRunner_StatsHook_Periodic asserts the stats hook collects stats on // an interval. func TestTaskRunner_StatsHook_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) logger := testlog.HCLogger(t) @@ -179,7 +180,7 @@ WAITING: // TestTaskRunner_StatsHook_NotImplemented asserts the stats hook stops if the // driver returns NotImplemented. 
func TestTaskRunner_StatsHook_NotImplemented(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) logger := testlog.HCLogger(t) @@ -208,7 +209,7 @@ func TestTaskRunner_StatsHook_NotImplemented(t *testing.T) { // TestTaskRunner_StatsHook_Backoff asserts that stats hook does some backoff // even if the driver doesn't support intervals well func TestTaskRunner_StatsHook_Backoff(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) su := newMockStatsUpdater() diff --git a/client/allocrunner/taskrunner/task_runner_test.go b/client/allocrunner/taskrunner/task_runner_test.go index 77741c802..a3bb1ccad 100644 --- a/client/allocrunner/taskrunner/task_runner_test.go +++ b/client/allocrunner/taskrunner/task_runner_test.go @@ -14,6 +14,7 @@ import ( "time" "github.com/golang/snappy" + "github.com/hashicorp/nomad/ci" "github.com/kr/pretty" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -141,7 +142,7 @@ func runTestTaskRunner(t *testing.T, alloc *structs.Allocation, taskName string) } func TestTaskRunner_BuildTaskConfig_CPU_Memory(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string @@ -209,7 +210,7 @@ func TestTaskRunner_BuildTaskConfig_CPU_Memory(t *testing.T) { // TestTaskRunner_Stop_ExitCode asserts that the exit code is captured on a task, even if it's stopped func TestTaskRunner_Stop_ExitCode(t *testing.T) { ctestutil.ExecCompatible(t) - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() alloc.Job.TaskGroups[0].Count = 1 @@ -258,7 +259,7 @@ func TestTaskRunner_Stop_ExitCode(t *testing.T) { // TestTaskRunner_Restore_Running asserts restoring a running task does not // rerun the task. 
func TestTaskRunner_Restore_Running(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.BatchAlloc() @@ -314,7 +315,7 @@ func TestTaskRunner_Restore_Running(t *testing.T) { // returned once it is running and waiting in pending along with a cleanup // func. func setupRestoreFailureTest(t *testing.T, alloc *structs.Allocation) (*TaskRunner, *Config, func()) { - t.Parallel() + ci.Parallel(t) task := alloc.Job.TaskGroups[0].Tasks[0] task.Driver = "raw_exec" @@ -388,6 +389,8 @@ func setupRestoreFailureTest(t *testing.T, alloc *structs.Allocation) (*TaskRunn // TestTaskRunner_Restore_Restart asserts restoring a dead task blocks until // MarkAlive is called. #1795 func TestTaskRunner_Restore_Restart(t *testing.T) { + ci.Parallel(t) + newTR, conf, cleanup := setupRestoreFailureTest(t, mock.Alloc()) defer cleanup() @@ -405,6 +408,8 @@ func TestTaskRunner_Restore_Restart(t *testing.T) { // TestTaskRunner_Restore_Kill asserts restoring a dead task blocks until // the task is killed. #1795 func TestTaskRunner_Restore_Kill(t *testing.T) { + ci.Parallel(t) + newTR, _, cleanup := setupRestoreFailureTest(t, mock.Alloc()) defer cleanup() @@ -430,6 +435,8 @@ func TestTaskRunner_Restore_Kill(t *testing.T) { // TestTaskRunner_Restore_Update asserts restoring a dead task blocks until // Update is called. #1795 func TestTaskRunner_Restore_Update(t *testing.T) { + ci.Parallel(t) + newTR, conf, cleanup := setupRestoreFailureTest(t, mock.Alloc()) defer cleanup() @@ -454,7 +461,7 @@ func TestTaskRunner_Restore_Update(t *testing.T) { // TestTaskRunner_Restore_System asserts restoring a dead system task does not // block. func TestTaskRunner_Restore_System(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.Type = structs.JobTypeSystem @@ -527,7 +534,7 @@ func TestTaskRunner_Restore_System(t *testing.T) { // TestTaskRunner_TaskEnv_Interpolated asserts driver configurations are // interpolated. 
func TestTaskRunner_TaskEnv_Interpolated(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.BatchAlloc() @@ -571,7 +578,7 @@ func TestTaskRunner_TaskEnv_Interpolated(t *testing.T) { // not host paths. func TestTaskRunner_TaskEnv_Chroot(t *testing.T) { ctestutil.ExecCompatible(t) - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.BatchAlloc() @@ -629,7 +636,7 @@ func TestTaskRunner_TaskEnv_Chroot(t *testing.T) { // not host paths. Host env vars should also be excluded. func TestTaskRunner_TaskEnv_Image(t *testing.T) { ctestutil.DockerCompatible(t) - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.BatchAlloc() @@ -672,7 +679,7 @@ func TestTaskRunner_TaskEnv_Image(t *testing.T) { // TestTaskRunner_TaskEnv_None asserts raw_exec uses host paths and env vars. func TestTaskRunner_TaskEnv_None(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.BatchAlloc() @@ -715,7 +722,7 @@ func TestTaskRunner_TaskEnv_None(t *testing.T) { // Test that devices get sent to the driver func TestTaskRunner_DevicePropogation(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a mock alloc that has a gpu @@ -812,7 +819,7 @@ func (h *mockEnvHook) Prestart(ctx context.Context, req *interfaces.TaskPrestart // hook environments set restores the environment without re-running done // hooks. func TestTaskRunner_Restore_HookEnv(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.BatchAlloc() @@ -849,7 +856,7 @@ func TestTaskRunner_Restore_HookEnv(t *testing.T) { // This test asserts that we can recover from an "external" plugin exiting by // retrieving a new instance of the driver and recovering the task. 
func TestTaskRunner_RecoverFromDriverExiting(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create an allocation using the mock driver that exits simulating the @@ -922,7 +929,7 @@ func TestTaskRunner_RecoverFromDriverExiting(t *testing.T) { // TestTaskRunner_ShutdownDelay asserts services are removed from Consul // ${shutdown_delay} seconds before killing the process. func TestTaskRunner_ShutdownDelay(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -1006,7 +1013,7 @@ WAIT: // Consul and tasks are killed without waiting for ${shutdown_delay} // when the alloc has the NoShutdownDelay transition flag set. func TestTaskRunner_NoShutdownDelay(t *testing.T) { - t.Parallel() + ci.Parallel(t) // don't set this too high so that we don't block the test runner // on shutting down the agent if the test fails @@ -1081,7 +1088,7 @@ func TestTaskRunner_NoShutdownDelay(t *testing.T) { // TestTaskRunner_Dispatch_Payload asserts that a dispatch job runs and the // payload was written to disk. func TestTaskRunner_Dispatch_Payload(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -1127,7 +1134,7 @@ func TestTaskRunner_Dispatch_Payload(t *testing.T) { // TestTaskRunner_SignalFailure asserts that signal errors are properly // propagated from the driver to TaskRunner. func TestTaskRunner_SignalFailure(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -1149,7 +1156,7 @@ func TestTaskRunner_SignalFailure(t *testing.T) { // TestTaskRunner_RestartTask asserts that restarting a task works and emits a // Restarting event. 
func TestTaskRunner_RestartTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -1201,7 +1208,7 @@ func TestTaskRunner_RestartTask(t *testing.T) { // TestTaskRunner_CheckWatcher_Restart asserts that when enabled an unhealthy // Consul check will cause a task to restart following restart policy rules. func TestTaskRunner_CheckWatcher_Restart(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() @@ -1319,7 +1326,7 @@ func useMockEnvoyBootstrapHook(tr *TaskRunner) { // TestTaskRunner_BlockForSIDSToken asserts tasks do not start until a Consul // Service Identity token is derived. func TestTaskRunner_BlockForSIDSToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) alloc := mock.BatchConnectAlloc() @@ -1387,7 +1394,7 @@ func TestTaskRunner_BlockForSIDSToken(t *testing.T) { } func TestTaskRunner_DeriveSIToken_Retry(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) alloc := mock.BatchConnectAlloc() @@ -1446,7 +1453,7 @@ func TestTaskRunner_DeriveSIToken_Retry(t *testing.T) { // TestTaskRunner_DeriveSIToken_Unrecoverable asserts that an unrecoverable error // from deriving a service identity token will fail a task. func TestTaskRunner_DeriveSIToken_Unrecoverable(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) alloc := mock.BatchConnectAlloc() @@ -1503,7 +1510,7 @@ func TestTaskRunner_DeriveSIToken_Unrecoverable(t *testing.T) { // TestTaskRunner_BlockForVaultToken asserts tasks do not start until a vault token // is derived. func TestTaskRunner_BlockForVaultToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -1581,7 +1588,7 @@ func TestTaskRunner_BlockForVaultToken(t *testing.T) { // returned when deriving a vault token a task will continue to block while // it's retried. 
func TestTaskRunner_DeriveToken_Retry(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] task.Vault = &structs.Vault{Policies: []string{"default"}} @@ -1645,7 +1652,7 @@ func TestTaskRunner_DeriveToken_Retry(t *testing.T) { // TestTaskRunner_DeriveToken_Unrecoverable asserts that an unrecoverable error // from deriving a vault token will fail a task. func TestTaskRunner_DeriveToken_Unrecoverable(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Use a batch job with no restarts alloc := mock.BatchAlloc() @@ -1690,7 +1697,7 @@ func TestTaskRunner_DeriveToken_Unrecoverable(t *testing.T) { // TestTaskRunner_Download_ChrootExec asserts that downloaded artifacts may be // executed in a chroot. func TestTaskRunner_Download_ChrootExec(t *testing.T) { - t.Parallel() + ci.Parallel(t) ctestutil.ExecCompatible(t) ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Dir(".")))) @@ -1731,7 +1738,7 @@ func TestTaskRunner_Download_ChrootExec(t *testing.T) { // TestTaskRunner_Download_Exec asserts that downloaded artifacts may be // executed in a driver without filesystem isolation. func TestTaskRunner_Download_RawExec(t *testing.T) { - t.Parallel() + ci.Parallel(t) ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Dir(".")))) defer ts.Close() @@ -1771,7 +1778,7 @@ func TestTaskRunner_Download_RawExec(t *testing.T) { // TestTaskRunner_Download_List asserts that multiple artificats are downloaded // before a task is run. func TestTaskRunner_Download_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Dir(".")))) defer ts.Close() @@ -1820,7 +1827,7 @@ func TestTaskRunner_Download_List(t *testing.T) { // TestTaskRunner_Download_Retries asserts that failed artifact downloads are // retried according to the task's restart policy. 
func TestTaskRunner_Download_Retries(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create an allocation that has a task with bad artifacts. alloc := mock.BatchAlloc() @@ -1866,7 +1873,7 @@ func TestTaskRunner_Download_Retries(t *testing.T) { // TestTaskRunner_DriverNetwork asserts that a driver's network is properly // used in services and checks. func TestTaskRunner_DriverNetwork(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -2002,7 +2009,7 @@ func TestTaskRunner_DriverNetwork(t *testing.T) { // TestTaskRunner_RestartSignalTask_NotRunning asserts resilience to failures // when a restart or signal is triggered and the task is not running. func TestTaskRunner_RestartSignalTask_NotRunning(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -2069,7 +2076,7 @@ func TestTaskRunner_RestartSignalTask_NotRunning(t *testing.T) { // TestTaskRunner_Run_RecoverableStartError asserts tasks are restarted if they // return a recoverable error from StartTask. func TestTaskRunner_Run_RecoverableStartError(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -2111,7 +2118,7 @@ func TestTaskRunner_Run_RecoverableStartError(t *testing.T) { // TestTaskRunner_Template_Artifact asserts that tasks can use artifacts as templates. 
func TestTaskRunner_Template_Artifact(t *testing.T) { - t.Parallel() + ci.Parallel(t) ts := httptest.NewServer(http.FileServer(http.Dir("."))) defer ts.Close() @@ -2171,7 +2178,7 @@ func TestTaskRunner_Template_Artifact(t *testing.T) { // that fails to render in PreStart can gracefully be shutdown by // either killCtx or shutdownCtx func TestTaskRunner_Template_BlockingPreStart(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -2233,7 +2240,7 @@ func TestTaskRunner_Template_BlockingPreStart(t *testing.T) { // TestTaskRunner_Template_NewVaultToken asserts that a new vault token is // created when rendering template and that it is revoked on alloc completion func TestTaskRunner_Template_NewVaultToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -2312,7 +2319,7 @@ func TestTaskRunner_Template_NewVaultToken(t *testing.T) { // TestTaskRunner_VaultManager_Restart asserts that the alloc is restarted when the alloc // derived vault token expires, when task is configured with Restart change mode func TestTaskRunner_VaultManager_Restart(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -2385,7 +2392,7 @@ func TestTaskRunner_VaultManager_Restart(t *testing.T) { // TestTaskRunner_VaultManager_Signal asserts that the alloc is signalled when the alloc // derived vault token expires, when task is configured with signal change mode func TestTaskRunner_VaultManager_Signal(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -2449,7 +2456,7 @@ func TestTaskRunner_VaultManager_Signal(t *testing.T) { // TestTaskRunner_UnregisterConsul_Retries asserts a task is unregistered from // Consul when waiting to be retried. 
func TestTaskRunner_UnregisterConsul_Retries(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() // Make the restart policy try one ctx.update @@ -2509,7 +2516,7 @@ func testWaitForTaskToStart(t *testing.T, tr *TaskRunner) { // TestTaskRunner_BaseLabels tests that the base labels for the task metrics // are set appropriately. func TestTaskRunner_BaseLabels(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.BatchAlloc() diff --git a/client/allocrunner/taskrunner/tasklet_test.go b/client/allocrunner/taskrunner/tasklet_test.go index 4dc8f36f2..ea0cf2d7d 100644 --- a/client/allocrunner/taskrunner/tasklet_test.go +++ b/client/allocrunner/taskrunner/tasklet_test.go @@ -10,6 +10,7 @@ import ( "time" hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/testtask" @@ -23,6 +24,8 @@ func TestMain(m *testing.M) { } func TestTasklet_Exec_HappyPath(t *testing.T) { + ci.Parallel(t) + results := []execResult{ {[]byte("output"), 0, nil}, {[]byte("output"), 1, nil}, @@ -53,6 +56,8 @@ func TestTasklet_Exec_HappyPath(t *testing.T) { // TestTasklet_Exec_Cancel asserts cancelling a tasklet short-circuits // any running executions the tasklet func TestTasklet_Exec_Cancel(t *testing.T) { + ci.Parallel(t) + exec, cancel := newBlockingScriptExec() defer cancel() tm := newTaskletMock(exec, testlog.HCLogger(t), time.Hour, time.Hour) @@ -85,7 +90,7 @@ func TestTasklet_Exec_Cancel(t *testing.T) { // TestTasklet_Exec_Timeout asserts a tasklet script will be killed // when the timeout is reached. 
func TestTasklet_Exec_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) exec, cancel := newBlockingScriptExec() defer cancel() @@ -125,6 +130,8 @@ func TestTasklet_Exec_Timeout(t *testing.T) { // TestTasklet_Exec_Shutdown asserts a script will be executed once more // when told to shutdown. func TestTasklet_Exec_Shutdown(t *testing.T) { + ci.Parallel(t) + exec := newSimpleExec(0, nil) shutdown := make(chan struct{}) tm := newTaskletMock(exec, testlog.HCLogger(t), time.Hour, 3*time.Second) diff --git a/client/allocrunner/taskrunner/template/template_test.go b/client/allocrunner/taskrunner/template/template_test.go index dcd9a8eb0..3b943d864 100644 --- a/client/allocrunner/taskrunner/template/template_test.go +++ b/client/allocrunner/taskrunner/template/template_test.go @@ -18,6 +18,7 @@ import ( templateconfig "github.com/hashicorp/consul-template/config" ctestutil "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/taskenv" @@ -230,7 +231,7 @@ func (h *testHarness) stop() { } func TestTaskTemplateManager_InvalidConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) hooks := NewMockTaskHooks() clientConfig := &config.Config{Region: "global"} taskDir := "foo" @@ -371,7 +372,7 @@ func TestTaskTemplateManager_InvalidConfig(t *testing.T) { } func TestTaskTemplateManager_HostPath(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render immediately and write it to a tmp file f, err := ioutil.TempFile("", "") if err != nil { @@ -463,7 +464,7 @@ func TestTaskTemplateManager_HostPath(t *testing.T) { } func TestTaskTemplateManager_Unblock_Static(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render immediately content := "hello, world!" 
file := "my.tmpl" @@ -497,7 +498,7 @@ func TestTaskTemplateManager_Unblock_Static(t *testing.T) { } func TestTaskTemplateManager_Permissions(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render immediately content := "hello, world!" file := "my.tmpl" @@ -532,7 +533,7 @@ func TestTaskTemplateManager_Permissions(t *testing.T) { } func TestTaskTemplateManager_Unblock_Static_NomadEnv(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render immediately content := `Hello Nomad Task: {{env "NOMAD_TASK_NAME"}}` expected := fmt.Sprintf("Hello Nomad Task: %s", TestTaskName) @@ -567,7 +568,7 @@ func TestTaskTemplateManager_Unblock_Static_NomadEnv(t *testing.T) { } func TestTaskTemplateManager_Unblock_Static_AlreadyRendered(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render immediately content := "hello, world!" file := "my.tmpl" @@ -608,7 +609,7 @@ func TestTaskTemplateManager_Unblock_Static_AlreadyRendered(t *testing.T) { } func TestTaskTemplateManager_Unblock_Consul(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render based on a key in Consul key := "foo" content := "barbaz" @@ -654,7 +655,7 @@ func TestTaskTemplateManager_Unblock_Consul(t *testing.T) { } func TestTaskTemplateManager_Unblock_Vault(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Make a template that will render based on a key in Vault vaultPath := "secret/data/password" @@ -704,7 +705,7 @@ func TestTaskTemplateManager_Unblock_Vault(t *testing.T) { } func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render immediately staticContent := "hello, world!" 
staticFile := "my.tmpl" @@ -772,7 +773,7 @@ func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) { // TestTaskTemplateManager_FirstRender_Restored tests that a task that's been // restored renders and triggers its change mode if the template has changed func TestTaskTemplateManager_FirstRender_Restored(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Make a template that will render based on a key in Vault vaultPath := "secret/data/password" @@ -869,7 +870,7 @@ OUTER: } func TestTaskTemplateManager_Rerender_Noop(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render based on a key in Consul key := "foo" content1 := "bar" @@ -938,7 +939,7 @@ func TestTaskTemplateManager_Rerender_Noop(t *testing.T) { } func TestTaskTemplateManager_Rerender_Signal(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that renders based on a key in Consul and sends SIGALRM key1 := "foo" content1_1 := "bar" @@ -1038,7 +1039,7 @@ OUTER: } func TestTaskTemplateManager_Rerender_Restart(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that renders based on a key in Consul and sends restart key1 := "bam" content1_1 := "cat" @@ -1102,7 +1103,7 @@ OUTER: } func TestTaskTemplateManager_Interpolate_Destination(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will have its destination interpolated content := "hello, world!" file := "${node.unique.id}.tmpl" @@ -1137,7 +1138,7 @@ func TestTaskTemplateManager_Interpolate_Destination(t *testing.T) { } func TestTaskTemplateManager_Signal_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Make a template that renders based on a key in Consul and sends SIGALRM @@ -1189,7 +1190,7 @@ func TestTaskTemplateManager_Signal_Error(t *testing.T) { // process environment variables. nomad host process environment variables // are to be treated the same as not found environment variables. 
func TestTaskTemplateManager_FiltersEnvVars(t *testing.T) { - t.Parallel() + ci.Parallel(t) defer os.Setenv("NOMAD_TASK_NAME", os.Getenv("NOMAD_TASK_NAME")) os.Setenv("NOMAD_TASK_NAME", "should be overridden by task") @@ -1233,7 +1234,7 @@ TEST_ENV_NOT_FOUND: {{env "` + testenv + `_NOTFOUND" }}` // TestTaskTemplateManager_Env asserts templates with the env flag set are read // into the task's environment. func TestTaskTemplateManager_Env(t *testing.T) { - t.Parallel() + ci.Parallel(t) template := &structs.Template{ EmbeddedTmpl: ` # Comment lines are ok @@ -1276,7 +1277,7 @@ ANYTHING_goes=Spaces are=ok! // TestTaskTemplateManager_Env_Missing asserts the core env // template processing function returns errors when files don't exist func TestTaskTemplateManager_Env_Missing(t *testing.T) { - t.Parallel() + ci.Parallel(t) d, err := ioutil.TempDir("", "ct_env_missing") if err != nil { t.Fatalf("err: %v", err) @@ -1311,7 +1312,7 @@ func TestTaskTemplateManager_Env_Missing(t *testing.T) { // TestTaskTemplateManager_Env_InterpolatedDest asserts the core env // template processing function handles interpolated destinations func TestTaskTemplateManager_Env_InterpolatedDest(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d, err := ioutil.TempDir("", "ct_env_interpolated") @@ -1352,7 +1353,7 @@ func TestTaskTemplateManager_Env_InterpolatedDest(t *testing.T) { // template processing function returns combined env vars from multiple // templates correctly. 
func TestTaskTemplateManager_Env_Multi(t *testing.T) { - t.Parallel() + ci.Parallel(t) d, err := ioutil.TempDir("", "ct_env_missing") if err != nil { t.Fatalf("err: %v", err) @@ -1398,7 +1399,7 @@ func TestTaskTemplateManager_Env_Multi(t *testing.T) { } func TestTaskTemplateManager_Rerender_Env(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that renders based on a key in Consul and sends restart key1 := "bam" key2 := "bar" @@ -1480,7 +1481,7 @@ OUTER: // TestTaskTemplateManager_Config_ServerName asserts the tls_server_name // setting is propagated to consul-template's configuration. See #2776 func TestTaskTemplateManager_Config_ServerName(t *testing.T) { - t.Parallel() + ci.Parallel(t) c := config.DefaultConfig() c.VaultConfig = &sconfig.VaultConfig{ Enabled: helper.BoolToPtr(true), @@ -1504,7 +1505,7 @@ func TestTaskTemplateManager_Config_ServerName(t *testing.T) { // TestTaskTemplateManager_Config_VaultNamespace asserts the Vault namespace setting is // propagated to consul-template's configuration. func TestTaskTemplateManager_Config_VaultNamespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) testNS := "test-namespace" @@ -1535,7 +1536,7 @@ func TestTaskTemplateManager_Config_VaultNamespace(t *testing.T) { // TestTaskTemplateManager_Config_VaultNamespace asserts the Vault namespace setting is // propagated to consul-template's configuration. func TestTaskTemplateManager_Config_VaultNamespace_TaskOverride(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) testNS := "test-namespace" @@ -1570,7 +1571,7 @@ func TestTaskTemplateManager_Config_VaultNamespace_TaskOverride(t *testing.T) { // TestTaskTemplateManager_Escapes asserts that when sandboxing is enabled // interpolated paths are not incorrectly treated as escaping the alloc dir. 
func TestTaskTemplateManager_Escapes(t *testing.T) { - t.Parallel() + ci.Parallel(t) clientConf := config.DefaultConfig() require.False(t, clientConf.TemplateConfig.DisableSandbox, "expected sandbox to be disabled") @@ -1822,7 +1823,7 @@ func TestTaskTemplateManager_BlockedEvents(t *testing.T) { // then subsequently sets 0, 1, 2 keys // then asserts that templates are still blocked on 3 and 4, // and check that we got the relevant task events - t.Parallel() + ci.Parallel(t) require := require.New(t) // Make a template that will render based on a key in Consul @@ -1920,7 +1921,7 @@ WAIT_LOOP: // configuration is accurately mapped from the client to the TaskTemplateManager // and that any operator defined boundaries are enforced. func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { - t.Parallel() + ci.Parallel(t) testNS := "test-namespace" @@ -2126,7 +2127,7 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { // configuration is accurately mapped from the template to the TaskTemplateManager's // template config. 
func TestTaskTemplateManager_Template_Wait_Set(t *testing.T) { - t.Parallel() + ci.Parallel(t) c := config.DefaultConfig() c.Node = mock.Node() diff --git a/client/allocrunner/taskrunner/validate_hook_test.go b/client/allocrunner/taskrunner/validate_hook_test.go index e71301456..d346b6c47 100644 --- a/client/allocrunner/taskrunner/validate_hook_test.go +++ b/client/allocrunner/taskrunner/validate_hook_test.go @@ -3,6 +3,7 @@ package taskrunner import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/nomad/structs" @@ -10,7 +11,7 @@ import ( ) func TestTaskRunner_Validate_UserEnforcement(t *testing.T) { - t.Parallel() + ci.Parallel(t) taskEnv := taskenv.NewEmptyBuilder().Build() conf := config.DefaultConfig() @@ -35,7 +36,7 @@ func TestTaskRunner_Validate_UserEnforcement(t *testing.T) { } func TestTaskRunner_Validate_ServiceName(t *testing.T) { - t.Parallel() + ci.Parallel(t) builder := taskenv.NewEmptyBuilder() conf := config.DefaultConfig() diff --git a/client/allocrunner/taskrunner/volume_hook_test.go b/client/allocrunner/taskrunner/volume_hook_test.go index 951e4d7a7..0bfff5edb 100644 --- a/client/allocrunner/taskrunner/volume_hook_test.go +++ b/client/allocrunner/taskrunner/volume_hook_test.go @@ -3,6 +3,7 @@ package taskrunner import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/pluginmanager/csimanager" cstructs "github.com/hashicorp/nomad/client/structs" @@ -16,6 +17,8 @@ import ( ) func TestVolumeHook_PartitionMountsByVolume_Works(t *testing.T) { + ci.Parallel(t) + mounts := []*structs.VolumeMount{ { Volume: "foo", @@ -68,6 +71,7 @@ func TestVolumeHook_PartitionMountsByVolume_Works(t *testing.T) { } func TestVolumeHook_prepareCSIVolumes(t *testing.T) { + ci.Parallel(t) req := &interfaces.TaskPrestartRequest{ Task: &structs.Task{ @@ -157,6 +161,7 @@ func 
TestVolumeHook_prepareCSIVolumes(t *testing.T) { } func TestVolumeHook_Interpolation(t *testing.T) { + ci.Parallel(t) alloc := mock.Alloc() task := alloc.Job.TaskGroups[0].Tasks[0] diff --git a/client/allocwatcher/alloc_watcher_test.go b/client/allocwatcher/alloc_watcher_test.go index 4c4b63702..4aa36433b 100644 --- a/client/allocwatcher/alloc_watcher_test.go +++ b/client/allocwatcher/alloc_watcher_test.go @@ -13,6 +13,7 @@ import ( "time" hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/testlog" @@ -88,6 +89,8 @@ func newConfig(t *testing.T) (Config, func()) { // TestPrevAlloc_Noop asserts that when no previous allocation is set the noop // implementation is returned that does not block or perform migrations. func TestPrevAlloc_Noop(t *testing.T) { + ci.Parallel(t) + conf, cleanup := newConfig(t) defer cleanup() @@ -114,7 +117,8 @@ func TestPrevAlloc_Noop(t *testing.T) { // TestPrevAlloc_LocalPrevAlloc_Block asserts that when a previous alloc runner // is set a localPrevAlloc will block on it. func TestPrevAlloc_LocalPrevAlloc_Block(t *testing.T) { - t.Parallel() + ci.Parallel(t) + conf, cleanup := newConfig(t) defer cleanup() @@ -181,7 +185,8 @@ func TestPrevAlloc_LocalPrevAlloc_Block(t *testing.T) { // TestPrevAlloc_LocalPrevAlloc_Terminated asserts that when a previous alloc // runner has already terminated the watcher does not block on the broadcaster. func TestPrevAlloc_LocalPrevAlloc_Terminated(t *testing.T) { - t.Parallel() + ci.Parallel(t) + conf, cleanup := newConfig(t) defer cleanup() @@ -201,7 +206,8 @@ func TestPrevAlloc_LocalPrevAlloc_Terminated(t *testing.T) { // streaming a tar cause the migration to be cancelled and no files are written // (migrations are atomic). 
func TestPrevAlloc_StreamAllocDir_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) + dest, err := ioutil.TempDir("", "nomadtest-") if err != nil { t.Fatalf("err: %v", err) diff --git a/client/allocwatcher/alloc_watcher_unix_test.go b/client/allocwatcher/alloc_watcher_unix_test.go index 7967a69f0..79f8a2979 100644 --- a/client/allocwatcher/alloc_watcher_unix_test.go +++ b/client/allocwatcher/alloc_watcher_unix_test.go @@ -15,6 +15,7 @@ import ( "syscall" "testing" + "github.com/hashicorp/nomad/ci" ctestutil "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/testlog" ) @@ -22,8 +23,9 @@ import ( // TestPrevAlloc_StreamAllocDir_Ok asserts that streaming a tar to an alloc dir // works. func TestPrevAlloc_StreamAllocDir_Ok(t *testing.T) { + ci.Parallel(t) ctestutil.RequireRoot(t) - t.Parallel() + dir, err := ioutil.TempDir("", "") if err != nil { t.Fatalf("err: %v", err) diff --git a/client/allocwatcher/group_alloc_watcher_test.go b/client/allocwatcher/group_alloc_watcher_test.go index f992f3410..79eeaf07e 100644 --- a/client/allocwatcher/group_alloc_watcher_test.go +++ b/client/allocwatcher/group_alloc_watcher_test.go @@ -5,6 +5,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" @@ -13,7 +14,7 @@ import ( // TestPrevAlloc_GroupPrevAllocWatcher_Block asserts that when there are // prevAllocs is set a groupPrevAllocWatcher will block on them func TestPrevAlloc_GroupPrevAllocWatcher_Block(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf, cleanup := newConfig(t) defer cleanup() @@ -80,7 +81,8 @@ func TestPrevAlloc_GroupPrevAllocWatcher_Block(t *testing.T) { // multiple prevAllocs is set a groupPrevAllocWatcher will block until all // are complete func TestPrevAlloc_GroupPrevAllocWatcher_BlockMulti(t *testing.T) { - t.Parallel() + ci.Parallel(t) + conf1, cleanup1 := newConfig(t) defer cleanup1() 
conf1.Alloc.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{ diff --git a/client/client_stats_endpoint_test.go b/client/client_stats_endpoint_test.go index 9802d84e6..03f6b7717 100644 --- a/client/client_stats_endpoint_test.go +++ b/client/client_stats_endpoint_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/nomad/mock" @@ -12,8 +13,9 @@ import ( ) func TestClientStats_Stats(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + client, cleanup := TestClient(t, nil) defer cleanup() @@ -26,7 +28,7 @@ func TestClientStats_Stats(t *testing.T) { } func TestClientStats_Stats_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, addr, root, cleanupS := testACLServer(t, nil) diff --git a/client/client_test.go b/client/client_test.go index d27ab0ed0..bb99e6cc9 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -12,6 +12,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" trstate "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" "github.com/hashicorp/nomad/client/config" consulApi "github.com/hashicorp/nomad/client/consul" @@ -45,7 +46,8 @@ func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string, fu } func TestClient_StartStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) + client, cleanup := TestClient(t, nil) defer cleanup() if err := client.Shutdown(); err != nil { @@ -56,7 +58,7 @@ func TestClient_StartStop(t *testing.T) { // Certain labels for metrics are dependant on client initial setup. 
This tests // that the client has properly initialized before we assign values to labels func TestClient_BaseLabels(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) client, cleanup := TestClient(t, nil) @@ -81,7 +83,7 @@ func TestClient_BaseLabels(t *testing.T) { } func TestClient_RPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) _, addr, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -102,7 +104,7 @@ func TestClient_RPC(t *testing.T) { } func TestClient_RPC_FireRetryWatchers(t *testing.T) { - t.Parallel() + ci.Parallel(t) _, addr, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -131,7 +133,7 @@ func TestClient_RPC_FireRetryWatchers(t *testing.T) { } func TestClient_RPC_Passthrough(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -152,7 +154,7 @@ func TestClient_RPC_Passthrough(t *testing.T) { } func TestClient_Fingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) c, cleanup := TestClient(t, nil) defer cleanup() @@ -175,7 +177,7 @@ func TestClient_Fingerprint(t *testing.T) { // TestClient_Fingerprint_Periodic asserts that driver node attributes are // periodically fingerprinted. func TestClient_Fingerprint_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) c1, cleanup := TestClient(t, func(c *config.Config) { confs := []*nconfig.PluginConfig{ @@ -253,7 +255,8 @@ func TestClient_Fingerprint_Periodic(t *testing.T) { // TestClient_MixedTLS asserts that when a server is running with TLS enabled // it will reject any RPC connections from clients that lack TLS. See #2525 func TestClient_MixedTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) + const ( cafile = "../helper/tlsutil/testdata/ca.pem" foocert = "../helper/tlsutil/testdata/nomad-foo.pem" @@ -300,7 +303,7 @@ func TestClient_MixedTLS(t *testing.T) { // enabled -- but their certificates are signed by different CAs -- they're // unable to communicate. 
func TestClient_BadTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../helper/tlsutil/testdata/ca.pem" @@ -356,7 +359,7 @@ func TestClient_BadTLS(t *testing.T) { } func TestClient_Register(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -389,7 +392,7 @@ func TestClient_Register(t *testing.T) { } func TestClient_Heartbeat(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, func(c *nomad.Config) { c.MinHeartbeatTTL = 50 * time.Millisecond @@ -426,7 +429,7 @@ func TestClient_Heartbeat(t *testing.T) { // TestClient_UpdateAllocStatus that once running allocations send updates to // the server. func TestClient_UpdateAllocStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -452,7 +455,7 @@ func TestClient_UpdateAllocStatus(t *testing.T) { } func TestClient_WatchAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -552,7 +555,7 @@ func waitTilNodeReady(client *Client, t *testing.T) { } func TestClient_SaveRestoreState(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -653,7 +656,7 @@ func TestClient_SaveRestoreState(t *testing.T) { } func TestClient_AddAllocError(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, _, cleanupS1 := testServer(t, nil) @@ -729,7 +732,8 @@ func TestClient_AddAllocError(t *testing.T) { } func TestClient_Init(t *testing.T) { - t.Parallel() + ci.Parallel(t) + dir, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -759,7 +763,7 @@ func TestClient_Init(t *testing.T) { } func TestClient_BlockedAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -872,7 +876,7 @@ func TestClient_BlockedAllocations(t *testing.T) { } func 
TestClient_ValidateMigrateToken_ValidToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) c, cleanup := TestClient(t, func(c *config.Config) { @@ -888,7 +892,7 @@ func TestClient_ValidateMigrateToken_ValidToken(t *testing.T) { } func TestClient_ValidateMigrateToken_InvalidToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) c, cleanup := TestClient(t, func(c *config.Config) { @@ -904,7 +908,7 @@ func TestClient_ValidateMigrateToken_InvalidToken(t *testing.T) { } func TestClient_ValidateMigrateToken_ACLDisabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) c, cleanup := TestClient(t, func(c *config.Config) {}) @@ -914,7 +918,7 @@ func TestClient_ValidateMigrateToken_ACLDisabled(t *testing.T) { } func TestClient_ReloadTLS_UpgradePlaintextToTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, addr, cleanupS1 := testServer(t, func(c *nomad.Config) { @@ -990,7 +994,7 @@ func TestClient_ReloadTLS_UpgradePlaintextToTLS(t *testing.T) { } func TestClient_ReloadTLS_DowngradeTLSToPlaintext(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, addr, cleanupS1 := testServer(t, func(c *nomad.Config) { @@ -1067,7 +1071,8 @@ func TestClient_ReloadTLS_DowngradeTLSToPlaintext(t *testing.T) { // TestClient_ServerList tests client methods that interact with the internal // nomad server list. 
func TestClient_ServerList(t *testing.T) { - t.Parallel() + ci.Parallel(t) + client, cleanup := TestClient(t, func(c *config.Config) {}) defer cleanup() @@ -1090,7 +1095,8 @@ func TestClient_ServerList(t *testing.T) { } func TestClient_UpdateNodeFromDevicesAccumulates(t *testing.T) { - t.Parallel() + ci.Parallel(t) + client, cleanup := TestClient(t, func(c *config.Config) {}) defer cleanup() @@ -1188,7 +1194,7 @@ func TestClient_UpdateNodeFromDevicesAccumulates(t *testing.T) { // TestClient_UpdateNodeFromFingerprintKeepsConfig asserts manually configured // network interfaces take precedence over fingerprinted ones. func TestClient_UpdateNodeFromFingerprintKeepsConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS != "linux" { t.Skip("assertions assume linux platform") } @@ -1266,7 +1272,7 @@ func TestClient_UpdateNodeFromFingerprintKeepsConfig(t *testing.T) { // Support multiple IP addresses (ipv4 vs. 6, e.g.) on the configured network interface func Test_UpdateNodeFromFingerprintMultiIP(t *testing.T) { - t.Parallel() + ci.Parallel(t) var dev string switch runtime.GOOS { @@ -1304,6 +1310,8 @@ func Test_UpdateNodeFromFingerprintMultiIP(t *testing.T) { } func TestClient_computeAllocatedDeviceStats(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) c := &Client{logger: logger} @@ -1400,8 +1408,9 @@ func TestClient_computeAllocatedDeviceStats(t *testing.T) { } func TestClient_getAllocatedResources(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + client, cleanup := TestClient(t, nil) defer cleanup() @@ -1515,7 +1524,8 @@ func TestClient_getAllocatedResources(t *testing.T) { } func TestClient_updateNodeFromDriverUpdatesAll(t *testing.T) { - t.Parallel() + ci.Parallel(t) + client, cleanup := TestClient(t, nil) defer cleanup() @@ -1598,7 +1608,7 @@ func TestClient_updateNodeFromDriverUpdatesAll(t *testing.T) { // COMPAT(0.12): remove once upgrading from 0.9.5 is no longer supported func TestClient_hasLocalState(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) c, cleanup := TestClient(t, nil) defer cleanup() @@ -1638,7 +1648,7 @@ func TestClient_hasLocalState(t *testing.T) { } func Test_verifiedTasks(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) // produce a result and check against expected tasks and/or error output diff --git a/client/config/config_test.go b/client/config/config_test.go index bef9995c6..88f5bd1b7 100644 --- a/client/config/config_test.go +++ b/client/config/config_test.go @@ -5,11 +5,14 @@ import ( "time" "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/stretchr/testify/require" ) func TestConfigRead(t *testing.T) { + ci.Parallel(t) + config := Config{} actual := config.Read("cake") @@ -26,6 +29,8 @@ func TestConfigRead(t *testing.T) { } func TestConfigReadDefault(t *testing.T) { + ci.Parallel(t) + config := Config{} expected := "vanilla" @@ -50,6 +55,8 @@ func mockWaitConfig() *WaitConfig { } func TestWaitConfig_Copy(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Wait *WaitConfig @@ -95,6 +102,8 @@ func TestWaitConfig_Copy(t *testing.T) { } func TestWaitConfig_IsEmpty(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Wait *WaitConfig @@ -127,6 +136,8 @@ func TestWaitConfig_IsEmpty(t *testing.T) { } func TestWaitConfig_IsEqual(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Wait *WaitConfig @@ -170,6 +181,8 @@ func TestWaitConfig_IsEqual(t *testing.T) { } func TestWaitConfig_IsValid(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Retry *WaitConfig @@ -223,6 +236,8 @@ func TestWaitConfig_IsValid(t *testing.T) { } func TestWaitConfig_Merge(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Target *WaitConfig @@ -280,6 +295,8 @@ func TestWaitConfig_Merge(t *testing.T) { } func TestWaitConfig_ToConsulTemplate(t *testing.T) { + ci.Parallel(t) + expected := 
config.WaitConfig{ Enabled: helper.BoolToPtr(true), Min: helper.TimeToPtr(5 * time.Second), @@ -307,6 +324,8 @@ func mockRetryConfig() *RetryConfig { } } func TestRetryConfig_Copy(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Retry *RetryConfig @@ -382,6 +401,8 @@ func TestRetryConfig_Copy(t *testing.T) { } func TestRetryConfig_IsEmpty(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Retry *RetryConfig @@ -414,6 +435,8 @@ func TestRetryConfig_IsEmpty(t *testing.T) { } func TestRetryConfig_IsEqual(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Retry *RetryConfig @@ -502,6 +525,8 @@ func TestRetryConfig_IsEqual(t *testing.T) { } func TestRetryConfig_IsValid(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Retry *RetryConfig @@ -570,6 +595,8 @@ func TestRetryConfig_IsValid(t *testing.T) { } func TestRetryConfig_Merge(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Target *RetryConfig @@ -645,6 +672,8 @@ func TestRetryConfig_Merge(t *testing.T) { } func TestRetryConfig_ToConsulTemplate(t *testing.T) { + ci.Parallel(t) + expected := config.RetryConfig{ Enabled: helper.BoolToPtr(true), Attempts: helper.IntToPtr(5), diff --git a/client/consul/identities_test.go b/client/consul/identities_test.go index 0ac7ac275..b41f3520e 100644 --- a/client/consul/identities_test.go +++ b/client/consul/identities_test.go @@ -4,12 +4,15 @@ import ( "errors" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) func TestSI_DeriveTokens(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) dFunc := func(alloc *structs.Allocation, taskNames []string) (map[string]string, error) { return map[string]string{"a": "b"}, nil @@ -21,6 +24,8 @@ func TestSI_DeriveTokens(t *testing.T) { } func TestSI_DeriveTokens_error(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) 
dFunc := func(alloc *structs.Allocation, taskNames []string) (map[string]string, error) { return nil, errors.New("some failure") diff --git a/client/csi_endpoint_test.go b/client/csi_endpoint_test.go index 5e8382eb9..7b6df1534 100644 --- a/client/csi_endpoint_test.go +++ b/client/csi_endpoint_test.go @@ -4,6 +4,7 @@ import ( "errors" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/structs" nstructs "github.com/hashicorp/nomad/nomad/structs" @@ -25,7 +26,7 @@ var fakeNodePlugin = &dynamicplugins.PluginInfo{ } func TestCSIController_AttachVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -172,7 +173,7 @@ func TestCSIController_AttachVolume(t *testing.T) { } func TestCSIController_ValidateVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -275,7 +276,7 @@ func TestCSIController_ValidateVolume(t *testing.T) { } func TestCSIController_DetachVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -358,7 +359,7 @@ func TestCSIController_DetachVolume(t *testing.T) { } func TestCSIController_CreateVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -460,7 +461,7 @@ func TestCSIController_CreateVolume(t *testing.T) { } func TestCSIController_DeleteVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -524,7 +525,7 @@ func TestCSIController_DeleteVolume(t *testing.T) { } func TestCSIController_ListVolumes(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -632,7 +633,7 @@ func TestCSIController_ListVolumes(t *testing.T) { } } func TestCSIController_CreateSnapshot(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -725,7 +726,7 @@ func TestCSIController_CreateSnapshot(t *testing.T) { } func TestCSIController_DeleteSnapshot(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -789,7 +790,7 @@ func TestCSIController_DeleteSnapshot(t *testing.T) { } func TestCSIController_ListSnapshots(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -893,7 +894,7 @@ func TestCSIController_ListSnapshots(t *testing.T) { } func TestCSINode_DetachVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string diff --git a/client/devicemanager/manager_test.go b/client/devicemanager/manager_test.go index 2cc78da5c..c3da419e2 100644 --- a/client/devicemanager/manager_test.go +++ b/client/devicemanager/manager_test.go @@ -9,6 +9,7 @@ import ( log "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/state" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/pluginutils/loader" @@ -234,7 +235,7 @@ func nvidiaAndIntelDefaultPlugins(catalog *loader.MockCatalog) { // Test collecting statistics from all devices func TestManager_AllStats(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) config, _, catalog := baseTestConfig(t) @@ -283,7 +284,7 @@ func TestManager_AllStats(t *testing.T) { // Test collecting statistics from a particular device func TestManager_DeviceStats(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) config, _, catalog := baseTestConfig(t) @@ -330,7 +331,7 @@ func TestManager_DeviceStats(t *testing.T) { // Test reserving a particular device func TestManager_Reserve(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) config, _, catalog := baseTestConfig(t) @@ -428,7 +429,7 @@ func TestManager_Reserve(t *testing.T) { // Test that shutdown shutsdown the plugins func TestManager_Shutdown(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) config, _, catalog := baseTestConfig(t) @@ -455,7 +456,7 @@ func TestManager_Shutdown(t *testing.T) { // Test 
that startup shutsdown previously launched plugins func TestManager_Run_ShutdownOld(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) config, _, catalog := baseTestConfig(t) diff --git a/client/driver_manager_test.go b/client/driver_manager_test.go index 8514749dd..8a930b75e 100644 --- a/client/driver_manager_test.go +++ b/client/driver_manager_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" "github.com/hashicorp/nomad/helper/pluginutils/catalog" @@ -16,7 +17,7 @@ import ( // TestDriverManager_Fingerprint_Run asserts that node is populated with // driver fingerprints func TestDriverManager_Fingerprint_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) testClient, cleanup := TestClient(t, nil) defer cleanup() @@ -54,7 +55,7 @@ func TestDriverManager_Fingerprint_Run(t *testing.T) { // TestDriverManager_Fingerprint_Run asserts that node is populated with // driver fingerprints and it's updated periodically func TestDriverManager_Fingerprint_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) testClient, cleanup := TestClient(t, func(c *config.Config) { pluginConfig := []*nconfig.PluginConfig{ @@ -124,7 +125,7 @@ func TestDriverManager_Fingerprint_Periodic(t *testing.T) { // TestDriverManager_NodeAttributes_Run asserts that node attributes are populated // in addition to node.Drivers until we fully deprecate it func TestDriverManager_NodeAttributes_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) testClient, cleanup := TestClient(t, func(c *config.Config) { c.Options = map[string]string{ diff --git a/client/dynamicplugins/registry_test.go b/client/dynamicplugins/registry_test.go index a820a675f..c55af5c5a 100644 --- a/client/dynamicplugins/registry_test.go +++ b/client/dynamicplugins/registry_test.go @@ -7,11 +7,13 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" 
"github.com/stretchr/testify/require" ) func TestPluginEventBroadcaster_SendsMessagesToAllClients(t *testing.T) { - t.Parallel() + ci.Parallel(t) + b := newPluginEventBroadcaster() defer close(b.stopCh) var rcv1, rcv2 bool @@ -37,7 +39,7 @@ func TestPluginEventBroadcaster_SendsMessagesToAllClients(t *testing.T) { } func TestPluginEventBroadcaster_UnsubscribeWorks(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := newPluginEventBroadcaster() defer close(b.stopCh) @@ -66,7 +68,8 @@ func TestPluginEventBroadcaster_UnsubscribeWorks(t *testing.T) { } func TestDynamicRegistry_RegisterPlugin_SendsUpdateEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) + r := NewRegistry(nil, nil) ctx, cancelFn := context.WithCancel(context.Background()) @@ -104,7 +107,8 @@ func TestDynamicRegistry_RegisterPlugin_SendsUpdateEvents(t *testing.T) { } func TestDynamicRegistry_DeregisterPlugin_SendsUpdateEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) + r := NewRegistry(nil, nil) ctx, cancelFn := context.WithCancel(context.Background()) @@ -147,6 +151,8 @@ func TestDynamicRegistry_DeregisterPlugin_SendsUpdateEvents(t *testing.T) { } func TestDynamicRegistry_DispensePlugin_Works(t *testing.T) { + ci.Parallel(t) + dispenseFn := func(i *PluginInfo) (interface{}, error) { return struct{}{}, nil } @@ -174,7 +180,8 @@ func TestDynamicRegistry_DispensePlugin_Works(t *testing.T) { } func TestDynamicRegistry_IsolatePluginTypes(t *testing.T) { - t.Parallel() + ci.Parallel(t) + r := NewRegistry(nil, nil) err := r.RegisterPlugin(&PluginInfo{ @@ -200,7 +207,8 @@ func TestDynamicRegistry_IsolatePluginTypes(t *testing.T) { } func TestDynamicRegistry_StateStore(t *testing.T) { - t.Parallel() + ci.Parallel(t) + dispenseFn := func(i *PluginInfo) (interface{}, error) { return i, nil } @@ -226,8 +234,8 @@ func TestDynamicRegistry_StateStore(t *testing.T) { } func TestDynamicRegistry_ConcurrentAllocs(t *testing.T) { + ci.Parallel(t) - t.Parallel() dispenseFn := func(i *PluginInfo) (interface{}, 
error) { return i, nil } diff --git a/client/fingerprint/arch_test.go b/client/fingerprint/arch_test.go index c5faa2fff..9861b95a7 100644 --- a/client/fingerprint/arch_test.go +++ b/client/fingerprint/arch_test.go @@ -3,12 +3,15 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestArchFingerprint(t *testing.T) { + ci.Parallel(t) + f := NewArchFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), diff --git a/client/fingerprint/bridge_linux_test.go b/client/fingerprint/bridge_linux_test.go index 8917598e2..739ef73f4 100644 --- a/client/fingerprint/bridge_linux_test.go +++ b/client/fingerprint/bridge_linux_test.go @@ -8,11 +8,14 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/stretchr/testify/require" ) func TestBridgeFingerprint_detect(t *testing.T) { + ci.Parallel(t) + f := &BridgeFingerprint{logger: testlog.HCLogger(t)} require.NoError(t, f.detect("ip_tables")) @@ -73,6 +76,8 @@ kernel/net/bridge/bridgeRHEL.ko.xz: kernel/net/802/stp.ko.xz kernel/net/llc/llc. 
) func TestBridgeFingerprint_search(t *testing.T) { + ci.Parallel(t) + f := &BridgeFingerprint{logger: testlog.HCLogger(t)} t.Run("dynamic loaded module", func(t *testing.T) { diff --git a/client/fingerprint/cgroup_test.go b/client/fingerprint/cgroup_test.go index d357c1e17..11119b1d0 100644 --- a/client/fingerprint/cgroup_test.go +++ b/client/fingerprint/cgroup_test.go @@ -7,6 +7,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -41,6 +42,8 @@ func (m *MountPointDetectorEmptyMountPoint) MountPoint() (string, error) { } func TestCGroupFingerprint(t *testing.T) { + ci.Parallel(t) + { f := &CGroupFingerprint{ logger: testlog.HCLogger(t), diff --git a/client/fingerprint/cni_test.go b/client/fingerprint/cni_test.go index 3fd125b7d..90186da0e 100644 --- a/client/fingerprint/cni_test.go +++ b/client/fingerprint/cni_test.go @@ -3,6 +3,7 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -13,6 +14,8 @@ import ( var _ ReloadableFingerprint = &CNIFingerprint{} func TestCNIFingerprint(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string req *FingerprintRequest diff --git a/client/fingerprint/consul_test.go b/client/fingerprint/consul_test.go index fb7ff3ca8..4b3887478 100644 --- a/client/fingerprint/consul_test.go +++ b/client/fingerprint/consul_test.go @@ -8,6 +8,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" agentconsul "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper/testlog" @@ -47,7 +48,7 @@ func newConsulFingerPrint(t *testing.T) *ConsulFingerprint { } func TestConsulFingerprint_server(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := 
newConsulFingerPrint(t) @@ -83,7 +84,7 @@ func TestConsulFingerprint_server(t *testing.T) { } func TestConsulFingerprint_version(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -119,7 +120,7 @@ func TestConsulFingerprint_version(t *testing.T) { } func TestConsulFingerprint_sku(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -171,7 +172,7 @@ func TestConsulFingerprint_sku(t *testing.T) { } func TestConsulFingerprint_revision(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -199,7 +200,7 @@ func TestConsulFingerprint_revision(t *testing.T) { } func TestConsulFingerprint_dc(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -227,7 +228,7 @@ func TestConsulFingerprint_dc(t *testing.T) { } func TestConsulFingerprint_segment(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -262,7 +263,7 @@ func TestConsulFingerprint_segment(t *testing.T) { } func TestConsulFingerprint_connect(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -291,7 +292,7 @@ func TestConsulFingerprint_connect(t *testing.T) { } func TestConsulFingerprint_grpc(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -321,7 +322,7 @@ func TestConsulFingerprint_grpc(t *testing.T) { } func TestConsulFingerprint_namespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -362,6 +363,8 @@ func TestConsulFingerprint_namespaces(t *testing.T) { } func TestConsulFingerprint_Fingerprint_oss(t *testing.T) { + ci.Parallel(t) + cf := newConsulFingerPrint(t) ts, cfg := fakeConsul(fakeConsulPayload(t, "test_fixtures/consul/agent_self_oss.json")) @@ -449,6 +452,8 @@ func TestConsulFingerprint_Fingerprint_oss(t *testing.T) { } func TestConsulFingerprint_Fingerprint_ent(t *testing.T) { + ci.Parallel(t) + cf := newConsulFingerPrint(t) ts, cfg := fakeConsul(fakeConsulPayload(t, 
"test_fixtures/consul/agent_self_ent.json")) diff --git a/client/fingerprint/cpu_test.go b/client/fingerprint/cpu_test.go index b6f4fdaca..5d2e23e16 100644 --- a/client/fingerprint/cpu_test.go +++ b/client/fingerprint/cpu_test.go @@ -4,12 +4,15 @@ import ( "strconv" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestCPUFingerprint(t *testing.T) { + ci.Parallel(t) + f := NewCPUFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), @@ -58,6 +61,8 @@ func TestCPUFingerprint(t *testing.T) { // TestCPUFingerprint_OverrideCompute asserts that setting cpu_total_compute in // the client config overrides the detected CPU freq (if any). func TestCPUFingerprint_OverrideCompute(t *testing.T) { + ci.Parallel(t) + f := NewCPUFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), diff --git a/client/fingerprint/env_aws_test.go b/client/fingerprint/env_aws_test.go index fb3f0510b..caca5f4ba 100644 --- a/client/fingerprint/env_aws_test.go +++ b/client/fingerprint/env_aws_test.go @@ -6,6 +6,7 @@ import ( "net/http/httptest" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -13,6 +14,8 @@ import ( ) func TestEnvAWSFingerprint_nonAws(t *testing.T) { + ci.Parallel(t) + f := NewEnvAWSFingerprint(testlog.HCLogger(t)) f.(*EnvAWSFingerprint).endpoint = "http://127.0.0.1/latest" @@ -28,6 +31,8 @@ func TestEnvAWSFingerprint_nonAws(t *testing.T) { } func TestEnvAWSFingerprint_aws(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, awsStubs) defer cleanup() @@ -69,6 +74,8 @@ func TestEnvAWSFingerprint_aws(t *testing.T) { } func TestNetworkFingerprint_AWS(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, 
awsStubs) defer cleanup() @@ -97,6 +104,8 @@ func TestNetworkFingerprint_AWS(t *testing.T) { } func TestNetworkFingerprint_AWS_network(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, awsStubs) defer cleanup() @@ -158,6 +167,8 @@ func TestNetworkFingerprint_AWS_network(t *testing.T) { } func TestNetworkFingerprint_AWS_NoNetwork(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, noNetworkAWSStubs) defer cleanup() @@ -181,6 +192,8 @@ func TestNetworkFingerprint_AWS_NoNetwork(t *testing.T) { } func TestNetworkFingerprint_AWS_IncompleteImitation(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, incompleteAWSImitationStubs) defer cleanup() @@ -203,6 +216,8 @@ func TestNetworkFingerprint_AWS_IncompleteImitation(t *testing.T) { } func TestCPUFingerprint_AWS_InstanceFound(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, awsStubs) defer cleanup() @@ -224,6 +239,8 @@ func TestCPUFingerprint_AWS_InstanceFound(t *testing.T) { } func TestCPUFingerprint_AWS_OverrideCompute(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, awsStubs) defer cleanup() @@ -247,6 +264,8 @@ func TestCPUFingerprint_AWS_OverrideCompute(t *testing.T) { } func TestCPUFingerprint_AWS_InstanceNotFound(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, unknownInstanceType) defer cleanup() diff --git a/client/fingerprint/env_azure_test.go b/client/fingerprint/env_azure_test.go index 9bd0c9e02..91afb3229 100644 --- a/client/fingerprint/env_azure_test.go +++ b/client/fingerprint/env_azure_test.go @@ -9,12 +9,15 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestAzureFingerprint_nonAzure(t *testing.T) { + ci.Parallel(t) + os.Setenv("AZURE_ENV_URL", 
"http://127.0.0.1/metadata/instance/") f := NewEnvAzureFingerprint(testlog.HCLogger(t)) node := &structs.Node{ @@ -211,9 +214,13 @@ const AZURE_routes = ` ` func TestFingerprint_AzureWithExternalIp(t *testing.T) { + ci.Parallel(t) + testFingerprint_Azure(t, true) } func TestFingerprint_AzureWithoutExternalIp(t *testing.T) { + ci.Parallel(t) + testFingerprint_Azure(t, false) } diff --git a/client/fingerprint/env_digitalocean_test.go b/client/fingerprint/env_digitalocean_test.go index 8b0ccca5f..c70a08a97 100644 --- a/client/fingerprint/env_digitalocean_test.go +++ b/client/fingerprint/env_digitalocean_test.go @@ -9,6 +9,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -16,6 +17,8 @@ import ( ) func TestDigitalOceanFingerprint_nonDigitalOcean(t *testing.T) { + ci.Parallel(t) + os.Setenv("DO_ENV_URL", "http://127.0.0.1/metadata/v1/") f := NewEnvDigitalOceanFingerprint(testlog.HCLogger(t)) node := &structs.Node{ @@ -39,6 +42,8 @@ func TestDigitalOceanFingerprint_nonDigitalOcean(t *testing.T) { } func TestFingerprint_DigitalOcean(t *testing.T) { + ci.Parallel(t) + node := &structs.Node{ Attributes: make(map[string]string), } diff --git a/client/fingerprint/env_gce_test.go b/client/fingerprint/env_gce_test.go index 03f1f60f9..653d9d258 100644 --- a/client/fingerprint/env_gce_test.go +++ b/client/fingerprint/env_gce_test.go @@ -9,12 +9,15 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestGCEFingerprint_nonGCE(t *testing.T) { + ci.Parallel(t) + os.Setenv("GCE_ENV_URL", "http://127.0.0.1/computeMetadata/v1/instance/") f := NewEnvGCEFingerprint(testlog.HCLogger(t)) node := &structs.Node{ @@ -207,9 +210,13 @@ const GCE_routes = ` ` func TestFingerprint_GCEWithExternalIp(t 
*testing.T) { + ci.Parallel(t) + testFingerprint_GCE(t, true) } func TestFingerprint_GCEWithoutExternalIp(t *testing.T) { + ci.Parallel(t) + testFingerprint_GCE(t, false) } diff --git a/client/fingerprint/host_test.go b/client/fingerprint/host_test.go index 02f7b5d0e..5fd9dbf85 100644 --- a/client/fingerprint/host_test.go +++ b/client/fingerprint/host_test.go @@ -3,12 +3,15 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestHostFingerprint(t *testing.T) { + ci.Parallel(t) + f := NewHostFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), diff --git a/client/fingerprint/memory_test.go b/client/fingerprint/memory_test.go index 8635b55fd..e93599e9b 100644 --- a/client/fingerprint/memory_test.go +++ b/client/fingerprint/memory_test.go @@ -3,6 +3,7 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -11,6 +12,8 @@ import ( ) func TestMemoryFingerprint(t *testing.T) { + ci.Parallel(t) + require := require.New(t) f := NewMemoryFingerprint(testlog.HCLogger(t)) @@ -31,6 +34,8 @@ func TestMemoryFingerprint(t *testing.T) { } func TestMemoryFingerprint_Override(t *testing.T) { + ci.Parallel(t) + f := NewMemoryFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), diff --git a/client/fingerprint/network_test.go b/client/fingerprint/network_test.go index 7628b814b..08332ba09 100644 --- a/client/fingerprint/network_test.go +++ b/client/fingerprint/network_test.go @@ -7,6 +7,7 @@ import ( "sort" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -182,6 +183,8 @@ 
func (n *NetworkInterfaceDetectorMultipleInterfaces) Addrs(intf *net.Interface) } func TestNetworkFingerprint_basic(t *testing.T) { + ci.Parallel(t) + if v := os.Getenv(skipOnlineTestsEnvVar); v != "" { t.Skipf("Environment variable %+q not empty, skipping test", skipOnlineTestsEnvVar) } @@ -237,6 +240,8 @@ func TestNetworkFingerprint_basic(t *testing.T) { } func TestNetworkFingerprint_default_device_absent(t *testing.T) { + ci.Parallel(t) + f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorOnlyLo{}} node := &structs.Node{ Attributes: make(map[string]string), @@ -260,6 +265,8 @@ func TestNetworkFingerprint_default_device_absent(t *testing.T) { } func TestNetworkFingerPrint_default_device(t *testing.T) { + ci.Parallel(t) + f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorOnlyLo{}} node := &structs.Node{ Attributes: make(map[string]string), @@ -311,6 +318,8 @@ func TestNetworkFingerPrint_default_device(t *testing.T) { } func TestNetworkFingerPrint_LinkLocal_Allowed(t *testing.T) { + ci.Parallel(t) + f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}} node := &structs.Node{ Attributes: make(map[string]string), @@ -358,6 +367,8 @@ func TestNetworkFingerPrint_LinkLocal_Allowed(t *testing.T) { } func TestNetworkFingerPrint_LinkLocal_Allowed_MixedIntf(t *testing.T) { + ci.Parallel(t) + f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}} node := &structs.Node{ Attributes: make(map[string]string), @@ -412,6 +423,8 @@ func TestNetworkFingerPrint_LinkLocal_Allowed_MixedIntf(t *testing.T) { } func TestNetworkFingerPrint_LinkLocal_Disallowed(t *testing.T) { + ci.Parallel(t) + f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}} node := &structs.Node{ Attributes: 
make(map[string]string), @@ -441,6 +454,8 @@ func TestNetworkFingerPrint_LinkLocal_Disallowed(t *testing.T) { } func TestNetworkFingerPrint_MultipleAliases(t *testing.T) { + ci.Parallel(t) + f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}} node := &structs.Node{ Attributes: make(map[string]string), @@ -488,6 +503,8 @@ func TestNetworkFingerPrint_MultipleAliases(t *testing.T) { } func TestNetworkFingerPrint_HostNetworkReservedPorts(t *testing.T) { + ci.Parallel(t) + testCases := []struct { name string hostNetworks map[string]*structs.ClientHostNetworkConfig diff --git a/client/fingerprint/nomad_test.go b/client/fingerprint/nomad_test.go index c2dba204f..2e349ae5d 100644 --- a/client/fingerprint/nomad_test.go +++ b/client/fingerprint/nomad_test.go @@ -3,6 +3,7 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -10,6 +11,8 @@ import ( ) func TestNomadFingerprint(t *testing.T) { + ci.Parallel(t) + f := NewNomadFingerprint(testlog.HCLogger(t)) v := "foo" diff --git a/client/fingerprint/signal_test.go b/client/fingerprint/signal_test.go index d88c4a85a..4cdc3b01d 100644 --- a/client/fingerprint/signal_test.go +++ b/client/fingerprint/signal_test.go @@ -3,11 +3,14 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestSignalFingerprint(t *testing.T) { + ci.Parallel(t) + fp := NewSignalFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), diff --git a/client/fingerprint/storage_test.go b/client/fingerprint/storage_test.go index 3227d4d2b..1c00fcbc4 100644 --- a/client/fingerprint/storage_test.go +++ b/client/fingerprint/storage_test.go @@ -4,11 +4,14 @@ import ( "strconv" "testing" + 
"github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestStorageFingerprint(t *testing.T) { + ci.Parallel(t) + fp := NewStorageFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), diff --git a/client/fingerprint/vault_test.go b/client/fingerprint/vault_test.go index 2056dda4d..2891d5057 100644 --- a/client/fingerprint/vault_test.go +++ b/client/fingerprint/vault_test.go @@ -3,6 +3,7 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -10,6 +11,8 @@ import ( ) func TestVaultFingerprint(t *testing.T) { + ci.Parallel(t) + tv := testutil.NewTestVault(t) defer tv.Stop() diff --git a/client/fingerprint_manager_test.go b/client/fingerprint_manager_test.go index a4ba0184a..576b6514b 100644 --- a/client/fingerprint_manager_test.go +++ b/client/fingerprint_manager_test.go @@ -3,13 +3,15 @@ package client import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/stretchr/testify/require" ) func TestFingerprintManager_Run_ResourcesFingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + testClient, cleanup := TestClient(t, nil) defer cleanup() @@ -33,7 +35,7 @@ func TestFingerprintManager_Run_ResourcesFingerprint(t *testing.T) { } func TestFimgerprintManager_Run_InWhitelist(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) testClient, cleanup := TestClient(t, func(c *config.Config) { @@ -62,12 +64,13 @@ func TestFimgerprintManager_Run_InWhitelist(t *testing.T) { } func TestFingerprintManager_Run_InDenylist(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + testClient, cleanup := TestClient(t, func(c *config.Config) { c.Options = map[string]string{ "fingerprint.allowlist": " 
arch,memory,foo,bar ", - "fingerprint.denylist": " cpu ", + "fingerprint.denylist": " cpu ", } }) defer cleanup() @@ -91,13 +94,13 @@ func TestFingerprintManager_Run_InDenylist(t *testing.T) { } func TestFingerprintManager_Run_Combination(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) testClient, cleanup := TestClient(t, func(c *config.Config) { c.Options = map[string]string{ "fingerprint.allowlist": " arch,cpu,memory,foo,bar ", - "fingerprint.denylist": " memory,host ", + "fingerprint.denylist": " memory,host ", } }) defer cleanup() @@ -123,7 +126,7 @@ func TestFingerprintManager_Run_Combination(t *testing.T) { } func TestFingerprintManager_Run_CombinationLegacyNames(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) testClient, cleanup := TestClient(t, func(c *config.Config) { diff --git a/client/fs_endpoint_test.go b/client/fs_endpoint_test.go index 76fad1847..8df47b90d 100644 --- a/client/fs_endpoint_test.go +++ b/client/fs_endpoint_test.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/config" sframer "github.com/hashicorp/nomad/client/lib/streamframer" @@ -50,7 +51,7 @@ func (n nopWriteCloser) Close() error { } func TestFS_Stat_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a client @@ -71,7 +72,7 @@ func TestFS_Stat_NoAlloc(t *testing.T) { } func TestFS_Stat(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -108,7 +109,7 @@ func TestFS_Stat(t *testing.T) { } func TestFS_Stat_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := nomad.TestACLServer(t, nil) @@ -183,7 +184,7 @@ func TestFS_Stat_ACL(t *testing.T) { } func TestFS_List_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) 
// Start a client @@ -204,7 +205,7 @@ func TestFS_List_NoAlloc(t *testing.T) { } func TestFS_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -241,7 +242,7 @@ func TestFS_List(t *testing.T) { } func TestFS_List_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := nomad.TestACLServer(t, nil) @@ -316,7 +317,7 @@ func TestFS_List_ACL(t *testing.T) { } func TestFS_Stream_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a client @@ -391,7 +392,7 @@ OUTER: } func TestFS_Stream_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := nomad.TestACLServer(t, nil) @@ -519,7 +520,7 @@ func TestFS_Stream_ACL(t *testing.T) { } func TestFS_Stream(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -635,7 +636,7 @@ func (r *ReadWriteCloseChecker) Close() error { } func TestFS_Stream_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -732,7 +733,7 @@ OUTER: } func TestFS_Stream_Limit(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -826,7 +827,7 @@ OUTER: } func TestFS_Logs_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a client @@ -904,7 +905,7 @@ OUTER: // TestFS_Logs_TaskPending asserts that trying to stream logs for tasks which // have not started returns a 404 error. 
func TestFS_Logs_TaskPending(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1019,7 +1020,7 @@ func TestFS_Logs_TaskPending(t *testing.T) { } func TestFS_Logs_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server @@ -1150,7 +1151,7 @@ func TestFS_Logs_ACL(t *testing.T) { } func TestFS_Logs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1251,7 +1252,7 @@ OUTER: } func TestFS_Logs_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1555,7 +1556,8 @@ func TestFS_findClosest(t *testing.T) { } func TestFS_streamFile_NoFile(t *testing.T) { - t.Parallel() + ci.Parallel(t) + c, cleanup := TestClient(t, nil) defer cleanup() @@ -1578,7 +1580,7 @@ func TestFS_streamFile_NoFile(t *testing.T) { } func TestFS_streamFile_Modify(t *testing.T) { - t.Parallel() + ci.Parallel(t) c, cleanup := TestClient(t, nil) defer cleanup() @@ -1649,7 +1651,8 @@ func TestFS_streamFile_Modify(t *testing.T) { } func TestFS_streamFile_Truncate(t *testing.T) { - t.Parallel() + ci.Parallel(t) + c, cleanup := TestClient(t, nil) defer cleanup() @@ -1752,10 +1755,10 @@ func TestFS_streamFile_Truncate(t *testing.T) { } func TestFS_streamImpl_Delete(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("Windows does not allow us to delete a file while it is open") } - t.Parallel() c, cleanup := TestClient(t, nil) defer cleanup() @@ -1828,7 +1831,7 @@ func TestFS_streamImpl_Delete(t *testing.T) { } func TestFS_logsImpl_NoFollow(t *testing.T) { - t.Parallel() + ci.Parallel(t) c, cleanup := TestClient(t, nil) defer cleanup() @@ -1897,7 +1900,7 @@ func TestFS_logsImpl_NoFollow(t *testing.T) { } func TestFS_logsImpl_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) c, cleanup := TestClient(t, nil) defer cleanup() diff --git a/client/gc_test.go b/client/gc_test.go index 
fd0061f47..83a25c50a 100644 --- a/client/gc_test.go +++ b/client/gc_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/stats" @@ -37,7 +38,8 @@ func exitAllocRunner(runners ...AllocRunner) { } func TestIndexedGCAllocPQ(t *testing.T) { - t.Parallel() + ci.Parallel(t) + pq := NewIndexedGCAllocPQ() ar1, cleanup1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc()) @@ -122,7 +124,8 @@ func (m *MockStatsCollector) Stats() *stats.HostStats { } func TestAllocGarbageCollector_MarkForCollection(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig()) @@ -138,7 +141,8 @@ func TestAllocGarbageCollector_MarkForCollection(t *testing.T) { } func TestAllocGarbageCollector_Collect(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig()) @@ -164,7 +168,8 @@ func TestAllocGarbageCollector_Collect(t *testing.T) { } func TestAllocGarbageCollector_CollectAll(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig()) @@ -184,7 +189,8 @@ func TestAllocGarbageCollector_CollectAll(t *testing.T) { } func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) statsCollector := &MockStatsCollector{} conf := gcConfig() @@ -226,7 +232,8 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) } func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) statsCollector := &MockStatsCollector{} 
conf := gcConfig() @@ -269,7 +276,8 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) { } func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) statsCollector := &MockStatsCollector{} conf := gcConfig() @@ -308,7 +316,8 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) { } func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) statsCollector := &MockStatsCollector{} conf := gcConfig() @@ -348,6 +357,8 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) // TestAllocGarbageCollector_MakeRoomFor_MaxAllocs asserts that when making room for new // allocs, terminal allocs are GC'd until old_allocs + new_allocs <= limit func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) { + ci.Parallel(t) + const maxAllocs = 6 require := require.New(t) @@ -494,7 +505,8 @@ func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) { } func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) statsCollector := &MockStatsCollector{} conf := gcConfig() @@ -533,7 +545,8 @@ func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) { } func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) statsCollector := &MockStatsCollector{} conf := gcConfig() diff --git a/client/heartbeatstop_test.go b/client/heartbeatstop_test.go index 86638cb01..55c54c2bf 100644 --- a/client/heartbeatstop_test.go +++ b/client/heartbeatstop_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" @@ -12,7 +13,7 @@ import ( ) func 
TestHeartbeatStop_allocHook(t *testing.T) { - t.Parallel() + ci.Parallel(t) server, _, cleanupS1 := testServer(t, nil) defer cleanupS1() diff --git a/client/logmon/logmon_test.go b/client/logmon/logmon_test.go index a3e62408b..d8481cb7e 100644 --- a/client/logmon/logmon_test.go +++ b/client/logmon/logmon_test.go @@ -9,6 +9,7 @@ import ( "runtime" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/lib/fifo" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" @@ -17,6 +18,8 @@ import ( ) func TestLogmon_Start_rotate(t *testing.T) { + ci.Parallel(t) + require := require.New(t) var stdoutFifoPath, stderrFifoPath string @@ -77,6 +80,8 @@ func TestLogmon_Start_rotate(t *testing.T) { // asserts that calling Start twice restarts the log rotator and that any logs // published while the listener was unavailable are received. func TestLogmon_Start_restart_flusheslogs(t *testing.T) { + ci.Parallel(t) + if runtime.GOOS == "windows" { t.Skip("windows does not support pushing data to a pipe with no servers") } @@ -184,6 +189,8 @@ func TestLogmon_Start_restart_flusheslogs(t *testing.T) { // asserts that calling Start twice restarts the log rotator func TestLogmon_Start_restart(t *testing.T) { + ci.Parallel(t) + require := require.New(t) var stdoutFifoPath, stderrFifoPath string @@ -280,7 +287,7 @@ func (panicWriter) Close() error { // TestLogmon_NewError asserts that newLogRotatorWrapper will return an error // if its unable to create the necessray files. 
func TestLogmon_NewError(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Pick a path that does not exist path := filepath.Join(uuid.Generate(), uuid.Generate(), uuid.Generate()) diff --git a/client/pluginmanager/csimanager/volume_test.go b/client/pluginmanager/csimanager/volume_test.go index af40227e3..94ef2cf06 100644 --- a/client/pluginmanager/csimanager/volume_test.go +++ b/client/pluginmanager/csimanager/volume_test.go @@ -8,6 +8,7 @@ import ( "runtime" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/mount" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" @@ -39,7 +40,7 @@ func TestVolumeManager_ensureStagingDir(t *testing.T) { if !checkMountSupport() { t.Skip("mount point detection not supported for this platform") } - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -136,7 +137,7 @@ func TestVolumeManager_stageVolume(t *testing.T) { if !checkMountSupport() { t.Skip("mount point detection not supported for this platform") } - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -217,7 +218,7 @@ func TestVolumeManager_unstageVolume(t *testing.T) { if !checkMountSupport() { t.Skip("mount point detection not supported for this platform") } - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -280,7 +281,7 @@ func TestVolumeManager_publishVolume(t *testing.T) { t.Skip("mount point detection not supported for this platform") } - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -406,7 +407,7 @@ func TestVolumeManager_unpublishVolume(t *testing.T) { if !checkMountSupport() { t.Skip("mount point detection not supported for this platform") } - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -471,7 +472,7 @@ func TestVolumeManager_MountVolumeEvents(t *testing.T) { if !checkMountSupport() { t.Skip("mount point detection not supported for this platform") } - t.Parallel() + ci.Parallel(t) tmpPath := tmpDir(t) defer 
os.RemoveAll(tmpPath) diff --git a/client/pluginmanager/drivermanager/manager_test.go b/client/pluginmanager/drivermanager/manager_test.go index 1a4773343..e3f7798a4 100644 --- a/client/pluginmanager/drivermanager/manager_test.go +++ b/client/pluginmanager/drivermanager/manager_test.go @@ -9,6 +9,7 @@ import ( log "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/pluginmanager" "github.com/hashicorp/nomad/client/state" "github.com/hashicorp/nomad/helper/pluginutils/loader" @@ -101,7 +102,7 @@ func noopUpdater(string, *structs.DriverInfo) {} func noopEventHandlerFactory(string, string) EventHandler { return nil } func TestManager_Fingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fpChan, _, mgr := testSetup(t) var infos []*structs.DriverInfo @@ -168,7 +169,7 @@ func TestManager_Fingerprint(t *testing.T) { } func TestManager_TaskEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fpChan, evChan, mgr := testSetup(t) go mgr.Run() @@ -199,7 +200,7 @@ func TestManager_TaskEvents(t *testing.T) { } func TestManager_Run_AllowedDrivers(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fpChan, _, mgr := testSetup(t) mgr.allowedDrivers = map[string]struct{}{"foo": {}} @@ -219,7 +220,7 @@ func TestManager_Run_AllowedDrivers(t *testing.T) { } func TestManager_Run_BlockedDrivers(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fpChan, _, mgr := testSetup(t) mgr.blockedDrivers = map[string]struct{}{"mock": {}} @@ -239,7 +240,7 @@ func TestManager_Run_BlockedDrivers(t *testing.T) { } func TestManager_Run_AllowedBlockedDrivers_Combined(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) drvs := map[string]drivers.DriverPlugin{} fpChs := map[string]chan *drivers.Fingerprint{} diff --git a/client/pluginmanager/group_test.go b/client/pluginmanager/group_test.go 
index 07448d542..fc91824e8 100644 --- a/client/pluginmanager/group_test.go +++ b/client/pluginmanager/group_test.go @@ -6,12 +6,13 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/stretchr/testify/require" ) func TestPluginGroup_RegisterAndRun(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) var hasRun bool @@ -29,7 +30,7 @@ func TestPluginGroup_RegisterAndRun(t *testing.T) { } func TestPluginGroup_Shutdown(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) var stack []int @@ -66,7 +67,7 @@ func TestPluginGroup_Shutdown(t *testing.T) { } func TestPluginGroup_WaitForFirstFingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) managerCh := make(chan struct{}) @@ -95,7 +96,7 @@ func TestPluginGroup_WaitForFirstFingerprint(t *testing.T) { } func TestPluginGroup_WaitForFirstFingerprint_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) managerCh := make(chan struct{}) diff --git a/client/rpc_test.go b/client/rpc_test.go index 2f8c13378..162e4d790 100644 --- a/client/rpc_test.go +++ b/client/rpc_test.go @@ -4,6 +4,7 @@ import ( "errors" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/nomad" "github.com/hashicorp/nomad/nomad/structs" @@ -13,7 +14,7 @@ import ( ) func TestRpc_streamingRpcConn_badEndpoint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := nomad.TestServer(t, nil) @@ -51,7 +52,7 @@ func TestRpc_streamingRpcConn_badEndpoint(t *testing.T) { } func TestRpc_streamingRpcConn_badEndpoint_TLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) const ( diff --git a/client/servers/manager_internal_test.go b/client/servers/manager_internal_test.go index f28074dea..17cf29b58 100644 --- a/client/servers/manager_internal_test.go +++ 
b/client/servers/manager_internal_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" ) @@ -50,6 +51,8 @@ func testManagerFailProb(t *testing.T, failPct float64) (m *Manager) { } func TestManagerInternal_cycleServer(t *testing.T) { + ci.Parallel(t) + server0 := &Server{Addr: &fauxAddr{"server1"}} server1 := &Server{Addr: &fauxAddr{"server2"}} server2 := &Server{Addr: &fauxAddr{"server3"}} @@ -81,6 +84,8 @@ func TestManagerInternal_cycleServer(t *testing.T) { } func TestManagerInternal_New(t *testing.T) { + ci.Parallel(t) + m := testManager(t) if m == nil { t.Fatalf("Manager nil") @@ -97,6 +102,8 @@ func TestManagerInternal_New(t *testing.T) { // func (l *serverList) refreshServerRebalanceTimer() { func TestManagerInternal_refreshServerRebalanceTimer(t *testing.T) { + ci.Parallel(t) + type clusterSizes struct { numNodes int32 numServers int diff --git a/client/servers/manager_test.go b/client/servers/manager_test.go index 1c13889a7..e219c3f4b 100644 --- a/client/servers/manager_test.go +++ b/client/servers/manager_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/servers" "github.com/hashicorp/nomad/helper/testlog" "github.com/stretchr/testify/require" @@ -47,6 +48,8 @@ func testManagerFailProb(t *testing.T, failPct float64) (m *servers.Manager) { } func TestServers_SetServers(t *testing.T) { + ci.Parallel(t) + require := require.New(t) m := testManager(t) var num int @@ -82,6 +85,8 @@ func TestServers_SetServers(t *testing.T) { } func TestServers_FindServer(t *testing.T) { + ci.Parallel(t) + m := testManager(t) if m.FindServer() != nil { @@ -126,6 +131,8 @@ func TestServers_FindServer(t *testing.T) { } func TestServers_New(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) shutdownCh := make(chan struct{}) m := servers.New(logger, shutdownCh, &fauxConnPool{}) @@ -135,6 +142,8 @@ func TestServers_New(t 
*testing.T) { } func TestServers_NotifyFailedServer(t *testing.T) { + ci.Parallel(t) + m := testManager(t) if m.NumServers() != 0 { @@ -194,6 +203,8 @@ func TestServers_NotifyFailedServer(t *testing.T) { } func TestServers_NumServers(t *testing.T) { + ci.Parallel(t) + m := testManager(t) var num int num = m.NumServers() @@ -210,6 +221,8 @@ func TestServers_NumServers(t *testing.T) { } func TestServers_RebalanceServers(t *testing.T) { + ci.Parallel(t) + const failPct = 0.5 m := testManagerFailProb(t, failPct) const maxServers = 100 diff --git a/client/state/db_test.go b/client/state/db_test.go index c4f92ce75..05081d14d 100644 --- a/client/state/db_test.go +++ b/client/state/db_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" trstate "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" dmstate "github.com/hashicorp/nomad/client/devicemanager/state" "github.com/hashicorp/nomad/client/dynamicplugins" @@ -62,7 +63,7 @@ func testDB(t *testing.T, f func(*testing.T, StateDB)) { // TestStateDB_Allocations asserts the behavior of GetAllAllocations, PutAllocation, and // DeleteAllocationBucket for all operational StateDB implementations. func TestStateDB_Allocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require := require.New(t) @@ -147,7 +148,7 @@ func ceilDiv(a, b int) int { // TestStateDB_Batch asserts the behavior of PutAllocation, PutNetworkStatus and // DeleteAllocationBucket in batch mode, for all operational StateDB implementations. func TestStateDB_Batch(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require := require.New(t) @@ -255,7 +256,7 @@ func TestStateDB_Batch(t *testing.T) { // TestStateDB_TaskState asserts the behavior of task state related StateDB // methods. 
func TestStateDB_TaskState(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require := require.New(t) @@ -307,7 +308,7 @@ func TestStateDB_TaskState(t *testing.T) { // TestStateDB_DeviceManager asserts the behavior of device manager state related StateDB // methods. func TestStateDB_DeviceManager(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require := require.New(t) @@ -332,7 +333,7 @@ func TestStateDB_DeviceManager(t *testing.T) { // TestStateDB_DriverManager asserts the behavior of device manager state related StateDB // methods. func TestStateDB_DriverManager(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require := require.New(t) @@ -357,7 +358,7 @@ func TestStateDB_DriverManager(t *testing.T) { // TestStateDB_DynamicRegistry asserts the behavior of dynamic registry state related StateDB // methods. func TestStateDB_DynamicRegistry(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require := require.New(t) @@ -382,7 +383,7 @@ func TestStateDB_DynamicRegistry(t *testing.T) { // TestStateDB_Upgrade asserts calling Upgrade on new databases always // succeeds. func TestStateDB_Upgrade(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require.NoError(t, db.Upgrade()) diff --git a/client/state/upgrade_int_test.go b/client/state/upgrade_int_test.go index 96df3fbad..4cc8bdc40 100644 --- a/client/state/upgrade_int_test.go +++ b/client/state/upgrade_int_test.go @@ -11,6 +11,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner" "github.com/hashicorp/nomad/client/allocwatcher" clientconfig "github.com/hashicorp/nomad/client/config" @@ -32,7 +33,7 @@ import ( // TestBoltStateDB_Upgrade_Ok asserts upgading an old state db does not error // during upgrade and restore. 
func TestBoltStateDB_UpgradeOld_Ok(t *testing.T) { - t.Parallel() + ci.Parallel(t) dbFromTestFile := func(t *testing.T, dir, fn string) *BoltStateDB { diff --git a/client/state/upgrade_test.go b/client/state/upgrade_test.go index 88cf6d112..5f248d787 100644 --- a/client/state/upgrade_test.go +++ b/client/state/upgrade_test.go @@ -7,6 +7,7 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/boltdd" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" @@ -32,7 +33,7 @@ func setupBoltDB(t *testing.T) (*bbolt.DB, func()) { // TestUpgrade_NeedsUpgrade_New asserts new state dbs do not need upgrading. func TestUpgrade_NeedsUpgrade_New(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Setting up a new StateDB should initialize it at the latest version. db, cleanup := setupBoltStateDB(t) @@ -47,7 +48,7 @@ func TestUpgrade_NeedsUpgrade_New(t *testing.T) { // TestUpgrade_NeedsUpgrade_Old asserts state dbs with just the alloctions // bucket *do* need upgrading. func TestUpgrade_NeedsUpgrade_Old(t *testing.T) { - t.Parallel() + ci.Parallel(t) db, cleanup := setupBoltDB(t) defer cleanup() @@ -77,7 +78,7 @@ func TestUpgrade_NeedsUpgrade_Old(t *testing.T) { // NeedsUpgrade if an invalid db version is found. This is a safety measure to // prevent invalid and unintentional upgrades when downgrading Nomad. func TestUpgrade_NeedsUpgrade_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := [][]byte{ {'"', '2', '"'}, // wrong type @@ -107,7 +108,7 @@ func TestUpgrade_NeedsUpgrade_Error(t *testing.T) { // TestUpgrade_DeleteInvalidAllocs asserts invalid allocations are deleted // during state upgades instead of failing the entire agent. 
func TestUpgrade_DeleteInvalidAllocs_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) bdb, cleanup := setupBoltDB(t) defer cleanup() @@ -152,7 +153,7 @@ func TestUpgrade_DeleteInvalidAllocs_NoAlloc(t *testing.T) { // TestUpgrade_DeleteInvalidTaskEntries asserts invalid entries under a task // bucket are deleted. func TestUpgrade_upgradeTaskBucket_InvalidEntries(t *testing.T) { - t.Parallel() + ci.Parallel(t) db, cleanup := setupBoltDB(t) defer cleanup() diff --git a/client/stats/cpu_test.go b/client/stats/cpu_test.go index 024fba93a..4dc9b19b4 100644 --- a/client/stats/cpu_test.go +++ b/client/stats/cpu_test.go @@ -6,12 +6,15 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" shelpers "github.com/hashicorp/nomad/helper/stats" "github.com/hashicorp/nomad/helper/testlog" "github.com/stretchr/testify/assert" ) func TestCpuStatsPercent(t *testing.T) { + ci.Parallel(t) + cs := NewCpuStats() cs.Percent(79.7) time.Sleep(1 * time.Second) @@ -23,6 +26,8 @@ func TestCpuStatsPercent(t *testing.T) { } func TestHostStats_CPU(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) assert.Nil(shelpers.Init()) diff --git a/client/structs/broadcaster_test.go b/client/structs/broadcaster_test.go index f23de0b93..1bbc1006d 100644 --- a/client/structs/broadcaster_test.go +++ b/client/structs/broadcaster_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/stretchr/testify/require" @@ -13,7 +14,7 @@ import ( // TestAllocBroadcaster_SendRecv asserts the latest sends to a broadcaster are // received by listeners. func TestAllocBroadcaster_SendRecv(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := NewAllocBroadcaster(testlog.HCLogger(t)) defer b.Close() @@ -47,7 +48,7 @@ func TestAllocBroadcaster_SendRecv(t *testing.T) { // TestAllocBroadcaster_RecvBlocks asserts listeners are blocked until a send occurs. 
func TestAllocBroadcaster_RecvBlocks(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() b := NewAllocBroadcaster(testlog.HCLogger(t)) @@ -87,7 +88,7 @@ func TestAllocBroadcaster_RecvBlocks(t *testing.T) { // TestAllocBroadcaster_Concurrency asserts that the broadcaster behaves // correctly with concurrent listeners being added and closed. func TestAllocBroadcaster_Concurrency(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() b := NewAllocBroadcaster(testlog.HCLogger(t)) @@ -164,7 +165,7 @@ func TestAllocBroadcaster_Concurrency(t *testing.T) { // TestAllocBroadcaster_PrimeListener asserts that newly created listeners are // primed with the last sent alloc. func TestAllocBroadcaster_PrimeListener(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := NewAllocBroadcaster(testlog.HCLogger(t)) defer b.Close() @@ -188,7 +189,7 @@ func TestAllocBroadcaster_PrimeListener(t *testing.T) { // TestAllocBroadcaster_Closed asserts that newly created listeners are // primed with the last sent alloc even when the broadcaster is closed. 
func TestAllocBroadcaster_Closed(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := NewAllocBroadcaster(testlog.HCLogger(t)) diff --git a/client/taskenv/env_test.go b/client/taskenv/env_test.go index b4cde4029..7b3156e47 100644 --- a/client/taskenv/env_test.go +++ b/client/taskenv/env_test.go @@ -11,6 +11,7 @@ import ( hcl "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/gohcl" "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -62,6 +63,8 @@ func testEnvBuilder() *Builder { } func TestEnvironment_ParseAndReplace_Env(t *testing.T) { + ci.Parallel(t) + env := testEnvBuilder() input := []string{fmt.Sprintf(`"${%v}"!`, envOneKey), fmt.Sprintf("${%s}${%s}", envOneKey, envTwoKey)} @@ -74,6 +77,8 @@ func TestEnvironment_ParseAndReplace_Env(t *testing.T) { } func TestEnvironment_ParseAndReplace_Meta(t *testing.T) { + ci.Parallel(t) + input := []string{fmt.Sprintf("${%v%v}", nodeMetaPrefix, metaKey)} exp := []string{metaVal} env := testEnvBuilder() @@ -85,6 +90,8 @@ func TestEnvironment_ParseAndReplace_Meta(t *testing.T) { } func TestEnvironment_ParseAndReplace_Attr(t *testing.T) { + ci.Parallel(t) + input := []string{fmt.Sprintf("${%v%v}", nodeAttributePrefix, attrKey)} exp := []string{attrVal} env := testEnvBuilder() @@ -96,6 +103,8 @@ func TestEnvironment_ParseAndReplace_Attr(t *testing.T) { } func TestEnvironment_ParseAndReplace_Node(t *testing.T) { + ci.Parallel(t) + input := []string{fmt.Sprintf("${%v}", nodeNameKey), fmt.Sprintf("${%v}", nodeClassKey)} exp := []string{nodeName, nodeClass} env := testEnvBuilder() @@ -107,6 +116,8 @@ func TestEnvironment_ParseAndReplace_Node(t *testing.T) { } func TestEnvironment_ParseAndReplace_Mixed(t *testing.T) { + ci.Parallel(t) + input := []string{ fmt.Sprintf("${%v}${%v%v}", nodeNameKey, nodeAttributePrefix, attrKey), fmt.Sprintf("${%v}${%v%v}", nodeClassKey, 
nodeMetaPrefix, metaKey), @@ -126,6 +137,8 @@ func TestEnvironment_ParseAndReplace_Mixed(t *testing.T) { } func TestEnvironment_ReplaceEnv_Mixed(t *testing.T) { + ci.Parallel(t) + input := fmt.Sprintf("${%v}${%v%v}", nodeNameKey, nodeAttributePrefix, attrKey) exp := fmt.Sprintf("%v%v", nodeName, attrVal) env := testEnvBuilder() @@ -137,6 +150,8 @@ func TestEnvironment_ReplaceEnv_Mixed(t *testing.T) { } func TestEnvironment_AsList(t *testing.T) { + ci.Parallel(t) + n := mock.Node() n.Meta = map[string]string{ "metaKey": "metaVal", @@ -227,7 +242,7 @@ func TestEnvironment_AsList(t *testing.T) { } func TestEnvironment_AllValues(t *testing.T) { - t.Parallel() + ci.Parallel(t) n := mock.Node() n.Meta = map[string]string{ @@ -431,6 +446,8 @@ func TestEnvironment_AllValues(t *testing.T) { } func TestEnvironment_VaultToken(t *testing.T) { + ci.Parallel(t) + n := mock.Node() a := mock.Alloc() env := NewBuilder(n, a, a.Job.TaskGroups[0].Tasks[0], "global") @@ -491,6 +508,8 @@ func TestEnvironment_VaultToken(t *testing.T) { } func TestEnvironment_Envvars(t *testing.T) { + ci.Parallel(t) + envMap := map[string]string{"foo": "baz", "bar": "bang"} n := mock.Node() a := mock.Alloc() @@ -512,6 +531,8 @@ func TestEnvironment_Envvars(t *testing.T) { // TestEnvironment_HookVars asserts hook env vars are LWW and deletes of later // writes allow earlier hook's values to be visible. func TestEnvironment_HookVars(t *testing.T) { + ci.Parallel(t) + n := mock.Node() a := mock.Alloc() builder := NewBuilder(n, a, a.Job.TaskGroups[0].Tasks[0], "global") @@ -548,6 +569,8 @@ func TestEnvironment_HookVars(t *testing.T) { // TestEnvironment_DeviceHookVars asserts device hook env vars are accessible // separately. 
func TestEnvironment_DeviceHookVars(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := mock.Node() a := mock.Alloc() @@ -573,6 +596,8 @@ func TestEnvironment_DeviceHookVars(t *testing.T) { } func TestEnvironment_Interpolate(t *testing.T) { + ci.Parallel(t) + n := mock.Node() n.Attributes["arch"] = "x86" n.NodeClass = "test class" @@ -598,6 +623,8 @@ func TestEnvironment_Interpolate(t *testing.T) { } func TestEnvironment_AppendHostEnvvars(t *testing.T) { + ci.Parallel(t) + host := os.Environ() if len(host) < 2 { t.Skip("No host environment variables. Can't test") @@ -620,6 +647,8 @@ func TestEnvironment_AppendHostEnvvars(t *testing.T) { // converted to underscores in environment variables. // See: https://github.com/hashicorp/nomad/issues/2405 func TestEnvironment_DashesInTaskName(t *testing.T) { + ci.Parallel(t) + a := mock.Alloc() task := a.Job.TaskGroups[0].Tasks[0] task.Env = map[string]string{ @@ -639,6 +668,8 @@ func TestEnvironment_DashesInTaskName(t *testing.T) { // TestEnvironment_UpdateTask asserts env vars and task meta are updated when a // task is updated. func TestEnvironment_UpdateTask(t *testing.T) { + ci.Parallel(t) + a := mock.Alloc() a.Job.TaskGroups[0].Meta = map[string]string{"tgmeta": "tgmetaval"} task := a.Job.TaskGroups[0].Tasks[0] @@ -688,6 +719,8 @@ func TestEnvironment_UpdateTask(t *testing.T) { // job, if an optional meta field is not set, it will get interpolated as an // empty string. func TestEnvironment_InterpolateEmptyOptionalMeta(t *testing.T) { + ci.Parallel(t) + require := require.New(t) a := mock.Alloc() a.Job.ParameterizedJob = &structs.ParameterizedJobConfig{ @@ -704,7 +737,7 @@ func TestEnvironment_InterpolateEmptyOptionalMeta(t *testing.T) { // TestEnvironment_Upsteams asserts that group.service.upstreams entries are // added to the environment. 
func TestEnvironment_Upstreams(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some upstreams to the mock alloc a := mock.Alloc() @@ -754,6 +787,8 @@ func TestEnvironment_Upstreams(t *testing.T) { } func TestEnvironment_SetPortMapEnvs(t *testing.T) { + ci.Parallel(t) + envs := map[string]string{ "foo": "bar", "NOMAD_PORT_ssh": "2342", @@ -774,6 +809,8 @@ func TestEnvironment_SetPortMapEnvs(t *testing.T) { } func TestEnvironment_TasklessBuilder(t *testing.T) { + ci.Parallel(t) + node := mock.Node() alloc := mock.Alloc() alloc.Job.Meta["jobt"] = "foo" @@ -789,6 +826,8 @@ func TestEnvironment_TasklessBuilder(t *testing.T) { } func TestTaskEnv_ClientPath(t *testing.T) { + ci.Parallel(t) + builder := testEnvBuilder() builder.SetAllocDir("/tmp/testAlloc") builder.SetClientSharedAllocDir("/tmp/testAlloc/alloc") diff --git a/client/taskenv/network_test.go b/client/taskenv/network_test.go index ec892d4d3..5c1f3deba 100644 --- a/client/taskenv/network_test.go +++ b/client/taskenv/network_test.go @@ -3,11 +3,14 @@ package taskenv import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" ) func Test_InterpolateNetworks(t *testing.T) { + ci.Parallel(t) + testCases := []struct { inputTaskEnv *TaskEnv inputNetworks structs.Networks diff --git a/client/taskenv/services_test.go b/client/taskenv/services_test.go index dc6a5593a..bc1ce6d46 100644 --- a/client/taskenv/services_test.go +++ b/client/taskenv/services_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" @@ -12,7 +13,7 @@ import ( // TestInterpolateServices asserts that all service // and check fields are properly interpolated. 
func TestInterpolateServices(t *testing.T) { - t.Parallel() + ci.Parallel(t) services := []*structs.Service{ { @@ -107,7 +108,7 @@ var testEnv = NewTaskEnv( nil, nil, "", "") func TestInterpolate_interpolateMapStringSliceString(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { require.Nil(t, interpolateMapStringSliceString(testEnv, nil)) @@ -125,7 +126,7 @@ func TestInterpolate_interpolateMapStringSliceString(t *testing.T) { } func TestInterpolate_interpolateMapStringString(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { require.Nil(t, interpolateMapStringString(testEnv, nil)) @@ -143,7 +144,7 @@ func TestInterpolate_interpolateMapStringString(t *testing.T) { } func TestInterpolate_interpolateMapStringInterface(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { require.Nil(t, interpolateMapStringInterface(testEnv, nil)) @@ -161,7 +162,7 @@ func TestInterpolate_interpolateMapStringInterface(t *testing.T) { } func TestInterpolate_interpolateConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) e := map[string]string{ "tag1": "_tag1", diff --git a/client/taskenv/util_test.go b/client/taskenv/util_test.go index e97cc5716..4f4538781 100644 --- a/client/taskenv/util_test.go +++ b/client/taskenv/util_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" "github.com/zclconf/go-cty/cty" ) @@ -11,6 +12,8 @@ import ( // TestAddNestedKey_Ok asserts test cases that succeed when passed to // addNestedKey. 
func TestAddNestedKey_Ok(t *testing.T) { + ci.Parallel(t) + cases := []struct { // M will be initialized if unset M map[string]interface{} @@ -209,7 +212,7 @@ func TestAddNestedKey_Ok(t *testing.T) { name = fmt.Sprintf("%s-%d", name, len(tc.M)) } t.Run(name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) if tc.M == nil { tc.M = map[string]interface{}{} } @@ -222,6 +225,8 @@ func TestAddNestedKey_Ok(t *testing.T) { // TestAddNestedKey_Bad asserts test cases return an error when passed to // addNestedKey. func TestAddNestedKey_Bad(t *testing.T) { + ci.Parallel(t) + cases := []struct { // M will be initialized if unset M func() map[string]interface{} @@ -320,7 +325,7 @@ func TestAddNestedKey_Bad(t *testing.T) { name += "-cleanup" } t.Run(name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Copy original M value to ensure it doesn't get altered if tc.M == nil { @@ -341,6 +346,8 @@ func TestAddNestedKey_Bad(t *testing.T) { } func TestCtyify_Ok(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string In map[string]interface{} @@ -402,7 +409,7 @@ func TestCtyify_Ok(t *testing.T) { for i := range cases { tc := cases[i] t.Run(tc.Name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) // ctiyif and check for errors result, err := ctyify(tc.In) @@ -417,6 +424,8 @@ func TestCtyify_Ok(t *testing.T) { } func TestCtyify_Bad(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string In map[string]interface{} @@ -441,7 +450,7 @@ func TestCtyify_Bad(t *testing.T) { for i := range cases { tc := cases[i] t.Run(tc.Name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) // ctiyif and check for errors result, err := ctyify(tc.In) diff --git a/client/util_test.go b/client/util_test.go deleted file mode 100644 index ca16bbeea..000000000 --- a/client/util_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package client - -/* -TODO(clientv2) -import ( - "reflect" - "testing" - - "github.com/hashicorp/nomad/helper/uuid" - "github.com/hashicorp/nomad/nomad/mock" - 
"github.com/hashicorp/nomad/nomad/structs" -) - -func TestDiffAllocs(t *testing.T) { - t.Parallel() - alloc1 := mock.Alloc() // Ignore - alloc2 := mock.Alloc() // Update - alloc2u := new(structs.Allocation) - *alloc2u = *alloc2 - alloc2u.AllocModifyIndex += 1 - alloc3 := mock.Alloc() // Remove - alloc4 := mock.Alloc() // Add - - exist := []*structs.Allocation{ - alloc1, - alloc2, - alloc3, - } - update := &allocUpdates{ - pulled: map[string]*structs.Allocation{ - alloc2u.ID: alloc2u, - alloc4.ID: alloc4, - }, - filtered: map[string]struct{}{ - alloc1.ID: {}, - }, - } - - result := diffAllocs(exist, update) - - if len(result.ignore) != 1 || result.ignore[0] != alloc1 { - t.Fatalf("Bad: %#v", result.ignore) - } - if len(result.added) != 1 || result.added[0] != alloc4 { - t.Fatalf("Bad: %#v", result.added) - } - if len(result.removed) != 1 || result.removed[0] != alloc3 { - t.Fatalf("Bad: %#v", result.removed) - } - if len(result.updated) != 1 { - t.Fatalf("Bad: %#v", result.updated) - } - if result.updated[0].exist != alloc2 || result.updated[0].updated != alloc2u { - t.Fatalf("Bad: %#v", result.updated) - } -} - -func TestShuffleStrings(t *testing.T) { - t.Parallel() - // Generate input - inp := make([]string, 10) - for idx := range inp { - inp[idx] = uuid.Generate() - } - - // Copy the input - orig := make([]string, len(inp)) - copy(orig, inp) - - // Shuffle - shuffleStrings(inp) - - // Ensure order is not the same - if reflect.DeepEqual(inp, orig) { - t.Fatalf("shuffle failed") - } -} -*/ diff --git a/client/vaultclient/vaultclient_test.go b/client/vaultclient/vaultclient_test.go index 9128d14b3..66f711761 100644 --- a/client/vaultclient/vaultclient_test.go +++ b/client/vaultclient/vaultclient_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/testutil" @@ -15,7 +16,8 @@ import ( ) func TestVaultClient_TokenRenewals(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -103,7 +105,8 @@ func TestVaultClient_TokenRenewals(t *testing.T) { // TestVaultClient_NamespaceSupport tests that the Vault namespace config, if present, will result in the // namespace header being set on the created Vault client. func TestVaultClient_NamespaceSupport(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) tr := true testNs := "test-namespace" @@ -120,7 +123,8 @@ func TestVaultClient_NamespaceSupport(t *testing.T) { } func TestVaultClient_Heap(t *testing.T) { - t.Parallel() + ci.Parallel(t) + tr := true conf := config.DefaultConfig() conf.VaultConfig.Enabled = &tr @@ -226,7 +230,8 @@ func TestVaultClient_Heap(t *testing.T) { } func TestVaultClient_RenewNonRenewableLease(t *testing.T) { - t.Parallel() + ci.Parallel(t) + v := testutil.NewTestVault(t) defer v.Stop() @@ -275,7 +280,8 @@ func TestVaultClient_RenewNonRenewableLease(t *testing.T) { } func TestVaultClient_RenewNonexistentLease(t *testing.T) { - t.Parallel() + ci.Parallel(t) + v := testutil.NewTestVault(t) defer v.Stop() @@ -311,7 +317,7 @@ func TestVaultClient_RenewNonexistentLease(t *testing.T) { // TestVaultClient_RenewalTime_Long asserts that for leases over 1m the renewal // time is jittered. func TestVaultClient_RenewalTime_Long(t *testing.T) { - t.Parallel() + ci.Parallel(t) // highRoller is a randIntn func that always returns the max value highRoller := func(n int) int { @@ -337,7 +343,7 @@ func TestVaultClient_RenewalTime_Long(t *testing.T) { // TestVaultClient_RenewalTime_Short asserts that for leases under 1m the renewal // time is lease/2. 
func TestVaultClient_RenewalTime_Short(t *testing.T) { - t.Parallel() + ci.Parallel(t) dice := func(int) int { require.Fail(t, "dice should not have been called") diff --git a/command/acl_bootstrap_test.go b/command/acl_bootstrap_test.go index 9b029f07a..c972f4488 100644 --- a/command/acl_bootstrap_test.go +++ b/command/acl_bootstrap_test.go @@ -3,13 +3,14 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" ) func TestACLBootstrapCommand(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) // create a acl-enabled server without bootstrapping the token @@ -36,7 +37,7 @@ func TestACLBootstrapCommand(t *testing.T) { // If a bootstrap token has already been created, attempts to create more should // fail. func TestACLBootstrapCommand_ExistingBootstrapToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) config := func(c *agent.Config) { @@ -60,7 +61,7 @@ func TestACLBootstrapCommand_ExistingBootstrapToken(t *testing.T) { // Attempting to bootstrap a token on a non-ACL enabled server should fail. 
func TestACLBootstrapCommand_NonACLServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) srv, _, url := testServer(t, true, nil) diff --git a/command/acl_policy_apply_test.go b/command/acl_policy_apply_test.go index 608dbf0e6..076d0a551 100644 --- a/command/acl_policy_apply_test.go +++ b/command/acl_policy_apply_test.go @@ -6,6 +6,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/mitchellh/cli" @@ -13,8 +14,8 @@ import ( ) func TestACLPolicyApplyCommand(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_policy_delete_test.go b/command/acl_policy_delete_test.go index 2ea171260..2ca293827 100644 --- a/command/acl_policy_delete_test.go +++ b/command/acl_policy_delete_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,8 +15,8 @@ import ( ) func TestACLPolicyDeleteCommand(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_policy_info_test.go b/command/acl_policy_info_test.go index 1d8934303..828b4022b 100644 --- a/command/acl_policy_info_test.go +++ b/command/acl_policy_info_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -12,8 +13,8 @@ import ( ) func TestACLPolicyInfoCommand(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_policy_list_test.go 
b/command/acl_policy_list_test.go index e18d3725f..ce3f2bcf7 100644 --- a/command/acl_policy_list_test.go +++ b/command/acl_policy_list_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -13,8 +14,8 @@ import ( ) func TestACLPolicyListCommand(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_token_create_test.go b/command/acl_token_create_test.go index 7d8c59f6c..e24e4c507 100644 --- a/command/acl_token_create_test.go +++ b/command/acl_token_create_test.go @@ -4,14 +4,15 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" ) func TestACLTokenCreateCommand(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_token_delete_test.go b/command/acl_token_delete_test.go index cbacdae59..8da29208d 100644 --- a/command/acl_token_delete_test.go +++ b/command/acl_token_delete_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,8 +15,8 @@ import ( ) func TestACLTokenDeleteCommand_ViaEnvVariable(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_token_info_test.go b/command/acl_token_info_test.go index 095621452..23a6e15b9 100644 --- a/command/acl_token_info_test.go +++ b/command/acl_token_info_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + 
"github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,10 +15,10 @@ import ( ) func TestACLTokenInfoCommand_ViaEnvVar(t *testing.T) { + ci.Parallel(t) defer os.Setenv("NOMAD_TOKEN", os.Getenv("NOMAD_TOKEN")) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_token_list_test.go b/command/acl_token_list_test.go index df0f5cb29..59622838b 100644 --- a/command/acl_token_list_test.go +++ b/command/acl_token_list_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -13,8 +14,8 @@ import ( ) func TestACLTokenListCommand(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_token_self_test.go b/command/acl_token_self_test.go index 30c66dab7..d907cd781 100644 --- a/command/acl_token_self_test.go +++ b/command/acl_token_self_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,10 +15,10 @@ import ( ) func TestACLTokenSelfCommand_ViaEnvVar(t *testing.T) { + ci.Parallel(t) defer os.Setenv("NOMAD_TOKEN", os.Getenv("NOMAD_TOKEN")) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_token_update_test.go b/command/acl_token_update_test.go index f8a573550..e98002217 100644 --- a/command/acl_token_update_test.go +++ b/command/acl_token_update_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" 
"github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -12,8 +13,9 @@ import ( ) func TestACLTokenUpdateCommand(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/agent/acl_endpoint_test.go b/command/agent/acl_endpoint_test.go index dc34a1e68..64f37a333 100644 --- a/command/agent/acl_endpoint_test.go +++ b/command/agent/acl_endpoint_test.go @@ -5,6 +5,7 @@ import ( "net/http/httptest" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" @@ -12,7 +13,7 @@ import ( ) func TestHTTP_ACLPolicyList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLPolicy() p2 := mock.ACLPolicy() @@ -63,7 +64,7 @@ func TestHTTP_ACLPolicyList(t *testing.T) { } func TestHTTP_ACLPolicyQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLPolicy() args := structs.ACLPolicyUpsertRequest{ @@ -112,7 +113,7 @@ func TestHTTP_ACLPolicyQuery(t *testing.T) { } func TestHTTP_ACLPolicyCreate(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { // Make the HTTP request p1 := mock.ACLPolicy() @@ -147,7 +148,7 @@ func TestHTTP_ACLPolicyCreate(t *testing.T) { } func TestHTTP_ACLPolicyDelete(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLPolicy() args := structs.ACLPolicyUpsertRequest{ @@ -189,7 +190,7 @@ func TestHTTP_ACLPolicyDelete(t *testing.T) { } func TestHTTP_ACLTokenBootstrap(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := func(c *Config) { c.ACL.Enabled = true c.ACL.PolicyTTL = 0 // Special flag to disable auto-bootstrap @@ -221,7 +222,7 @@ func TestHTTP_ACLTokenBootstrap(t *testing.T) { } func 
TestHTTP_ACLTokenList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLToken() p1.AccessorID = "" @@ -275,7 +276,7 @@ func TestHTTP_ACLTokenList(t *testing.T) { } func TestHTTP_ACLTokenQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLToken() p1.AccessorID = "" @@ -324,7 +325,7 @@ func TestHTTP_ACLTokenQuery(t *testing.T) { } func TestHTTP_ACLTokenSelf(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLToken() p1.AccessorID = "" @@ -373,7 +374,7 @@ func TestHTTP_ACLTokenSelf(t *testing.T) { } func TestHTTP_ACLTokenCreate(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { // Make the HTTP request p1 := mock.ACLToken() @@ -407,7 +408,7 @@ func TestHTTP_ACLTokenCreate(t *testing.T) { } func TestHTTP_ACLTokenDelete(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLToken() p1.AccessorID = "" @@ -451,7 +452,7 @@ func TestHTTP_ACLTokenDelete(t *testing.T) { } func TestHTTP_OneTimeToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { // Setup the ACL token diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index efad48880..85bc61d44 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -21,6 +21,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/pool" "github.com/hashicorp/nomad/nomad/mock" @@ -31,7 +32,7 @@ import ( ) func TestHTTP_AgentSelf(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { @@ -94,7 +95,7 @@ func TestHTTP_AgentSelf(t *testing.T) { } func 
TestHTTP_AgentSelf_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -150,7 +151,7 @@ func TestHTTP_AgentSelf_ACL(t *testing.T) { } func TestHTTP_AgentJoin(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Determine the join address member := s.Agent.Server().LocalMember() @@ -182,7 +183,7 @@ func TestHTTP_AgentJoin(t *testing.T) { } func TestHTTP_AgentMembers(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("GET", "/v1/agent/members", nil) @@ -206,7 +207,7 @@ func TestHTTP_AgentMembers(t *testing.T) { } func TestHTTP_AgentMembers_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -260,7 +261,7 @@ func TestHTTP_AgentMembers_ACL(t *testing.T) { } func TestHTTP_AgentMonitor(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("invalid log_json parameter", func(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { @@ -452,6 +453,8 @@ func TestHTTP_AgentMonitor(t *testing.T) { // | /agent/pprof | `false` | on | **yes** | // +---------------+------------------+--------+------------------+ func TestAgent_PprofRequest_Permissions(t *testing.T) { + ci.Parallel(t) + trueP, falseP := helper.BoolToPtr(true), helper.BoolToPtr(false) cases := []struct { acl *bool @@ -524,6 +527,8 @@ func TestAgent_PprofRequest_Permissions(t *testing.T) { } func TestAgent_PprofRequest(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string url string @@ -634,7 +639,7 @@ func (r *closableRecorder) CloseNotify() <-chan bool { } func TestHTTP_AgentForceLeave(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("PUT", "/v1/agent/force-leave?node=foo", nil) @@ -652,7 +657,7 @@ func TestHTTP_AgentForceLeave(t *testing.T) { } func 
TestHTTP_AgentForceLeave_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -702,7 +707,7 @@ func TestHTTP_AgentForceLeave_ACL(t *testing.T) { } func TestHTTP_AgentSetServers(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { addr := s.Config.AdvertiseAddrs.RPC @@ -764,7 +769,7 @@ func TestHTTP_AgentSetServers(t *testing.T) { } func TestHTTP_AgentSetServers_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -836,7 +841,7 @@ func TestHTTP_AgentSetServers_ACL(t *testing.T) { } func TestHTTP_AgentListServers_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -901,7 +906,7 @@ func TestHTTP_AgentListServers_ACL(t *testing.T) { } func TestHTTP_AgentListKeys(t *testing.T) { - t.Parallel() + ci.Parallel(t) key1 := "HS5lJ+XuTlYKWaeGYyG+/A==" @@ -922,7 +927,7 @@ func TestHTTP_AgentListKeys(t *testing.T) { } func TestHTTP_AgentListKeys_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) key1 := "HS5lJ+XuTlYKWaeGYyG+/A==" @@ -982,7 +987,7 @@ func TestHTTP_AgentListKeys_ACL(t *testing.T) { } func TestHTTP_AgentInstallKey(t *testing.T) { - t.Parallel() + ci.Parallel(t) key1 := "HS5lJ+XuTlYKWaeGYyG+/A==" key2 := "wH1Bn9hlJ0emgWB1JttVRA==" @@ -1022,7 +1027,7 @@ func TestHTTP_AgentInstallKey(t *testing.T) { } func TestHTTP_AgentRemoveKey(t *testing.T) { - t.Parallel() + ci.Parallel(t) key1 := "HS5lJ+XuTlYKWaeGYyG+/A==" key2 := "wH1Bn9hlJ0emgWB1JttVRA==" @@ -1071,7 +1076,7 @@ func TestHTTP_AgentRemoveKey(t *testing.T) { } func TestHTTP_AgentHealth_Ok(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Enable ACLs to ensure they're not enforced @@ -1151,7 +1156,7 @@ func TestHTTP_AgentHealth_Ok(t *testing.T) { } func TestHTTP_AgentHealth_BadServer(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) require := require.New(t) serverAgent := NewTestAgent(t, "server", nil) @@ -1197,7 +1202,7 @@ func TestHTTP_AgentHealth_BadServer(t *testing.T) { } func TestHTTP_AgentHealth_BadClient(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Disable client to make server unhealthy if requested @@ -1350,7 +1355,7 @@ func NewFakeRW() *fakeRW { // TestHTTP_XSS_Monitor asserts /v1/agent/monitor is safe against XSS attacks // even when log output contains HTML+Javascript. func TestHTTP_XSS_Monitor(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -1382,7 +1387,7 @@ func TestHTTP_XSS_Monitor(t *testing.T) { for i := range cases { tc := cases[i] t.Run(tc.Name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -1594,6 +1599,8 @@ func schedulerWorkerInfoTest_testCases() []schedulerWorkerAPITest_testCase { } func TestHTTP_AgentSchedulerWorkerInfoRequest(t *testing.T) { + ci.Parallel(t) + configFn := func(c *Config) { var numSchedulers = 4 c.Server.NumSchedulers = &numSchedulers @@ -1886,6 +1893,8 @@ func schedulerWorkerConfigTest_testCases() []scheduleWorkerConfigTest_workerRequ } func TestHTTP_AgentSchedulerWorkerConfigRequest_NoACL(t *testing.T) { + ci.Parallel(t) + configFn := func(c *Config) { var numSchedulers = 8 c.Server.NumSchedulers = &numSchedulers @@ -1917,6 +1926,8 @@ func TestHTTP_AgentSchedulerWorkerConfigRequest_NoACL(t *testing.T) { } func TestHTTP_AgentSchedulerWorkerConfigRequest_ACL(t *testing.T) { + ci.Parallel(t) + configFn := func(c *Config) { var numSchedulers = 8 c.Server.NumSchedulers = &numSchedulers @@ -2002,6 +2013,8 @@ func schedulerWorkerTest_parseError(t *testing.T, isACLEnabled bool, tc schedule } func TestHTTP_AgentSchedulerWorkerInfoRequest_Client(t *testing.T) { + ci.Parallel(t) + verbs := []string{"GET", "POST", "PUT"} path := "schedulers" @@ -2026,6 +2039,8 @@ func 
TestHTTP_AgentSchedulerWorkerInfoRequest_Client(t *testing.T) { } func TestHTTP_AgentSchedulerWorkerConfigRequest_Client(t *testing.T) { + ci.Parallel(t) + verbs := []string{"GET", "POST", "PUT"} path := "schedulers/config" diff --git a/command/agent/agent_test.go b/command/agent/agent_test.go index f61e47c04..5318c75d5 100644 --- a/command/agent/agent_test.go +++ b/command/agent/agent_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" @@ -27,7 +28,7 @@ func tmpDir(t testing.TB) string { } func TestAgent_RPC_Ping(t *testing.T) { - t.Parallel() + ci.Parallel(t) agent := NewTestAgent(t, t.Name(), nil) defer agent.Shutdown() @@ -38,7 +39,7 @@ func TestAgent_RPC_Ping(t *testing.T) { } func TestAgent_ServerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := DefaultConfig() conf.DevMode = true // allow localhost for advertise addrs conf.Server.Enabled = true @@ -183,6 +184,8 @@ func TestAgent_ServerConfig(t *testing.T) { } func TestAgent_ServerConfig_SchedulerFlags(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string input *structs.SchedulerConfiguration @@ -249,7 +252,7 @@ func TestAgent_ServerConfig_SchedulerFlags(t *testing.T) { // cause errors. This is the server-only (RPC) counterpart to // TestHTTPServer_Limits_Error. func TestAgent_ServerConfig_Limits_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string @@ -316,7 +319,7 @@ func TestAgent_ServerConfig_Limits_Error(t *testing.T) { // cause errors. This is the server-only (RPC) counterpart to // TestHTTPServer_Limits_OK. 
func TestAgent_ServerConfig_Limits_OK(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string @@ -371,7 +374,7 @@ func TestAgent_ServerConfig_Limits_OK(t *testing.T) { } func TestAgent_ServerConfig_RaftMultiplier_Ok(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { multiplier *int @@ -456,7 +459,7 @@ func TestAgent_ServerConfig_RaftMultiplier_Ok(t *testing.T) { } func TestAgent_ServerConfig_RaftMultiplier_Bad(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []int{ -1, @@ -478,7 +481,7 @@ func TestAgent_ServerConfig_RaftMultiplier_Bad(t *testing.T) { } func TestAgent_ClientConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := DefaultConfig() conf.Client.Enabled = true @@ -522,7 +525,7 @@ func TestAgent_ClientConfig(t *testing.T) { } func TestAgent_ClientConfig_ReservedCores(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := DefaultConfig() conf.Client.Enabled = true conf.Client.ReserveableCores = "0-7" @@ -536,6 +539,8 @@ func TestAgent_ClientConfig_ReservedCores(t *testing.T) { // Clients should inherit telemetry configuration func TestAgent_Client_TelemetryConfiguration(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) conf := DefaultConfig() @@ -556,7 +561,7 @@ func TestAgent_Client_TelemetryConfiguration(t *testing.T) { // TestAgent_HTTPCheck asserts Agent.agentHTTPCheck properly alters the HTTP // API health check depending on configuration. func TestAgent_HTTPCheck(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) agent := func() *Agent { return &Agent{ @@ -634,7 +639,7 @@ func TestAgent_HTTPCheck(t *testing.T) { // TestAgent_HTTPCheckPath asserts clients and servers use different endpoints // for healthchecks. func TestAgent_HTTPCheckPath(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Agent.agentHTTPCheck only needs a config and logger a := &Agent{ config: DevConfig(nil), @@ -669,7 +674,7 @@ func TestAgent_HTTPCheckPath(t *testing.T) { // reloaded. 
I can't find a good way to fetch this from the logger itself, so // we pull it only from the agents configuration struct, not the logger. func TestAgent_Reload_LogLevel(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) agent := NewTestAgent(t, t.Name(), func(c *Config) { @@ -691,7 +696,7 @@ func TestAgent_Reload_LogLevel(t *testing.T) { // across the Agent, Server, and Client. This is essential for certificate // reloading to work. func TestServer_Reload_TLS_Shared_Keyloader(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) // We will start out with a bad cert and then reload with a good one. @@ -759,7 +764,7 @@ func TestServer_Reload_TLS_Shared_Keyloader(t *testing.T) { } func TestServer_Reload_TLS_Certificate(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -808,7 +813,7 @@ func TestServer_Reload_TLS_Certificate(t *testing.T) { } func TestServer_Reload_TLS_Certificate_Invalid(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -853,6 +858,8 @@ func TestServer_Reload_TLS_Certificate_Invalid(t *testing.T) { } func Test_GetConfig(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) agentConfig := &Config{ @@ -877,7 +884,7 @@ func Test_GetConfig(t *testing.T) { } func TestServer_Reload_TLS_WithNilConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) logger := testlog.HCLogger(t) @@ -893,7 +900,7 @@ func TestServer_Reload_TLS_WithNilConfiguration(t *testing.T) { } func TestServer_Reload_TLS_UpgradeToTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -936,7 +943,7 @@ func TestServer_Reload_TLS_UpgradeToTLS(t *testing.T) { } func TestServer_Reload_TLS_DowngradeFromTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -979,7 +986,7 @@ func TestServer_Reload_TLS_DowngradeFromTLS(t *testing.T) { } func TestServer_ShouldReload_ReturnFalseForNoChanges(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -1019,7 +1026,7 @@ func TestServer_ShouldReload_ReturnFalseForNoChanges(t *testing.T) { } func TestServer_ShouldReload_ReturnTrueForOnlyHTTPChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) const ( @@ -1059,7 +1066,7 @@ func TestServer_ShouldReload_ReturnTrueForOnlyHTTPChanges(t *testing.T) { } func TestServer_ShouldReload_ReturnTrueForOnlyRPCChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -1099,7 +1106,7 @@ func TestServer_ShouldReload_ReturnTrueForOnlyRPCChanges(t *testing.T) { } func TestServer_ShouldReload_ReturnTrueForConfigChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -1141,7 +1148,7 @@ func TestServer_ShouldReload_ReturnTrueForConfigChanges(t *testing.T) { } func TestServer_ShouldReload_ReturnTrueForFileChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) oldCertificate := ` @@ -1244,7 +1251,7 @@ func TestServer_ShouldReload_ReturnTrueForFileChanges(t *testing.T) { } func TestServer_ShouldReload_ShouldHandleMultipleChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) const ( @@ -1297,7 +1304,7 @@ func TestServer_ShouldReload_ShouldHandleMultipleChanges(t *testing.T) { } func TestServer_ShouldReload_ReturnTrueForRPCUpgradeModeChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) sameAgentConfig := &Config{ TLSConfig: &config.TLSConfig{ @@ -1318,7 +1325,7 @@ func TestServer_ShouldReload_ReturnTrueForRPCUpgradeModeChanges(t *testing.T) { } func TestAgent_ProxyRPC_Dev(t *testing.T) { - t.Parallel() + ci.Parallel(t) agent := NewTestAgent(t, t.Name(), nil) defer agent.Shutdown() diff --git a/command/agent/alloc_endpoint_test.go b/command/agent/alloc_endpoint_test.go index ce1d994bc..9896cdbdd 100644 --- a/command/agent/alloc_endpoint_test.go +++ b/command/agent/alloc_endpoint_test.go @@ -15,6 +15,7 @@ import 
( "github.com/golang/snappy" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" @@ -25,7 +26,7 @@ import ( ) func TestHTTP_AllocsList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -86,7 +87,7 @@ func TestHTTP_AllocsList(t *testing.T) { } func TestHTTP_AllocsPrefixList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -157,7 +158,7 @@ func TestHTTP_AllocsPrefixList(t *testing.T) { } func TestHTTP_AllocQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -198,7 +199,7 @@ func TestHTTP_AllocQuery(t *testing.T) { } func TestHTTP_AllocQuery_Payload(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -255,7 +256,7 @@ func TestHTTP_AllocQuery_Payload(t *testing.T) { } func TestHTTP_AllocRestart(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Validates that all methods of forwarding the request are processed correctly @@ -323,7 +324,7 @@ func TestHTTP_AllocRestart(t *testing.T) { } func TestHTTP_AllocRestart_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -388,7 +389,7 @@ func TestHTTP_AllocRestart_ACL(t *testing.T) { } func TestHTTP_AllocStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -434,7 +435,7 @@ func TestHTTP_AllocStop(t *testing.T) { } func TestHTTP_AllocStats(t *testing.T) { - t.Parallel() + ci.Parallel(t) 
require := require.New(t) httpTest(t, nil, func(s *TestAgent) { @@ -498,7 +499,7 @@ func TestHTTP_AllocStats(t *testing.T) { } func TestHTTP_AllocStats_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -553,7 +554,7 @@ func TestHTTP_AllocStats_ACL(t *testing.T) { } func TestHTTP_AllocSnapshot(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("GET", "/v1/client/allocation/123/snapshot", nil) @@ -571,7 +572,7 @@ func TestHTTP_AllocSnapshot(t *testing.T) { } func TestHTTP_AllocSnapshot_WithMigrateToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { // Request without a token fails @@ -607,7 +608,7 @@ func TestHTTP_AllocSnapshot_WithMigrateToken(t *testing.T) { // TestHTTP_AllocSnapshot_Atomic ensures that when a client encounters an error // snapshotting a valid tar is not returned. 
func TestHTTP_AllocSnapshot_Atomic(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, func(c *Config) { // Disable the schedulers c.Server.NumSchedulers = helper.IntToPtr(0) @@ -716,7 +717,7 @@ func TestHTTP_AllocSnapshot_Atomic(t *testing.T) { } func TestHTTP_AllocGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) path := fmt.Sprintf("/v1/client/allocation/%s/gc", uuid.Generate()) httpTest(t, nil, func(s *TestAgent) { @@ -786,7 +787,7 @@ func TestHTTP_AllocGC(t *testing.T) { } func TestHTTP_AllocGC_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) path := fmt.Sprintf("/v1/client/allocation/%s/gc", uuid.Generate()) @@ -842,7 +843,7 @@ func TestHTTP_AllocGC_ACL(t *testing.T) { } func TestHTTP_AllocAllGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { // Local node, local resp @@ -904,7 +905,7 @@ func TestHTTP_AllocAllGC(t *testing.T) { } func TestHTTP_AllocAllGC_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -953,6 +954,8 @@ func TestHTTP_AllocAllGC_ACL(t *testing.T) { } func TestHTTP_ReadWsHandshake(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string token string diff --git a/command/agent/command_test.go b/command/agent/command_test.go index bd722d9fb..ae6ed0185 100644 --- a/command/agent/command_test.go +++ b/command/agent/command_test.go @@ -8,6 +8,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,12 +18,12 @@ import ( ) func TestCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &Command{} } func TestCommand_Args(t *testing.T) { - t.Parallel() + ci.Parallel(t) tmpDir, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -96,6 +97,8 
@@ func TestCommand_Args(t *testing.T) { } func TestCommand_MetaConfigValidation(t *testing.T) { + ci.Parallel(t) + tmpDir, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -149,6 +152,8 @@ func TestCommand_MetaConfigValidation(t *testing.T) { } func TestCommand_NullCharInDatacenter(t *testing.T) { + ci.Parallel(t) + tmpDir, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -198,6 +203,8 @@ func TestCommand_NullCharInDatacenter(t *testing.T) { } func TestCommand_NullCharInRegion(t *testing.T) { + ci.Parallel(t) + tmpDir, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -248,6 +255,7 @@ func TestCommand_NullCharInRegion(t *testing.T) { // TestIsValidConfig asserts that invalid configurations return false. func TestIsValidConfig(t *testing.T) { + ci.Parallel(t) cases := []struct { name string diff --git a/command/agent/config_parse_test.go b/command/agent/config_parse_test.go index 4b6129e7d..d2567c7fc 100644 --- a/command/agent/config_parse_test.go +++ b/command/agent/config_parse_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs/config" @@ -411,7 +412,7 @@ var nonoptConfig = &Config{ } func TestConfig_ParseMerge(t *testing.T) { - t.Parallel() + ci.Parallel(t) path, err := filepath.Abs(filepath.Join(".", "testdata", "basic.hcl")) require.NoError(t, err) @@ -435,7 +436,7 @@ func TestConfig_ParseMerge(t *testing.T) { } func TestConfig_Parse(t *testing.T) { - t.Parallel() + ci.Parallel(t) basicConfig.addDefaults() pluginConfig.addDefaults() @@ -545,6 +546,8 @@ func (c *Config) addDefaults() { // length 1 described in // https://github.com/hashicorp/nomad/issues/1290 func TestConfig_ParsePanic(t *testing.T) { + ci.Parallel(t) + c, err := ParseConfigFile("./testdata/obj-len-one.hcl") if err != nil { t.Fatalf("parse error: %s\n", 
err) @@ -561,6 +564,8 @@ func TestConfig_ParsePanic(t *testing.T) { // Top level keys left by hcl when parsing slices in the config // structure should not be unexpected func TestConfig_ParseSliceExtra(t *testing.T) { + ci.Parallel(t) + c, err := ParseConfigFile("./testdata/config-slices.json") require.NoError(t, err) @@ -677,6 +682,8 @@ var sample0 = &Config{ } func TestConfig_ParseSample0(t *testing.T) { + ci.Parallel(t) + c, err := ParseConfigFile("./testdata/sample0.json") require.NoError(t, err) require.EqualValues(t, sample0, c) @@ -766,6 +773,8 @@ var sample1 = &Config{ } func TestConfig_ParseDir(t *testing.T) { + ci.Parallel(t) + c, err := LoadConfig("./testdata/sample1") require.NoError(t, err) @@ -798,6 +807,8 @@ func TestConfig_ParseDir(t *testing.T) { // that parsing a directory config is the equivalent of // parsing individual files in any order func TestConfig_ParseDir_Matches_IndividualParsing(t *testing.T) { + ci.Parallel(t) + dirConfig, err := LoadConfig("./testdata/sample1") require.NoError(t, err) diff --git a/command/agent/config_test.go b/command/agent/config_test.go index b795f8fad..213e2e40b 100644 --- a/command/agent/config_test.go +++ b/command/agent/config_test.go @@ -13,6 +13,7 @@ import ( "time" sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/nomad/ci" client "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper" @@ -29,6 +30,8 @@ var ( ) func TestConfig_Merge(t *testing.T) { + ci.Parallel(t) + c0 := &Config{} c1 := &Config{ @@ -437,6 +440,8 @@ func TestConfig_Merge(t *testing.T) { } func TestConfig_ParseConfigFile(t *testing.T) { + ci.Parallel(t) + // Fails if the file doesn't exist if _, err := ParseConfigFile("/unicorns/leprechauns"); err == nil { t.Fatalf("expected error, got nothing") @@ -477,6 +482,8 @@ func TestConfig_ParseConfigFile(t *testing.T) { } func TestConfig_LoadConfigDir(t *testing.T) { + ci.Parallel(t) + // Fails if the dir doesn't 
exist. if _, err := LoadConfigDir("/unicorns/leprechauns"); err == nil { t.Fatalf("expected error, got nothing") @@ -535,6 +542,8 @@ func TestConfig_LoadConfigDir(t *testing.T) { } func TestConfig_LoadConfig(t *testing.T) { + ci.Parallel(t) + // Fails if the target doesn't exist if _, err := LoadConfig("/unicorns/leprechauns"); err == nil { t.Fatalf("expected error, got nothing") @@ -594,6 +603,8 @@ func TestConfig_LoadConfig(t *testing.T) { } func TestConfig_LoadConfigsFileOrder(t *testing.T) { + ci.Parallel(t) + config1, err := LoadConfigDir("test-resources/etcnomad") if err != nil { t.Fatalf("Failed to load config: %s", err) @@ -620,6 +631,8 @@ func TestConfig_LoadConfigsFileOrder(t *testing.T) { } func TestConfig_Listener(t *testing.T) { + ci.Parallel(t) + config := DefaultConfig() // Fails on invalid input @@ -669,6 +682,8 @@ func TestConfig_Listener(t *testing.T) { } func TestConfig_DevModeFlag(t *testing.T) { + ci.Parallel(t) + cases := []struct { dev bool connect bool @@ -727,6 +742,8 @@ func TestConfig_DevModeFlag(t *testing.T) { // TestConfig_normalizeAddrs_DevMode asserts that normalizeAddrs allows // advertising localhost in dev mode. func TestConfig_normalizeAddrs_DevMode(t *testing.T) { + ci.Parallel(t) + // allow to advertise 127.0.0.1 if dev-mode is enabled c := &Config{ BindAddr: "127.0.0.1", @@ -777,6 +794,8 @@ func TestConfig_normalizeAddrs_DevMode(t *testing.T) { // TestConfig_normalizeAddrs_NoAdvertise asserts that normalizeAddrs will // fail if no valid advertise address available in non-dev mode. func TestConfig_normalizeAddrs_NoAdvertise(t *testing.T) { + ci.Parallel(t) + c := &Config{ BindAddr: "127.0.0.1", Ports: &Ports{ @@ -809,6 +828,8 @@ func TestConfig_normalizeAddrs_NoAdvertise(t *testing.T) { // TestConfig_normalizeAddrs_AdvertiseLocalhost asserts localhost can be // advertised if it's explicitly set in the config. 
func TestConfig_normalizeAddrs_AdvertiseLocalhost(t *testing.T) { + ci.Parallel(t) + c := &Config{ BindAddr: "127.0.0.1", Ports: &Ports{ @@ -846,6 +867,8 @@ func TestConfig_normalizeAddrs_AdvertiseLocalhost(t *testing.T) { // TestConfig_normalizeAddrs_IPv6Loopback asserts that an IPv6 loopback address // is normalized properly. See #2739 func TestConfig_normalizeAddrs_IPv6Loopback(t *testing.T) { + ci.Parallel(t) + c := &Config{ BindAddr: "::1", Ports: &Ports{ @@ -884,6 +907,8 @@ func TestConfig_normalizeAddrs_IPv6Loopback(t *testing.T) { // TestConfig_normalizeAddrs_MultipleInterface asserts that normalizeAddrs will // handle normalizing multiple interfaces in a single protocol. func TestConfig_normalizeAddrs_MultipleInterfaces(t *testing.T) { + ci.Parallel(t) + testCases := []struct { name string addressConfig *Addresses @@ -931,6 +956,8 @@ func TestConfig_normalizeAddrs_MultipleInterfaces(t *testing.T) { } func TestConfig_normalizeAddrs(t *testing.T) { + ci.Parallel(t) + c := &Config{ BindAddr: "169.254.1.5", Ports: &Ports{ @@ -1042,6 +1069,8 @@ func TestConfig_normalizeAddrs(t *testing.T) { } func TestConfig_templateNetworkInterface(t *testing.T) { + ci.Parallel(t) + // find the first interface ifaces, err := sockaddr.GetAllInterfaces() if err != nil { @@ -1139,6 +1168,8 @@ func TestConfig_templateNetworkInterface(t *testing.T) { } func TestIsMissingPort(t *testing.T) { + ci.Parallel(t) + _, _, err := net.SplitHostPort("localhost") if missing := isMissingPort(err); !missing { t.Errorf("expected missing port error, but got %v", err) @@ -1150,6 +1181,8 @@ func TestIsMissingPort(t *testing.T) { } func TestMergeServerJoin(t *testing.T) { + ci.Parallel(t) + require := require.New(t) { @@ -1256,7 +1289,8 @@ func TestMergeServerJoin(t *testing.T) { } func TestTelemetry_PrefixFilters(t *testing.T) { - t.Parallel() + ci.Parallel(t) + cases := []struct { in []string expAllow []string @@ -1298,6 +1332,8 @@ func TestTelemetry_PrefixFilters(t *testing.T) { } func 
TestTelemetry_Parse(t *testing.T) { + ci.Parallel(t) + require := require.New(t) dir, err := ioutil.TempDir("", "nomad") require.NoError(err) @@ -1321,6 +1357,7 @@ func TestTelemetry_Parse(t *testing.T) { } func TestEventBroker_Parse(t *testing.T) { + ci.Parallel(t) require := require.New(t) { @@ -1367,6 +1404,8 @@ func TestEventBroker_Parse(t *testing.T) { } func TestConfig_LoadConsulTemplateConfig(t *testing.T) { + ci.Parallel(t) + defaultConfig := DefaultConfig() // Test that loading without template config didn't create load errors agentConfig, err := LoadConfig("test-resources/minimal_client.hcl") @@ -1414,6 +1453,8 @@ func TestConfig_LoadConsulTemplateConfig(t *testing.T) { } func TestConfig_LoadConsulTemplateBasic(t *testing.T) { + ci.Parallel(t) + defaultConfig := DefaultConfig() // hcl @@ -1449,6 +1490,8 @@ func TestConfig_LoadConsulTemplateBasic(t *testing.T) { } func TestParseMultipleIPTemplates(t *testing.T) { + ci.Parallel(t) + testCases := []struct { name string tmpl string diff --git a/command/agent/consul/check_watcher_test.go b/command/agent/consul/check_watcher_test.go index c24c4d10d..9323d32bb 100644 --- a/command/agent/consul/check_watcher_test.go +++ b/command/agent/consul/check_watcher_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -172,7 +173,7 @@ func testCheck() *structs.ServiceCheck { // TestCheckWatcher_Skip asserts unwatched checks are ignored. func TestCheckWatcher_Skip(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a check with restarting disabled check := testCheck() @@ -194,7 +195,7 @@ func TestCheckWatcher_Skip(t *testing.T) { // TestCheckWatcher_Healthy asserts healthy tasks are not restarted. 
func TestCheckWatcher_Healthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) @@ -228,7 +229,7 @@ func TestCheckWatcher_Healthy(t *testing.T) { // TestCheckWatcher_Unhealthy asserts unhealthy tasks are restarted exactly once. func TestCheckWatcher_Unhealthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) @@ -251,7 +252,7 @@ func TestCheckWatcher_Unhealthy(t *testing.T) { // TestCheckWatcher_HealthyWarning asserts checks in warning with // ignore_warnings=true do not restart tasks. func TestCheckWatcher_HealthyWarning(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) @@ -279,7 +280,7 @@ func TestCheckWatcher_HealthyWarning(t *testing.T) { // TestCheckWatcher_Flapping asserts checks that flap from healthy to unhealthy // before the unhealthy limit is reached do not restart tasks. func TestCheckWatcher_Flapping(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) @@ -308,7 +309,7 @@ func TestCheckWatcher_Flapping(t *testing.T) { // TestCheckWatcher_Unwatch asserts unwatching checks prevents restarts. func TestCheckWatcher_Unwatch(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) @@ -337,7 +338,7 @@ func TestCheckWatcher_Unwatch(t *testing.T) { // for a single task, all checks should be removed when any of them restart the // task to avoid multiple restarts. func TestCheckWatcher_MultipleChecks(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) @@ -386,7 +387,7 @@ func TestCheckWatcher_MultipleChecks(t *testing.T) { // attempting to restart a task even if its update queue is full. 
// https://github.com/hashicorp/nomad/issues/5395 func TestCheckWatcher_Deadlock(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) diff --git a/command/agent/consul/connect_proxies_test.go b/command/agent/consul/connect_proxies_test.go index 8ebcee193..b9dad693c 100644 --- a/command/agent/consul/connect_proxies_test.go +++ b/command/agent/consul/connect_proxies_test.go @@ -3,10 +3,13 @@ package consul import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestConnectProxies_Proxies(t *testing.T) { + ci.Parallel(t) + pc := NewConnectProxiesClient(NewMockAgent(ossFeatures)) proxies, err := pc.Proxies() diff --git a/command/agent/consul/connect_test.go b/command/agent/consul/connect_test.go index 4f3430664..4e7f1a6d2 100644 --- a/command/agent/consul/connect_test.go +++ b/command/agent/consul/connect_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" @@ -30,7 +31,7 @@ var ( ) func TestConnect_newConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { asr, err := newConnect("", "", nil, nil, nil) @@ -111,7 +112,7 @@ func TestConnect_newConnect(t *testing.T) { } func TestConnect_connectSidecarRegistration(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { sidecarReg, err := connectSidecarRegistration("", nil, testConnectNetwork, testConnectPorts) @@ -172,7 +173,7 @@ func TestConnect_connectSidecarRegistration(t *testing.T) { } func TestConnect_connectProxy(t *testing.T) { - t.Parallel() + ci.Parallel(t) // If the input proxy is nil, we expect the output to be a proxy with its // config set to default values. 
@@ -243,7 +244,7 @@ func TestConnect_connectProxy(t *testing.T) { } func TestConnect_connectProxyExpose(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { exposeConfig, err := connectProxyExpose(nil, nil) @@ -284,7 +285,7 @@ func TestConnect_connectProxyExpose(t *testing.T) { } func TestConnect_connectProxyExposePaths(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { upstreams, err := connectProxyExposePaths(nil, nil) @@ -332,7 +333,7 @@ func TestConnect_connectProxyExposePaths(t *testing.T) { } func TestConnect_connectUpstreams(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { require.Nil(t, connectUpstreams(nil)) @@ -363,7 +364,7 @@ func TestConnect_connectUpstreams(t *testing.T) { } func TestConnect_connectProxyConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil map", func(t *testing.T) { require.Equal(t, map[string]interface{}{ @@ -384,7 +385,7 @@ func TestConnect_connectProxyConfig(t *testing.T) { } func TestConnect_getConnectPort(t *testing.T) { - t.Parallel() + ci.Parallel(t) networks := structs.Networks{{ IP: "192.168.30.1", @@ -432,7 +433,7 @@ func TestConnect_getConnectPort(t *testing.T) { } func TestConnect_getExposePathPort(t *testing.T) { - t.Parallel() + ci.Parallel(t) networks := structs.Networks{{ Device: "eth0", @@ -470,7 +471,7 @@ func TestConnect_getExposePathPort(t *testing.T) { } func TestConnect_newConnectGateway(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("not a gateway", func(t *testing.T) { result := newConnectGateway("s1", &structs.ConsulConnect{Native: true}) @@ -546,7 +547,7 @@ func TestConnect_newConnectGateway(t *testing.T) { } func Test_connectMeshGateway(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { result := connectMeshGateway(nil) diff --git a/command/agent/consul/group_test.go b/command/agent/consul/group_test.go index a76aac73e..43c82dd3a 100644 --- 
a/command/agent/consul/group_test.go +++ b/command/agent/consul/group_test.go @@ -7,6 +7,7 @@ import ( consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,6 +15,8 @@ import ( ) func TestConsul_Connect(t *testing.T) { + ci.Parallel(t) + // Create an embedded Consul server testconsul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { // If -v wasn't specified squelch consul logging diff --git a/command/agent/consul/int_test.go b/command/agent/consul/int_test.go index 12aa80f8f..b49e082eb 100644 --- a/command/agent/consul/int_test.go +++ b/command/agent/consul/int_test.go @@ -10,6 +10,7 @@ import ( consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/allocrunner/taskrunner" "github.com/hashicorp/nomad/client/config" @@ -35,6 +36,8 @@ func (m *mockUpdater) TaskStateUpdated() { // TestConsul_Integration asserts TaskRunner properly registers and deregisters // services and checks with Consul using an embedded Consul agent. 
func TestConsul_Integration(t *testing.T) { + ci.Parallel(t) + if testing.Short() { t.Skip("-short set; skipping") } diff --git a/command/agent/consul/namespaces_client_test.go b/command/agent/consul/namespaces_client_test.go index bc7ebdf37..6b8704920 100644 --- a/command/agent/consul/namespaces_client_test.go +++ b/command/agent/consul/namespaces_client_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestNamespacesClient_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("oss", func(t *testing.T) { c := NewNamespacesClient(NewMockNamespaces(nil), NewMockAgent(Features{ @@ -45,7 +46,7 @@ func TestNamespacesClient_List(t *testing.T) { } func TestNewNamespacesClient_stale(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("ok", func(t *testing.T) { now := time.Now() @@ -63,7 +64,7 @@ func TestNewNamespacesClient_stale(t *testing.T) { } func TestNewNamespacesClient_allowable(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(ent, feature, enabled, exp bool, updated, now time.Time) { expired := now.After(updated.Add(namespaceEnabledCacheTTL)) diff --git a/command/agent/consul/self_test.go b/command/agent/consul/self_test.go index 3089c2422..db274d7f0 100644 --- a/command/agent/consul/self_test.go +++ b/command/agent/consul/self_test.go @@ -3,6 +3,7 @@ package consul import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -14,7 +15,7 @@ var ( ) func TestSelf_SKU(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("oss", func(t *testing.T) { s, ok := SKU(Self{ @@ -64,7 +65,7 @@ func TestSelf_SKU(t *testing.T) { } func TestSelf_Namespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("supports namespaces", func(t *testing.T) { enabled := Namespaces(Self{ diff --git a/command/agent/consul/service_client_test.go b/command/agent/consul/service_client_test.go index 9cacaa38d..ffe5bc661 100644 --- 
a/command/agent/consul/service_client_test.go +++ b/command/agent/consul/service_client_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" @@ -13,7 +14,7 @@ import ( ) func TestSyncLogic_agentServiceUpdateRequired(t *testing.T) { - t.Parallel() + ci.Parallel(t) // the service as known by nomad wanted := func() api.AgentServiceRegistration { @@ -253,6 +254,8 @@ func TestSyncLogic_agentServiceUpdateRequired(t *testing.T) { } func TestSyncLogic_tagsDifferent(t *testing.T) { + ci.Parallel(t) + t.Run("nil nil", func(t *testing.T) { require.False(t, tagsDifferent(nil, nil)) }) @@ -284,6 +287,8 @@ func TestSyncLogic_tagsDifferent(t *testing.T) { } func TestSyncLogic_sidecarTagsDifferent(t *testing.T) { + ci.Parallel(t) + type tc struct { parent, wanted, sidecar []string expect bool @@ -310,7 +315,7 @@ func TestSyncLogic_sidecarTagsDifferent(t *testing.T) { } func TestSyncLogic_maybeTweakTags(t *testing.T) { - t.Parallel() + ci.Parallel(t) differentPointers := func(a, b []string) bool { return &(a) != &(b) @@ -355,7 +360,7 @@ func TestSyncLogic_maybeTweakTags(t *testing.T) { } func TestSyncLogic_maybeTweakTags_emptySC(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Check the edge cases where the connect service is deleted on the nomad // side (i.e. are we checking multiple nil pointers). 
@@ -385,7 +390,7 @@ func TestSyncLogic_maybeTweakTags_emptySC(t *testing.T) { // TestServiceRegistration_CheckOnUpdate tests that a ServiceRegistrations // CheckOnUpdate is populated and updated properly func TestServiceRegistration_CheckOnUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) mockAgent := NewMockAgent(ossFeatures) namespacesClient := NewNamespacesClient(NewMockNamespaces(nil), mockAgent) @@ -467,7 +472,7 @@ func TestServiceRegistration_CheckOnUpdate(t *testing.T) { } func TestSyncLogic_proxyUpstreamsDifferent(t *testing.T) { - t.Parallel() + ci.Parallel(t) upstream1 := func() api.Upstream { return api.Upstream{ @@ -602,7 +607,7 @@ func TestSyncLogic_proxyUpstreamsDifferent(t *testing.T) { } func TestSyncReason_String(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Equal(t, "periodic", fmt.Sprintf("%s", syncPeriodic)) require.Equal(t, "shutdown", fmt.Sprintf("%s", syncShutdown)) @@ -611,7 +616,7 @@ func TestSyncReason_String(t *testing.T) { } func TestSyncOps_empty(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(ops *operations, exp bool) { require.Equal(t, exp, ops.empty()) @@ -626,6 +631,8 @@ func TestSyncOps_empty(t *testing.T) { } func TestSyncLogic_maybeSidecarProxyCheck(t *testing.T) { + ci.Parallel(t) + try := func(input string, exp bool) { result := maybeSidecarProxyCheck(input) require.Equal(t, exp, result) diff --git a/command/agent/consul/unit_test.go b/command/agent/consul/unit_test.go index b3f035ad3..2609ffd16 100644 --- a/command/agent/consul/unit_test.go +++ b/command/agent/consul/unit_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" @@ -121,7 +122,8 @@ func setupFake(t *testing.T) *testFakeCtx { } func TestConsul_ChangeTags(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) r := require.New(t) @@ -157,7 
+159,8 @@ func TestConsul_ChangeTags(t *testing.T) { } func TestConsul_EnableTagOverride_Syncs(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) r := require.New(t) @@ -204,6 +207,8 @@ func TestConsul_EnableTagOverride_Syncs(t *testing.T) { // it in Consul. Pre-0.7.1 ports were not part of the service ID and this was a // slightly different code path than changing tags. func TestConsul_ChangePorts(t *testing.T) { + ci.Parallel(t) + ctx := setupFake(t) require := require.New(t) @@ -327,6 +332,8 @@ func TestConsul_ChangePorts(t *testing.T) { // TestConsul_ChangeChecks asserts that updating only the checks on a service // properly syncs with Consul. func TestConsul_ChangeChecks(t *testing.T) { + ci.Parallel(t) + ctx := setupFake(t) ctx.Workload.Services[0].Checks = []*structs.ServiceCheck{ { @@ -561,6 +568,8 @@ func TestConsul_ChangeChecks(t *testing.T) { // TestConsul_RegServices tests basic service registration. func TestConsul_RegServices(t *testing.T) { + ci.Parallel(t) + ctx := setupFake(t) // Add a check w/restarting @@ -697,6 +706,8 @@ func TestConsul_RegServices(t *testing.T) { // TestConsul_ShutdownOK tests the ok path for the shutdown logic in // ServiceClient. func TestConsul_ShutdownOK(t *testing.T) { + ci.Parallel(t) + require := require.New(t) ctx := setupFake(t) go ctx.ServiceClient.Run() @@ -735,8 +746,9 @@ func TestConsul_ShutdownOK(t *testing.T) { // TestConsul_ShutdownBlocked tests the blocked past deadline path for the // shutdown logic in ServiceClient. func TestConsul_ShutdownBlocked(t *testing.T) { + ci.Parallel(t) + require := require.New(t) - t.Parallel() ctx := setupFake(t) // can be short because we're intentionally blocking, but needs to // be longer than the time we'll block Consul so we can be sure @@ -802,7 +815,8 @@ func TestConsul_ShutdownBlocked(t *testing.T) { // auto-use set then services should advertise it unless explicitly set to // host. Checks should always use host.
func TestConsul_DriverNetwork_AutoUse(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) ctx.Workload.Services = []*structs.Service{ @@ -929,7 +943,8 @@ func TestConsul_DriverNetwork_AutoUse(t *testing.T) { // set auto-use only services which request the driver's network should // advertise it. func TestConsul_DriverNetwork_NoAutoUse(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) ctx.Workload.Services = []*structs.Service{ @@ -1003,7 +1018,8 @@ func TestConsul_DriverNetwork_NoAutoUse(t *testing.T) { // TestConsul_DriverNetwork_Change asserts that if a driver network is // specified and a service updates its use its properly updated in Consul. func TestConsul_DriverNetwork_Change(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) ctx.Workload.Services = []*structs.Service{ @@ -1075,7 +1091,8 @@ func TestConsul_DriverNetwork_Change(t *testing.T) { // TestConsul_CanaryTags asserts CanaryTags are used when Canary=true func TestConsul_CanaryTags(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) ctx := setupFake(t) @@ -1108,7 +1125,8 @@ func TestConsul_CanaryTags(t *testing.T) { // TestConsul_CanaryTags_NoTags asserts Tags are used when Canary=true and there // are no specified canary tags func TestConsul_CanaryTags_NoTags(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) ctx := setupFake(t) @@ -1140,7 +1158,8 @@ func TestConsul_CanaryTags_NoTags(t *testing.T) { // TestConsul_CanaryMeta asserts CanaryMeta are used when Canary=true func TestConsul_CanaryMeta(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) ctx := setupFake(t) @@ -1174,7 +1193,8 @@ func TestConsul_CanaryMeta(t *testing.T) { // TestConsul_CanaryMeta_NoMeta asserts Meta are used when Canary=true and there // are no specified canary meta func TestConsul_CanaryMeta_NoMeta(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) ctx := setupFake(t) @@ 
-1208,7 +1228,7 @@ func TestConsul_CanaryMeta_NoMeta(t *testing.T) { // TestConsul_PeriodicSync asserts that Nomad periodically reconciles with // Consul. func TestConsul_PeriodicSync(t *testing.T) { - t.Parallel() + ci.Parallel(t) ctx := setupFake(t) defer ctx.ServiceClient.Shutdown() @@ -1235,7 +1255,7 @@ func TestConsul_PeriodicSync(t *testing.T) { // TestIsNomadService asserts the isNomadService helper returns true for Nomad // task IDs and false for unknown IDs and Nomad agent IDs (see #2827). func TestIsNomadService(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []struct { id string @@ -1268,7 +1288,8 @@ func TestIsNomadService(t *testing.T) { // TestCreateCheckReg_HTTP asserts Nomad ServiceCheck structs are properly // converted to Consul API AgentCheckRegistrations for HTTP checks. func TestCreateCheckReg_HTTP(t *testing.T) { - t.Parallel() + ci.Parallel(t) + check := &structs.ServiceCheck{ Name: "name", Type: "http", @@ -1315,7 +1336,8 @@ func TestCreateCheckReg_HTTP(t *testing.T) { // TestCreateCheckReg_GRPC asserts Nomad ServiceCheck structs are properly // converted to Consul API AgentCheckRegistrations for GRPC checks. func TestCreateCheckReg_GRPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) + check := &structs.ServiceCheck{ Name: "name", Type: "grpc", @@ -1352,6 +1374,8 @@ func TestCreateCheckReg_GRPC(t *testing.T) { // TestGetAddress asserts Nomad uses the correct ip and port for services and // checks depending on port labels, driver networks, and address mode. 
func TestGetAddress(t *testing.T) { + ci.Parallel(t) + const HostIP = "127.0.0.1" cases := []struct { @@ -1705,7 +1729,8 @@ func TestGetAddress(t *testing.T) { } func TestConsul_ServiceName_Duplicates(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) require := require.New(t) @@ -1768,7 +1793,8 @@ func TestConsul_ServiceName_Duplicates(t *testing.T) { // TestConsul_ServiceDeregistration_OutOfProbation asserts that during in steady // state we remove any services we don't reconize locally func TestConsul_ServiceDeregistration_OutProbation(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) require := require.New(t) @@ -1877,7 +1903,8 @@ func TestConsul_ServiceDeregistration_OutProbation(t *testing.T) { // services untouched. This adds a grace period for restoring recovered tasks // before deregistering them func TestConsul_ServiceDeregistration_InProbation(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) require := require.New(t) diff --git a/command/agent/consul/version_checker_test.go b/command/agent/consul/version_checker_test.go index 351c89702..e098cf0ea 100644 --- a/command/agent/consul/version_checker_test.go +++ b/command/agent/consul/version_checker_test.go @@ -3,10 +3,13 @@ package consul import ( "encoding/json" "testing" + + "github.com/hashicorp/nomad/ci" ) func TestConsulSupportsTLSSkipVerify(t *testing.T) { - t.Parallel() + ci.Parallel(t) + assertSupport := func(expected bool, blob string) { self := map[string]map[string]interface{}{} if err := json.Unmarshal([]byte("{"+blob+"}"), &self); err != nil { diff --git a/command/agent/csi_endpoint_test.go b/command/agent/csi_endpoint_test.go index bbb997857..7115027e2 100644 --- a/command/agent/csi_endpoint_test.go +++ b/command/agent/csi_endpoint_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" 
"github.com/hashicorp/nomad/nomad/structs" @@ -14,7 +15,7 @@ import ( ) func TestHTTP_CSIEndpointPlugin(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { server := s.Agent.Server() cleanup := state.CreateTestCSIPlugin(server.State(), "foo") @@ -45,7 +46,7 @@ func TestHTTP_CSIEndpointPlugin(t *testing.T) { } func TestHTTP_CSIParseSecrets(t *testing.T) { - t.Parallel() + ci.Parallel(t) testCases := []struct { val string expect structs.CSISecrets @@ -82,7 +83,7 @@ func TestHTTP_CSIEndpointUtils(t *testing.T) { } func TestHTTP_CSIEndpointRegisterVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { server := s.Agent.Server() cleanup := state.CreateTestCSIPluginNodeOnly(server.State(), "foo") @@ -124,7 +125,7 @@ func TestHTTP_CSIEndpointRegisterVolume(t *testing.T) { } func TestHTTP_CSIEndpointCreateVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { server := s.Agent.Server() cleanup := state.CreateTestCSIPlugin(server.State(), "foo") @@ -156,7 +157,7 @@ func TestHTTP_CSIEndpointCreateVolume(t *testing.T) { } func TestHTTP_CSIEndpointSnapshot(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { server := s.Agent.Server() cleanup := state.CreateTestCSIPlugin(server.State(), "foo") @@ -181,7 +182,7 @@ func TestHTTP_CSIEndpointSnapshot(t *testing.T) { // TestHTTP_CSIEndpoint_Cast is a smoke test for converting from structs to // API structs func TestHTTP_CSIEndpoint_Cast(t *testing.T) { - t.Parallel() + ci.Parallel(t) plugin := mock.CSIPlugin() plugin.Nodes["node1"] = &structs.CSIInfo{ diff --git a/command/agent/deployment_endpoint_test.go b/command/agent/deployment_endpoint_test.go index 1332776a3..2a1742157 100644 --- a/command/agent/deployment_endpoint_test.go +++ b/command/agent/deployment_endpoint_test.go @@ -5,13 +5,14 @@ import ( "net/http/httptest" "testing" + "github.com/hashicorp/nomad/ci" 
"github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" ) func TestHTTP_DeploymentList(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -42,7 +43,7 @@ func TestHTTP_DeploymentList(t *testing.T) { } func TestHTTP_DeploymentPrefixList(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -76,7 +77,7 @@ func TestHTTP_DeploymentPrefixList(t *testing.T) { } func TestHTTP_DeploymentAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -137,7 +138,7 @@ func TestHTTP_DeploymentAllocations(t *testing.T) { } func TestHTTP_DeploymentQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -166,7 +167,7 @@ func TestHTTP_DeploymentQuery(t *testing.T) { } func TestHTTP_DeploymentPause(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -207,7 +208,7 @@ func TestHTTP_DeploymentPause(t *testing.T) { } func TestHTTP_DeploymentPromote(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -248,7 +249,7 @@ func TestHTTP_DeploymentPromote(t *testing.T) { } func TestHTTP_DeploymentAllocHealth(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -293,7 +294,7 @@ func TestHTTP_DeploymentAllocHealth(t *testing.T) { } func TestHTTP_DeploymentFail(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly 
manipulate the state diff --git a/command/agent/eval_endpoint_test.go b/command/agent/eval_endpoint_test.go index f45bc9ede..15217ea1b 100644 --- a/command/agent/eval_endpoint_test.go +++ b/command/agent/eval_endpoint_test.go @@ -6,14 +6,14 @@ import ( "net/http/httptest" "testing" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" + "github.com/stretchr/testify/require" ) func TestHTTP_EvalList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -60,7 +60,7 @@ func TestHTTP_EvalList(t *testing.T) { } func TestHTTP_EvalPrefixList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -111,7 +111,7 @@ func TestHTTP_EvalPrefixList(t *testing.T) { } func TestHTTP_EvalAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -159,7 +159,7 @@ func TestHTTP_EvalAllocations(t *testing.T) { } func TestHTTP_EvalQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() diff --git a/command/agent/event_endpoint_test.go b/command/agent/event_endpoint_test.go index c450a9179..acef9f08e 100644 --- a/command/agent/event_endpoint_test.go +++ b/command/agent/event_endpoint_test.go @@ -10,9 +10,9 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -23,7 +23,7 @@ type testEvent struct { } func TestEventStream(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, 
nil, func(s *TestAgent) { ctx, cancel := context.WithCancel(context.Background()) @@ -68,7 +68,7 @@ func TestEventStream(t *testing.T) { } func TestEventStream_NamespaceQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { ctx, cancel := context.WithCancel(context.Background()) @@ -120,7 +120,7 @@ func TestEventStream_NamespaceQuery(t *testing.T) { } func TestEventStream_QueryParse(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { desc string diff --git a/command/agent/fs_endpoint_test.go b/command/agent/fs_endpoint_test.go index 7e0f286b6..1acb468a2 100644 --- a/command/agent/fs_endpoint_test.go +++ b/command/agent/fs_endpoint_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -114,7 +115,7 @@ func mockFSAlloc(nodeID string, config map[string]interface{}) *structs.Allocati } func TestHTTP_FS_List_MissingParams(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/client/fs/ls/", nil) @@ -126,7 +127,7 @@ func TestHTTP_FS_List_MissingParams(t *testing.T) { } func TestHTTP_FS_Stat_MissingParams(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/client/fs/stat/", nil) @@ -146,7 +147,7 @@ func TestHTTP_FS_Stat_MissingParams(t *testing.T) { } func TestHTTP_FS_ReadAt_MissingParams(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/client/fs/readat/", nil) @@ -170,7 +171,7 @@ func TestHTTP_FS_ReadAt_MissingParams(t *testing.T) { } func TestHTTP_FS_Cat_MissingParams(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, 
func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/client/fs/cat/", nil) @@ -190,7 +191,7 @@ func TestHTTP_FS_Cat_MissingParams(t *testing.T) { } func TestHTTP_FS_Stream_MissingParams(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/client/fs/stream/", nil) @@ -220,7 +221,7 @@ func TestHTTP_FS_Stream_MissingParams(t *testing.T) { // TestHTTP_FS_Logs_MissingParams asserts proper error codes and messages are // returned for incorrect parameters (eg missing tasks). func TestHTTP_FS_Logs_MissingParams(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { // AllocID Not Present @@ -262,7 +263,7 @@ func TestHTTP_FS_Logs_MissingParams(t *testing.T) { } func TestHTTP_FS_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -282,7 +283,7 @@ func TestHTTP_FS_List(t *testing.T) { } func TestHTTP_FS_Stat(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -303,7 +304,7 @@ func TestHTTP_FS_Stat(t *testing.T) { } func TestHTTP_FS_ReadAt(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -329,7 +330,7 @@ func TestHTTP_FS_ReadAt(t *testing.T) { // TestHTTP_FS_ReadAt_XSS asserts that the readat API is safe from XSS. 
func TestHTTP_FS_ReadAt_XSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), xssLoggerMockDriver) addAllocToClient(s, a, terminalClientAlloc) @@ -353,7 +354,7 @@ func TestHTTP_FS_ReadAt_XSS(t *testing.T) { } func TestHTTP_FS_Cat(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -375,7 +376,7 @@ func TestHTTP_FS_Cat(t *testing.T) { // TestHTTP_FS_Cat_XSS asserts that the cat API is safe from XSS. func TestHTTP_FS_Cat_XSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), xssLoggerMockDriver) addAllocToClient(s, a, terminalClientAlloc) @@ -398,7 +399,7 @@ func TestHTTP_FS_Cat_XSS(t *testing.T) { } func TestHTTP_FS_Stream_NoFollow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -443,7 +444,7 @@ func TestHTTP_FS_Stream_NoFollow(t *testing.T) { // TestHTTP_FS_Stream_NoFollow_XSS asserts that the stream API is safe from XSS. 
func TestHTTP_FS_Stream_NoFollow_XSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), xssLoggerMockDriver) addAllocToClient(s, a, terminalClientAlloc) @@ -462,7 +463,7 @@ func TestHTTP_FS_Stream_NoFollow_XSS(t *testing.T) { } func TestHTTP_FS_Stream_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -506,7 +507,7 @@ func TestHTTP_FS_Stream_Follow(t *testing.T) { } func TestHTTP_FS_Logs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -544,7 +545,7 @@ func TestHTTP_FS_Logs(t *testing.T) { // text/plain or application/json content regardless of whether the logs are // HTML+Javascript or not. func TestHTTP_FS_Logs_XSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), xssLoggerMockDriver) addAllocToClient(s, a, terminalClientAlloc) @@ -565,7 +566,7 @@ func TestHTTP_FS_Logs_XSS(t *testing.T) { } func TestHTTP_FS_Logs_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -607,7 +608,7 @@ func TestHTTP_FS_Logs_Follow(t *testing.T) { } func TestHTTP_FS_Logs_PropagatesErrors(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { path := fmt.Sprintf("/v1/client/fs/logs/%s?type=stdout&task=web&offset=0&origin=end&plain=true", uuid.Generate()) diff --git a/command/agent/helpers_test.go b/command/agent/helpers_test.go index 3dab65388..a53c6ea95 100644 --- a/command/agent/helpers_test.go +++ b/command/agent/helpers_test.go @@ -3,12 +3,13 @@ package agent import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/stretchr/testify/require" 
) func TestHTTP_rpcHandlerForAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) agent := NewTestAgent(t, t.Name(), nil) defer agent.Shutdown() @@ -52,7 +53,7 @@ func TestHTTP_rpcHandlerForAlloc(t *testing.T) { } func TestHTTP_rpcHandlerForNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) agent := NewTestAgent(t, t.Name(), nil) defer agent.Shutdown() diff --git a/command/agent/host/host_test.go b/command/agent/host/host_test.go index 12131fa87..aebb09b8b 100644 --- a/command/agent/host/host_test.go +++ b/command/agent/host/host_test.go @@ -4,6 +4,7 @@ import ( "os" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -18,6 +19,8 @@ func TestHostUtils(t *testing.T) { } func TestMakeHostData(t *testing.T) { + ci.Parallel(t) + // setenv variables that should be redacted prev := os.Getenv("VAULT_TOKEN") os.Setenv("VAULT_TOKEN", "foo") diff --git a/command/agent/http_stdlog_test.go b/command/agent/http_stdlog_test.go index 523086b22..aa9430242 100644 --- a/command/agent/http_stdlog_test.go +++ b/command/agent/http_stdlog_test.go @@ -5,10 +5,13 @@ import ( "testing" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestHttpServerLoggerFilters_Level_Info(t *testing.T) { + ci.Parallel(t) + var buf bytes.Buffer hclogger := hclog.New(&hclog.LoggerOptions{ Name: "testlog", @@ -29,6 +32,8 @@ func TestHttpServerLoggerFilters_Level_Info(t *testing.T) { } func TestHttpServerLoggerFilters_Level_Trace(t *testing.T) { + ci.Parallel(t) + var buf bytes.Buffer hclogger := hclog.New(&hclog.LoggerOptions{ Name: "testlog", diff --git a/command/agent/http_test.go b/command/agent/http_test.go index dd1521387..e5cb11571 100644 --- a/command/agent/http_test.go +++ b/command/agent/http_test.go @@ -20,17 +20,17 @@ import ( "time" "github.com/hashicorp/go-msgpack/codec" - "github.com/stretchr/testify/assert" - 
"github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs/config" "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // makeHTTPServer returns a test server whose logs will be written to @@ -71,6 +71,8 @@ func BenchmarkHTTPRequests(b *testing.B) { } func TestMultipleInterfaces(t *testing.T) { + ci.Parallel(t) + httpIps := []string{"127.0.0.1", "127.0.0.2"} s := makeHTTPServer(t, func(c *Config) { @@ -91,7 +93,7 @@ func TestMultipleInterfaces(t *testing.T) { // TestRootFallthrough tests rootFallthrough handler to // verify redirect and 404 behavior func TestRootFallthrough(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { desc string @@ -150,7 +152,7 @@ func TestRootFallthrough(t *testing.T) { } func TestSetIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) resp := httptest.NewRecorder() setIndex(resp, 1000) header := resp.Header().Get("X-Nomad-Index") @@ -164,7 +166,7 @@ func TestSetIndex(t *testing.T) { } func TestSetKnownLeader(t *testing.T) { - t.Parallel() + ci.Parallel(t) resp := httptest.NewRecorder() setKnownLeader(resp, true) header := resp.Header().Get("X-Nomad-KnownLeader") @@ -180,7 +182,7 @@ func TestSetKnownLeader(t *testing.T) { } func TestSetLastContact(t *testing.T) { - t.Parallel() + ci.Parallel(t) resp := httptest.NewRecorder() setLastContact(resp, 123456*time.Microsecond) header := resp.Header().Get("X-Nomad-LastContact") @@ -190,7 +192,7 @@ func TestSetLastContact(t *testing.T) { } func TestSetMeta(t *testing.T) { - t.Parallel() + ci.Parallel(t) meta := structs.QueryMeta{ Index: 1000, KnownLeader: true, @@ -213,7 +215,7 @@ func TestSetMeta(t *testing.T) { } func 
TestSetHeaders(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) s.Agent.config.HTTPAPIResponseHeaders = map[string]string{"foo": "bar"} defer s.Shutdown() @@ -234,7 +236,7 @@ func TestSetHeaders(t *testing.T) { } func TestContentTypeIsJSON(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -255,7 +257,7 @@ func TestContentTypeIsJSON(t *testing.T) { } func TestWrapNonJSON(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -274,7 +276,7 @@ func TestWrapNonJSON(t *testing.T) { } func TestWrapNonJSON_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -309,17 +311,17 @@ func TestWrapNonJSON_Error(t *testing.T) { } func TestPrettyPrint(t *testing.T) { - t.Parallel() + ci.Parallel(t) testPrettyPrint("pretty=1", true, t) } func TestPrettyPrintOff(t *testing.T) { - t.Parallel() + ci.Parallel(t) testPrettyPrint("pretty=0", false, t) } func TestPrettyPrintBare(t *testing.T) { - t.Parallel() + ci.Parallel(t) testPrettyPrint("pretty", true, t) } @@ -409,7 +411,7 @@ func TestTokenNotFound(t *testing.T) { } func TestParseWait(t *testing.T) { - t.Parallel() + ci.Parallel(t) resp := httptest.NewRecorder() var b structs.QueryOptions @@ -432,7 +434,7 @@ func TestParseWait(t *testing.T) { } func TestParseWait_InvalidTime(t *testing.T) { - t.Parallel() + ci.Parallel(t) resp := httptest.NewRecorder() var b structs.QueryOptions @@ -452,7 +454,7 @@ func TestParseWait_InvalidTime(t *testing.T) { } func TestParseWait_InvalidIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) resp := httptest.NewRecorder() var b structs.QueryOptions @@ -472,7 +474,7 @@ func TestParseWait_InvalidIndex(t *testing.T) { } func TestParseConsistency(t *testing.T) { - t.Parallel() + ci.Parallel(t) var b structs.QueryOptions req, err := http.NewRequest("GET", @@ -500,7 +502,7 @@ func TestParseConsistency(t *testing.T) { } func 
TestParseRegion(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -529,7 +531,7 @@ func TestParseRegion(t *testing.T) { } func TestParseToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -547,7 +549,7 @@ func TestParseToken(t *testing.T) { } func TestParseBool(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Input string @@ -594,7 +596,7 @@ func TestParseBool(t *testing.T) { } func Test_parseInt(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Input string @@ -641,7 +643,7 @@ func Test_parseInt(t *testing.T) { } func TestParsePagination(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -686,7 +688,7 @@ func TestParsePagination(t *testing.T) { // TestHTTP_VerifyHTTPSClient asserts that a client certificate signed by the // appropriate CA is required when VerifyHTTPSClient=true. func TestHTTP_VerifyHTTPSClient(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../../helper/tlsutil/testdata/ca.pem" foocert = "../../helper/tlsutil/testdata/nomad-foo.pem" @@ -807,7 +809,7 @@ func TestHTTP_VerifyHTTPSClient(t *testing.T) { } func TestHTTP_VerifyHTTPSClient_AfterConfigReload(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -911,7 +913,7 @@ func TestHTTP_VerifyHTTPSClient_AfterConfigReload(t *testing.T) { // TestHTTPServer_Limits_Error asserts invalid Limits cause errors. This is the // HTTP counterpart to TestAgent_ServerConfig_Limits_Error. 
func TestHTTPServer_Limits_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { tls bool @@ -961,7 +963,7 @@ func TestHTTPServer_Limits_Error(t *testing.T) { tc := cases[i] name := fmt.Sprintf("%d-tls-%t-timeout-%s-limit-%v", i, tc.tls, tc.timeout, tc.limit) t.Run(name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := &Config{ normalizedAddrs: &NormalizedAddrs{ @@ -1001,7 +1003,7 @@ func limitStr(limit *int) string { // TestHTTPServer_Limits_OK asserts that all valid limits combinations // (tls/timeout/conns) work. func TestHTTPServer_Limits_OK(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../../helper/tlsutil/testdata/ca.pem" @@ -1274,7 +1276,7 @@ func TestHTTPServer_Limits_OK(t *testing.T) { tc := cases[i] name := fmt.Sprintf("%d-tls-%t-timeout-%s-limit-%v", i, tc.tls, tc.timeout, limitStr(tc.limit)) t.Run(name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) if tc.limit != nil && *tc.limit >= maxConns { t.Fatalf("test fixture failure: cannot assert limit (%d) >= max (%d)", *tc.limit, maxConns) @@ -1317,7 +1319,7 @@ func TestHTTPServer_Limits_OK(t *testing.T) { } func TestHTTPServer_ResolveToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Setup two servers, one with ACL enabled and another with ACL disabled. 
noACLServer := makeHTTPServer(t, func(c *Config) { @@ -1368,6 +1370,8 @@ func TestHTTPServer_ResolveToken(t *testing.T) { } func Test_IsAPIClientError(t *testing.T) { + ci.Parallel(t) + trueCases := []int{400, 403, 404, 499} for _, c := range trueCases { require.Truef(t, isAPIClientError(c), "code: %v", c) @@ -1380,6 +1384,7 @@ func Test_IsAPIClientError(t *testing.T) { } func Test_decodeBody(t *testing.T) { + ci.Parallel(t) testCases := []struct { inputReq *http.Request diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go index ff4d615a6..08b489bb1 100644 --- a/command/agent/job_endpoint_test.go +++ b/command/agent/job_endpoint_test.go @@ -9,18 +9,18 @@ import ( "time" "github.com/golang/snappy" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/acl" api "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestHTTP_JobsList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { for i := 0; i < 3; i++ { // Create the job @@ -71,12 +71,13 @@ func TestHTTP_JobsList(t *testing.T) { } func TestHTTP_PrefixJobsList(t *testing.T) { + ci.Parallel(t) + ids := []string{ "aaaaaaaa-e8f7-fd38-c855-ab94ceb89706", "aabbbbbb-e8f7-fd38-c855-ab94ceb89706", "aabbcccc-e8f7-fd38-c855-ab94ceb89706", } - t.Parallel() httpTest(t, nil, func(s *TestAgent) { for i := 0; i < 3; i++ { // Create the job @@ -129,7 +131,7 @@ func TestHTTP_PrefixJobsList(t *testing.T) { } func TestHTTP_JobsList_AllNamespaces_OSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { for i := 0; i < 3; i++ { // Create the job @@ -169,7 +171,7 @@ func TestHTTP_JobsList_AllNamespaces_OSS(t *testing.T) { } func TestHTTP_JobsRegister(t
*testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() @@ -223,7 +225,7 @@ func TestHTTP_JobsRegister(t *testing.T) { } func TestHTTP_JobsRegister_IgnoresParentID(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() @@ -286,7 +288,7 @@ func TestHTTP_JobsRegister_IgnoresParentID(t *testing.T) { // Test that ACL token is properly threaded through to the RPC endpoint func TestHTTP_JobsRegister_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() @@ -316,7 +318,7 @@ func TestHTTP_JobsRegister_ACL(t *testing.T) { } func TestHTTP_JobsRegister_Defaulting(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() @@ -377,7 +379,7 @@ func TestHTTP_JobsRegister_Defaulting(t *testing.T) { } func TestHTTP_JobsParse(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { buf := encodeReq(api.JobsParseRequest{JobHCL: mock.HCL()}) req, err := http.NewRequest("POST", "/v1/jobs/parse", buf) @@ -410,7 +412,7 @@ func TestHTTP_JobsParse(t *testing.T) { } func TestHTTP_JobsParse_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -531,7 +533,7 @@ func TestHTTP_JobsParse_ACL(t *testing.T) { } func TestHTTP_JobQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -580,7 +582,7 @@ func TestHTTP_JobQuery(t *testing.T) { } func TestHTTP_JobQuery_Payload(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -634,7 +636,7 @@ func TestHTTP_JobQuery_Payload(t *testing.T) { } func TestHTTP_jobUpdate_systemScaling(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, 
func(s *TestAgent) { // Create the job job := MockJob() @@ -664,7 +666,7 @@ func TestHTTP_jobUpdate_systemScaling(t *testing.T) { } func TestHTTP_JobUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() @@ -721,7 +723,7 @@ func TestHTTP_JobUpdate(t *testing.T) { } func TestHTTP_JobUpdate_EvalPriority(t *testing.T) { - t.Parallel() + ci.Parallel(t) testCases := []struct { inputEvalPriority int @@ -816,7 +818,7 @@ func TestHTTP_JobUpdate_EvalPriority(t *testing.T) { } func TestHTTP_JobUpdateRegion(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -915,7 +917,7 @@ func TestHTTP_JobUpdateRegion(t *testing.T) { } func TestHTTP_JobDelete(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -1017,7 +1019,7 @@ func TestHTTP_JobDelete(t *testing.T) { } func TestHTTP_JobDelete_EvalPriority(t *testing.T) { - t.Parallel() + ci.Parallel(t) testCases := []struct { inputEvalPriority int @@ -1128,7 +1130,7 @@ func TestHTTP_JobDelete_EvalPriority(t *testing.T) { } func TestHTTP_Job_ScaleTaskGroup(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -1189,7 +1191,7 @@ func TestHTTP_Job_ScaleTaskGroup(t *testing.T) { } func TestHTTP_Job_ScaleStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -1228,7 +1230,7 @@ func TestHTTP_Job_ScaleStatus(t *testing.T) { } func TestHTTP_JobForceEvaluate(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -1271,7 +1273,7 @@ func TestHTTP_JobForceEvaluate(t *testing.T) { } func TestHTTP_JobEvaluate_ForceReschedule(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -1322,7 +1324,7 @@ func TestHTTP_JobEvaluate_ForceReschedule(t *testing.T) { } func TestHTTP_JobEvaluations(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -1373,7 +1375,7 @@ func TestHTTP_JobEvaluations(t *testing.T) { } func TestHTTP_JobAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job alloc1 := mock.Alloc() @@ -1438,8 +1440,8 @@ func TestHTTP_JobAllocations(t *testing.T) { } func TestHTTP_JobDeployments(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { // Create the job j := mock.Job() @@ -1482,8 +1484,8 @@ func TestHTTP_JobDeployments(t *testing.T) { } func TestHTTP_JobDeployment(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { // Create the job j := mock.Job() @@ -1525,7 +1527,7 @@ func TestHTTP_JobDeployment(t *testing.T) { } func TestHTTP_JobVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -1603,7 +1605,7 @@ func TestHTTP_JobVersions(t *testing.T) { } func TestHTTP_PeriodicForce(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create and register a periodic job. 
job := mock.PeriodicJob() @@ -1646,7 +1648,7 @@ func TestHTTP_PeriodicForce(t *testing.T) { } func TestHTTP_JobPlan(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() @@ -1686,7 +1688,7 @@ func TestHTTP_JobPlan(t *testing.T) { } func TestHTTP_JobPlanRegion(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -1761,7 +1763,7 @@ func TestHTTP_JobPlanRegion(t *testing.T) { } func TestHTTP_JobDispatch(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the parameterized job job := mock.BatchJob() @@ -1816,7 +1818,7 @@ func TestHTTP_JobDispatch(t *testing.T) { } func TestHTTP_JobRevert(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job and register it twice job := mock.Job() @@ -1875,7 +1877,7 @@ func TestHTTP_JobRevert(t *testing.T) { } func TestHTTP_JobStable(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job and register it twice job := mock.Job() @@ -1933,7 +1935,7 @@ func TestHTTP_JobStable(t *testing.T) { } func TestJobs_ParsingWriteRequest(t *testing.T) { - t.Parallel() + ci.Parallel(t) // defaults agentRegion := "agentRegion" @@ -2074,7 +2076,7 @@ func TestJobs_ParsingWriteRequest(t *testing.T) { } func TestJobs_RegionForJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) // defaults agentRegion := "agentRegion" @@ -2176,7 +2178,7 @@ func TestJobs_RegionForJob(t *testing.T) { } func TestJobs_NamespaceForJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) // test namespace for pointer inputs ns := "dev" @@ -2247,6 +2249,8 @@ func TestJobs_NamespaceForJob(t *testing.T) { } func TestJobs_ApiJobToStructsJob(t *testing.T) { + ci.Parallel(t) + apiJob := &api.Job{ Stop: helper.BoolToPtr(true), Region: helper.StringToPtr("global"), @@ -3284,6 +3288,8 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { } func 
TestJobs_ApiJobToStructsJobUpdate(t *testing.T) { + ci.Parallel(t) + apiJob := &api.Job{ Update: &api.UpdateStrategy{ Stagger: helper.TimeToPtr(1 * time.Second), @@ -3362,7 +3368,7 @@ func TestJobs_ApiJobToStructsJobUpdate(t *testing.T) { // While this is an odd place to test that, this is where both are imported, // validated, and converted. func TestJobs_Matching_Resources(t *testing.T) { - t.Parallel() + ci.Parallel(t) // api.MinResources == structs.MinResources structsMinRes := ApiResourcesToStructs(api.MinResources()) @@ -3376,7 +3382,7 @@ func TestJobs_Matching_Resources(t *testing.T) { // TestHTTP_JobValidate_SystemMigrate asserts that a system job with a migrate // stanza fails to validate but does not panic (see #5477). func TestHTTP_JobValidate_SystemMigrate(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := &api.Job{ @@ -3417,13 +3423,13 @@ func TestHTTP_JobValidate_SystemMigrate(t *testing.T) { } func TestConversion_dereferenceInt(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Equal(t, 0, dereferenceInt(nil)) require.Equal(t, 42, dereferenceInt(helper.IntToPtr(42))) } func TestConversion_apiLogConfigToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiLogConfigToStructs(nil)) require.Equal(t, &structs.LogConfig{ MaxFiles: 2, @@ -3435,7 +3441,7 @@ func TestConversion_apiLogConfigToStructs(t *testing.T) { } func TestConversion_apiResourcesToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string @@ -3482,7 +3488,7 @@ func TestConversion_apiResourcesToStructs(t *testing.T) { } func TestConversion_apiConnectSidecarTaskToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiConnectSidecarTaskToStructs(nil)) delay := time.Duration(200) timeout := time.Duration(1000) @@ -3529,7 +3535,7 @@ func TestConversion_apiConnectSidecarTaskToStructs(t *testing.T) { } func TestConversion_apiConsulExposePathsToStructs(t *testing.T) 
{ - t.Parallel() + ci.Parallel(t) require.Nil(t, apiConsulExposePathsToStructs(nil)) require.Nil(t, apiConsulExposePathsToStructs(make([]*api.ConsulExposePath, 0))) require.Equal(t, []structs.ConsulExposePath{{ @@ -3546,7 +3552,7 @@ func TestConversion_apiConsulExposePathsToStructs(t *testing.T) { } func TestConversion_apiConsulExposeConfigToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiConsulExposeConfigToStructs(nil)) require.Equal(t, &structs.ConsulExposeConfig{ Paths: []structs.ConsulExposePath{{Path: "/health"}}, @@ -3556,7 +3562,7 @@ func TestConversion_apiConsulExposeConfigToStructs(t *testing.T) { } func TestConversion_apiUpstreamsToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiUpstreamsToStructs(nil)) require.Nil(t, apiUpstreamsToStructs(make([]*api.ConsulUpstream, 0))) require.Equal(t, []structs.ConsulUpstream{{ @@ -3575,14 +3581,14 @@ func TestConversion_apiUpstreamsToStructs(t *testing.T) { } func TestConversion_apiConsulMeshGatewayToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiMeshGatewayToStructs(nil)) require.Equal(t, &structs.ConsulMeshGateway{Mode: "remote"}, apiMeshGatewayToStructs(&api.ConsulMeshGateway{Mode: "remote"})) } func TestConversion_apiConnectSidecarServiceProxyToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiConnectSidecarServiceProxyToStructs(nil)) config := make(map[string]interface{}) require.Equal(t, &structs.ConsulProxy{ @@ -3611,7 +3617,7 @@ func TestConversion_apiConnectSidecarServiceProxyToStructs(t *testing.T) { } func TestConversion_apiConnectSidecarServiceToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiConnectSidecarTaskToStructs(nil)) require.Equal(t, &structs.ConsulSidecarService{ Tags: []string{"foo"}, @@ -3629,7 +3635,7 @@ func TestConversion_apiConnectSidecarServiceToStructs(t *testing.T) { } func TestConversion_ApiConsulConnectToStructs(t *testing.T) { - t.Parallel() + 
ci.Parallel(t) t.Run("nil", func(t *testing.T) { require.Nil(t, ApiConsulConnectToStructs(nil)) diff --git a/command/agent/keyring_test.go b/command/agent/keyring_test.go index 3408d8df0..284a11314 100644 --- a/command/agent/keyring_test.go +++ b/command/agent/keyring_test.go @@ -8,10 +8,11 @@ import ( "testing" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" ) func TestAgent_LoadKeyrings(t *testing.T) { - t.Parallel() + ci.Parallel(t) key := "tbLJg26ZJyJ9pK3qhc9jig==" // Should be no configured keyring file by default @@ -45,7 +46,7 @@ func TestAgent_LoadKeyrings(t *testing.T) { } func TestAgent_InitKeyring(t *testing.T) { - t.Parallel() + ci.Parallel(t) key1 := "tbLJg26ZJyJ9pK3qhc9jig==" key2 := "4leC33rgtXKIVUr9Nr0snQ==" expected := fmt.Sprintf(`["%s"]`, key1) diff --git a/command/agent/log_file_test.go b/command/agent/log_file_test.go index 8bcc52a00..78e6ce5d9 100644 --- a/command/agent/log_file_test.go +++ b/command/agent/log_file_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/hashicorp/logutils" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -18,7 +19,7 @@ const ( ) func TestLogFile_timeRotation(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tempDir, err := ioutil.TempDir("", "LogWriterTimeTest") @@ -43,7 +44,7 @@ func TestLogFile_timeRotation(t *testing.T) { } func TestLogFile_openNew(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tempDir, err := ioutil.TempDir("", "LogWriterOpenTest") @@ -80,7 +81,7 @@ func TestLogFile_openNew(t *testing.T) { } func TestLogFile_byteRotation(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tempDir, err := ioutil.TempDir("", "LogWriterByteTest") @@ -104,7 +105,7 @@ func TestLogFile_byteRotation(t *testing.T) { } func TestLogFile_logLevelFiltering(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tempDir, err := ioutil.TempDir("", "LogWriterFilterTest") @@ -127,7 +128,7 @@ 
func TestLogFile_logLevelFiltering(t *testing.T) { } func TestLogFile_deleteArchives(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tempDir, err := ioutil.TempDir("", "LogWriterDeleteArchivesTest") @@ -167,7 +168,7 @@ func TestLogFile_deleteArchives(t *testing.T) { } func TestLogFile_deleteArchivesDisabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tempDir, err := ioutil.TempDir("", "LogWriterDeleteArchivesDisabledTest") diff --git a/command/agent/log_levels_test.go b/command/agent/log_levels_test.go index a3e863465..ab4d03842 100644 --- a/command/agent/log_levels_test.go +++ b/command/agent/log_levels_test.go @@ -4,10 +4,11 @@ import ( "testing" "github.com/hashicorp/logutils" + "github.com/hashicorp/nomad/ci" ) func TestLevelFilter(t *testing.T) { - t.Parallel() + ci.Parallel(t) filt := LevelFilter() filt.Levels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERR"} @@ -24,5 +25,4 @@ func TestLevelFilter(t *testing.T) { if ValidateLevelFilter(level, filt) { t.Fatalf("expected invalid LogLevel, %s was valid", level) } - } diff --git a/command/agent/metrics_endpoint_test.go b/command/agent/metrics_endpoint_test.go index 712fdb5d8..787f114b5 100644 --- a/command/agent/metrics_endpoint_test.go +++ b/command/agent/metrics_endpoint_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/armon/go-metrics" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -16,9 +17,9 @@ import ( ) func TestHTTP_MetricsWithIllegalMethod(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("DELETE", "/v1/metrics", nil) assert.Nil(err) @@ -30,9 +31,9 @@ func TestHTTP_MetricsWithIllegalMethod(t *testing.T) { } func TestHTTP_MetricsPrometheusDisabled(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, func(c 
*Config) { c.Telemetry.PrometheusMetrics = false }, func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/metrics?format=prometheus", nil) assert.Nil(err) @@ -44,9 +45,9 @@ func TestHTTP_MetricsPrometheusDisabled(t *testing.T) { } func TestHTTP_MetricsPrometheusEnabled(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/metrics?format=prometheus", nil) assert.Nil(err) @@ -64,9 +65,9 @@ func TestHTTP_MetricsPrometheusEnabled(t *testing.T) { } func TestHTTP_Metrics(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { // make a separate HTTP request first, to ensure Nomad has written metrics // and prevent a race condition @@ -101,6 +102,8 @@ func TestHTTP_Metrics(t *testing.T) { // // **Cannot** be run in parallel as metrics are global. func TestHTTP_FreshClientAllocMetrics(t *testing.T) { + ci.Parallel(t) + require := require.New(t) numTasks := 10 diff --git a/command/agent/monitor/monitor_test.go b/command/agent/monitor/monitor_test.go index 697b5bdc5..f005e4210 100644 --- a/command/agent/monitor/monitor_test.go +++ b/command/agent/monitor/monitor_test.go @@ -7,11 +7,12 @@ import ( "time" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestMonitor_Start(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := log.NewInterceptLogger(&log.LoggerOptions{ Level: log.Error, @@ -42,7 +43,7 @@ func TestMonitor_Start(t *testing.T) { // Ensure number of dropped messages are logged func TestMonitor_DroppedMessages(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := log.NewInterceptLogger(&log.LoggerOptions{ Level: log.Warn, diff --git a/command/agent/namespace_endpoint_test.go b/command/agent/namespace_endpoint_test.go index 3c5b1bf2f..b488fe16a 100644 --- a/command/agent/namespace_endpoint_test.go +++ 
b/command/agent/namespace_endpoint_test.go @@ -5,14 +5,15 @@ import ( "net/http/httptest" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" ) func TestHTTP_NamespaceList(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { ns1 := mock.Namespace() ns2 := mock.Namespace() @@ -44,8 +45,8 @@ func TestHTTP_NamespaceList(t *testing.T) { } func TestHTTP_NamespaceQuery(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { ns1 := mock.Namespace() args := structs.NamespaceUpsertRequest{ @@ -75,8 +76,8 @@ func TestHTTP_NamespaceQuery(t *testing.T) { } func TestHTTP_NamespaceCreate(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request ns1 := mock.Namespace() @@ -106,8 +107,8 @@ func TestHTTP_NamespaceCreate(t *testing.T) { } func TestHTTP_NamespaceUpdate(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request ns1 := mock.Namespace() @@ -137,8 +138,8 @@ func TestHTTP_NamespaceUpdate(t *testing.T) { } func TestHTTP_NamespaceDelete(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { ns1 := mock.Namespace() args := structs.NamespaceUpsertRequest{ diff --git a/command/agent/node_endpoint_test.go b/command/agent/node_endpoint_test.go index c5ec234be..0b682b3ad 100644 --- a/command/agent/node_endpoint_test.go +++ b/command/agent/node_endpoint_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" @@ -14,7 +15,7 @@ import ( ) func TestHTTP_NodesList(t *testing.T) { - t.Parallel() 
+ ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { for i := 0; i < 3; i++ { // Create the node @@ -62,7 +63,7 @@ func TestHTTP_NodesList(t *testing.T) { } func TestHTTP_NodesPrefixList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { ids := []string{ "12345678-abcd-efab-cdef-123456789abc", @@ -119,7 +120,7 @@ func TestHTTP_NodesPrefixList(t *testing.T) { } func TestHTTP_NodeForceEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the node node := mock.Node() @@ -171,7 +172,7 @@ func TestHTTP_NodeForceEval(t *testing.T) { } func TestHTTP_NodeAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job node := mock.Node() @@ -240,7 +241,7 @@ func TestHTTP_NodeAllocations(t *testing.T) { } func TestHTTP_NodeDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { // Create the node @@ -336,7 +337,7 @@ func TestHTTP_NodeDrain(t *testing.T) { } func TestHTTP_NodeEligible(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { // Create the node @@ -390,7 +391,7 @@ func TestHTTP_NodeEligible(t *testing.T) { } func TestHTTP_NodePurge(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the node node := mock.Node() @@ -455,7 +456,7 @@ func TestHTTP_NodePurge(t *testing.T) { } func TestHTTP_NodeQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job node := mock.Node() diff --git a/command/agent/operator_endpoint_test.go b/command/agent/operator_endpoint_test.go index b621d7d4e..e97ff6fcf 100644 --- a/command/agent/operator_endpoint_test.go +++ b/command/agent/operator_endpoint_test.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/nomad/api" + 
"github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" @@ -25,7 +26,7 @@ import ( ) func TestHTTP_OperatorRaftConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { body := bytes.NewBuffer(nil) req, err := http.NewRequest("GET", "/v1/operator/raft/configuration", body) @@ -54,8 +55,8 @@ func TestHTTP_OperatorRaftConfiguration(t *testing.T) { } func TestHTTP_OperatorRaftPeer(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { body := bytes.NewBuffer(nil) req, err := http.NewRequest("DELETE", "/v1/operator/raft/peer?address=nope", body) @@ -88,7 +89,7 @@ func TestHTTP_OperatorRaftPeer(t *testing.T) { } func TestOperator_AutopilotGetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { body := bytes.NewBuffer(nil) req, _ := http.NewRequest("GET", "/v1/operator/autopilot/configuration", body) @@ -111,7 +112,7 @@ func TestOperator_AutopilotGetConfiguration(t *testing.T) { } func TestOperator_AutopilotSetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { body := bytes.NewBuffer([]byte(`{"CleanupDeadServers": false}`)) req, _ := http.NewRequest("PUT", "/v1/operator/autopilot/configuration", body) @@ -140,7 +141,7 @@ func TestOperator_AutopilotSetConfiguration(t *testing.T) { } func TestOperator_AutopilotCASConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { body := bytes.NewBuffer([]byte(`{"CleanupDeadServers": false}`)) req, _ := http.NewRequest("PUT", "/v1/operator/autopilot/configuration", body) @@ -208,6 +209,8 @@ func TestOperator_AutopilotCASConfiguration(t *testing.T) { } func TestOperator_ServerHealth(t *testing.T) { + ci.Parallel(t) + httpTest(t, func(c *Config) { c.Server.RaftProtocol = 3 }, func(s *TestAgent) { @@ -238,7 
+241,7 @@ func TestOperator_ServerHealth(t *testing.T) { } func TestOperator_ServerHealth_Unhealthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, func(c *Config) { c.Server.RaftProtocol = 3 c.Autopilot.LastContactThreshold = -1 * time.Second @@ -268,7 +271,7 @@ func TestOperator_ServerHealth_Unhealthy(t *testing.T) { } func TestOperator_SchedulerGetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { require := require.New(t) body := bytes.NewBuffer(nil) @@ -290,7 +293,7 @@ func TestOperator_SchedulerGetConfiguration(t *testing.T) { } func TestOperator_SchedulerSetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { require := require.New(t) body := bytes.NewBuffer([]byte(` @@ -328,7 +331,7 @@ func TestOperator_SchedulerSetConfiguration(t *testing.T) { } func TestOperator_SchedulerCASConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { require := require.New(t) body := bytes.NewBuffer([]byte(`{"PreemptionConfig": { @@ -406,7 +409,7 @@ func TestOperator_SchedulerCASConfiguration(t *testing.T) { } func TestOperator_SnapshotRequests(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir, err := ioutil.TempDir("", "nomadtest-operator-") require.NoError(t, err) @@ -498,5 +501,4 @@ func TestOperator_SnapshotRequests(t *testing.T) { require.True(t, jobExists()) }) - } diff --git a/command/agent/pprof/pprof_test.go b/command/agent/pprof/pprof_test.go index cf68fc7a7..08b6596de 100644 --- a/command/agent/pprof/pprof_test.go +++ b/command/agent/pprof/pprof_test.go @@ -4,10 +4,13 @@ import ( "context" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestProfile(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string profile string @@ -58,6 +61,8 @@ func TestProfile(t *testing.T) { } func TestCPUProfile(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string 
expectedHeaders map[string]string @@ -84,6 +89,8 @@ func TestCPUProfile(t *testing.T) { } func TestTrace(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string expectedHeaders map[string]string @@ -110,6 +117,8 @@ func TestTrace(t *testing.T) { } func TestCmdline(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string expectedHeaders map[string]string diff --git a/command/agent/region_endpoint_test.go b/command/agent/region_endpoint_test.go index 2549c6a9f..30f84ed00 100644 --- a/command/agent/region_endpoint_test.go +++ b/command/agent/region_endpoint_test.go @@ -4,10 +4,12 @@ import ( "net/http" "net/http/httptest" "testing" + + "github.com/hashicorp/nomad/ci" ) func TestHTTP_RegionList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("GET", "/v1/regions", nil) diff --git a/command/agent/retry_join_test.go b/command/agent/retry_join_test.go index 48a2e2514..a2c27aa3b 100644 --- a/command/agent/retry_join_test.go +++ b/command/agent/retry_join_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" @@ -29,7 +30,7 @@ func (m *MockDiscover) Names() []string { } func TestRetryJoin_Integration(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create two agents and have one retry join the other agent := NewTestAgent(t, t.Name(), nil) @@ -73,7 +74,7 @@ func TestRetryJoin_Integration(t *testing.T) { } func TestRetryJoin_Server_NonCloud(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) serverJoin := &ServerJoin{ @@ -103,7 +104,7 @@ func TestRetryJoin_Server_NonCloud(t *testing.T) { } func TestRetryJoin_Server_Cloud(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) serverJoin := &ServerJoin{ @@ -135,7 +136,7 @@ func TestRetryJoin_Server_Cloud(t *testing.T) { } func 
TestRetryJoin_Server_MixedProvider(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) serverJoin := &ServerJoin{ @@ -167,7 +168,7 @@ func TestRetryJoin_Server_MixedProvider(t *testing.T) { } func TestRetryJoin_Client(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) serverJoin := &ServerJoin{ @@ -197,7 +198,7 @@ func TestRetryJoin_Client(t *testing.T) { } func TestRetryJoin_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) type validateExpect struct { config *Config isValid bool diff --git a/command/agent/scaling_endpoint_test.go b/command/agent/scaling_endpoint_test.go index 0abb21c0d..fe7faa2e3 100644 --- a/command/agent/scaling_endpoint_test.go +++ b/command/agent/scaling_endpoint_test.go @@ -5,15 +5,15 @@ import ( "net/http/httptest" "testing" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" + "github.com/stretchr/testify/require" ) func TestHTTP_ScalingPoliciesList(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { for i := 0; i < 3; i++ { // Create the job @@ -52,8 +52,8 @@ func TestHTTP_ScalingPoliciesList(t *testing.T) { } func TestHTTP_ScalingPoliciesList_Filter(t *testing.T) { - t.Parallel() require := require.New(t) + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { var job *structs.Job for i := 0; i < 3; i++ { @@ -100,7 +100,7 @@ func TestHTTP_ScalingPoliciesList_Filter(t *testing.T) { } func TestHTTP_ScalingPolicyGet(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { // Create the job diff --git a/command/agent/search_endpoint_test.go b/command/agent/search_endpoint_test.go index 68ca16bb9..da0140e1d 100644 --- a/command/agent/search_endpoint_test.go +++ b/command/agent/search_endpoint_test.go @@ -6,6 +6,7 @@ import ( "net/http/httptest" "testing" + 
"github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -26,7 +27,7 @@ func createJobForTest(jobID string, s *TestAgent, t *testing.T) { } func TestHTTP_PrefixSearchWithIllegalMethod(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("DELETE", "/v1/search", nil) @@ -39,7 +40,7 @@ func TestHTTP_PrefixSearchWithIllegalMethod(t *testing.T) { } func TestHTTP_FuzzySearchWithIllegalMethod(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("DELETE", "/v1/search/fuzzy", nil) @@ -63,7 +64,7 @@ func createCmdJobForTest(name, cmd string, s *TestAgent, t *testing.T) *structs. } func TestHTTP_PrefixSearch_POST(t *testing.T) { - t.Parallel() + ci.Parallel(t) testJob := "aaaaaaaa-e8f7-fd38-c855-ab94ceb89706" testJobPrefix := "aaaaaaaa-e8f7-fd38" @@ -93,7 +94,7 @@ func TestHTTP_PrefixSearch_POST(t *testing.T) { } func TestHTTP_FuzzySearch_POST(t *testing.T) { - t.Parallel() + ci.Parallel(t) testJobID := uuid.Generate() @@ -123,7 +124,7 @@ func TestHTTP_FuzzySearch_POST(t *testing.T) { } func TestHTTP_PrefixSearch_PUT(t *testing.T) { - t.Parallel() + ci.Parallel(t) testJob := "aaaaaaaa-e8f7-fd38-c855-ab94ceb89706" testJobPrefix := "aaaaaaaa-e8f7-fd38" @@ -153,7 +154,7 @@ func TestHTTP_PrefixSearch_PUT(t *testing.T) { } func TestHTTP_FuzzySearch_PUT(t *testing.T) { - t.Parallel() + ci.Parallel(t) testJobID := uuid.Generate() @@ -183,7 +184,7 @@ func TestHTTP_FuzzySearch_PUT(t *testing.T) { } func TestHTTP_PrefixSearch_MultipleJobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) testJobA := "aaaaaaaa-e8f7-fd38-c855-ab94ceb89706" testJobB := "aaaaaaaa-e8f7-fd38-c855-ab94ceb89707" @@ -219,7 +220,7 @@ func TestHTTP_PrefixSearch_MultipleJobs(t *testing.T) { } func TestHTTP_FuzzySearch_MultipleJobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, 
func(s *TestAgent) { job1ID := createCmdJobForTest("job1", "/bin/yes", s, t).ID @@ -262,7 +263,7 @@ func TestHTTP_FuzzySearch_MultipleJobs(t *testing.T) { } func TestHTTP_PrefixSearch_Evaluation(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -294,7 +295,7 @@ func TestHTTP_PrefixSearch_Evaluation(t *testing.T) { } func TestHTTP_FuzzySearch_Evaluation(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -335,7 +336,7 @@ func mockAlloc() *structs.Allocation { } func TestHTTP_PrefixSearch_Allocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -366,7 +367,7 @@ func TestHTTP_PrefixSearch_Allocations(t *testing.T) { } func TestHTTP_FuzzySearch_Allocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -396,7 +397,7 @@ func TestHTTP_FuzzySearch_Allocations(t *testing.T) { } func TestHTTP_PrefixSearch_Nodes(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -427,7 +428,7 @@ func TestHTTP_PrefixSearch_Nodes(t *testing.T) { } func TestHTTP_FuzzySearch_Nodes(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -457,7 +458,7 @@ func TestHTTP_FuzzySearch_Nodes(t *testing.T) { } func TestHTTP_PrefixSearch_Deployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -485,7 +486,7 @@ func TestHTTP_PrefixSearch_Deployments(t *testing.T) { } func TestHTTP_FuzzySearch_Deployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -514,7 +515,7 @@ func TestHTTP_FuzzySearch_Deployments(t *testing.T) { } func 
TestHTTP_PrefixSearch_NoJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { data := structs.SearchRequest{Prefix: "12345", Context: structs.Jobs} @@ -534,7 +535,7 @@ func TestHTTP_PrefixSearch_NoJob(t *testing.T) { } func TestHTTP_FuzzySearch_NoJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { data := structs.FuzzySearchRequest{Text: "12345", Context: structs.Jobs} @@ -553,7 +554,7 @@ func TestHTTP_FuzzySearch_NoJob(t *testing.T) { } func TestHTTP_PrefixSearch_AllContext(t *testing.T) { - t.Parallel() + ci.Parallel(t) testJobID := "aaaaaaaa-e8f7-fd38-c855-ab94ceb89706" testJobPrefix := "aaaaaaaa-e8f7-fd38" @@ -588,7 +589,7 @@ func TestHTTP_PrefixSearch_AllContext(t *testing.T) { } func TestHTTP_FuzzySearch_AllContext(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { jobID := createCmdJobForTest("job1", "/bin/aardvark", s, t).ID diff --git a/command/agent/stats_endpoint_test.go b/command/agent/stats_endpoint_test.go index 2720b1a0c..ad8c6d550 100644 --- a/command/agent/stats_endpoint_test.go +++ b/command/agent/stats_endpoint_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -16,7 +17,7 @@ import ( ) func TestClientStatsRequest(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { @@ -77,7 +78,7 @@ func TestClientStatsRequest(t *testing.T) { } func TestClientStatsRequest_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpACLTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() diff --git a/command/agent/status_endpoint_test.go b/command/agent/status_endpoint_test.go index 2dbe39cfd..9be35c4f2 100644 --- a/command/agent/status_endpoint_test.go +++ 
b/command/agent/status_endpoint_test.go @@ -4,10 +4,12 @@ import ( "net/http" "net/http/httptest" "testing" + + "github.com/hashicorp/nomad/ci" ) func TestHTTP_StatusLeader(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("GET", "/v1/status/leader", nil) @@ -30,7 +32,7 @@ func TestHTTP_StatusLeader(t *testing.T) { } func TestHTTP_StatusPeers(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("GET", "/v1/status/peers", nil) diff --git a/command/agent/syslog_test.go b/command/agent/syslog_test.go index 37c4e6fff..44fa82c5a 100644 --- a/command/agent/syslog_test.go +++ b/command/agent/syslog_test.go @@ -7,10 +7,11 @@ import ( gsyslog "github.com/hashicorp/go-syslog" "github.com/hashicorp/logutils" + "github.com/hashicorp/nomad/ci" ) func TestSyslogFilter(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("Syslog not supported on Windows") } diff --git a/command/agent/system_endpoint_test.go b/command/agent/system_endpoint_test.go index f45c17c2c..504b44f54 100644 --- a/command/agent/system_endpoint_test.go +++ b/command/agent/system_endpoint_test.go @@ -4,10 +4,12 @@ import ( "net/http" "net/http/httptest" "testing" + + "github.com/hashicorp/nomad/ci" ) func TestHTTP_SystemGarbageCollect(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("PUT", "/v1/system/gc", nil) @@ -24,7 +26,7 @@ func TestHTTP_SystemGarbageCollect(t *testing.T) { } func TestHTTP_ReconcileJobSummaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("PUT", "/v1/system/reconcile/summaries", nil) diff --git a/command/agent_info_test.go b/command/agent_info_test.go index 5674a4f26..8b0869963 100644 --- 
a/command/agent_info_test.go +++ b/command/agent_info_test.go @@ -4,16 +4,17 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestAgentInfoCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &AgentInfoCommand{} } func TestAgentInfoCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -27,7 +28,7 @@ func TestAgentInfoCommand_Run(t *testing.T) { } func TestAgentInfoCommand_Run_JSON(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -44,7 +45,7 @@ func TestAgentInfoCommand_Run_JSON(t *testing.T) { } func TestAgentInfoCommand_Run_Gotemplate(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -61,7 +62,7 @@ func TestAgentInfoCommand_Run_Gotemplate(t *testing.T) { } func TestAgentInfoCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &AgentInfoCommand{Meta: Meta{Ui: ui}} diff --git a/command/agent_monitor_test.go b/command/agent_monitor_test.go index ba5a2a5b7..cf8b186a0 100644 --- a/command/agent_monitor_test.go +++ b/command/agent_monitor_test.go @@ -4,16 +4,17 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestMonitorCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &MonitorCommand{} } func TestMonitorCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() diff --git a/command/alloc_exec_test.go b/command/alloc_exec_test.go index f064aa404..e5586e4df 100644 --- a/command/alloc_exec_test.go +++ b/command/alloc_exec_test.go @@ -6,6 +6,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" 
"github.com/hashicorp/nomad/nomad/structs" @@ -20,7 +21,7 @@ import ( var _ cli.Command = &AllocExecCommand{} func TestAllocExecCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -143,8 +144,8 @@ func TestAllocExecCommand_Fails(t *testing.T) { } func TestAllocExecCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -167,7 +168,7 @@ func TestAllocExecCommand_AutocompleteArgs(t *testing.T) { } func TestAllocExecCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/alloc_fs_test.go b/command/alloc_fs_test.go index 86042eec0..967c3dd11 100644 --- a/command/alloc_fs_test.go +++ b/command/alloc_fs_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" @@ -12,12 +13,12 @@ import ( ) func TestFSCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &AllocFSCommand{} } func TestFSCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -88,8 +89,8 @@ func TestFSCommand_Fails(t *testing.T) { } func TestFSCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/alloc_logs_test.go b/command/alloc_logs_test.go index 6ac55bfb3..5698433fd 100644 --- a/command/alloc_logs_test.go +++ b/command/alloc_logs_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" @@ -12,12 +13,12 @@ import ( ) func 
TestLogsCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &AllocLogsCommand{} } func TestLogsCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -70,8 +71,8 @@ func TestLogsCommand_Fails(t *testing.T) { } func TestLogsCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/alloc_restart_test.go b/command/alloc_restart_test.go index a37a20aca..fb1e88762 100644 --- a/command/alloc_restart_test.go +++ b/command/alloc_restart_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -19,6 +20,8 @@ func TestAllocRestartCommand_Implements(t *testing.T) { } func TestAllocRestartCommand_Fails(t *testing.T) { + ci.Parallel(t) + srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -91,6 +94,8 @@ func TestAllocRestartCommand_Fails(t *testing.T) { } func TestAllocRestartCommand_Run(t *testing.T) { + ci.Parallel(t) + srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -152,6 +157,8 @@ func TestAllocRestartCommand_Run(t *testing.T) { } func TestAllocRestartCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) srv, _, url := testServer(t, true, nil) diff --git a/command/alloc_signal_test.go b/command/alloc_signal_test.go index 7142564cc..46e18d146 100644 --- a/command/alloc_signal_test.go +++ b/command/alloc_signal_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -15,12 +16,12 @@ import ( ) func TestAllocSignalCommand_Implements(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &AllocSignalCommand{} } func TestAllocSignalCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -57,6 +58,8 @@ func TestAllocSignalCommand_Fails(t *testing.T) { } func TestAllocSignalCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) srv, _, url := testServer(t, true, nil) @@ -81,6 +84,8 @@ func TestAllocSignalCommand_AutocompleteArgs(t *testing.T) { } func TestAllocSignalCommand_Run(t *testing.T) { + ci.Parallel(t) + srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/alloc_status_test.go b/command/alloc_status_test.go index fca8a2a19..d377a16a8 100644 --- a/command/alloc_status_test.go +++ b/command/alloc_status_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -21,12 +22,12 @@ import ( ) func TestAllocStatusCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &AllocStatusCommand{} } func TestAllocStatusCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -88,7 +89,7 @@ func TestAllocStatusCommand_Fails(t *testing.T) { } func TestAllocStatusCommand_LifecycleInfo(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -151,7 +152,7 @@ func TestAllocStatusCommand_LifecycleInfo(t *testing.T) { } func TestAllocStatusCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -248,7 +249,7 @@ func TestAllocStatusCommand_Run(t *testing.T) { } func TestAllocStatusCommand_RescheduleInfo(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, 
nil) defer srv.Shutdown() @@ -297,7 +298,7 @@ func TestAllocStatusCommand_RescheduleInfo(t *testing.T) { } func TestAllocStatusCommand_ScoreMetrics(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -359,8 +360,8 @@ func TestAllocStatusCommand_ScoreMetrics(t *testing.T) { } func TestAllocStatusCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -383,7 +384,7 @@ func TestAllocStatusCommand_AutocompleteArgs(t *testing.T) { } func TestAllocStatusCommand_HostVolumes(t *testing.T) { - t.Parallel() + ci.Parallel(t) // We have to create a tempdir for the host volume even though we're // not going to use it b/c the server validates the config on startup tmpDir, err := ioutil.TempDir("", "vol0") @@ -451,7 +452,7 @@ func TestAllocStatusCommand_HostVolumes(t *testing.T) { } func TestAllocStatusCommand_CSIVolumes(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() state := srv.Agent.Server().State() diff --git a/command/alloc_stop_test.go b/command/alloc_stop_test.go index 55d4cd1cc..54b0f7c1e 100644 --- a/command/alloc_stop_test.go +++ b/command/alloc_stop_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" @@ -12,7 +13,7 @@ import ( ) func TestAllocStopCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &AllocStopCommand{} } @@ -51,6 +52,8 @@ func TestAllocStop_Fails(t *testing.T) { } func TestAllocStop_Run(t *testing.T) { + ci.Parallel(t) + srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/check_test.go b/command/check_test.go index ecc47abdd..ea6af4f8d 100644 --- a/command/check_test.go +++ 
b/command/check_test.go @@ -4,11 +4,12 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestAgentCheckCommand_ServerHealth(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -26,5 +27,4 @@ func TestAgentCheckCommand_ServerHealth(t *testing.T) { if code != HealthCritical { t.Fatalf("expected exitcode: %v, actual: %v", HealthCritical, code) } - } diff --git a/command/config_validate_test.go b/command/config_validate_test.go index 80ac91fe1..04cde7785 100644 --- a/command/config_validate_test.go +++ b/command/config_validate_test.go @@ -6,11 +6,12 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestConfigValidateCommand_FailWithEmptyDir(t *testing.T) { - t.Parallel() + ci.Parallel(t) fh, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -28,7 +29,7 @@ func TestConfigValidateCommand_FailWithEmptyDir(t *testing.T) { } func TestConfigValidateCommand_SucceedWithMinimalConfigFile(t *testing.T) { - t.Parallel() + ci.Parallel(t) fh, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -55,7 +56,7 @@ func TestConfigValidateCommand_SucceedWithMinimalConfigFile(t *testing.T) { } func TestConfigValidateCommand_FailOnParseBadConfigFile(t *testing.T) { - t.Parallel() + ci.Parallel(t) fh, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -79,7 +80,7 @@ func TestConfigValidateCommand_FailOnParseBadConfigFile(t *testing.T) { } func TestConfigValidateCommand_FailOnValidateParsableConfigFile(t *testing.T) { - t.Parallel() + ci.Parallel(t) fh, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) diff --git a/command/data_format_test.go b/command/data_format_test.go index 7765a0d72..103f8fb7c 100644 --- a/command/data_format_test.go +++ b/command/data_format_test.go @@ -3,6 +3,8 @@ package command import ( "strings" 
"testing" + + "github.com/hashicorp/nomad/ci" ) type testData struct { @@ -24,7 +26,7 @@ var ( ) func TestDataFormat(t *testing.T) { - t.Parallel() + ci.Parallel(t) for k, v := range testFormat { fm, err := DataFormat(k, v) if err != nil { @@ -43,7 +45,7 @@ func TestDataFormat(t *testing.T) { } func TestInvalidJSONTemplate(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Invalid template {{.foo}} fm, err := DataFormat("template", "{{.foo}}") if err != nil { diff --git a/command/deployment_fail_test.go b/command/deployment_fail_test.go index 0cc8af90c..463300c4a 100644 --- a/command/deployment_fail_test.go +++ b/command/deployment_fail_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/mitchellh/cli" "github.com/posener/complete" @@ -11,12 +12,12 @@ import ( ) func TestDeploymentFailCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &DeploymentFailCommand{} } func TestDeploymentFailCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &DeploymentFailCommand{Meta: Meta{Ui: ui}} @@ -39,8 +40,8 @@ func TestDeploymentFailCommand_Fails(t *testing.T) { } func TestDeploymentFailCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/deployment_list_test.go b/command/deployment_list_test.go index 433fa67a8..95099a1b3 100644 --- a/command/deployment_list_test.go +++ b/command/deployment_list_test.go @@ -4,16 +4,17 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestDeploymentListCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &DeploymentListCommand{} } func TestDeploymentListCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &DeploymentListCommand{Meta: Meta{Ui: 
ui}} diff --git a/command/deployment_pause_test.go b/command/deployment_pause_test.go index 1b0c5094e..a677461cd 100644 --- a/command/deployment_pause_test.go +++ b/command/deployment_pause_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/mitchellh/cli" "github.com/posener/complete" @@ -11,12 +12,12 @@ import ( ) func TestDeploymentPauseCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &DeploymentPauseCommand{} } func TestDeploymentPauseCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &DeploymentPauseCommand{Meta: Meta{Ui: ui}} @@ -39,8 +40,8 @@ func TestDeploymentPauseCommand_Fails(t *testing.T) { } func TestDeploymentPauseCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/deployment_promote_test.go b/command/deployment_promote_test.go index 9e9afd8e7..7259caca8 100644 --- a/command/deployment_promote_test.go +++ b/command/deployment_promote_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/mitchellh/cli" "github.com/posener/complete" @@ -11,12 +12,12 @@ import ( ) func TestDeploymentPromoteCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &DeploymentPromoteCommand{} } func TestDeploymentPromoteCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &DeploymentPromoteCommand{Meta: Meta{Ui: ui}} @@ -39,8 +40,8 @@ func TestDeploymentPromoteCommand_Fails(t *testing.T) { } func TestDeploymentPromoteCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/deployment_resume_test.go 
b/command/deployment_resume_test.go index 605f82f64..b136b34be 100644 --- a/command/deployment_resume_test.go +++ b/command/deployment_resume_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/mitchellh/cli" "github.com/posener/complete" @@ -11,12 +12,12 @@ import ( ) func TestDeploymentResumeCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &DeploymentResumeCommand{} } func TestDeploymentResumeCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &DeploymentResumeCommand{Meta: Meta{Ui: ui}} @@ -39,8 +40,8 @@ func TestDeploymentResumeCommand_Fails(t *testing.T) { } func TestDeploymentResumeCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/deployment_status_test.go b/command/deployment_status_test.go index 7a28d9967..777addd1e 100644 --- a/command/deployment_status_test.go +++ b/command/deployment_status_test.go @@ -3,6 +3,7 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/mitchellh/cli" "github.com/posener/complete" @@ -11,12 +12,12 @@ import ( ) func TestDeploymentStatusCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &DeploymentStatusCommand{} } func TestDeploymentStatusCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &DeploymentStatusCommand{Meta: Meta{Ui: ui}} @@ -51,8 +52,8 @@ func TestDeploymentStatusCommand_Fails(t *testing.T) { } func TestDeploymentStatusCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/deployment_unblock_test.go b/command/deployment_unblock_test.go index a55ec7259..ee1350b79 
100644 --- a/command/deployment_unblock_test.go +++ b/command/deployment_unblock_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/mitchellh/cli" "github.com/posener/complete" @@ -11,12 +12,12 @@ import ( ) func TestDeploymentUnblockCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &DeploymentUnblockCommand{} } func TestDeploymentUnblockCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &DeploymentUnblockCommand{Meta: Meta{Ui: ui}} @@ -39,8 +40,8 @@ func TestDeploymentUnblockCommand_Fails(t *testing.T) { } func TestDeploymentUnblockCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/eval_list_test.go b/command/eval_list_test.go index 0984b3ac8..141e64a36 100644 --- a/command/eval_list_test.go +++ b/command/eval_list_test.go @@ -4,10 +4,12 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" ) func TestEvalList_ArgsWithoutPageToken(t *testing.T) { + ci.Parallel(t) cases := []struct { cli string diff --git a/command/eval_status_test.go b/command/eval_status_test.go index f66d2c0f2..5315b4163 100644 --- a/command/eval_status_test.go +++ b/command/eval_status_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" @@ -12,12 +13,12 @@ import ( ) func TestEvalStatusCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &EvalStatusCommand{} } func TestEvalStatusCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -62,8 +63,8 @@ func TestEvalStatusCommand_Fails(t *testing.T) { } func 
TestEvalStatusCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/event_test.go b/command/event_test.go index 5bc7c4dea..8c6a01651 100644 --- a/command/event_test.go +++ b/command/event_test.go @@ -3,12 +3,13 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) func TestEventCommand_BaseCommand(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() diff --git a/command/helper_devices_test.go b/command/helper_devices_test.go index a54af1cad..062a4c1b1 100644 --- a/command/helper_devices_test.go +++ b/command/helper_devices_test.go @@ -4,12 +4,14 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestDeviceQualifiedID(t *testing.T) { + ci.Parallel(t) require := require.New(t) @@ -19,6 +21,8 @@ func TestDeviceQualifiedID(t *testing.T) { } func TestBuildDeviceStatsSummaryMap(t *testing.T) { + ci.Parallel(t) + hostDeviceStats := []*api.DeviceGroupStats{ { Vendor: "vendor1", @@ -74,6 +78,8 @@ func TestBuildDeviceStatsSummaryMap(t *testing.T) { } func TestFormatDeviceStats(t *testing.T) { + ci.Parallel(t) + statValue := func(v string) *api.StatValue { return &api.StatValue{ StringVal: helper.StringToPtr(v), @@ -129,6 +135,8 @@ func TestFormatDeviceStats(t *testing.T) { } func TestNodeStatusCommand_GetDeviceResourcesForNode(t *testing.T) { + ci.Parallel(t) + hostDeviceStats := []*api.DeviceGroupStats{ { Vendor: "vendor1", @@ -201,6 +209,8 @@ func TestNodeStatusCommand_GetDeviceResourcesForNode(t *testing.T) { } func TestNodeStatusCommand_GetDeviceResources(t *testing.T) { + ci.Parallel(t) + hostDeviceStats := []*api.DeviceGroupStats{ { Vendor: "vendor1", @@ 
-248,6 +258,8 @@ func TestNodeStatusCommand_GetDeviceResources(t *testing.T) { assert.Equal(t, expected, formattedDevices) } func TestGetDeviceAttributes(t *testing.T) { + ci.Parallel(t) + d := &api.NodeDeviceResource{ Vendor: "Vendor", Type: "Type", diff --git a/command/helpers_test.go b/command/helpers_test.go index 275a5d249..7884b3abe 100644 --- a/command/helpers_test.go +++ b/command/helpers_test.go @@ -13,6 +13,7 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/flatmap" "github.com/kr/pretty" @@ -21,7 +22,7 @@ import ( ) func TestHelpers_FormatKV(t *testing.T) { - t.Parallel() + ci.Parallel(t) in := []string{"alpha|beta", "charlie|delta", "echo|"} out := formatKV(in) @@ -35,7 +36,7 @@ func TestHelpers_FormatKV(t *testing.T) { } func TestHelpers_FormatList(t *testing.T) { - t.Parallel() + ci.Parallel(t) in := []string{"alpha|beta||delta"} out := formatList(in) @@ -47,7 +48,7 @@ func TestHelpers_FormatList(t *testing.T) { } func TestHelpers_NodeID(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, _ := testServer(t, false, nil) defer srv.Shutdown() @@ -64,7 +65,7 @@ func TestHelpers_NodeID(t *testing.T) { } func TestHelpers_LineLimitReader_NoTimeLimit(t *testing.T) { - t.Parallel() + ci.Parallel(t) helloString := `hello world this @@ -166,7 +167,7 @@ func (t *testReadCloser) Close() error { } func TestHelpers_LineLimitReader_TimeLimit(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create the test reader in := &testReadCloser{data: make(chan []byte)} @@ -256,7 +257,7 @@ var ( // Test APIJob with local jobfile func TestJobGetter_LocalFile(t *testing.T) { - t.Parallel() + ci.Parallel(t) fh, err := ioutil.TempFile("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -283,7 +284,7 @@ func TestJobGetter_LocalFile(t *testing.T) { // TestJobGetter_LocalFile_InvalidHCL2 asserts that a custom message is emited // if the file is a valid HCL1 but not 
HCL2 func TestJobGetter_LocalFile_InvalidHCL2(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string @@ -331,7 +332,7 @@ func TestJobGetter_LocalFile_InvalidHCL2(t *testing.T) { // TestJobGetter_HCL2_Variables asserts variable arguments from CLI // and varfiles are both honored func TestJobGetter_HCL2_Variables(t *testing.T) { - t.Parallel() + ci.Parallel(t) hcl := ` variables { @@ -376,7 +377,7 @@ job "example" { } func TestJobGetter_HCL2_Variables_StrictFalse(t *testing.T) { - t.Parallel() + ci.Parallel(t) hcl := ` variables { @@ -396,7 +397,7 @@ job "example" { // Both the CLI and var file contain variables that are not used with the // template and therefore would error, if hcl2-strict was true. - cliArgs := []string{`var2=from-cli`,`unsedVar1=from-cli`} + cliArgs := []string{`var2=from-cli`, `unsedVar1=from-cli`} fileVars := ` var3 = "from-varfile" unsedVar2 = "from-varfile" @@ -428,7 +429,7 @@ unsedVar2 = "from-varfile" // Test StructJob with jobfile from HTTP Server func TestJobGetter_HTTPServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, job) }) @@ -493,7 +494,7 @@ func TestPrettyTimeDiff(t *testing.T) { // TestUiErrorWriter asserts that writer buffers and func TestUiErrorWriter(t *testing.T) { - t.Parallel() + ci.Parallel(t) var outBuf, errBuf bytes.Buffer ui := &cli.BasicUi{ diff --git a/command/integration_test.go b/command/integration_test.go index f509b26cb..1cf207010 100644 --- a/command/integration_test.go +++ b/command/integration_test.go @@ -11,11 +11,12 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" ) func TestIntegration_Command_NomadInit(t *testing.T) { - t.Parallel() + ci.Parallel(t) tmpDir, err := ioutil.TempDir("", "nomadtest-rootsecretdir") if err != nil { t.Fatalf("unable to create tempdir for test: %v", err) @@ -41,8 +42,8 @@ func 
TestIntegration_Command_NomadInit(t *testing.T) { } func TestIntegration_Command_RoundTripJob(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() tmpDir, err := ioutil.TempDir("", "nomadtest-rootsecretdir") assert.Nil(err) defer os.RemoveAll(tmpDir) @@ -91,5 +92,4 @@ func TestIntegration_Command_RoundTripJob(t *testing.T) { _, err := cmd.Output() assert.Nil(err) } - } diff --git a/command/job_allocs_test.go b/command/job_allocs_test.go index 449a12253..ce2cfea63 100644 --- a/command/job_allocs_test.go +++ b/command/job_allocs_test.go @@ -3,21 +3,21 @@ package command import ( "testing" - "github.com/hashicorp/nomad/nomad/structs" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/stretchr/testify/require" ) func TestJobAllocsCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobAllocsCommand{} } func TestJobAllocsCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -50,7 +50,7 @@ func TestJobAllocsCommand_Fails(t *testing.T) { } func TestJobAllocsCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -93,7 +93,7 @@ func TestJobAllocsCommand_Run(t *testing.T) { } func TestJobAllocsCommand_Template(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -152,7 +152,7 @@ func TestJobAllocsCommand_Template(t *testing.T) { } func TestJobAllocsCommand_AutocompleteArgs(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/job_deployments_test.go b/command/job_deployments_test.go index eb764ba2b..d727ac2fa 100644 --- a/command/job_deployments_test.go +++ b/command/job_deployments_test.go @@ -4,21 +4,21 @@ import 
( "strings" "testing" - "github.com/hashicorp/nomad/nomad/structs" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/stretchr/testify/assert" ) func TestJobDeploymentsCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobDeploymentsCommand{} } func TestJobDeploymentsCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobDeploymentsCommand{Meta: Meta{Ui: ui}} @@ -41,7 +41,8 @@ func TestJobDeploymentsCommand_Fails(t *testing.T) { } func TestJobDeploymentsCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) + assert := assert.New(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -85,7 +86,7 @@ func TestJobDeploymentsCommand_Run(t *testing.T) { } func TestJobDeploymentsCommand_Run_Latest(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -129,8 +130,8 @@ func TestJobDeploymentsCommand_Run_Latest(t *testing.T) { } func TestJobDeploymentsCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/job_dispatch_test.go b/command/job_dispatch_test.go index 37e4a3926..ff7410340 100644 --- a/command/job_dispatch_test.go +++ b/command/job_dispatch_test.go @@ -4,21 +4,21 @@ import ( "strings" "testing" - "github.com/hashicorp/nomad/nomad/structs" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/stretchr/testify/require" ) func TestJobDispatchCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobDispatchCommand{} } func TestJobDispatchCommand_Fails(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobDispatchCommand{Meta: Meta{Ui: ui}} @@ -50,7 +50,7 @@ func TestJobDispatchCommand_Fails(t *testing.T) { } func TestJobDispatchCommand_AutocompleteArgs(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/job_eval_test.go b/command/job_eval_test.go index 6bc72df8a..74f627601 100644 --- a/command/job_eval_test.go +++ b/command/job_eval_test.go @@ -1,11 +1,11 @@ package command import ( + "fmt" "strings" "testing" - "fmt" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -16,12 +16,12 @@ import ( ) func TestJobEvalCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobEvalCommand{} } func TestJobEvalCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobEvalCommand{Meta: Meta{Ui: ui}} @@ -46,7 +46,7 @@ func TestJobEvalCommand_Fails(t *testing.T) { } func TestJobEvalCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -103,8 +103,8 @@ func TestJobEvalCommand_Run(t *testing.T) { } func TestJobEvalCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/job_history_test.go b/command/job_history_test.go index c5c613d48..c85fee2a9 100644 --- a/command/job_history_test.go +++ b/command/job_history_test.go @@ -4,21 +4,21 @@ import ( "strings" "testing" - "github.com/hashicorp/nomad/nomad/structs" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/stretchr/testify/assert" ) func TestJobHistoryCommand_Implements(t *testing.T) 
{ - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobDispatchCommand{} } func TestJobHistoryCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobHistoryCommand{Meta: Meta{Ui: ui}} @@ -41,8 +41,8 @@ func TestJobHistoryCommand_Fails(t *testing.T) { } func TestJobHistoryCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/job_init_test.go b/command/job_init_test.go index 35575c657..32e628a6a 100644 --- a/command/job_init_test.go +++ b/command/job_init_test.go @@ -6,17 +6,18 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) func TestInitCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobInitCommand{} } func TestInitCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobInitCommand{Meta: Meta{Ui: ui}} @@ -79,7 +80,7 @@ func TestInitCommand_Run(t *testing.T) { } func TestInitCommand_defaultJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Ensure the job file is always written with spaces instead of tabs. Since // the default job file is embedded in the go file, it's easy for tabs to // slip in. 
@@ -90,7 +91,7 @@ func TestInitCommand_defaultJob(t *testing.T) { } func TestInitCommand_customFilename(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobInitCommand{Meta: Meta{Ui: ui}} filename := "custom.nomad" diff --git a/command/job_inspect_test.go b/command/job_inspect_test.go index 6f72b4254..ac3809778 100644 --- a/command/job_inspect_test.go +++ b/command/job_inspect_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" @@ -12,12 +13,12 @@ import ( ) func TestInspectCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobInspectCommand{} } func TestInspectCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -61,8 +62,8 @@ func TestInspectCommand_Fails(t *testing.T) { } func TestInspectCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/job_periodic_force_test.go b/command/job_periodic_force_test.go index 8197d0fed..b7ffc19e0 100644 --- a/command/job_periodic_force_test.go +++ b/command/job_periodic_force_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -15,12 +16,12 @@ import ( ) func TestJobPeriodicForceCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobPeriodicForceCommand{} } func TestJobPeriodicForceCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobPeriodicForceCommand{Meta: Meta{Ui: ui}} @@ -38,7 +39,7 @@ func TestJobPeriodicForceCommand_Fails(t *testing.T) { } func 
TestJobPeriodicForceCommand_AutocompleteArgs(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -76,7 +77,7 @@ func TestJobPeriodicForceCommand_AutocompleteArgs(t *testing.T) { } func TestJobPeriodicForceCommand_NonPeriodicJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { @@ -113,7 +114,7 @@ func TestJobPeriodicForceCommand_NonPeriodicJob(t *testing.T) { } func TestJobPeriodicForceCommand_SuccessfulPeriodicForceDetach(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { @@ -155,7 +156,7 @@ func TestJobPeriodicForceCommand_SuccessfulPeriodicForceDetach(t *testing.T) { } func TestJobPeriodicForceCommand_SuccessfulPeriodicForce(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { diff --git a/command/job_plan_test.go b/command/job_plan_test.go index 317370519..ad8a28ba3 100644 --- a/command/job_plan_test.go +++ b/command/job_plan_test.go @@ -8,18 +8,19 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) func TestPlanCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobRunCommand{} } func TestPlanCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server s := testutil.NewTestServer(t, nil) @@ -113,7 +114,7 @@ job "job1" { } func TestPlanCommand_From_STDIN(t *testing.T) { - t.Parallel() + ci.Parallel(t) stdinR, stdinW, err := os.Pipe() if err != nil { t.Fatalf("err: %s", err) @@ -156,7 +157,7 @@ job "job1" { } func TestPlanCommand_From_URL(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui 
:= cli.NewMockUi() cmd := &JobPlanCommand{ Meta: Meta{Ui: ui}, @@ -173,7 +174,7 @@ func TestPlanCommand_From_URL(t *testing.T) { } func TestPlanCommad_Preemptions(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobPlanCommand{Meta: Meta{Ui: ui}} require := require.New(t) diff --git a/command/job_promote_test.go b/command/job_promote_test.go index 43630cd73..ec036e9a1 100644 --- a/command/job_promote_test.go +++ b/command/job_promote_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/mock" @@ -13,12 +14,12 @@ import ( ) func TestJobPromoteCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobPromoteCommand{} } func TestJobPromoteCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobPromoteCommand{Meta: Meta{Ui: ui}} @@ -41,8 +42,8 @@ func TestJobPromoteCommand_Fails(t *testing.T) { } func TestJobPromoteCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/job_revert_test.go b/command/job_revert_test.go index dcab1851c..f7378f2ad 100644 --- a/command/job_revert_test.go +++ b/command/job_revert_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" structs "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" @@ -12,12 +13,12 @@ import ( ) func TestJobRevertCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobDispatchCommand{} } func TestJobRevertCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobRevertCommand{Meta: Meta{Ui: ui}} @@ -40,8 +41,8 @@ func TestJobRevertCommand_Fails(t *testing.T) { } func TestJobRevertCommand_AutocompleteArgs(t *testing.T) { + 
ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/job_run_test.go b/command/job_run_test.go index 80609aed9..7caf907ca 100644 --- a/command/job_run_test.go +++ b/command/job_run_test.go @@ -6,17 +6,18 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) func TestRunCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobRunCommand{} } func TestRunCommand_Output_Json(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobRunCommand{Meta: Meta{Ui: ui}} @@ -52,7 +53,7 @@ job "job1" { } func TestRunCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server s := testutil.NewTestServer(t, nil) @@ -156,7 +157,7 @@ job "job1" { } func TestRunCommand_From_STDIN(t *testing.T) { - t.Parallel() + ci.Parallel(t) stdinR, stdinW, err := os.Pipe() if err != nil { t.Fatalf("err: %s", err) @@ -199,7 +200,7 @@ job "job1" { } func TestRunCommand_From_URL(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobRunCommand{ Meta: Meta{Ui: ui}, diff --git a/command/job_scale_test.go b/command/job_scale_test.go index c61546594..d3726c55c 100644 --- a/command/job_scale_test.go +++ b/command/job_scale_test.go @@ -6,13 +6,14 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) func TestJobScaleCommand_SingleGroup(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { @@ -53,7 +54,7 @@ func TestJobScaleCommand_SingleGroup(t *testing.T) { } func TestJobScaleCommand_MultiGroup(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer 
srv.Shutdown() testutil.WaitForResult(func() (bool, error) { diff --git a/command/job_scaling_events_test.go b/command/job_scaling_events_test.go index e9954baab..530021280 100644 --- a/command/job_scaling_events_test.go +++ b/command/job_scaling_events_test.go @@ -5,13 +5,14 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) func TestJobScalingEventsCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { diff --git a/command/job_status_test.go b/command/job_status_test.go index fbd169cdc..5c0aac2b2 100644 --- a/command/job_status_test.go +++ b/command/job_status_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -19,12 +20,12 @@ import ( ) func TestJobStatusCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobStatusCommand{} } func TestJobStatusCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { @@ -225,7 +226,7 @@ func TestJobStatusCommand_Run(t *testing.T) { } func TestJobStatusCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobStatusCommand{Meta: Meta{Ui: ui}} @@ -248,8 +249,8 @@ func TestJobStatusCommand_Fails(t *testing.T) { } func TestJobStatusCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -272,8 +273,8 @@ func TestJobStatusCommand_AutocompleteArgs(t *testing.T) { } func TestJobStatusCommand_WithAccessPolicy(t *testing.T) { + 
ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true @@ -339,7 +340,7 @@ func TestJobStatusCommand_WithAccessPolicy(t *testing.T) { } func TestJobStatusCommand_RescheduleEvals(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/job_stop_test.go b/command/job_stop_test.go index 4bfca4eb4..a73c08309 100644 --- a/command/job_stop_test.go +++ b/command/job_stop_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" @@ -12,12 +13,12 @@ import ( ) func TestStopCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobStopCommand{} } func TestStopCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -52,8 +53,8 @@ func TestStopCommand_Fails(t *testing.T) { } func TestStopCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/job_validate_test.go b/command/job_validate_test.go index 6e51f33d2..12c3980ef 100644 --- a/command/job_validate_test.go +++ b/command/job_validate_test.go @@ -6,17 +6,18 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) func TestValidateCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &JobValidateCommand{} } func TestValidateCommand(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server s := testutil.NewTestServer(t, nil) defer s.Stop() @@ -56,7 +57,7 @@ job "job1" { } func TestValidateCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobValidateCommand{Meta: Meta{Ui: ui}} @@ 
-114,7 +115,7 @@ func TestValidateCommand_Fails(t *testing.T) { } func TestValidateCommand_From_STDIN(t *testing.T) { - t.Parallel() + ci.Parallel(t) stdinR, stdinW, err := os.Pipe() if err != nil { t.Fatalf("err: %s", err) @@ -160,7 +161,7 @@ job "job1" { } func TestValidateCommand_From_URL(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &JobRunCommand{ Meta: Meta{Ui: ui}, diff --git a/command/license_get_test.go b/command/license_get_test.go index 22ca08b25..f8f4de253 100644 --- a/command/license_get_test.go +++ b/command/license_get_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) @@ -12,7 +13,7 @@ import ( var _ cli.Command = &LicenseGetCommand{} func TestCommand_LicenseGet_OSSErr(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -30,6 +31,8 @@ func TestCommand_LicenseGet_OSSErr(t *testing.T) { } func TestOutputLicenseReply(t *testing.T) { + ci.Parallel(t) + now := time.Now() lic := &api.LicenseReply{ License: &api.License{ diff --git a/command/meta_test.go b/command/meta_test.go index 7ebce9036..27724faaa 100644 --- a/command/meta_test.go +++ b/command/meta_test.go @@ -8,12 +8,13 @@ import ( "testing" "github.com/creack/pty" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) func TestMeta_FlagSet(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Flags FlagSetFlags Expected []string @@ -61,6 +62,8 @@ func TestMeta_FlagSet(t *testing.T) { } func TestMeta_Colorize(t *testing.T) { + ci.Parallel(t) + type testCaseSetupFn func(*testing.T, *Meta) cases := []struct { diff --git a/command/metrics_test.go b/command/metrics_test.go index a537f3997..4412f1e50 100644 --- a/command/metrics_test.go +++ b/command/metrics_test.go @@ -3,6 +3,7 @@ package command import ( "testing" + 
"github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) @@ -10,7 +11,7 @@ import ( var _ cli.Command = &OperatorMetricsCommand{} func TestCommand_Metrics_Cases(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() diff --git a/command/monitor_test.go b/command/monitor_test.go index 297560f06..0d54b567d 100644 --- a/command/monitor_test.go +++ b/command/monitor_test.go @@ -6,13 +6,14 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) func TestMonitor_Update_Eval(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() mon := newMonitor(ui, nil, fullId) @@ -66,7 +67,7 @@ func TestMonitor_Update_Eval(t *testing.T) { } func TestMonitor_Update_Allocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() mon := newMonitor(ui, nil, fullId) @@ -137,7 +138,7 @@ func TestMonitor_Update_Allocs(t *testing.T) { } func TestMonitor_Update_AllocModification(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() mon := newMonitor(ui, nil, fullId) @@ -173,7 +174,7 @@ func TestMonitor_Update_AllocModification(t *testing.T) { } func TestMonitor_Monitor(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, _ := testServer(t, false, nil) defer srv.Shutdown() @@ -220,6 +221,8 @@ func TestMonitor_Monitor(t *testing.T) { } func TestMonitor_formatAllocMetric(t *testing.T) { + ci.Parallel(t) + tests := []struct { Name string Metrics *api.AllocationMetric diff --git a/command/namespace_apply_test.go b/command/namespace_apply_test.go index 95164b2ba..e27b93531 100644 --- a/command/namespace_apply_test.go +++ b/command/namespace_apply_test.go @@ -4,17 +4,18 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" ) func 
TestNamespaceApplyCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &NamespaceApplyCommand{} } func TestNamespaceApplyCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &NamespaceApplyCommand{Meta: Meta{Ui: ui}} @@ -37,7 +38,7 @@ func TestNamespaceApplyCommand_Fails(t *testing.T) { } func TestNamespaceApplyCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, client, url := testServer(t, true, nil) diff --git a/command/namespace_delete_test.go b/command/namespace_delete_test.go index 95fe7c8ba..b806b2aa7 100644 --- a/command/namespace_delete_test.go +++ b/command/namespace_delete_test.go @@ -5,18 +5,19 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/stretchr/testify/assert" ) func TestNamespaceDeleteCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &NamespaceDeleteCommand{} } func TestNamespaceDeleteCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &NamespaceDeleteCommand{Meta: Meta{Ui: ui}} @@ -39,7 +40,7 @@ func TestNamespaceDeleteCommand_Fails(t *testing.T) { } func TestNamespaceDeleteCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, client, url := testServer(t, true, nil) @@ -66,8 +67,8 @@ func TestNamespaceDeleteCommand_Good(t *testing.T) { } func TestNamespaceDeleteCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/namespace_inspect_test.go b/command/namespace_inspect_test.go index 35bfef085..e81744051 100644 --- a/command/namespace_inspect_test.go +++ b/command/namespace_inspect_test.go @@ -5,18 +5,19 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" 
"github.com/mitchellh/cli" "github.com/posener/complete" "github.com/stretchr/testify/assert" ) func TestNamespaceInspectCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &NamespaceInspectCommand{} } func TestNamespaceInspectCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &NamespaceInspectCommand{Meta: Meta{Ui: ui}} @@ -39,7 +40,7 @@ func TestNamespaceInspectCommand_Fails(t *testing.T) { } func TestNamespaceInspectCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, client, url := testServer(t, true, nil) @@ -67,8 +68,8 @@ func TestNamespaceInspectCommand_Good(t *testing.T) { } func TestNamespaceInspectCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -96,7 +97,7 @@ func TestNamespaceInspectCommand_AutocompleteArgs(t *testing.T) { // command should pull the matching namespace rather than // displaying the multiple match error func TestNamespaceInspectCommand_NamespaceMatchesPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, client, url := testServer(t, true, nil) diff --git a/command/namespace_list_test.go b/command/namespace_list_test.go index b8e662bdd..0cc2d67a4 100644 --- a/command/namespace_list_test.go +++ b/command/namespace_list_test.go @@ -4,13 +4,14 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) var _ cli.Command = (*NamespaceListCommand)(nil) func TestNamespaceListCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &NamespaceListCommand{Meta: Meta{Ui: ui}} @@ -33,7 +34,7 @@ func TestNamespaceListCommand_Fails(t *testing.T) { } func TestNamespaceListCommand_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, _, url := testServer(t, true, nil) diff --git a/command/namespace_status_test.go 
b/command/namespace_status_test.go index fa70ed218..084ef3233 100644 --- a/command/namespace_status_test.go +++ b/command/namespace_status_test.go @@ -5,18 +5,19 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/stretchr/testify/assert" ) func TestNamespaceStatusCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &NamespaceStatusCommand{} } func TestNamespaceStatusCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &NamespaceStatusCommand{Meta: Meta{Ui: ui}} @@ -39,7 +40,7 @@ func TestNamespaceStatusCommand_Fails(t *testing.T) { } func TestNamespaceStatusCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, client, url := testServer(t, true, nil) @@ -68,7 +69,7 @@ func TestNamespaceStatusCommand_Good(t *testing.T) { } func TestNamespaceStatusCommand_Good_Quota(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, client, url := testServer(t, true, nil) @@ -112,8 +113,8 @@ func TestNamespaceStatusCommand_Good_Quota(t *testing.T) { } func TestNamespaceStatusCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -141,7 +142,7 @@ func TestNamespaceStatusCommand_AutocompleteArgs(t *testing.T) { // command should pull the matching namespace rather than // displaying the multiple match error func TestNamespaceStatusCommand_NamespaceMatchesPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, client, url := testServer(t, true, nil) diff --git a/command/node_config_test.go b/command/node_config_test.go index c7dd3c57c..4583883a9 100644 --- a/command/node_config_test.go +++ b/command/node_config_test.go @@ -4,17 +4,18 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" 
"github.com/hashicorp/nomad/command/agent" "github.com/mitchellh/cli" ) func TestClientConfigCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &NodeConfigCommand{} } func TestClientConfigCommand_UpdateServers(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, func(c *agent.Config) { c.Server.BootstrapExpect = 0 }) @@ -47,7 +48,7 @@ func TestClientConfigCommand_UpdateServers(t *testing.T) { } func TestClientConfigCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &NodeConfigCommand{Meta: Meta{Ui: ui}} diff --git a/command/node_drain_test.go b/command/node_drain_test.go index 22aa5c2ed..02b3f11e0 100644 --- a/command/node_drain_test.go +++ b/command/node_drain_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/testutil" @@ -18,12 +19,12 @@ import ( ) func TestNodeDrainCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &NodeDrainCommand{} } func TestNodeDrainCommand_Detach(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, client, url := testServer(t, true, func(c *agent.Config) { c.NodeName = "drain_detach_node" @@ -96,7 +97,7 @@ func TestNodeDrainCommand_Detach(t *testing.T) { } func TestNodeDrainCommand_Monitor(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, client, url := testServer(t, true, func(c *agent.Config) { c.NodeName = "drain_monitor_node" @@ -256,7 +257,7 @@ func TestNodeDrainCommand_Monitor(t *testing.T) { } func TestNodeDrainCommand_Monitor_NoDrainStrategy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, client, url := testServer(t, true, func(c *agent.Config) { c.NodeName = "drain_monitor_node2" @@ -298,7 +299,7 @@ func 
TestNodeDrainCommand_Monitor_NoDrainStrategy(t *testing.T) { } func TestNodeDrainCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -412,8 +413,8 @@ func TestNodeDrainCommand_Fails(t *testing.T) { } func TestNodeDrainCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/node_eligibility_test.go b/command/node_eligibility_test.go index 2e9120d86..bf2b0e546 100644 --- a/command/node_eligibility_test.go +++ b/command/node_eligibility_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" "github.com/posener/complete" @@ -12,12 +13,12 @@ import ( ) func TestNodeEligibilityCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &NodeEligibilityCommand{} } func TestNodeEligibilityCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -90,8 +91,8 @@ func TestNodeEligibilityCommand_Fails(t *testing.T) { } func TestNodeEligibilityCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/node_status_test.go b/command/node_status_test.go index 58dfb3e0f..eccd9f773 100644 --- a/command/node_status_test.go +++ b/command/node_status_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" @@ -15,12 +16,12 @@ import ( ) func TestNodeStatusCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &NodeStatusCommand{} } func TestNodeStatusCommand_Self(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) // Start in dev mode so we get a node registration srv, client, url := testServer(t, true, func(c *agent.Config) { c.NodeName = "mynode" @@ -71,7 +72,7 @@ func TestNodeStatusCommand_Self(t *testing.T) { } func TestNodeStatusCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start in dev mode so we get a node registration srv, client, url := testServer(t, true, func(c *agent.Config) { c.NodeName = "mynode" @@ -163,7 +164,7 @@ func TestNodeStatusCommand_Run(t *testing.T) { } func TestNodeStatusCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -216,8 +217,8 @@ func TestNodeStatusCommand_Fails(t *testing.T) { } func TestNodeStatusCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -251,7 +252,7 @@ func TestNodeStatusCommand_AutocompleteArgs(t *testing.T) { } func TestNodeStatusCommand_FormatDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) node := &api.Node{} diff --git a/command/operator_api_test.go b/command/operator_api_test.go index 937db63aa..813534a8d 100644 --- a/command/operator_api_test.go +++ b/command/operator_api_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) @@ -15,6 +16,8 @@ import ( // TestOperatorAPICommand_Paths asserts that the op api command normalizes // various path formats to the proper full address. func TestOperatorAPICommand_Paths(t *testing.T) { + ci.Parallel(t) + hits := make(chan *url.URL, 1) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { hits <- r.URL @@ -74,6 +77,8 @@ func TestOperatorAPICommand_Paths(t *testing.T) { // TestOperatorAPICommand_Curl asserts that -dryrun outputs a valid curl // command. 
func TestOperatorAPICommand_Curl(t *testing.T) { + ci.Parallel(t) + buf := bytes.NewBuffer(nil) ui := &cli.BasicUi{ ErrorWriter: buf, diff --git a/command/operator_autopilot_get_test.go b/command/operator_autopilot_get_test.go index f5410b71d..07b510bd4 100644 --- a/command/operator_autopilot_get_test.go +++ b/command/operator_autopilot_get_test.go @@ -4,16 +4,17 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestOperator_Autopilot_GetConfig_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &OperatorRaftListCommand{} } func TestOperatorAutopilotGetConfigCommand(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, _, addr := testServer(t, false, nil) defer s.Shutdown() diff --git a/command/operator_autopilot_set_test.go b/command/operator_autopilot_set_test.go index 9977abc40..0bff571fa 100644 --- a/command/operator_autopilot_set_test.go +++ b/command/operator_autopilot_set_test.go @@ -5,17 +5,18 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) func TestOperator_Autopilot_SetConfig_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &OperatorRaftListCommand{} } func TestOperatorAutopilotSetConfigCommand(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s, _, addr := testServer(t, false, nil) defer s.Shutdown() diff --git a/command/operator_autopilot_test.go b/command/operator_autopilot_test.go index 5bff69291..85fd99103 100644 --- a/command/operator_autopilot_test.go +++ b/command/operator_autopilot_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestOperator_Autopilot_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &OperatorAutopilotCommand{} } diff --git a/command/operator_debug_test.go b/command/operator_debug_test.go index caeb3f814..943aa46ed 
100644 --- a/command/operator_debug_test.go +++ b/command/operator_debug_test.go @@ -15,6 +15,7 @@ import ( consulapi "github.com/hashicorp/consul/api" consultest "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" clienttest "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/helper" @@ -74,7 +75,7 @@ func newClientAgentConfigFunc(region string, nodeClass string, srvRPCAddr string } func TestDebug_NodeClass(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start test server and API client srv, _, url := testServer(t, false, nil) @@ -124,7 +125,7 @@ func TestDebug_NodeClass(t *testing.T) { } func TestDebug_ClientToServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start test server and API client srv, _, url := testServer(t, false, nil) @@ -173,6 +174,8 @@ func TestDebug_ClientToServer(t *testing.T) { } func TestDebug_MultiRegion(t *testing.T) { + ci.Parallel(t) + region1 := "region1" region2 := "region2" @@ -269,7 +272,7 @@ func TestDebug_MultiRegion(t *testing.T) { } func TestDebug_SingleServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) testutil.WaitForLeader(t, srv.Agent.RPC) @@ -303,7 +306,7 @@ func TestDebug_SingleServer(t *testing.T) { } func TestDebug_Failures(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) testutil.WaitForLeader(t, srv.Agent.RPC) @@ -356,7 +359,7 @@ func TestDebug_Failures(t *testing.T) { } func TestDebug_Bad_CSIPlugin_Names(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start test server and API client srv, _, url := testServer(t, false, nil) @@ -407,7 +410,7 @@ func buildPathSlice(path string, files []string) []string { } func TestDebug_CapturedFiles(t *testing.T) { - // t.Parallel() + // ci.Parallel(t) srv, _, url := testServer(t, true, nil) testutil.WaitForLeader(t, srv.Agent.RPC) @@ -517,7 +520,7 @@ func 
TestDebug_CapturedFiles(t *testing.T) { } func TestDebug_ExistingOutput(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &OperatorDebugCommand{Meta: Meta{Ui: ui}} @@ -534,7 +537,7 @@ func TestDebug_ExistingOutput(t *testing.T) { } func TestDebug_Fail_Pprof(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Setup agent config with debug endpoints disabled agentConfFunc := func(c *agent.Config) { @@ -562,7 +565,7 @@ func TestDebug_Fail_Pprof(t *testing.T) { } func TestDebug_StringToSlice(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { input string @@ -581,7 +584,7 @@ func TestDebug_StringToSlice(t *testing.T) { } func TestDebug_External(t *testing.T) { - t.Parallel() + ci.Parallel(t) // address calculation honors CONSUL_HTTP_SSL // ssl: true - Correct alignment @@ -623,7 +626,7 @@ func TestDebug_External(t *testing.T) { } func TestDebug_WriteBytes_Nil(t *testing.T) { - t.Parallel() + ci.Parallel(t) var testDir, testFile, testPath string var testBytes []byte @@ -646,7 +649,7 @@ func TestDebug_WriteBytes_Nil(t *testing.T) { } func TestDebug_WriteBytes_PathEscapesSandbox(t *testing.T) { - t.Parallel() + ci.Parallel(t) var testDir, testFile string var testBytes []byte @@ -669,7 +672,7 @@ func TestDebug_WriteBytes_PathEscapesSandbox(t *testing.T) { } func TestDebug_CollectConsul(t *testing.T) { - t.Parallel() + ci.Parallel(t) if testing.Short() { t.Skip("-short set; skipping") } @@ -724,7 +727,7 @@ func TestDebug_CollectConsul(t *testing.T) { } func TestDebug_CollectVault(t *testing.T) { - t.Parallel() + ci.Parallel(t) if testing.Short() { t.Skip("-short set; skipping") } @@ -768,6 +771,8 @@ func TestDebug_CollectVault(t *testing.T) { // TestDebug_RedirectError asserts that redirect errors are detected so they // can be translated into more understandable output. 
func TestDebug_RedirectError(t *testing.T) { + ci.Parallel(t) + // Create a test server that always returns the error many versions of // Nomad return instead of a 404 for unknown paths. // 1st request redirects to /ui/ @@ -798,6 +803,8 @@ func TestDebug_RedirectError(t *testing.T) { // complete a debug run have their query options configured with the // -stale flag func TestDebug_StaleLeadership(t *testing.T) { + ci.Parallel(t) + srv, _, url := testServerWithoutLeader(t, false, nil) addrServer := srv.HTTPAddr() @@ -854,6 +861,8 @@ type testOutput struct { } func TestDebug_EventStream_TopicsFromString(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string topicList string @@ -914,6 +923,8 @@ func TestDebug_EventStream_TopicsFromString(t *testing.T) { } func TestDebug_EventStream(t *testing.T) { + ci.Parallel(t) + // TODO dmay: specify output directory to allow inspection of eventstream.json // TODO dmay: require specific events in the eventstream.json file(s) // TODO dmay: scenario where no events are expected, verify "No events captured" diff --git a/command/operator_keygen_test.go b/command/operator_keygen_test.go index 1f12eb3c4..d003c7402 100644 --- a/command/operator_keygen_test.go +++ b/command/operator_keygen_test.go @@ -4,11 +4,13 @@ import ( "encoding/base64" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestKeygenCommand(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ui := cli.NewMockUi() c := &OperatorKeygenCommand{Meta: Meta{Ui: ui}} code := c.Run(nil) diff --git a/command/operator_raft_list_test.go b/command/operator_raft_list_test.go index df283e88e..8c615bd3d 100644 --- a/command/operator_raft_list_test.go +++ b/command/operator_raft_list_test.go @@ -4,16 +4,17 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestOperator_Raft_ListPeers_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &OperatorRaftListCommand{} } func 
TestOperator_Raft_ListPeers(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, _, addr := testServer(t, false, nil) defer s.Shutdown() diff --git a/command/operator_raft_remove_test.go b/command/operator_raft_remove_test.go index 886f07032..eea792459 100644 --- a/command/operator_raft_remove_test.go +++ b/command/operator_raft_remove_test.go @@ -3,17 +3,18 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" ) func TestOperator_Raft_RemovePeers_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &OperatorRaftRemoveCommand{} } func TestOperator_Raft_RemovePeer(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s, _, addr := testServer(t, false, nil) defer s.Shutdown() @@ -41,7 +42,7 @@ func TestOperator_Raft_RemovePeer(t *testing.T) { } func TestOperator_Raft_RemovePeerAddress(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s, _, addr := testServer(t, false, nil) defer s.Shutdown() @@ -60,7 +61,7 @@ func TestOperator_Raft_RemovePeerAddress(t *testing.T) { } func TestOperator_Raft_RemovePeerID(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s, _, addr := testServer(t, false, nil) defer s.Shutdown() diff --git a/command/operator_raft_test.go b/command/operator_raft_test.go index 73934acff..6ee6d2d9a 100644 --- a/command/operator_raft_test.go +++ b/command/operator_raft_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestOperator_Raft_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &OperatorRaftCommand{} } diff --git a/command/operator_snapshot_inspect_test.go b/command/operator_snapshot_inspect_test.go index 16a408942..a0203cbf1 100644 --- a/command/operator_snapshot_inspect_test.go +++ b/command/operator_snapshot_inspect_test.go @@ -7,13 +7,14 @@ import ( "testing" 
"github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) func TestOperatorSnapshotInspect_Works(t *testing.T) { - t.Parallel() + ci.Parallel(t) snapPath := generateSnapshotFile(t, nil) @@ -33,10 +34,10 @@ func TestOperatorSnapshotInspect_Works(t *testing.T) { } { require.Contains(t, output, key) } - } + func TestOperatorSnapshotInspect_HandlesFailure(t *testing.T) { - t.Parallel() + ci.Parallel(t) tmpDir, err := ioutil.TempDir("", "nomad-clitests-") require.NoError(t, err) @@ -65,11 +66,9 @@ func TestOperatorSnapshotInspect_HandlesFailure(t *testing.T) { require.NotZero(t, code) require.Contains(t, ui.ErrorWriter.String(), "Error verifying snapshot") }) - } func generateSnapshotFile(t *testing.T, prepare func(srv *agent.TestAgent, client *api.Client, url string)) string { - tmpDir, err := ioutil.TempDir("", "nomad-tempdir") require.NoError(t, err) diff --git a/command/operator_snapshot_restore_test.go b/command/operator_snapshot_restore_test.go index be88fee6f..af1143d5d 100644 --- a/command/operator_snapshot_restore_test.go +++ b/command/operator_snapshot_restore_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" @@ -15,7 +16,7 @@ import ( ) func TestOperatorSnapshotRestore_Works(t *testing.T) { - t.Parallel() + ci.Parallel(t) tmpDir, err := ioutil.TempDir("", "nomad-tempdir") require.NoError(t, err) @@ -77,7 +78,7 @@ job "snapshot-test-job" { } func TestOperatorSnapshotRestore_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &OperatorSnapshotRestoreCommand{Meta: Meta{Ui: ui}} diff --git a/command/operator_snapshot_save_test.go b/command/operator_snapshot_save_test.go index 2e712db0c..fd0c9a086 100644 --- 
a/command/operator_snapshot_save_test.go +++ b/command/operator_snapshot_save_test.go @@ -6,6 +6,7 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/helper/snapshot" "github.com/mitchellh/cli" @@ -13,7 +14,7 @@ import ( ) func TestOperatorSnapshotSave_Works(t *testing.T) { - t.Parallel() + ci.Parallel(t) tmpDir, err := ioutil.TempDir("", "nomad-tempdir") require.NoError(t, err) @@ -51,7 +52,7 @@ func TestOperatorSnapshotSave_Works(t *testing.T) { } func TestOperatorSnapshotSave_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &OperatorSnapshotSaveCommand{Meta: Meta{Ui: ui}} diff --git a/command/operator_test.go b/command/operator_test.go index 7364a7cfa..d1d9d92f4 100644 --- a/command/operator_test.go +++ b/command/operator_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestOperator_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &OperatorCommand{} } diff --git a/command/plugin_status_test.go b/command/plugin_status_test.go index 15f037c24..d33317953 100644 --- a/command/plugin_status_test.go +++ b/command/plugin_status_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/state" "github.com/mitchellh/cli" "github.com/posener/complete" @@ -11,12 +12,12 @@ import ( ) func TestPluginStatusCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &PluginStatusCommand{} } func TestPluginStatusCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &PluginStatusCommand{Meta: Meta{Ui: ui}} @@ -38,7 +39,7 @@ func TestPluginStatusCommand_Fails(t *testing.T) { } func TestPluginStatusCommand_AutocompleteArgs(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, 
nil) defer srv.Shutdown() diff --git a/command/quota_apply_test.go b/command/quota_apply_test.go index 57b1b195c..487111d04 100644 --- a/command/quota_apply_test.go +++ b/command/quota_apply_test.go @@ -4,16 +4,17 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestQuotaApplyCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &QuotaApplyCommand{} } func TestQuotaApplyCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaApplyCommand{Meta: Meta{Ui: ui}} diff --git a/command/quota_delete_test.go b/command/quota_delete_test.go index 351332c62..f8afb7079 100644 --- a/command/quota_delete_test.go +++ b/command/quota_delete_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/mitchellh/cli" "github.com/posener/complete" @@ -15,12 +16,12 @@ import ( ) func TestQuotaDeleteCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &QuotaDeleteCommand{} } func TestQuotaDeleteCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaDeleteCommand{Meta: Meta{Ui: ui}} @@ -43,7 +44,7 @@ func TestQuotaDeleteCommand_Fails(t *testing.T) { } func TestQuotaDeleteCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, client, url := testServer(t, true, nil) @@ -68,8 +69,8 @@ func TestQuotaDeleteCommand_Good(t *testing.T) { } func TestQuotaDeleteCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/quota_init_test.go b/command/quota_init_test.go index 889d66a87..936e28988 100644 --- a/command/quota_init_test.go +++ b/command/quota_init_test.go @@ -5,17 +5,18 @@ import ( "os" "testing" + "github.com/hashicorp/nomad/ci" 
"github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) func TestQuotaInitCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &QuotaInitCommand{} } func TestQuotaInitCommand_Run_HCL(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaInitCommand{Meta: Meta{Ui: ui}} @@ -64,7 +65,7 @@ func TestQuotaInitCommand_Run_HCL(t *testing.T) { } func TestQuotaInitCommand_Run_JSON(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaInitCommand{Meta: Meta{Ui: ui}} diff --git a/command/quota_inspect_test.go b/command/quota_inspect_test.go index 6fee89c60..73c941e07 100644 --- a/command/quota_inspect_test.go +++ b/command/quota_inspect_test.go @@ -7,18 +7,19 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/stretchr/testify/assert" ) func TestQuotaInspectCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &QuotaInspectCommand{} } func TestQuotaInspectCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaInspectCommand{Meta: Meta{Ui: ui}} @@ -41,7 +42,7 @@ func TestQuotaInspectCommand_Fails(t *testing.T) { } func TestQuotaInspectCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, client, url := testServer(t, true, nil) @@ -67,8 +68,8 @@ func TestQuotaInspectCommand_Good(t *testing.T) { } func TestQuotaInspectCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/quota_list_test.go b/command/quota_list_test.go index d50935e7c..90785e3e8 100644 --- a/command/quota_list_test.go +++ b/command/quota_list_test.go @@ -7,17 +7,18 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" ) func 
TestQuotaListCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &QuotaListCommand{} } func TestQuotaListCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaListCommand{Meta: Meta{Ui: ui}} @@ -40,7 +41,7 @@ func TestQuotaListCommand_Fails(t *testing.T) { } func TestQuotaListCommand_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) // Create a server diff --git a/command/quota_status_test.go b/command/quota_status_test.go index 53689320f..b580414bf 100644 --- a/command/quota_status_test.go +++ b/command/quota_status_test.go @@ -7,18 +7,19 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/stretchr/testify/assert" ) func TestQuotaStatusCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &QuotaStatusCommand{} } func TestQuotaStatusCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaStatusCommand{Meta: Meta{Ui: ui}} @@ -41,7 +42,7 @@ func TestQuotaStatusCommand_Fails(t *testing.T) { } func TestQuotaStatusCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, client, url := testServer(t, true, nil) @@ -73,8 +74,8 @@ func TestQuotaStatusCommand_Good(t *testing.T) { } func TestQuotaStatusCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/recommendation_apply_test.go b/command/recommendation_apply_test.go index e3538b6b2..1ed797d7b 100644 --- a/command/recommendation_apply_test.go +++ b/command/recommendation_apply_test.go @@ -4,16 +4,16 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" - - 
"github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/testutil" ) func TestRecommendationApplyCommand_Run(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { @@ -92,6 +92,8 @@ func TestRecommendationApplyCommand_Run(t *testing.T) { } func TestRecommendationApplyCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) + srv, client, url := testServer(t, false, nil) defer srv.Shutdown() diff --git a/command/recommendation_dismiss_test.go b/command/recommendation_dismiss_test.go index 71553b7ae..ebe095369 100644 --- a/command/recommendation_dismiss_test.go +++ b/command/recommendation_dismiss_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/stretchr/testify/require" @@ -14,8 +15,8 @@ import ( ) func TestRecommendationDismissCommand_Run(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { @@ -109,8 +110,8 @@ func TestRecommendationDismissCommand_AutocompleteArgs(t *testing.T) { } func testRecommendationAutocompleteCommand(t *testing.T, client *api.Client, srv *agent.TestAgent, cmd *RecommendationAutocompleteCommand) { + ci.Parallel(t) require := require.New(t) - t.Parallel() // Register a test job to write a recommendation against. 
testJob := testJob("recommendation_autocomplete") diff --git a/command/recommendation_info_test.go b/command/recommendation_info_test.go index 1e1a31864..2529b5986 100644 --- a/command/recommendation_info_test.go +++ b/command/recommendation_info_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" @@ -12,8 +13,8 @@ import ( ) func TestRecommendationInfoCommand_Run(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { @@ -88,6 +89,8 @@ func TestRecommendationInfoCommand_Run(t *testing.T) { } func TestRecommendationInfoCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) + srv, client, url := testServer(t, false, nil) defer srv.Shutdown() diff --git a/command/recommendation_list_test.go b/command/recommendation_list_test.go index c7234bd7c..aef63191b 100644 --- a/command/recommendation_list_test.go +++ b/command/recommendation_list_test.go @@ -4,16 +4,16 @@ import ( "sort" "testing" + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/hashicorp/nomad/api" ) func TestRecommendationListCommand_Run(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -74,6 +74,8 @@ func TestRecommendationListCommand_Run(t *testing.T) { } func TestRecommendationListCommand_Sort(t *testing.T) { + ci.Parallel(t) + testCases := []struct { inputRecommendationList []*api.Recommendation expectedOutputList []*api.Recommendation diff --git a/command/scaling_policy_info_test.go b/command/scaling_policy_info_test.go index 29c93784c..962460bb7 100644 --- a/command/scaling_policy_info_test.go +++ b/command/scaling_policy_info_test.go @@ -6,13 +6,14 @@ 
import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) func TestScalingPolicyInfoCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { diff --git a/command/scaling_policy_list_test.go b/command/scaling_policy_list_test.go index 860646559..20439379d 100644 --- a/command/scaling_policy_list_test.go +++ b/command/scaling_policy_list_test.go @@ -3,16 +3,16 @@ package command import ( "testing" + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/helper" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" - - "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/helper" ) func TestScalingPolicyListCommand_Run(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() srv, client, url := testServer(t, false, nil) defer srv.Shutdown() diff --git a/command/scaling_policy_test.go b/command/scaling_policy_test.go index 8b7c62935..4aa4b35b2 100644 --- a/command/scaling_policy_test.go +++ b/command/scaling_policy_test.go @@ -3,10 +3,13 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" ) func Test_formatScalingPolicyTarget(t *testing.T) { + ci.Parallel(t) + testCases := []struct { inputMap map[string]string expectedOutput string diff --git a/command/sentinel_apply_test.go b/command/sentinel_apply_test.go index 65f979cc1..29169c356 100644 --- a/command/sentinel_apply_test.go +++ b/command/sentinel_apply_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSentinelApplyCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SentinelApplyCommand{} } diff --git 
a/command/sentinel_delete_test.go b/command/sentinel_delete_test.go index 313e438aa..88682930f 100644 --- a/command/sentinel_delete_test.go +++ b/command/sentinel_delete_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSentinelDeleteCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SentinelDeleteCommand{} } diff --git a/command/sentinel_list_test.go b/command/sentinel_list_test.go index 98d1a307b..bb109cc83 100644 --- a/command/sentinel_list_test.go +++ b/command/sentinel_list_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSentinelListCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SentinelListCommand{} } diff --git a/command/sentinel_read_test.go b/command/sentinel_read_test.go index 8abb9d0c8..5e874cce9 100644 --- a/command/sentinel_read_test.go +++ b/command/sentinel_read_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSentinelReadCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SentinelReadCommand{} } diff --git a/command/server_force_leave_test.go b/command/server_force_leave_test.go index 9f449b900..4c01769d7 100644 --- a/command/server_force_leave_test.go +++ b/command/server_force_leave_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestServerForceLeaveCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &ServerForceLeaveCommand{} } diff --git a/command/server_join_test.go b/command/server_join_test.go index f3ec540be..bf8c5ca05 100644 --- a/command/server_join_test.go +++ b/command/server_join_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + 
"github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestServerJoinCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &ServerJoinCommand{} } diff --git a/command/server_members_test.go b/command/server_members_test.go index 523b47932..bf2e423d7 100644 --- a/command/server_members_test.go +++ b/command/server_members_test.go @@ -5,17 +5,18 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/mitchellh/cli" ) func TestServerMembersCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &ServerMembersCommand{} } func TestServerMembersCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, false, nil) defer srv.Shutdown() @@ -47,7 +48,7 @@ func TestServerMembersCommand_Run(t *testing.T) { } func TestMembersCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &ServerMembersCommand{Meta: Meta{Ui: ui}} @@ -72,7 +73,7 @@ func TestMembersCommand_Fails(t *testing.T) { // Tests that a single server region that left should still // not return an error and list other members in other regions func TestServerMembersCommand_MultiRegion_Leave(t *testing.T) { - t.Parallel() + ci.Parallel(t) config1 := func(c *agent.Config) { c.Region = "r1" diff --git a/command/status_test.go b/command/status_test.go index 4b3ec1a66..0e9e635f7 100644 --- a/command/status_test.go +++ b/command/status_test.go @@ -5,6 +5,7 @@ import ( "regexp" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -16,8 +17,8 @@ import ( ) func TestStatusCommand_Run_JobStatus(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -42,8 +43,8 @@ func TestStatusCommand_Run_JobStatus(t *testing.T) { } func 
TestStatusCommand_Run_JobStatus_MultiMatch(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -72,7 +73,7 @@ func TestStatusCommand_Run_JobStatus_MultiMatch(t *testing.T) { func TestStatusCommand_Run_EvalStatus(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -98,7 +99,7 @@ func TestStatusCommand_Run_EvalStatus(t *testing.T) { func TestStatusCommand_Run_NodeStatus(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) // Start in dev mode so we get a node registration srv, client, url := testServer(t, true, func(c *agent.Config) { @@ -138,7 +139,7 @@ func TestStatusCommand_Run_NodeStatus(t *testing.T) { func TestStatusCommand_Run_AllocStatus(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -163,7 +164,7 @@ func TestStatusCommand_Run_AllocStatus(t *testing.T) { func TestStatusCommand_Run_DeploymentStatus(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -189,7 +190,7 @@ func TestStatusCommand_Run_DeploymentStatus(t *testing.T) { func TestStatusCommand_Run_NoPrefix(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -215,7 +216,7 @@ func TestStatusCommand_Run_NoPrefix(t *testing.T) { func TestStatusCommand_AutocompleteArgs(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -237,7 +238,7 @@ func TestStatusCommand_AutocompleteArgs(t *testing.T) { } func TestStatusCommand_Run_HostNetwork(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() diff --git a/command/system_gc_test.go b/command/system_gc_test.go index 4ced96815..10e636e91 
100644 --- a/command/system_gc_test.go +++ b/command/system_gc_test.go @@ -3,16 +3,17 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSystemGCCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SystemGCCommand{} } func TestSystemGCCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, _, url := testServer(t, true, nil) diff --git a/command/system_reconcile_summaries_test.go b/command/system_reconcile_summaries_test.go index ae50c299a..ca10e734b 100644 --- a/command/system_reconcile_summaries_test.go +++ b/command/system_reconcile_summaries_test.go @@ -3,16 +3,17 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSystemReconcileSummariesCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SystemReconcileSummariesCommand{} } func TestSystemReconcileSummariesCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, _, url := testServer(t, true, nil) diff --git a/command/system_reconcile_test.go b/command/system_reconcile_test.go index defb5cdc8..f1445e793 100644 --- a/command/system_reconcile_test.go +++ b/command/system_reconcile_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSystemReconcileCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SystemCommand{} } diff --git a/command/system_test.go b/command/system_test.go index fa4ca3ceb..44ae3e370 100644 --- a/command/system_test.go +++ b/command/system_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSystemCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SystemCommand{} } diff --git a/command/ui_test.go b/command/ui_test.go index 
aac4a1f69..8b0d049ae 100644 --- a/command/ui_test.go +++ b/command/ui_test.go @@ -5,12 +5,13 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) func TestCommand_Ui(t *testing.T) { - t.Parallel() + ci.Parallel(t) type testCaseSetupFn func(*testing.T) diff --git a/command/version_test.go b/command/version_test.go index 07af3a006..2a39e3bc5 100644 --- a/command/version_test.go +++ b/command/version_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestVersionCommand_implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &VersionCommand{} } diff --git a/command/volume_register_test.go b/command/volume_register_test.go index b65b923cd..29bf025e5 100644 --- a/command/volume_register_test.go +++ b/command/volume_register_test.go @@ -5,11 +5,12 @@ import ( "github.com/hashicorp/hcl" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestVolumeDispatchParse(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { hcl string @@ -43,7 +44,7 @@ rando = "bar" } func TestCSIVolumeDecode(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string diff --git a/command/volume_status_test.go b/command/volume_status_test.go index 313d57502..7eac9f0e0 100644 --- a/command/volume_status_test.go +++ b/command/volume_status_test.go @@ -3,6 +3,7 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" @@ -11,12 +12,12 @@ import ( ) func TestCSIVolumeStatusCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &VolumeStatusCommand{} } func TestCSIVolumeStatusCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := 
&VolumeStatusCommand{Meta: Meta{Ui: ui}} @@ -30,7 +31,7 @@ func TestCSIVolumeStatusCommand_Fails(t *testing.T) { } func TestCSIVolumeStatusCommand_AutocompleteArgs(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/contributing/testing.md b/contributing/testing.md new file mode 100644 index 000000000..178273bf0 --- /dev/null +++ b/contributing/testing.md @@ -0,0 +1,24 @@ +# Writing Tests + +The Nomad repository strives to maintain comprehensive unit test coverage. Any new +features, bug fixes, or refactoring should include additional or updated test cases +demonstrating correct functionality. + +Each unit test should meet a few criteria: + +- Use testify + - Prefer using require.* functions + +- Undo any changes to the environment + - Set environment variables must be unset + - Scratch files/dirs must be removed (use t.TempDir) + - Consumed ports must be freed (e.g. TestServer.Cleanup, freeport.Return) + +- Able to run in parallel + - All package level Test* functions should start with ci.Parallel + - Always use dynamic scratch dirs, files + - Always get ports from helpers (TestServer, TestClient, TestAgent, freeport.Get) + +- Log control + - Logging must go through the testing.T (use helper/testlog.HCLogger) + - Avoid excessive logging in test cases - prefer failure messages \ No newline at end of file diff --git a/drivers/docker/config_test.go b/drivers/docker/config_test.go index b9deb9213..df237a440 100644 --- a/drivers/docker/config_test.go +++ b/drivers/docker/config_test.go @@ -3,12 +3,15 @@ package docker import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/pluginutils/hclutils" "github.com/hashicorp/nomad/plugins/drivers" "github.com/stretchr/testify/require" ) func TestConfig_ParseHCL(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string @@ -46,6 +49,8 @@ func TestConfig_ParseHCL(t *testing.T) { } func TestConfig_ParseJSON(t *testing.T) { 
+ ci.Parallel(t) + cases := []struct { name string input string @@ -113,6 +118,8 @@ func TestConfig_ParseJSON(t *testing.T) { } func TestConfig_PortMap_Deserialization(t *testing.T) { + ci.Parallel(t) + parser := hclutils.NewConfigParser(taskConfigSpec) expectedMap := map[string]int{ @@ -185,6 +192,8 @@ config { } func TestConfig_ParseAllHCL(t *testing.T) { + ci.Parallel(t) + cfgStr := ` config { image = "redis:3.2" @@ -493,6 +502,8 @@ config { // TestConfig_DriverConfig_GC asserts that gc is parsed // and populated with defaults as expected func TestConfig_DriverConfig_GC(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string config string @@ -598,6 +609,8 @@ func TestConfig_DriverConfig_GC(t *testing.T) { } func TestConfig_InternalCapabilities(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string config string @@ -632,6 +645,8 @@ func TestConfig_InternalCapabilities(t *testing.T) { } func TestConfig_DriverConfig_InfraImagePullTimeout(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string config string @@ -659,6 +674,8 @@ func TestConfig_DriverConfig_InfraImagePullTimeout(t *testing.T) { } func TestConfig_DriverConfig_PullActivityTimeout(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string config string @@ -686,6 +703,8 @@ func TestConfig_DriverConfig_PullActivityTimeout(t *testing.T) { } func TestConfig_DriverConfig_AllowRuntimes(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string config string diff --git a/drivers/docker/coordinator_test.go b/drivers/docker/coordinator_test.go index eddda78bd..755c6b99e 100644 --- a/drivers/docker/coordinator_test.go +++ b/drivers/docker/coordinator_test.go @@ -8,6 +8,7 @@ import ( "time" docker "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/testutil" @@ -55,7 +56,7 @@ func (m *mockImageClient) RemoveImage(id string) error { } 
func TestDockerCoordinator_ConcurrentPulls(t *testing.T) { - t.Parallel() + ci.Parallel(t) image := "foo" imageID := uuid.Generate() mapping := map[string]string{imageID: image} @@ -107,7 +108,7 @@ func TestDockerCoordinator_ConcurrentPulls(t *testing.T) { } func TestDockerCoordinator_Pull_Remove(t *testing.T) { - t.Parallel() + ci.Parallel(t) image := "foo" imageID := uuid.Generate() mapping := map[string]string{imageID: image} @@ -180,7 +181,7 @@ func TestDockerCoordinator_Pull_Remove(t *testing.T) { } func TestDockerCoordinator_Remove_Cancel(t *testing.T) { - t.Parallel() + ci.Parallel(t) image := "foo" imageID := uuid.Generate() mapping := map[string]string{imageID: image} @@ -229,7 +230,7 @@ func TestDockerCoordinator_Remove_Cancel(t *testing.T) { } func TestDockerCoordinator_No_Cleanup(t *testing.T) { - t.Parallel() + ci.Parallel(t) image := "foo" imageID := uuid.Generate() mapping := map[string]string{imageID: image} @@ -265,6 +266,7 @@ func TestDockerCoordinator_No_Cleanup(t *testing.T) { } func TestDockerCoordinator_Cleanup_HonorsCtx(t *testing.T) { + ci.Parallel(t) image1ID := uuid.Generate() image2ID := uuid.Generate() diff --git a/drivers/docker/docklog/docker_logger_test.go b/drivers/docker/docklog/docker_logger_test.go index 0397e4e98..611877f3d 100644 --- a/drivers/docker/docklog/docker_logger_test.go +++ b/drivers/docker/docklog/docker_logger_test.go @@ -9,6 +9,7 @@ import ( "time" docker "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/nomad/ci" ctu "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/testutil" @@ -34,9 +35,9 @@ func testContainerDetails() (image string, imageName string, imageTag string) { } func TestDockerLogger_Success(t *testing.T) { + ci.Parallel(t) ctu.DockerCompatible(t) - t.Parallel() require := require.New(t) containerImage, containerImageName, containerImageTag := testContainerDetails() @@ -115,9 +116,9 @@ func TestDockerLogger_Success(t 
*testing.T) { } func TestDockerLogger_Success_TTY(t *testing.T) { + ci.Parallel(t) ctu.DockerCompatible(t) - t.Parallel() require := require.New(t) containerImage, containerImageName, containerImageTag := testContainerDetails() @@ -212,9 +213,9 @@ func echoToContainer(t *testing.T, client *docker.Client, id string, line string } func TestDockerLogger_LoggingNotSupported(t *testing.T) { + ci.Parallel(t) ctu.DockerCompatible(t) - t.Parallel() require := require.New(t) containerImage, containerImageName, containerImageTag := testContainerDetails() @@ -303,6 +304,8 @@ func (*noopCloser) Close() error { } func TestNextBackoff(t *testing.T) { + ci.Parallel(t) + cases := []struct { currentBackoff float64 min float64 @@ -325,6 +328,8 @@ func TestNextBackoff(t *testing.T) { } func TestIsLoggingTerminalError(t *testing.T) { + ci.Parallel(t) + terminalErrs := []error{ errors.New("docker returned: configured logging driver does not support reading"), &docker.Error{ diff --git a/drivers/docker/driver_linux_test.go b/drivers/docker/driver_linux_test.go index ba79af839..006517b28 100644 --- a/drivers/docker/driver_linux_test.go +++ b/drivers/docker/driver_linux_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/freeport" @@ -17,6 +18,8 @@ import ( ) func TestDockerDriver_authFromHelper(t *testing.T) { + ci.Parallel(t) + dir, err := ioutil.TempDir("", "test-docker-driver_authfromhelper") require.NoError(t, err) defer os.RemoveAll(dir) @@ -47,9 +50,7 @@ func TestDockerDriver_authFromHelper(t *testing.T) { } func TestDockerDriver_PluginConfig_PidsLimit(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) dh := dockerDriverHarness(t, nil) driver := dh.Impl().(*Driver) @@ -72,9 +73,8 @@ func TestDockerDriver_PluginConfig_PidsLimit(t *testing.T) { } func TestDockerDriver_PidsLimit(t *testing.T) { - if !tu.IsCI() { - 
t.Parallel() - } + ci.Parallel(t) + testutil.DockerCompatible(t) require := require.New(t) diff --git a/drivers/docker/driver_test.go b/drivers/docker/driver_test.go index a149da1b9..26cbf9916 100644 --- a/drivers/docker/driver_test.go +++ b/drivers/docker/driver_test.go @@ -17,6 +17,7 @@ import ( docker "github.com/fsouza/go-dockerclient" hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/freeport" @@ -228,9 +229,7 @@ func newTestDockerClient(t *testing.T) *docker.Client { // If you want to checkout/revert those tests, please check commit: 41715b1860778aa80513391bd64abd721d768ab0 func TestDockerDriver_Start_Wait(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := newTaskConfig("", busyboxLongRunningCmd) @@ -264,9 +263,7 @@ func TestDockerDriver_Start_Wait(t *testing.T) { } func TestDockerDriver_Start_WaitFinish(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := newTaskConfig("", []string{"echo", "hello"}) @@ -307,9 +304,7 @@ func TestDockerDriver_Start_WaitFinish(t *testing.T) { // // See https://github.com/hashicorp/nomad/issues/3419 func TestDockerDriver_Start_StoppedContainer(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := newTaskConfig("", []string{"sleep", "9001"}) @@ -368,9 +363,7 @@ func TestDockerDriver_Start_StoppedContainer(t *testing.T) { } func TestDockerDriver_Start_LoadImage(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := newTaskConfig("", []string{"sh", "-c", "echo hello > $NOMAD_TASK_DIR/output"}) @@ -419,9 +412,7 @@ func TestDockerDriver_Start_LoadImage(t *testing.T) { // Tests that starting a task without an image fails func TestDockerDriver_Start_NoImage(t 
*testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := TaskConfig{ @@ -448,9 +439,7 @@ func TestDockerDriver_Start_NoImage(t *testing.T) { } func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := TaskConfig{ @@ -486,9 +475,7 @@ func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) { } func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) // This test requires that the alloc dir be mounted into docker as a volume. // Because this cannot happen when docker is run remotely, e.g. when running // docker in a VM, we skip this when we detect Docker is being run remotely. @@ -549,9 +536,7 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) { } func TestDockerDriver_Start_Kill_Wait(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := newTaskConfig("", busyboxLongRunningCmd) @@ -597,9 +582,7 @@ func TestDockerDriver_Start_Kill_Wait(t *testing.T) { } func TestDockerDriver_Start_KillTimeout(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { @@ -649,12 +632,10 @@ func TestDockerDriver_Start_KillTimeout(t *testing.T) { } func TestDockerDriver_StartN(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("Windows Docker does not support SIGINT") } - if !tu.IsCI() { - t.Parallel() - } testutil.DockerCompatible(t) require := require.New(t) @@ -705,12 +686,10 @@ func TestDockerDriver_StartN(t *testing.T) { } func TestDockerDriver_StartNVersions(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("Skipped on windows, we don't have image variants available") } - if !tu.IsCI() { - t.Parallel() - } testutil.DockerCompatible(t) require := require.New(t) @@ -776,9 +755,7 @@ func 
TestDockerDriver_StartNVersions(t *testing.T) { } func TestDockerDriver_Labels(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -807,9 +784,7 @@ func TestDockerDriver_Labels(t *testing.T) { } func TestDockerDriver_ExtraLabels(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -844,9 +819,7 @@ func TestDockerDriver_ExtraLabels(t *testing.T) { } func TestDockerDriver_LoggingConfiguration(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -873,9 +846,7 @@ func TestDockerDriver_LoggingConfiguration(t *testing.T) { } func TestDockerDriver_ForcePull(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -896,13 +867,10 @@ func TestDockerDriver_ForcePull(t *testing.T) { } func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("TODO: Skipped digest test on Windows") } - - if !tu.IsCI() { - t.Parallel() - } testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -925,12 +893,10 @@ func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) { } func TestDockerDriver_SecurityOptUnconfined(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("Windows does not support seccomp") } - if !tu.IsCI() { - t.Parallel() - } testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -951,13 +917,10 @@ func TestDockerDriver_SecurityOptUnconfined(t *testing.T) { } func TestDockerDriver_SecurityOptFromFile(t *testing.T) { - + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("Windows does not support seccomp") } - if !tu.IsCI() { - t.Parallel() - } testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -976,9 +939,7 @@ func 
TestDockerDriver_SecurityOptFromFile(t *testing.T) { } func TestDockerDriver_Runtime(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -999,7 +960,7 @@ func TestDockerDriver_Runtime(t *testing.T) { } func TestDockerDriver_CreateContainerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1023,7 +984,7 @@ func TestDockerDriver_CreateContainerConfig(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1048,7 +1009,7 @@ func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) { - t.Parallel() + ci.Parallel(t) dh := dockerDriverHarness(t, nil) driver := dh.Impl().(*Driver) @@ -1087,7 +1048,7 @@ func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_User(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1105,7 +1066,7 @@ func TestDockerDriver_CreateContainerConfig_User(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1139,7 +1100,7 @@ func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string @@ -1218,7 +1179,7 @@ func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_Mounts(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1296,9 +1257,7 @@ func 
TestDockerDriver_CreateContainerConfig_Mounts(t *testing.T) { } func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testCases := []struct { description string gpuRuntimeSet bool @@ -1368,9 +1327,7 @@ func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) { } func TestDockerDriver_Capabilities(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { t.Skip("Capabilities not supported on windows") @@ -1480,9 +1437,7 @@ func TestDockerDriver_Capabilities(t *testing.T) { } func TestDockerDriver_DNS(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) testutil.ExecCompatible(t) @@ -1527,9 +1482,7 @@ func TestDockerDriver_DNS(t *testing.T) { } func TestDockerDriver_Init(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { t.Skip("Windows does not support init.") @@ -1552,9 +1505,7 @@ func TestDockerDriver_Init(t *testing.T) { } func TestDockerDriver_CPUSetCPUs(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { t.Skip("Windows does not support CPUSetCPUs.") @@ -1599,9 +1550,7 @@ func TestDockerDriver_CPUSetCPUs(t *testing.T) { } func TestDockerDriver_MemoryHardLimit(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { t.Skip("Windows does not support MemoryReservation") @@ -1625,9 +1574,7 @@ func TestDockerDriver_MemoryHardLimit(t *testing.T) { } func TestDockerDriver_MACAddress(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { t.Skip("Windows docker does not support setting MacAddress") @@ -1649,9 +1596,7 @@ func TestDockerDriver_MACAddress(t *testing.T) { } 
func TestDockerWorkDir(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -1678,9 +1623,7 @@ func inSlice(needle string, haystack []string) bool { } func TestDockerDriver_PortsNoMap(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, _, ports := dockerTask(t) @@ -1722,9 +1665,7 @@ func TestDockerDriver_PortsNoMap(t *testing.T) { } func TestDockerDriver_PortsMapping(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -1774,7 +1715,7 @@ func TestDockerDriver_PortsMapping(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_Ports(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1817,7 +1758,7 @@ func TestDockerDriver_CreateContainerConfig_Ports(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_PortsMapping(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1853,9 +1794,7 @@ func TestDockerDriver_CreateContainerConfig_PortsMapping(t *testing.T) { } func TestDockerDriver_CleanupContainer(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -1893,6 +1832,7 @@ func TestDockerDriver_CleanupContainer(t *testing.T) { } func TestDockerDriver_EnableImageGC(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -1959,6 +1899,7 @@ func TestDockerDriver_EnableImageGC(t *testing.T) { } func TestDockerDriver_DisableImageGC(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -2021,6 +1962,7 @@ func TestDockerDriver_DisableImageGC(t *testing.T) { } func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) { + ci.Parallel(t) 
testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -2087,9 +2029,7 @@ func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) { } func TestDockerDriver_Stats(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -2162,9 +2102,7 @@ func setupDockerVolumes(t *testing.T, cfg map[string]interface{}, hostpath strin } func TestDockerDriver_VolumesDisabled(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) cfg := map[string]interface{}{ @@ -2234,9 +2172,7 @@ func TestDockerDriver_VolumesDisabled(t *testing.T) { } func TestDockerDriver_VolumesEnabled(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) cfg := map[string]interface{}{ @@ -2279,9 +2215,7 @@ func TestDockerDriver_VolumesEnabled(t *testing.T) { } func TestDockerDriver_Mounts(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) goodMount := DockerMount{ @@ -2351,9 +2285,7 @@ func TestDockerDriver_Mounts(t *testing.T) { } func TestDockerDriver_AuthConfiguration(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) path := "./test-resources/docker/auth.json" @@ -2402,9 +2334,7 @@ func TestDockerDriver_AuthConfiguration(t *testing.T) { } func TestDockerDriver_AuthFromTaskConfig(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) cases := []struct { Auth DockerAuth @@ -2456,9 +2386,7 @@ func TestDockerDriver_AuthFromTaskConfig(t *testing.T) { } func TestDockerDriver_OOMKilled(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { @@ -2507,9 +2435,7 @@ func TestDockerDriver_OOMKilled(t *testing.T) { } func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) 
brokenConfigs := []DockerDevice{ @@ -2547,9 +2473,7 @@ func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) { } func TestDockerDriver_Device_Success(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS != "linux" { @@ -2587,9 +2511,7 @@ func TestDockerDriver_Device_Success(t *testing.T) { } func TestDockerDriver_Entrypoint(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) entrypoint := []string{"sh", "-c"} @@ -2614,9 +2536,7 @@ func TestDockerDriver_Entrypoint(t *testing.T) { } func TestDockerDriver_ReadonlyRootfs(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { @@ -2658,9 +2578,7 @@ func (fakeDockerClient) RemoveContainer(opts docker.RemoveContainerOptions) erro // TestDockerDriver_VolumeError asserts volume related errors when creating a // container are recoverable. func TestDockerDriver_VolumeError(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) // setup _, cfg, ports := dockerTask(t) @@ -2673,9 +2591,7 @@ func TestDockerDriver_VolumeError(t *testing.T) { } func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) expectedPrefix := "2001:db8:1::242:ac11" @@ -2725,6 +2641,8 @@ func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) { } func TestParseDockerImage(t *testing.T) { + ci.Parallel(t) + tests := []struct { Image string Repo string @@ -2745,6 +2663,7 @@ func TestParseDockerImage(t *testing.T) { } func TestDockerImageRef(t *testing.T) { + ci.Parallel(t) tests := []struct { Image string Repo string @@ -2781,9 +2700,7 @@ func waitForExist(t *testing.T, client *docker.Client, containerID string) { // and startContainers functions are idempotent, as we have some retry // logic there without ensureing we delete/destroy containers func 
TestDockerDriver_CreationIdempotent(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -2852,7 +2769,7 @@ func TestDockerDriver_CreationIdempotent(t *testing.T) { // TestDockerDriver_CreateContainerConfig_CPUHardLimit asserts that a default // CPU quota and period are set when cpu_hard_limit = true. func TestDockerDriver_CreateContainerConfig_CPUHardLimit(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, _, ports := dockerTask(t) defer freeport.Return(ports) @@ -2878,7 +2795,7 @@ func TestDockerDriver_CreateContainerConfig_CPUHardLimit(t *testing.T) { } func TestDockerDriver_memoryLimits(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string @@ -2934,7 +2851,7 @@ func TestDockerDriver_memoryLimits(t *testing.T) { } func TestDockerDriver_parseSignal(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []struct { name string @@ -2983,11 +2900,12 @@ func TestDockerDriver_parseSignal(t *testing.T) { // This test asserts that Nomad isn't overriding the STOPSIGNAL in a Dockerfile func TestDockerDriver_StopSignal(t *testing.T) { + ci.Parallel(t) + testutil.DockerCompatible(t) if runtime.GOOS == "windows" { t.Skip("Skipped on windows, we don't have image variants available") } - testutil.DockerCompatible(t) cases := []struct { name string variant string diff --git a/drivers/docker/driver_unix_test.go b/drivers/docker/driver_unix_test.go index 701ed8985..fb4fbba25 100644 --- a/drivers/docker/driver_unix_test.go +++ b/drivers/docker/driver_unix_test.go @@ -16,6 +16,7 @@ import ( "time" docker "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/freeport" @@ -28,10 +29,9 @@ import ( ) func TestDockerDriver_User(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) + task, cfg, 
ports := dockerTask(t) defer freeport.Return(ports) task.User = "alice" @@ -56,10 +56,9 @@ func TestDockerDriver_User(t *testing.T) { } func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) + require := require.New(t) // Because go-dockerclient doesn't provide api for query network aliases, just check that @@ -105,9 +104,7 @@ func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) { } func TestDockerDriver_NetworkMode_Host(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) expected := "host" @@ -149,9 +146,7 @@ func TestDockerDriver_NetworkMode_Host(t *testing.T) { } func TestDockerDriver_CPUCFSPeriod(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -172,7 +167,9 @@ func TestDockerDriver_CPUCFSPeriod(t *testing.T) { } func TestDockerDriver_Sysctl_Ulimit(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) + task, cfg, ports := dockerTask(t) defer freeport.Return(ports) expectedUlimits := map[string]string{ @@ -219,7 +216,9 @@ func TestDockerDriver_Sysctl_Ulimit(t *testing.T) { } func TestDockerDriver_Sysctl_Ulimit_Errors(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) + brokenConfigs := []map[string]string{ { "nofile": "", @@ -262,8 +261,7 @@ func TestDockerDriver_Sysctl_Ulimit_Errors(t *testing.T) { // negative case for non existent mount paths. We should write a similar test // for windows. func TestDockerDriver_BindMountsHonorVolumesEnabledFlag(t *testing.T) { - t.Parallel() - + ci.Parallel(t) testutil.DockerCompatible(t) allocDir := "/tmp/nomad/alloc-dir" @@ -398,7 +396,7 @@ func TestDockerDriver_BindMountsHonorVolumesEnabledFlag(t *testing.T) { // an absolute path, changing path expansion behaviour. A similar test should // be written for windows. 
func TestDockerDriver_MountsSerialization(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.DockerCompatible(t) allocDir := "/tmp/nomad/alloc-dir" @@ -567,7 +565,7 @@ func TestDockerDriver_MountsSerialization(t *testing.T) { // and present in docker.CreateContainerOptions, and that it is appended // to any devices/mounts a user sets in the task config. func TestDockerDriver_CreateContainerConfig_MountsCombined(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -667,6 +665,7 @@ func TestDockerDriver_CreateContainerConfig_MountsCombined(t *testing.T) { // TestDockerDriver_Cleanup ensures Cleanup removes only downloaded images. // Doesn't run on windows because it requires an image variant func TestDockerDriver_Cleanup(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) // using a small image and an specific point release to avoid accidental conflicts with other tasks @@ -711,9 +710,7 @@ func TestDockerDriver_Cleanup(t *testing.T) { // Tests that images prefixed with "https://" are supported func TestDockerDriver_Start_Image_HTTPS(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := TaskConfig{ @@ -788,9 +785,7 @@ func copyFile(src, dst string, t *testing.T) { } func TestDocker_ExecTaskStreaming(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := newTaskConfig("", []string{"/bin/sleep", "1000"}) @@ -818,10 +813,9 @@ func TestDocker_ExecTaskStreaming(t *testing.T) { // Tests that a given DNSConfig properly configures dns func Test_dnsConfig(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) + require := require.New(t) harness := dockerDriverHarness(t, nil) defer harness.Kill() diff --git a/drivers/docker/fingerprint_test.go b/drivers/docker/fingerprint_test.go index 52389dfb2..b6303164d 100644 --- 
a/drivers/docker/fingerprint_test.go +++ b/drivers/docker/fingerprint_test.go @@ -4,10 +4,10 @@ import ( "context" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/plugins/drivers" - tu "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" ) @@ -16,9 +16,7 @@ import ( // // In Linux CI and AppVeyor Windows environment, it should be enabled. func TestDockerDriver_FingerprintHealth(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) ctx, cancel := context.WithCancel(context.Background()) diff --git a/drivers/docker/network_test.go b/drivers/docker/network_test.go index 9fada5bf4..4b1ccd517 100644 --- a/drivers/docker/network_test.go +++ b/drivers/docker/network_test.go @@ -1,14 +1,16 @@ package docker import ( - "github.com/hashicorp/nomad/plugins/drivers" "testing" docker "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/plugins/drivers" "github.com/stretchr/testify/assert" ) func TestDriver_createSandboxContainerConfig(t *testing.T) { + ci.Parallel(t) testCases := []struct { inputAllocID string inputNetworkCreateRequest *drivers.NetworkCreateRequest diff --git a/drivers/docker/ports_test.go b/drivers/docker/ports_test.go index 2500f19ff..ca7e13573 100644 --- a/drivers/docker/ports_test.go +++ b/drivers/docker/ports_test.go @@ -3,12 +3,14 @@ package docker import ( "testing" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" + "github.com/stretchr/testify/require" ) func TestPublishedPorts_add(t *testing.T) { + ci.Parallel(t) + p := newPublishedPorts(testlog.HCLogger(t)) p.add("label", "10.0.0.1", 1234, 80) p.add("label", "10.0.0.1", 5678, 80) diff --git a/drivers/docker/progress_test.go b/drivers/docker/progress_test.go index 7f5b5dc46..4df095415 100644 --- 
a/drivers/docker/progress_test.go +++ b/drivers/docker/progress_test.go @@ -4,10 +4,12 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func Test_DockerImageProgressManager(t *testing.T) { + ci.Parallel(t) pm := &imageProgressManager{ imageProgress: &imageProgress{ diff --git a/drivers/docker/reconciler_test.go b/drivers/docker/reconciler_test.go index ff5284cf1..dc6e45e27 100644 --- a/drivers/docker/reconciler_test.go +++ b/drivers/docker/reconciler_test.go @@ -7,6 +7,7 @@ import ( "time" docker "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/freeport" "github.com/hashicorp/nomad/helper/uuid" @@ -31,6 +32,8 @@ func fakeContainerList(t *testing.T) (nomadContainer, nonNomadContainer docker.A } func Test_HasMount(t *testing.T) { + ci.Parallel(t) + nomadContainer, nonNomadContainer := fakeContainerList(t) require.True(t, hasMount(nomadContainer, "/alloc")) @@ -45,6 +48,8 @@ func Test_HasMount(t *testing.T) { } func Test_HasNomadName(t *testing.T) { + ci.Parallel(t) + nomadContainer, nonNomadContainer := fakeContainerList(t) require.True(t, hasNomadName(nomadContainer)) @@ -54,6 +59,7 @@ func Test_HasNomadName(t *testing.T) { // TestDanglingContainerRemoval asserts containers without corresponding tasks // are removed after the creation grace period. func TestDanglingContainerRemoval(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) // start two containers: one tracked nomad container, and one unrelated container @@ -157,6 +163,7 @@ func TestDanglingContainerRemoval(t *testing.T) { // TestDanglingContainerRemoval_Stopped asserts stopped containers without // corresponding tasks are not removed even if after creation grace period. 
func TestDanglingContainerRemoval_Stopped(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) _, cfg, ports := dockerTask(t) diff --git a/drivers/docker/stats_test.go b/drivers/docker/stats_test.go index 4ae932bf4..f17a80c84 100644 --- a/drivers/docker/stats_test.go +++ b/drivers/docker/stats_test.go @@ -7,14 +7,15 @@ import ( "time" docker "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/nomad/ci" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/stretchr/testify/require" ) func TestDriver_DockerStatsCollector(t *testing.T) { - t.Parallel() - + ci.Parallel(t) require := require.New(t) + src := make(chan *docker.Stats) defer close(src) dst, recvCh := newStatsChanPipe() @@ -69,7 +70,7 @@ func TestDriver_DockerStatsCollector(t *testing.T) { // TestDriver_DockerUsageSender asserts that the TaskResourceUsage chan wrapper // supports closing and sending on a chan from concurrent goroutines. func TestDriver_DockerUsageSender(t *testing.T) { - t.Parallel() + ci.Parallel(t) // sample payload res := &cstructs.TaskResourceUsage{} diff --git a/drivers/docker/utils_test.go b/drivers/docker/utils_test.go index c6ae1995f..c99cf69c9 100644 --- a/drivers/docker/utils_test.go +++ b/drivers/docker/utils_test.go @@ -3,10 +3,12 @@ package docker import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestIsParentPath(t *testing.T) { + ci.Parallel(t) require.True(t, isParentPath("/a/b/c", "/a/b/c")) require.True(t, isParentPath("/a/b/c", "/a/b/c/d")) require.True(t, isParentPath("/a/b/c", "/a/b/c/d/e")) @@ -18,6 +20,7 @@ func TestIsParentPath(t *testing.T) { } func TestParseVolumeSpec_Linux(t *testing.T) { + ci.Parallel(t) validCases := []struct { name string bindSpec string diff --git a/drivers/docker/utils_unix_test.go b/drivers/docker/utils_unix_test.go index 29f526964..e53c72bec 100644 --- a/drivers/docker/utils_unix_test.go +++ b/drivers/docker/utils_unix_test.go @@ -7,10 +7,13 @@ import ( 
"path/filepath" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestValidateCgroupPermission(t *testing.T) { + ci.Parallel(t) + positiveCases := []string{ "r", "rw", @@ -40,6 +43,8 @@ func TestValidateCgroupPermission(t *testing.T) { } func TestExpandPath(t *testing.T) { + ci.Parallel(t) + cases := []struct { base string target string diff --git a/drivers/exec/driver_test.go b/drivers/exec/driver_test.go index 2188dc8be..f5c8d9a2b 100644 --- a/drivers/exec/driver_test.go +++ b/drivers/exec/driver_test.go @@ -16,6 +16,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" ctestutils "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/drivers/shared/executor" "github.com/hashicorp/nomad/helper/pluginutils/hclutils" @@ -52,9 +53,7 @@ var testResources = &drivers.Resources{ } func TestExecDriver_Fingerprint_NonLinux(t *testing.T) { - if !testutil.IsCI() { - t.Parallel() - } + ci.Parallel(t) require := require.New(t) if runtime.GOOS == "linux" { t.Skip("Test only available not on Linux") @@ -77,7 +76,7 @@ func TestExecDriver_Fingerprint_NonLinux(t *testing.T) { } func TestExecDriver_Fingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -100,7 +99,7 @@ func TestExecDriver_Fingerprint(t *testing.T) { } func TestExecDriver_StartWait(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -135,7 +134,7 @@ func TestExecDriver_StartWait(t *testing.T) { } func TestExecDriver_StartWaitStopKill(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -199,7 +198,7 @@ func TestExecDriver_StartWaitStopKill(t *testing.T) { } func TestExecDriver_StartWaitRecover(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -274,7 +273,7 @@ func TestExecDriver_StartWaitRecover(t *testing.T) { // 
TestExecDriver_NoOrphans asserts that when the main // task dies, the orphans in the PID namespaces are killed by the kernel func TestExecDriver_NoOrphans(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) ctestutils.ExecCompatible(t) @@ -390,7 +389,7 @@ func TestExecDriver_NoOrphans(t *testing.T) { } func TestExecDriver_Stats(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -436,7 +435,7 @@ func TestExecDriver_Stats(t *testing.T) { } func TestExecDriver_Start_Wait_AllocDir(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -488,7 +487,7 @@ func TestExecDriver_Start_Wait_AllocDir(t *testing.T) { } func TestExecDriver_User(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -525,7 +524,7 @@ func TestExecDriver_User(t *testing.T) { // TestExecDriver_HandlerExec ensures the exec driver's handle properly // executes commands inside the container. 
func TestExecDriver_HandlerExec(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -609,7 +608,7 @@ func TestExecDriver_HandlerExec(t *testing.T) { } func TestExecDriver_DevicesAndMounts(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -705,6 +704,8 @@ touch: cannot touch '/tmp/task-path-ro/testfile-from-ro': Read-only file system` } func TestConfig_ParseAllHCL(t *testing.T) { + ci.Parallel(t) + cfgStr := ` config { command = "/bin/bash" @@ -723,7 +724,7 @@ config { } func TestExecDriver_NoPivotRoot(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) ctestutils.ExecCompatible(t) @@ -764,6 +765,7 @@ func TestExecDriver_NoPivotRoot(t *testing.T) { } func TestDriver_Config_validate(t *testing.T) { + ci.Parallel(t) t.Run("pid/ipc", func(t *testing.T) { for _, tc := range []struct { pidMode, ipcMode string @@ -804,6 +806,7 @@ func TestDriver_Config_validate(t *testing.T) { } func TestDriver_TaskConfig_validate(t *testing.T) { + ci.Parallel(t) t.Run("pid/ipc", func(t *testing.T) { for _, tc := range []struct { pidMode, ipcMode string diff --git a/drivers/exec/driver_unix_test.go b/drivers/exec/driver_unix_test.go index a9da57c02..6d62902f8 100644 --- a/drivers/exec/driver_unix_test.go +++ b/drivers/exec/driver_unix_test.go @@ -10,22 +10,22 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" + ctestutils "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/drivers/shared/capabilities" "github.com/hashicorp/nomad/drivers/shared/executor" - basePlug "github.com/hashicorp/nomad/plugins/base" - "github.com/stretchr/testify/require" - "golang.org/x/sys/unix" - - ctestutils "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" + basePlug "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/drivers" dtestutil 
"github.com/hashicorp/nomad/plugins/drivers/testutils" "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/require" + "golang.org/x/sys/unix" ) func TestExecDriver_StartWaitStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -87,7 +87,7 @@ func TestExecDriver_StartWaitStop(t *testing.T) { } func TestExec_ExecTaskStreaming(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -121,7 +121,7 @@ func TestExec_ExecTaskStreaming(t *testing.T) { // Tests that a given DNSConfig properly configures dns func TestExec_dnsConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) ctestutils.RequireRoot(t) ctestutils.ExecCompatible(t) require := require.New(t) @@ -181,6 +181,7 @@ func TestExec_dnsConfig(t *testing.T) { } func TestExecDriver_Capabilities(t *testing.T) { + ci.Parallel(t) ctestutils.ExecCompatible(t) task := &drivers.TaskConfig{ diff --git a/drivers/java/driver_test.go b/drivers/java/driver_test.go index e407e3c19..63cb4c120 100644 --- a/drivers/java/driver_test.go +++ b/drivers/java/driver_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils" ctestutil "github.com/hashicorp/nomad/client/testutil" @@ -33,10 +34,8 @@ func javaCompatible(t *testing.T) { } func TestJavaDriver_Fingerprint(t *testing.T) { + ci.Parallel(t) javaCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -58,10 +57,8 @@ func TestJavaDriver_Fingerprint(t *testing.T) { } func TestJavaDriver_Jar_Start_Wait(t *testing.T) { + ci.Parallel(t) javaCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -101,10 +98,8 @@ func TestJavaDriver_Jar_Start_Wait(t *testing.T) { } func 
TestJavaDriver_Jar_Stop_Wait(t *testing.T) { + ci.Parallel(t) javaCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -165,10 +160,8 @@ func TestJavaDriver_Jar_Stop_Wait(t *testing.T) { } func TestJavaDriver_Class_Start_Wait(t *testing.T) { + ci.Parallel(t) javaCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -207,6 +200,8 @@ func TestJavaDriver_Class_Start_Wait(t *testing.T) { } func TestJavaCmdArgs(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string cfg TaskConfig @@ -256,10 +251,8 @@ func TestJavaCmdArgs(t *testing.T) { } func TestJavaDriver_ExecTaskStreaming(t *testing.T) { + ci.Parallel(t) javaCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -335,6 +328,8 @@ func copyFile(src, dst string, t *testing.T) { } func TestConfig_ParseAllHCL(t *testing.T) { + ci.Parallel(t) + cfgStr := ` config { class = "java.main" @@ -360,7 +355,7 @@ config { // Tests that a given DNSConfig properly configures dns func Test_dnsConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) ctestutil.RequireRoot(t) javaCompatible(t) require := require.New(t) @@ -415,6 +410,8 @@ func Test_dnsConfig(t *testing.T) { } func TestDriver_Config_validate(t *testing.T) { + ci.Parallel(t) + t.Run("pid/ipc", func(t *testing.T) { for _, tc := range []struct { pidMode, ipcMode string @@ -455,6 +452,8 @@ func TestDriver_Config_validate(t *testing.T) { } func TestDriver_TaskConfig_validate(t *testing.T) { + ci.Parallel(t) + t.Run("pid/ipc", func(t *testing.T) { for _, tc := range []struct { pidMode, ipcMode string diff --git a/drivers/java/utils_test.go b/drivers/java/utils_test.go index 8f2d7ad5b..f07acd4df 100644 --- a/drivers/java/utils_test.go +++ b/drivers/java/utils_test.go @@ -5,10 +5,13 @@ import ( "runtime" 
"testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestDriver_parseJavaVersionOutput(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string output string @@ -88,6 +91,7 @@ func TestDriver_parseJavaVersionOutput(t *testing.T) { } func TestDriver_javaVersionInfo(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("test requires bash to run") } @@ -114,6 +118,7 @@ func TestDriver_javaVersionInfo(t *testing.T) { } func TestDriver_javaVersionInfo_UnexpectedOutput(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("test requires bash to run") } @@ -136,6 +141,7 @@ func TestDriver_javaVersionInfo_UnexpectedOutput(t *testing.T) { } func TestDriver_javaVersionInfo_JavaVersionFails(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("test requires bash to run") } diff --git a/drivers/mock/utils_test.go b/drivers/mock/utils_test.go index 42458c978..8cd8cd471 100644 --- a/drivers/mock/utils_test.go +++ b/drivers/mock/utils_test.go @@ -4,10 +4,13 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestParseDuration(t *testing.T) { + ci.Parallel(t) + t.Run("valid case", func(t *testing.T) { v, err := parseDuration("10m") require.NoError(t, err) diff --git a/drivers/qemu/driver_test.go b/drivers/qemu/driver_test.go index 8777c7de5..5d2b71538 100644 --- a/drivers/qemu/driver_test.go +++ b/drivers/qemu/driver_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" ctestutil "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/pluginutils/hclutils" "github.com/hashicorp/nomad/helper/testlog" @@ -26,10 +27,8 @@ import ( // Verifies starting a qemu image and stopping it func TestQemuDriver_Start_Wait_Stop(t *testing.T) { + ci.Parallel(t) ctestutil.QemuCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := 
context.WithCancel(context.Background()) @@ -91,10 +90,8 @@ func TestQemuDriver_Start_Wait_Stop(t *testing.T) { // Verifies monitor socket path for old qemu func TestQemuDriver_GetMonitorPathOldQemu(t *testing.T) { + ci.Parallel(t) ctestutil.QemuCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -149,10 +146,8 @@ func TestQemuDriver_GetMonitorPathOldQemu(t *testing.T) { // Verifies monitor socket path for new qemu version func TestQemuDriver_GetMonitorPathNewQemu(t *testing.T) { + ci.Parallel(t) ctestutil.QemuCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -232,10 +227,8 @@ func copyFile(src, dst string, t *testing.T) { // Verifies starting a qemu image and stopping it func TestQemuDriver_User(t *testing.T) { + ci.Parallel(t) ctestutil.QemuCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -292,10 +285,8 @@ func TestQemuDriver_User(t *testing.T) { // Verifies getting resource usage stats // TODO(preetha) this test needs random sleeps to pass func TestQemuDriver_Stats(t *testing.T) { + ci.Parallel(t) ctestutil.QemuCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -371,12 +362,10 @@ func TestQemuDriver_Stats(t *testing.T) { } func TestQemuDriver_Fingerprint(t *testing.T) { + ci.Parallel(t) require := require.New(t) ctestutil.QemuCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -396,6 +385,8 @@ func TestQemuDriver_Fingerprint(t *testing.T) { } func TestConfig_ParseAllHCL(t *testing.T) { + ci.Parallel(t) + cfgStr := ` config { image_path = "/tmp/image_path" @@ -426,6 +417,8 @@ config { } func TestIsAllowedImagePath(t 
*testing.T) { + ci.Parallel(t) + allowedPaths := []string{"/tmp", "/opt/qemu"} allocDir := "/opt/nomad/some-alloc-dir" @@ -455,7 +448,8 @@ func TestIsAllowedImagePath(t *testing.T) { } func TestArgsAllowList(t *testing.T) { - + ci.Parallel(t) + pluginConfigAllowList := []string{"-drive", "-net", "-snapshot"} validArgs := [][]string{ diff --git a/drivers/rawexec/driver_test.go b/drivers/rawexec/driver_test.go index d6bf2f0bf..4b7635098 100644 --- a/drivers/rawexec/driver_test.go +++ b/drivers/rawexec/driver_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" ctestutil "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/pluginutils/hclutils" "github.com/hashicorp/nomad/helper/testlog" @@ -42,7 +43,7 @@ func newEnabledRawExecDriver(t *testing.T) *Driver { } func TestRawExecDriver_SetConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -80,7 +81,7 @@ func TestRawExecDriver_SetConfig(t *testing.T) { } func TestRawExecDriver_Fingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) fingerprintTest := func(config *Config, expected *drivers.Fingerprint) func(t *testing.T) { return func(t *testing.T) { @@ -142,7 +143,7 @@ func TestRawExecDriver_Fingerprint(t *testing.T) { } func TestRawExecDriver_StartWait(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d := newEnabledRawExecDriver(t) @@ -184,7 +185,7 @@ func TestRawExecDriver_StartWait(t *testing.T) { } func TestRawExecDriver_StartWaitRecoverWaitStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d := newEnabledRawExecDriver(t) @@ -267,7 +268,7 @@ func TestRawExecDriver_StartWaitRecoverWaitStop(t *testing.T) { } func TestRawExecDriver_Start_Wait_AllocDir(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d := newEnabledRawExecDriver(t) @@ -320,8 +321,8 @@ func TestRawExecDriver_Start_Wait_AllocDir(t 
*testing.T) { // processes cleanup of the children would not be possible. Thus the test // asserts that the processes get killed properly when using cgroups. func TestRawExecDriver_Start_Kill_Wait_Cgroup(t *testing.T) { + ci.Parallel(t) ctestutil.ExecCompatible(t) - t.Parallel() require := require.New(t) pidFile := "pid" @@ -412,7 +413,7 @@ func TestRawExecDriver_Start_Kill_Wait_Cgroup(t *testing.T) { } func TestRawExecDriver_Exec(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d := newEnabledRawExecDriver(t) @@ -467,6 +468,8 @@ func TestRawExecDriver_Exec(t *testing.T) { } func TestConfig_ParseAllHCL(t *testing.T) { + ci.Parallel(t) + cfgStr := ` config { command = "/bin/bash" @@ -485,7 +488,7 @@ config { } func TestRawExecDriver_Disabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d := newEnabledRawExecDriver(t) diff --git a/drivers/rawexec/driver_unix_test.go b/drivers/rawexec/driver_unix_test.go index 9d6d6d70a..bf7e0c4e2 100644 --- a/drivers/rawexec/driver_unix_test.go +++ b/drivers/rawexec/driver_unix_test.go @@ -5,19 +5,19 @@ package rawexec import ( "context" + "fmt" + "io/ioutil" "os" + "path/filepath" "regexp" "runtime" "strconv" + "strings" "syscall" "testing" - - "fmt" - "io/ioutil" - "path/filepath" - "strings" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testtask" "github.com/hashicorp/nomad/helper/uuid" basePlug "github.com/hashicorp/nomad/plugins/base" @@ -29,7 +29,7 @@ import ( ) func TestRawExecDriver_User(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS != "linux" { t.Skip("Linux only test") } @@ -61,7 +61,7 @@ func TestRawExecDriver_User(t *testing.T) { } func TestRawExecDriver_Signal(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS != "linux" { t.Skip("Linux only test") } @@ -135,7 +135,7 @@ done } func TestRawExecDriver_StartWaitStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d := 
newEnabledRawExecDriver(t) @@ -204,7 +204,7 @@ func TestRawExecDriver_StartWaitStop(t *testing.T) { // TestRawExecDriver_DestroyKillsAll asserts that when TaskDestroy is called all // task processes are cleaned up. func TestRawExecDriver_DestroyKillsAll(t *testing.T) { - t.Parallel() + ci.Parallel(t) // This only works reliably with cgroup PID tracking, happens in linux only if runtime.GOOS != "linux" { @@ -307,7 +307,7 @@ func TestRawExecDriver_DestroyKillsAll(t *testing.T) { } func TestRawExec_ExecTaskStreaming(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS == "darwin" { t.Skip("skip running exec tasks on darwin as darwin has restrictions on starting tty shells") } @@ -341,7 +341,7 @@ func TestRawExec_ExecTaskStreaming(t *testing.T) { } func TestRawExec_ExecTaskStreaming_User(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS != "linux" { t.Skip("skip, requires running on Linux for testing custom user setting") } @@ -380,7 +380,7 @@ func TestRawExec_ExecTaskStreaming_User(t *testing.T) { } func TestRawExecDriver_NoCgroup(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS != "linux" { t.Skip("Linux only test") } diff --git a/drivers/shared/capabilities/defaults_test.go b/drivers/shared/capabilities/defaults_test.go index 7fd03513e..4b4e238cb 100644 --- a/drivers/shared/capabilities/defaults_test.go +++ b/drivers/shared/capabilities/defaults_test.go @@ -5,10 +5,13 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestSet_NomadDefaults(t *testing.T) { + ci.Parallel(t) + result := NomadDefaults() require.Len(t, result.Slice(false), 13) defaults := strings.ToLower(HCLSpecLiteral) @@ -18,12 +21,16 @@ func TestSet_NomadDefaults(t *testing.T) { } func TestSet_DockerDefaults(t *testing.T) { + ci.Parallel(t) + result := DockerDefaults() require.Len(t, result.Slice(false), 14) require.Contains(t, result.String(), "net_raw") } func TestCaps_Calculate(t *testing.T) { + 
ci.Parallel(t) + for _, tc := range []struct { name string @@ -149,6 +156,8 @@ func TestCaps_Calculate(t *testing.T) { } func TestCaps_Delta(t *testing.T) { + ci.Parallel(t) + for _, tc := range []struct { name string diff --git a/drivers/shared/capabilities/set_test.go b/drivers/shared/capabilities/set_test.go index 2134719f2..b6cbaf2d2 100644 --- a/drivers/shared/capabilities/set_test.go +++ b/drivers/shared/capabilities/set_test.go @@ -3,11 +3,12 @@ package capabilities import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestSet_Empty(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { result := New(nil).Empty() @@ -26,7 +27,7 @@ func TestSet_Empty(t *testing.T) { } func TestSet_New(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("duplicates", func(t *testing.T) { result := New([]string{"chown", "sys_time", "chown"}) @@ -46,7 +47,7 @@ func TestSet_New(t *testing.T) { } func TestSet_Slice(t *testing.T) { - t.Parallel() + ci.Parallel(t) exp := []string{"chown", "net_raw", "sys_time"} @@ -67,7 +68,7 @@ func TestSet_Slice(t *testing.T) { } func TestSet_String(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("empty", func(t *testing.T) { result := New(nil).String() @@ -83,7 +84,7 @@ func TestSet_String(t *testing.T) { } func TestSet_Add(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("add one", func(t *testing.T) { s := New([]string{"chown", "net_raw"}) @@ -114,7 +115,7 @@ func TestSet_Add(t *testing.T) { } func TestSet_Remove(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("remove one", func(t *testing.T) { s := New([]string{"af_net", "chown", "net_raw", "seteuid", "sys_time"}) @@ -137,7 +138,7 @@ func TestSet_Remove(t *testing.T) { } func TestSet_Difference(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("a is empty", func(t *testing.T) { a := New(nil) @@ -162,7 +163,7 @@ func TestSet_Difference(t *testing.T) { } func TestSet_Intersect(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) t.Run("empty", func(t *testing.T) { a := New(nil) @@ -188,7 +189,7 @@ func TestSet_Intersect(t *testing.T) { } func TestSet_Union(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("empty", func(t *testing.T) { a := New(nil) diff --git a/drivers/shared/eventer/eventer_test.go b/drivers/shared/eventer/eventer_test.go index ce81bcca9..7ca0234f7 100644 --- a/drivers/shared/eventer/eventer_test.go +++ b/drivers/shared/eventer/eventer_test.go @@ -6,13 +6,14 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/plugins/drivers" "github.com/stretchr/testify/require" ) func TestEventer(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -78,7 +79,7 @@ func TestEventer(t *testing.T) { } func TestEventer_iterateConsumers(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) e := &Eventer{ diff --git a/drivers/shared/executor/executor_linux_test.go b/drivers/shared/executor/executor_linux_test.go index 687c64635..67a991111 100644 --- a/drivers/shared/executor/executor_linux_test.go +++ b/drivers/shared/executor/executor_linux_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/client/testutil" @@ -90,6 +91,7 @@ func testExecutorCommandWithChroot(t *testing.T) *testExecCmd { } func TestExecutor_configureNamespaces(t *testing.T) { + ci.Parallel(t) t.Run("host host", func(t *testing.T) { require.Equal(t, lconfigs.Namespaces{ {Type: lconfigs.NEWNS}, @@ -120,7 +122,7 @@ func TestExecutor_configureNamespaces(t *testing.T) { } func TestExecutor_Isolation_PID_and_IPC_hostMode(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) testutil.ExecCompatible(t) @@ -161,7 +163,7 @@ func 
TestExecutor_Isolation_PID_and_IPC_hostMode(t *testing.T) { } func TestExecutor_IsolationAndConstraints(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) testutil.ExecCompatible(t) @@ -253,7 +255,7 @@ passwd` // TestExecutor_CgroupPaths asserts that process starts with independent cgroups // hierarchy created for this process func TestExecutor_CgroupPaths(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) testutil.ExecCompatible(t) @@ -308,7 +310,7 @@ func TestExecutor_CgroupPaths(t *testing.T) { // TestExecutor_CgroupPaths asserts that all cgroups created for a task // are destroyed on shutdown func TestExecutor_CgroupPathsAreDestroyed(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) testutil.ExecCompatible(t) @@ -388,7 +390,7 @@ func TestExecutor_CgroupPathsAreDestroyed(t *testing.T) { } func TestUniversalExecutor_LookupTaskBin(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a temp dir @@ -430,7 +432,7 @@ func TestUniversalExecutor_LookupTaskBin(t *testing.T) { // Exec Launch looks for the binary only inside the chroot func TestExecutor_EscapeContainer(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) testutil.ExecCompatible(t) @@ -468,7 +470,7 @@ func TestExecutor_EscapeContainer(t *testing.T) { // TestExecutor_DoesNotInheritOomScoreAdj asserts that the exec processes do not // inherit the oom_score_adj value of Nomad agent/executor process func TestExecutor_DoesNotInheritOomScoreAdj(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.ExecCompatible(t) oomPath := "/proc/self/oom_score_adj" @@ -522,7 +524,7 @@ func TestExecutor_DoesNotInheritOomScoreAdj(t *testing.T) { } func TestExecutor_Capabilities(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.ExecCompatible(t) cases := []struct { @@ -602,7 +604,7 @@ CapAmb: 0000000000000000`, } func TestExecutor_ClientCleanup(t *testing.T) { - t.Parallel() + ci.Parallel(t) 
testutil.ExecCompatible(t) require := require.New(t) @@ -647,6 +649,7 @@ func TestExecutor_ClientCleanup(t *testing.T) { } func TestExecutor_cmdDevices(t *testing.T) { + ci.Parallel(t) input := []*drivers.DeviceConfig{ { HostPath: "/dev/null", @@ -680,6 +683,7 @@ func TestExecutor_cmdDevices(t *testing.T) { } func TestExecutor_cmdMounts(t *testing.T) { + ci.Parallel(t) input := []*drivers.MountConfig{ { HostPath: "/host/path-ro", @@ -716,7 +720,7 @@ func TestExecutor_cmdMounts(t *testing.T) { // TestUniversalExecutor_NoCgroup asserts that commands are executed in the // same cgroup as parent process func TestUniversalExecutor_NoCgroup(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.ExecCompatible(t) expectedBytes, err := ioutil.ReadFile("/proc/self/cgroup") diff --git a/drivers/shared/executor/executor_test.go b/drivers/shared/executor/executor_test.go index 0b7e8b000..4d930843c 100644 --- a/drivers/shared/executor/executor_test.go +++ b/drivers/shared/executor/executor_test.go @@ -16,6 +16,7 @@ import ( "time" hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/helper/testlog" @@ -129,11 +130,11 @@ func configureTLogging(t *testing.T, testcmd *testExecCmd) { return } -func TestExecutor_Start_Invalid(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Invalid(t *testing.T) { + ci.Parallel(t) invalid := "/bin/foobar" for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir @@ -150,10 +151,10 @@ func TestExecutor_Start_Invalid(pt *testing.T) { } } -func TestExecutor_Start_Wait_Failure_Code(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Wait_Failure_Code(t *testing.T) { + ci.Parallel(t) for name, factory := range 
executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir @@ -174,10 +175,10 @@ func TestExecutor_Start_Wait_Failure_Code(pt *testing.T) { } } -func TestExecutor_Start_Wait(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Wait(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir @@ -211,10 +212,10 @@ func TestExecutor_Start_Wait(pt *testing.T) { } } -func TestExecutor_Start_Wait_Children(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Wait_Children(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir @@ -250,10 +251,10 @@ func TestExecutor_Start_Wait_Children(pt *testing.T) { } } -func TestExecutor_WaitExitSignal(pt *testing.T) { - pt.Parallel() +func TestExecutor_WaitExitSignal(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir execCmd.Cmd = "/bin/sleep" @@ -308,10 +309,10 @@ func TestExecutor_WaitExitSignal(pt *testing.T) { } } -func TestExecutor_Start_Kill(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Kill(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir 
:= testExecCmd.command, testExecCmd.allocDir @@ -341,8 +342,8 @@ func TestExecutor_Start_Kill(pt *testing.T) { } func TestExecutor_Shutdown_Exit(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir execCmd.Cmd = "/bin/sleep" @@ -372,7 +373,7 @@ func TestExecutor_Shutdown_Exit(t *testing.T) { } func TestUniversalExecutor_MakeExecutable(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a temp file f, err := ioutil.TempFile("", "") if err != nil { @@ -403,7 +404,7 @@ func TestUniversalExecutor_MakeExecutable(t *testing.T) { } func TestUniversalExecutor_LookupPath(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a temp dir tmpDir, err := ioutil.TempDir("", "") @@ -515,10 +516,10 @@ func copyFile(t *testing.T, src, dst string) { // TestExecutor_Start_Kill_Immediately_NoGrace asserts that executors shutdown // immediately when sent a kill signal with no grace period. 
-func TestExecutor_Start_Kill_Immediately_NoGrace(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Kill_Immediately_NoGrace(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir @@ -551,10 +552,10 @@ func TestExecutor_Start_Kill_Immediately_NoGrace(pt *testing.T) { } } -func TestExecutor_Start_Kill_Immediately_WithGrace(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Kill_Immediately_WithGrace(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir @@ -589,11 +590,11 @@ func TestExecutor_Start_Kill_Immediately_WithGrace(pt *testing.T) { // TestExecutor_Start_NonExecutableBinaries asserts that executor marks binary as executable // before starting -func TestExecutor_Start_NonExecutableBinaries(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_NonExecutableBinaries(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) tmpDir, err := ioutil.TempDir("", "nomad-executor-tests") @@ -645,5 +646,4 @@ func TestExecutor_Start_NonExecutableBinaries(pt *testing.T) { }) }) } - } diff --git a/drivers/shared/executor/pid_collector_test.go b/drivers/shared/executor/pid_collector_test.go index c42397b68..f71c4d827 100644 --- a/drivers/shared/executor/pid_collector_test.go +++ b/drivers/shared/executor/pid_collector_test.go @@ -3,11 +3,12 @@ package executor import ( "testing" - ps "github.com/mitchellh/go-ps" + "github.com/hashicorp/nomad/ci" + "github.com/mitchellh/go-ps" ) func TestScanPids(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) p1 := NewFakeProcess(2, 5) p2 := NewFakeProcess(10, 2) p3 := NewFakeProcess(15, 6) diff --git a/helper/boltdd/boltdd_test.go b/helper/boltdd/boltdd_test.go index 19c4e6ec2..838b7ba6a 100644 --- a/helper/boltdd/boltdd_test.go +++ b/helper/boltdd/boltdd_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" @@ -46,7 +47,7 @@ func setupBoltDB(t testingT) (*DB, func()) { } func TestDB_Open(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) db, cleanup := setupBoltDB(t) @@ -56,7 +57,7 @@ func TestDB_Open(t *testing.T) { } func TestDB_Close(t *testing.T) { - t.Parallel() + ci.Parallel(t) db, cleanup := setupBoltDB(t) defer cleanup() @@ -75,7 +76,7 @@ func TestDB_Close(t *testing.T) { } func TestBucket_Create(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) db, cleanup := setupBoltDB(t) @@ -112,7 +113,7 @@ func TestBucket_Create(t *testing.T) { } func TestBucket_DedupeWrites(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) db, cleanup := setupBoltDB(t) @@ -166,7 +167,7 @@ func TestBucket_DedupeWrites(t *testing.T) { } func TestBucket_Delete(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) db, cleanup := setupBoltDB(t) diff --git a/helper/envoy/envoy_test.go b/helper/envoy/envoy_test.go index 87a979c85..90baa2d88 100644 --- a/helper/envoy/envoy_test.go +++ b/helper/envoy/envoy_test.go @@ -4,12 +4,13 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) func TestEnvoy_PortLabel(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, tc := range []struct { prefix string diff --git a/helper/flags/autopilot_flags_test.go b/helper/flags/autopilot_flags_test.go index 
ac9dbe092..8d44e7219 100644 --- a/helper/flags/autopilot_flags_test.go +++ b/helper/flags/autopilot_flags_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestFlagHelper_Pointers_Set(t *testing.T) { - t.Parallel() + ci.Parallel(t) var ( B BoolValue @@ -44,7 +45,7 @@ func TestFlagHelper_Pointers_Set(t *testing.T) { } func TestFlagHelper_Pointers_Ignored(t *testing.T) { - t.Parallel() + ci.Parallel(t) var ( B BoolValue diff --git a/helper/flags/flag_test.go b/helper/flags/flag_test.go index 03b064824..9f94c601d 100644 --- a/helper/flags/flag_test.go +++ b/helper/flags/flag_test.go @@ -5,11 +5,12 @@ import ( "reflect" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestStringFlag_implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var raw interface{} raw = new(StringFlag) @@ -19,7 +20,7 @@ func TestStringFlag_implements(t *testing.T) { } func TestStringFlagSet(t *testing.T) { - t.Parallel() + ci.Parallel(t) sv := new(StringFlag) err := sv.Set("foo") @@ -38,7 +39,7 @@ func TestStringFlagSet(t *testing.T) { } } func TestStringFlagSet_Append(t *testing.T) { - t.Parallel() + ci.Parallel(t) var ( // A test to make sure StringFlag can replace AppendSliceValue diff --git a/helper/freeport/freeport_test.go b/helper/freeport/freeport_test.go index 6d8b1f4c5..4f70491ad 100644 --- a/helper/freeport/freeport_test.go +++ b/helper/freeport/freeport_test.go @@ -61,7 +61,7 @@ func stats() (numTotal, numPending, numFree int) { func TestTakeReturn(t *testing.T) { // NOTE: for global var reasons this cannot execute in parallel - // t.Parallel() + // ci.Parallel(t) // Since this test is destructive (i.e. it leaks all ports) it means that // any other test cases in this package will not function after it runs. 
To diff --git a/helper/pluginutils/hclspecutils/dec_test.go b/helper/pluginutils/hclspecutils/dec_test.go index 067739024..82dcd4ee8 100644 --- a/helper/pluginutils/hclspecutils/dec_test.go +++ b/helper/pluginutils/hclspecutils/dec_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/plugins/shared/hclspec" "github.com/stretchr/testify/require" "github.com/zclconf/go-cty/cty" @@ -38,7 +39,7 @@ func testSpecConversions(t *testing.T, cases []testConversions) { } func TestDec_Convert_Object(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -98,7 +99,7 @@ func TestDec_Convert_Object(t *testing.T) { } func TestDec_Convert_Array(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -148,7 +149,7 @@ func TestDec_Convert_Array(t *testing.T) { } func TestDec_Convert_Attr(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -206,7 +207,7 @@ func TestDec_Convert_Attr(t *testing.T) { } func TestDec_Convert_Block(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -283,7 +284,7 @@ func TestDec_Convert_Block(t *testing.T) { } func TestDec_Convert_BlockAttrs(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -321,7 +322,7 @@ func TestDec_Convert_BlockAttrs(t *testing.T) { } func TestDec_Convert_BlockList(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -380,7 +381,7 @@ func TestDec_Convert_BlockList(t *testing.T) { } func TestDec_Convert_BlockSet(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -439,7 +440,7 @@ func TestDec_Convert_BlockSet(t *testing.T) { } func TestDec_Convert_BlockMap(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -514,7 +515,7 @@ func TestDec_Convert_BlockMap(t *testing.T) { } func TestDec_Convert_Default(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -558,7 +559,7 @@ func TestDec_Convert_Default(t *testing.T) { } func TestDec_Convert_Literal(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { diff --git a/helper/pluginutils/hclutils/testing.go b/helper/pluginutils/hclutils/testing.go index d5b67eab2..469cec7d5 100644 --- a/helper/pluginutils/hclutils/testing.go +++ b/helper/pluginutils/hclutils/testing.go @@ -6,14 +6,13 @@ import ( "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" - "github.com/mitchellh/mapstructure" - "github.com/stretchr/testify/require" - "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/nomad/helper/pluginutils/hclspecutils" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" "github.com/hashicorp/nomad/plugins/shared/hclspec" + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/require" + "github.com/zclconf/go-cty/cty" ) type HCLParser struct { diff --git a/helper/pluginutils/loader/loader_test.go b/helper/pluginutils/loader/loader_test.go index 1825104d7..de99d012e 100644 --- a/helper/pluginutils/loader/loader_test.go +++ b/helper/pluginutils/loader/loader_test.go @@ -12,6 +12,7 @@ import ( log "github.com/hashicorp/go-hclog" version "github.com/hashicorp/go-version" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs/config" "github.com/hashicorp/nomad/plugins/base" @@ -107,7 +108,7 @@ func (h *harness) cleanup() { } func TestPluginLoader_External(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -167,7 +168,7 @@ func TestPluginLoader_External(t *testing.T) { } func TestPluginLoader_External_ApiVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -271,7 +272,7 @@ func TestPluginLoader_External_ApiVersions(t *testing.T) { 
} func TestPluginLoader_External_NoApiVersion(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -301,7 +302,7 @@ func TestPluginLoader_External_NoApiVersion(t *testing.T) { } func TestPluginLoader_External_Config(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -368,7 +369,7 @@ func TestPluginLoader_External_Config(t *testing.T) { // Pass a config but make sure it is fatal func TestPluginLoader_External_Config_Bad(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a plugin @@ -403,7 +404,7 @@ func TestPluginLoader_External_Config_Bad(t *testing.T) { } func TestPluginLoader_External_VersionOverlap(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -455,7 +456,7 @@ func TestPluginLoader_External_VersionOverlap(t *testing.T) { } func TestPluginLoader_Internal(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create the harness @@ -517,7 +518,7 @@ func TestPluginLoader_Internal(t *testing.T) { } func TestPluginLoader_Internal_ApiVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -599,7 +600,7 @@ func TestPluginLoader_Internal_ApiVersions(t *testing.T) { } func TestPluginLoader_Internal_NoApiVersion(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -630,7 +631,7 @@ func TestPluginLoader_Internal_NoApiVersion(t *testing.T) { } func TestPluginLoader_Internal_Config(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create the harness @@ -701,7 +702,7 @@ func TestPluginLoader_Internal_Config(t *testing.T) { // Tests that an external config can override the config of an internal plugin func TestPluginLoader_Internal_ExternalConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create the harness @@ -772,7 +773,7 
@@ func TestPluginLoader_Internal_ExternalConfig(t *testing.T) { // Pass a config but make sure it is fatal func TestPluginLoader_Internal_Config_Bad(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create the harness @@ -810,7 +811,7 @@ func TestPluginLoader_Internal_Config_Bad(t *testing.T) { } func TestPluginLoader_InternalOverrideExternal(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -867,7 +868,7 @@ func TestPluginLoader_InternalOverrideExternal(t *testing.T) { } func TestPluginLoader_ExternalOverrideInternal(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -924,7 +925,7 @@ func TestPluginLoader_ExternalOverrideInternal(t *testing.T) { } func TestPluginLoader_Dispense_External(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -971,7 +972,7 @@ func TestPluginLoader_Dispense_External(t *testing.T) { } func TestPluginLoader_Dispense_Internal(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -1030,7 +1031,7 @@ func TestPluginLoader_Dispense_Internal(t *testing.T) { } func TestPluginLoader_Dispense_NoConfigSchema_External(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -1078,7 +1079,7 @@ func TestPluginLoader_Dispense_NoConfigSchema_External(t *testing.T) { } func TestPluginLoader_Dispense_NoConfigSchema_Internal(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -1129,7 +1130,7 @@ func TestPluginLoader_Dispense_NoConfigSchema_Internal(t *testing.T) { } func TestPluginLoader_Reattach_External(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a plugin @@ -1193,7 +1194,7 @@ func TestPluginLoader_Reattach_External(t *testing.T) { // Test the loader trying to launch a non-plugin binary func 
TestPluginLoader_Bad_Executable(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a plugin @@ -1222,10 +1223,10 @@ func TestPluginLoader_Bad_Executable(t *testing.T) { // Test that we skip directories, non-executables and follow symlinks func TestPluginLoader_External_SkipBadFiles(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("Windows currently does not skip non exe files") } - t.Parallel() require := require.New(t) // Create two plugins @@ -1285,6 +1286,8 @@ func TestPluginLoader_External_SkipBadFiles(t *testing.T) { } func TestPluginLoader_ConvertVersions(t *testing.T) { + ci.Parallel(t) + v010 := version.Must(version.NewVersion("v0.1.0")) v020 := version.Must(version.NewVersion("v0.2.0")) v021 := version.Must(version.NewVersion("v0.2.1")) diff --git a/helper/pluginutils/singleton/singleton_test.go b/helper/pluginutils/singleton/singleton_test.go index cfe067844..6252ec0e0 100644 --- a/helper/pluginutils/singleton/singleton_test.go +++ b/helper/pluginutils/singleton/singleton_test.go @@ -8,6 +8,7 @@ import ( log "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/pluginutils/loader" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/plugins/base" @@ -22,7 +23,7 @@ func harness(t *testing.T) (*SingletonLoader, *loader.MockCatalog) { // Test that multiple dispenses return the same instance func TestSingleton_Dispense(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) dispenseCalled := 0 @@ -71,7 +72,7 @@ func TestSingleton_Dispense(t *testing.T) { // Test that after a plugin is dispensed, if it exits, an error is returned on // the next dispense func TestSingleton_Dispense_Exit_Dispense(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) exited := false @@ -121,7 +122,7 @@ func TestSingleton_Dispense_Exit_Dispense(t *testing.T) { // Test that if a plugin 
errors while being dispensed, the error is returned but // not saved func TestSingleton_DispenseError_Dispense(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) dispenseCalled := 0 @@ -164,7 +165,7 @@ func TestSingleton_DispenseError_Dispense(t *testing.T) { // Test that if a plugin errors while being reattached, the error is returned but // not saved func TestSingleton_ReattachError_Dispense(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) dispenseCalled, reattachCalled := 0, 0 @@ -204,7 +205,7 @@ func TestSingleton_ReattachError_Dispense(t *testing.T) { // Test that after reattaching, dispense returns the same instance func TestSingleton_Reattach_Dispense(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) dispenseCalled, reattachCalled := 0, 0 diff --git a/helper/raftutil/msgpack_test.go b/helper/raftutil/msgpack_test.go index f75488292..111e08146 100644 --- a/helper/raftutil/msgpack_test.go +++ b/helper/raftutil/msgpack_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) @@ -28,6 +29,8 @@ func TestMaybeDecodeTimeIgnoresASCII(t *testing.T) { } func TestDecodesTime(t *testing.T) { + ci.Parallel(t) + type Value struct { CreateTime time.Time Mode string @@ -59,6 +62,8 @@ func TestDecodesTime(t *testing.T) { } func TestMyDate(t *testing.T) { + ci.Parallel(t) + handler := &codec.MsgpackHandle{} handler.TimeNotBuiltin = true diff --git a/helper/raftutil/state_test.go b/helper/raftutil/state_test.go index 3d653fbc9..d4384c8ab 100644 --- a/helper/raftutil/state_test.go +++ b/helper/raftutil/state_test.go @@ -4,6 +4,7 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/nomad/ci" raftboltdb "github.com/hashicorp/raft-boltdb/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -14,7 +15,7 @@ import ( // logs" fail with a 
helpful error message when called on an inuse // database. func TestRaftStateInfo_InUse(t *testing.T) { - t.Parallel() // since there's a 1s timeout. + ci.Parallel(t) // since there's a 1s timeout. // First create an empty raft db dir := filepath.Join(t.TempDir(), "raft.db") diff --git a/helper/tlsutil/config_test.go b/helper/tlsutil/config_test.go index f1837db7b..4a5afdb28 100644 --- a/helper/tlsutil/config_test.go +++ b/helper/tlsutil/config_test.go @@ -11,6 +11,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs/config" "github.com/hashicorp/yamux" "github.com/stretchr/testify/assert" @@ -27,6 +28,8 @@ const ( ) func TestConfig_AppendCA_None(t *testing.T) { + ci.Parallel(t) + require := require.New(t) conf := &Config{} @@ -37,6 +40,8 @@ func TestConfig_AppendCA_None(t *testing.T) { } func TestConfig_AppendCA_Valid(t *testing.T) { + ci.Parallel(t) + require := require.New(t) conf := &Config{ @@ -49,6 +54,8 @@ func TestConfig_AppendCA_Valid(t *testing.T) { } func TestConfig_AppendCA_Valid_MultipleCerts(t *testing.T) { + ci.Parallel(t) + require := require.New(t) certs := ` @@ -102,6 +109,8 @@ TttDu+g2VdbcBwVDZ49X2Md6OY2N3G8Irdlj+n+mCQJaHwVt52DRzz0= // TestConfig_AppendCA_Valid_Whitespace asserts that a PEM file containing // trailing whitespace is valid. func TestConfig_AppendCA_Valid_Whitespace(t *testing.T) { + ci.Parallel(t) + require := require.New(t) const cacertWhitespace = "./testdata/ca-whitespace.pem" @@ -117,6 +126,8 @@ func TestConfig_AppendCA_Valid_Whitespace(t *testing.T) { // TestConfig_AppendCA_Invalid_MultipleCerts_Whitespace asserts that a PEM file // containing non-PEM data between certificate blocks is still valid. 
func TestConfig_AppendCA_Valid_MultipleCerts_ExtraData(t *testing.T) { + ci.Parallel(t) + require := require.New(t) certs := ` @@ -176,6 +187,8 @@ TttDu+g2VdbcBwVDZ49X2Md6OY2N3G8Irdlj+n+mCQJaHwVt52DRzz0= // TestConfig_AppendCA_Invalid_MultipleCerts asserts only the valid certificate // is returned. func TestConfig_AppendCA_Invalid_MultipleCerts(t *testing.T) { + ci.Parallel(t) + require := require.New(t) certs := ` @@ -214,6 +227,8 @@ Invalid } func TestConfig_AppendCA_Invalid(t *testing.T) { + ci.Parallel(t) + require := require.New(t) { conf := &Config{ @@ -245,6 +260,8 @@ func TestConfig_AppendCA_Invalid(t *testing.T) { } func TestConfig_CACertificate_Valid(t *testing.T) { + ci.Parallel(t) + conf := &Config{ CAFile: cacert, } @@ -259,6 +276,8 @@ func TestConfig_CACertificate_Valid(t *testing.T) { } func TestConfig_LoadKeyPair_None(t *testing.T) { + ci.Parallel(t) + conf := &Config{ KeyLoader: &config.KeyLoader{}, } @@ -272,6 +291,8 @@ func TestConfig_LoadKeyPair_None(t *testing.T) { } func TestConfig_LoadKeyPair_Valid(t *testing.T) { + ci.Parallel(t) + conf := &Config{ CertFile: foocert, KeyFile: fookey, @@ -287,6 +308,8 @@ func TestConfig_LoadKeyPair_Valid(t *testing.T) { } func TestConfig_OutgoingTLS_MissingCA(t *testing.T) { + ci.Parallel(t) + conf := &Config{ VerifyOutgoing: true, } @@ -300,6 +323,8 @@ func TestConfig_OutgoingTLS_MissingCA(t *testing.T) { } func TestConfig_OutgoingTLS_OnlyCA(t *testing.T) { + ci.Parallel(t) + conf := &Config{ CAFile: cacert, } @@ -313,6 +338,8 @@ func TestConfig_OutgoingTLS_OnlyCA(t *testing.T) { } func TestConfig_OutgoingTLS_VerifyOutgoing(t *testing.T) { + ci.Parallel(t) + conf := &Config{ VerifyOutgoing: true, CAFile: cacert, @@ -333,6 +360,8 @@ func TestConfig_OutgoingTLS_VerifyOutgoing(t *testing.T) { } func TestConfig_OutgoingTLS_VerifyHostname(t *testing.T) { + ci.Parallel(t) + conf := &Config{ VerifyServerHostname: true, CAFile: cacert, @@ -353,6 +382,8 @@ func TestConfig_OutgoingTLS_VerifyHostname(t *testing.T) { } 
func TestConfig_OutgoingTLS_WithKeyPair(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) conf := &Config{ @@ -375,6 +406,8 @@ func TestConfig_OutgoingTLS_WithKeyPair(t *testing.T) { } func TestConfig_OutgoingTLS_PreferServerCipherSuites(t *testing.T) { + ci.Parallel(t) + require := require.New(t) { @@ -399,6 +432,8 @@ func TestConfig_OutgoingTLS_PreferServerCipherSuites(t *testing.T) { } func TestConfig_OutgoingTLS_TLSCipherSuites(t *testing.T) { + ci.Parallel(t) + require := require.New(t) { @@ -466,6 +501,8 @@ func startTLSServer(config *Config) (net.Conn, chan error) { // TODO sign the certificates for "server.regionFoo.nomad func TestConfig_outgoingWrapper_OK(t *testing.T) { + ci.Parallel(t) + config := &Config{ CAFile: cacert, CertFile: foocert, @@ -501,6 +538,7 @@ func TestConfig_outgoingWrapper_OK(t *testing.T) { } func TestConfig_outgoingWrapper_BadCert(t *testing.T) { + ci.Parallel(t) // TODO this test is currently hanging, need to investigate more. t.SkipNow() config := &Config{ @@ -536,6 +574,8 @@ func TestConfig_outgoingWrapper_BadCert(t *testing.T) { } func TestConfig_wrapTLS_OK(t *testing.T) { + ci.Parallel(t) + config := &Config{ CAFile: cacert, CertFile: foocert, @@ -567,6 +607,8 @@ func TestConfig_wrapTLS_OK(t *testing.T) { } func TestConfig_wrapTLS_BadCert(t *testing.T) { + ci.Parallel(t) + serverConfig := &Config{ CAFile: cacert, CertFile: badcert, @@ -604,6 +646,8 @@ func TestConfig_wrapTLS_BadCert(t *testing.T) { } func TestConfig_IncomingTLS(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) conf := &Config{ @@ -634,6 +678,8 @@ func TestConfig_IncomingTLS(t *testing.T) { } func TestConfig_IncomingTLS_MissingCA(t *testing.T) { + ci.Parallel(t) + conf := &Config{ VerifyIncoming: true, CertFile: foocert, @@ -647,6 +693,8 @@ func TestConfig_IncomingTLS_MissingCA(t *testing.T) { } func TestConfig_IncomingTLS_MissingKey(t *testing.T) { + ci.Parallel(t) + conf := &Config{ VerifyIncoming: true, CAFile: cacert, @@ -658,6 +706,8 @@ 
func TestConfig_IncomingTLS_MissingKey(t *testing.T) { } func TestConfig_IncomingTLS_NoVerify(t *testing.T) { + ci.Parallel(t) + conf := &Config{} tlsC, err := conf.IncomingTLSConfig() if err != nil { @@ -678,6 +728,8 @@ func TestConfig_IncomingTLS_NoVerify(t *testing.T) { } func TestConfig_IncomingTLS_PreferServerCipherSuites(t *testing.T) { + ci.Parallel(t) + require := require.New(t) { @@ -697,6 +749,8 @@ func TestConfig_IncomingTLS_PreferServerCipherSuites(t *testing.T) { } func TestConfig_IncomingTLS_TLSCipherSuites(t *testing.T) { + ci.Parallel(t) + require := require.New(t) { @@ -725,6 +779,8 @@ func TestConfig_IncomingTLS_TLSCipherSuites(t *testing.T) { // This test relies on the fact that the specified certificate has an ECDSA // signature algorithm func TestConfig_ParseCiphers_Valid(t *testing.T) { + ci.Parallel(t) + require := require.New(t) tlsConfig := &config.TLSConfig{ @@ -780,6 +836,8 @@ func TestConfig_ParseCiphers_Valid(t *testing.T) { // This test relies on the fact that the specified certificate has an ECDSA // signature algorithm func TestConfig_ParseCiphers_Default(t *testing.T) { + ci.Parallel(t) + require := require.New(t) expectedCiphers := []uint16{ @@ -808,6 +866,8 @@ func TestConfig_ParseCiphers_Default(t *testing.T) { // This test relies on the fact that the specified certificate has an ECDSA // signature algorithm func TestConfig_ParseCiphers_Invalid(t *testing.T) { + ci.Parallel(t) + require := require.New(t) invalidCiphers := []string{ @@ -832,6 +892,8 @@ func TestConfig_ParseCiphers_Invalid(t *testing.T) { // This test relies on the fact that the specified certificate has an ECDSA // signature algorithm func TestConfig_ParseCiphers_SupportedSignature(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Supported signature @@ -862,6 +924,8 @@ func TestConfig_ParseCiphers_SupportedSignature(t *testing.T) { } func TestConfig_ParseMinVersion_Valid(t *testing.T) { + ci.Parallel(t) + require := require.New(t) validVersions := 
[]string{"tls10", @@ -883,6 +947,8 @@ func TestConfig_ParseMinVersion_Valid(t *testing.T) { } func TestConfig_ParseMinVersion_Invalid(t *testing.T) { + ci.Parallel(t) + require := require.New(t) invalidVersions := []string{"tls13", @@ -898,6 +964,8 @@ func TestConfig_ParseMinVersion_Invalid(t *testing.T) { } func TestConfig_NewTLSConfiguration(t *testing.T) { + ci.Parallel(t) + require := require.New(t) conf := &config.TLSConfig{ @@ -920,6 +988,8 @@ func TestConfig_NewTLSConfiguration(t *testing.T) { } func TestConfig_ShouldReloadRPCConnections(t *testing.T) { + ci.Parallel(t) + require := require.New(t) type shouldReloadTestInput struct { diff --git a/helper/tlsutil/generate_test.go b/helper/tlsutil/generate_test.go index 5be9f7e2b..8df3fb270 100644 --- a/helper/tlsutil/generate_test.go +++ b/helper/tlsutil/generate_test.go @@ -10,11 +10,11 @@ import ( "encoding/pem" "io" "net" + "strings" "testing" "time" - "strings" - + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -34,7 +34,7 @@ func TestSerialNumber(t *testing.T) { } func TestGeneratePrivateKey(t *testing.T) { - t.Parallel() + ci.Parallel(t) _, p, err := GeneratePrivateKey() require.Nil(t, err) require.NotEmpty(t, p) @@ -62,6 +62,8 @@ func (s *TestSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) } func TestGenerateCA(t *testing.T) { + ci.Parallel(t) + t.Run("no signer", func(t *testing.T) { ca, pk, err := GenerateCA(CAOpts{Signer: &TestSigner{}}) require.Error(t, err) @@ -114,7 +116,8 @@ func TestGenerateCA(t *testing.T) { } func TestGenerateCert(t *testing.T) { - t.Parallel() + ci.Parallel(t) + signer, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) require.Nil(t, err) ca, _, err := GenerateCA(CAOpts{Signer: signer}) diff --git a/internal/testing/apitests/jobs_test.go b/internal/testing/apitests/jobs_test.go index fd7001930..7c373ae4c 100644 --- a/internal/testing/apitests/jobs_test.go +++ b/internal/testing/apitests/jobs_test.go @@ -4,12 +4,13 @@ import 
( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/stretchr/testify/assert" ) func TestJobs_Parse(t *testing.T) { - t.Parallel() + ci.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -46,7 +47,7 @@ func TestJobs_Parse(t *testing.T) { } func TestJobs_Summary_WithACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) c, s, root := makeACLClient(t, nil, nil) diff --git a/internal/testing/apitests/nodes_test.go b/internal/testing/apitests/nodes_test.go index 2bc67b0c9..becba2f2d 100644 --- a/internal/testing/apitests/nodes_test.go +++ b/internal/testing/apitests/nodes_test.go @@ -3,13 +3,14 @@ package apitests import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) func TestNodes_GC(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -21,7 +22,7 @@ func TestNodes_GC(t *testing.T) { } func TestNodes_GcAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() diff --git a/internal/testing/apitests/operator_autopilot_test.go b/internal/testing/apitests/operator_autopilot_test.go index 8c22bf644..84945ec55 100644 --- a/internal/testing/apitests/operator_autopilot_test.go +++ b/internal/testing/apitests/operator_autopilot_test.go @@ -1,18 +1,18 @@ package apitests import ( - "testing" - "fmt" + "testing" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" ) func TestAPI_OperatorAutopilotGetSetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -37,7 +37,7 @@ func 
TestAPI_OperatorAutopilotGetSetConfiguration(t *testing.T) { } func TestAPI_OperatorAutopilotCASConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -75,7 +75,7 @@ func TestAPI_OperatorAutopilotCASConfiguration(t *testing.T) { } func TestAPI_OperatorAutopilotServerHealth(t *testing.T) { - t.Parallel() + ci.Parallel(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.Server.RaftProtocol = 3 }) diff --git a/internal/testing/apitests/operator_test.go b/internal/testing/apitests/operator_test.go index bdf7477ad..fe0f1f173 100644 --- a/internal/testing/apitests/operator_test.go +++ b/internal/testing/apitests/operator_test.go @@ -5,11 +5,12 @@ import ( "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestAPI_OperatorSchedulerGetSetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -47,7 +48,7 @@ func TestAPI_OperatorSchedulerGetSetConfiguration(t *testing.T) { } func TestAPI_OperatorSchedulerCASConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() diff --git a/internal/testing/apitests/streamingsync_test.go b/internal/testing/apitests/streamingsync_test.go index e17853331..edf110594 100644 --- a/internal/testing/apitests/streamingsync_test.go +++ b/internal/testing/apitests/streamingsync_test.go @@ -5,12 +5,15 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/plugins/drivers" "github.com/stretchr/testify/require" ) // TestExecStreamingInputIsInSync asserts that a rountrip of exec streaming input doesn't lose any data func TestExecStreamingInputIsInSync(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string input 
api.ExecStreamingInput @@ -52,6 +55,8 @@ func TestExecStreamingInputIsInSync(t *testing.T) { // TestExecStreamingOutputIsInSync asserts that a rountrip of exec streaming input doesn't lose any data func TestExecStreamingOutputIsInSync(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string input api.ExecStreamingOutput diff --git a/internal/testing/apitests/structsync_test.go b/internal/testing/apitests/structsync_test.go index 6a3858c41..a40c2c718 100644 --- a/internal/testing/apitests/structsync_test.go +++ b/internal/testing/apitests/structsync_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) @@ -16,6 +17,8 @@ import ( // such dependency without affecting api clients. func TestDefaultResourcesAreInSync(t *testing.T) { + ci.Parallel(t) + apiR := api.DefaultResources() structsR := structs.DefaultResources() @@ -28,6 +31,8 @@ func TestDefaultResourcesAreInSync(t *testing.T) { } func TestMinResourcesAreInSync(t *testing.T) { + ci.Parallel(t) + apiR := api.MinResources() structsR := structs.MinResources() @@ -40,6 +45,8 @@ func TestMinResourcesAreInSync(t *testing.T) { } func TestNewDefaultRescheulePolicyInSync(t *testing.T) { + ci.Parallel(t) + cases := []struct { typ string expected structs.ReschedulePolicy @@ -62,6 +69,8 @@ func TestNewDefaultRescheulePolicyInSync(t *testing.T) { } func TestNewDefaultRestartPolicyInSync(t *testing.T) { + ci.Parallel(t) + cases := []struct { typ string expected structs.RestartPolicy diff --git a/internal/testing/apitests/tasks_test.go b/internal/testing/apitests/tasks_test.go index 7c9a45d80..b4cbe8cf9 100644 --- a/internal/testing/apitests/tasks_test.go +++ b/internal/testing/apitests/tasks_test.go @@ -5,12 +5,15 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" ) 
// Verifies that reschedule policy is merged correctly func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string jobReschedulePolicy *api.ReschedulePolicy diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index aa219c117..389d01245 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -8,6 +8,7 @@ import ( capi "github.com/hashicorp/consul/api" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -24,6 +25,8 @@ const ( ) func TestParse(t *testing.T) { + ci.Parallel(t) + cases := []struct { File string Result *api.Job @@ -1784,6 +1787,8 @@ func TestParse(t *testing.T) { } func TestBadPorts(t *testing.T) { + ci.Parallel(t) + path, err := filepath.Abs(filepath.Join("./test-fixtures", "bad-ports.hcl")) if err != nil { t.Fatalf("Can't get absolute path for file: %s", err) @@ -1797,6 +1802,8 @@ func TestBadPorts(t *testing.T) { } func TestOverlappingPorts(t *testing.T) { + ci.Parallel(t) + path, err := filepath.Abs(filepath.Join("./test-fixtures", "overlapping-ports.hcl")) if err != nil { t.Fatalf("Can't get absolute path for file: %s", err) @@ -1814,6 +1821,8 @@ func TestOverlappingPorts(t *testing.T) { } func TestIncorrectKey(t *testing.T) { + ci.Parallel(t) + path, err := filepath.Abs(filepath.Join("./test-fixtures", "basic_wrong_key.hcl")) if err != nil { t.Fatalf("Can't get absolute path for file: %s", err) diff --git a/jobspec/utils_test.go b/jobspec/utils_test.go index c571b7cba..810f6d0cc 100644 --- a/jobspec/utils_test.go +++ b/jobspec/utils_test.go @@ -3,13 +3,14 @@ package jobspec import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) // TestFlattenMapSlice asserts flattenMapSlice recursively flattens a slice of maps into a // single map. 
func TestFlattenMapSlice(t *testing.T) { - t.Parallel() + ci.Parallel(t) input := map[string]interface{}{ "foo": 123, diff --git a/jobspec2/parse_test.go b/jobspec2/parse_test.go index 2cb0496a9..b3e7e9963 100644 --- a/jobspec2/parse_test.go +++ b/jobspec2/parse_test.go @@ -8,11 +8,14 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/jobspec" "github.com/stretchr/testify/require" ) func TestEquivalentToHCL1(t *testing.T) { + ci.Parallel(t) + hclSpecDir := "../jobspec/test-fixtures/" fis, err := ioutil.ReadDir(hclSpecDir) require.NoError(t, err) @@ -41,6 +44,8 @@ func TestEquivalentToHCL1(t *testing.T) { } func TestEquivalentToHCL1_ComplexConfig(t *testing.T) { + ci.Parallel(t) + name := "./test-fixtures/config-compatibility.hcl" f, err := os.Open(name) require.NoError(t, err) @@ -58,6 +63,8 @@ func TestEquivalentToHCL1_ComplexConfig(t *testing.T) { } func TestParse_VarsAndFunctions(t *testing.T) { + ci.Parallel(t) + hcl := ` variables { region_var = "default" @@ -82,6 +89,8 @@ job "example" { } func TestParse_VariablesDefaultsAndSet(t *testing.T) { + ci.Parallel(t) + hcl := ` variables { region_var = "default_region" @@ -179,6 +188,8 @@ job "example" { // TestParse_UnknownVariables asserts that unknown variables are left intact for further processing func TestParse_UnknownVariables(t *testing.T) { + ci.Parallel(t) + hcl := ` variables { region_var = "default" @@ -212,6 +223,8 @@ job "example" { // TestParse_UnsetVariables asserts that variables that have neither types nor // values return early instead of panicking. 
func TestParse_UnsetVariables(t *testing.T) { + ci.Parallel(t) + hcl := ` variable "region_var" {} job "example" { @@ -232,6 +245,8 @@ job "example" { } func TestParse_Locals(t *testing.T) { + ci.Parallel(t) + hcl := ` variables { region_var = "default_region" @@ -279,6 +294,8 @@ job "example" { } func TestParse_FileOperators(t *testing.T) { + ci.Parallel(t) + hcl := ` job "example" { region = file("parse_test.go") @@ -314,6 +331,8 @@ job "example" { } func TestParseDynamic(t *testing.T) { + ci.Parallel(t) + hcl := ` job "example" { @@ -375,6 +394,8 @@ job "example" { } func TestParse_InvalidHCL(t *testing.T) { + ci.Parallel(t) + t.Run("invalid body", func(t *testing.T) { hcl := `invalid{hcl` @@ -418,6 +439,8 @@ job "example" { } func TestParse_InvalidScalingSyntax(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string expectedErr string @@ -582,6 +605,8 @@ job "example" { } func TestParseJob_JobWithFunctionsAndLookups(t *testing.T) { + ci.Parallel(t) + hcl := ` variable "env" { description = "target environment for the job" @@ -711,6 +736,8 @@ job "job-webserver" { } func TestParse_TaskEnvs(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string envSnippet string @@ -784,6 +811,8 @@ job "example" { } func TestParse_TaskEnvs_Multiple(t *testing.T) { + ci.Parallel(t) + hcl := ` job "example" { group "group" { @@ -808,6 +837,8 @@ job "example" { } func Test_TaskEnvs_Invalid(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string envSnippet string @@ -856,6 +887,8 @@ job "example" { } func TestParse_Meta_Alternatives(t *testing.T) { + ci.Parallel(t) + hcl := ` job "example" { group "group" { task "task" { @@ -904,6 +937,7 @@ func TestParse_Meta_Alternatives(t *testing.T) { // TestParse_UndefinedVariables asserts that values with undefined variables are left // intact in the job representation func TestParse_UndefinedVariables(t *testing.T) { + ci.Parallel(t) cases := []string{ "plain", @@ -947,6 +981,8 @@ func 
TestParse_UndefinedVariables(t *testing.T) { } func TestParseServiceCheck(t *testing.T) { + ci.Parallel(t) + hcl := ` job "group_service_check_script" { group "group" { service { @@ -996,6 +1032,8 @@ func TestParseServiceCheck(t *testing.T) { } func TestWaitConfig(t *testing.T) { + ci.Parallel(t) + hclBytes, err := os.ReadFile("test-fixtures/template-wait-config.hcl") require.NoError(t, err) diff --git a/lib/circbufwriter/writer_test.go b/lib/circbufwriter/writer_test.go index e4075b496..d48a03ddf 100644 --- a/lib/circbufwriter/writer_test.go +++ b/lib/circbufwriter/writer_test.go @@ -6,11 +6,14 @@ import ( "io/ioutil" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" ) func TestWriter_NonBlockingWrite(t *testing.T) { + ci.Parallel(t) + require := require.New(t) var buf bytes.Buffer w := New(&buf, 64) @@ -40,6 +43,8 @@ func (b *blockingWriter) Write(p []byte) (nn int, err error) { } func TestWriter_BlockingWrite(t *testing.T) { + ci.Parallel(t) + require := require.New(t) blockCh := make(chan struct{}) bw := &blockingWriter{unblock: blockCh} @@ -64,6 +69,8 @@ func TestWriter_BlockingWrite(t *testing.T) { } func TestWriter_CloseClose(t *testing.T) { + ci.Parallel(t) + require := require.New(t) w := New(ioutil.Discard, 64) require.NoError(w.Close()) diff --git a/lib/cpuset/cpuset_test.go b/lib/cpuset/cpuset_test.go index ea7e72108..178517680 100644 --- a/lib/cpuset/cpuset_test.go +++ b/lib/cpuset/cpuset_test.go @@ -3,16 +3,21 @@ package cpuset import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestCPUSet_Size(t *testing.T) { + ci.Parallel(t) + set := New(0, 1, 2, 3) require.Equal(t, 4, set.Size()) require.Equal(t, 0, New().Size()) } func TestCPUSet_ToSlice(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string in CPUSet @@ -41,6 +46,8 @@ func TestCPUSet_ToSlice(t *testing.T) { } func TestCPUSet_Equals(t *testing.T) { + ci.Parallel(t) + 
cases := []struct { a CPUSet b CPUSet @@ -64,6 +71,8 @@ func TestCPUSet_Equals(t *testing.T) { } func TestCPUSet_Union(t *testing.T) { + ci.Parallel(t) + cases := []struct { a CPUSet b CPUSet @@ -87,6 +96,8 @@ func TestCPUSet_Union(t *testing.T) { } func TestCPUSet_Difference(t *testing.T) { + ci.Parallel(t) + cases := []struct { a CPUSet b CPUSet @@ -109,6 +120,8 @@ func TestCPUSet_Difference(t *testing.T) { } func TestCPUSet_IsSubsetOf(t *testing.T) { + ci.Parallel(t) + cases := []struct { a CPUSet b CPUSet @@ -128,6 +141,8 @@ func TestCPUSet_IsSubsetOf(t *testing.T) { } func TestCPUSet_IsSupersetOf(t *testing.T) { + ci.Parallel(t) + cases := []struct { a CPUSet b CPUSet @@ -147,6 +162,8 @@ func TestCPUSet_IsSupersetOf(t *testing.T) { } func TestCPUSet_ContainsAny(t *testing.T) { + ci.Parallel(t) + cases := []struct { a CPUSet b CPUSet @@ -166,6 +183,8 @@ func TestCPUSet_ContainsAny(t *testing.T) { } func TestParse(t *testing.T) { + ci.Parallel(t) + cases := []struct { cpuset string expected CPUSet @@ -187,6 +206,8 @@ func TestParse(t *testing.T) { } func TestCPUSet_String(t *testing.T) { + ci.Parallel(t) + cases := []struct { cpuset CPUSet expected string diff --git a/lib/delayheap/delay_heap_test.go b/lib/delayheap/delay_heap_test.go index 3b44acc19..28d524591 100644 --- a/lib/delayheap/delay_heap_test.go +++ b/lib/delayheap/delay_heap_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -27,6 +28,8 @@ func (d *heapNodeImpl) Namespace() string { } func TestDelayHeap_PushPop(t *testing.T) { + ci.Parallel(t) + delayHeap := NewDelayHeap() now := time.Now() require := require.New(t) @@ -72,6 +75,8 @@ func TestDelayHeap_PushPop(t *testing.T) { } func TestDelayHeap_Update(t *testing.T) { + ci.Parallel(t) + delayHeap := NewDelayHeap() now := time.Now() require := require.New(t) diff --git a/lib/kheap/score_heap_test.go b/lib/kheap/score_heap_test.go index 932c08ccf..ca500bc86 100644 --- 
a/lib/kheap/score_heap_test.go +++ b/lib/kheap/score_heap_test.go @@ -4,6 +4,7 @@ import ( "container/heap" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -21,6 +22,8 @@ func (h *heapItem) Score() float64 { } func TestScoreHeap(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string items map[string]float64 diff --git a/nomad/acl_endpoint_test.go b/nomad/acl_endpoint_test.go index 6e934b004..4485aa4c4 100644 --- a/nomad/acl_endpoint_test.go +++ b/nomad/acl_endpoint_test.go @@ -10,6 +10,7 @@ import ( "time" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -19,7 +20,7 @@ import ( ) func TestACLEndpoint_GetPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -105,7 +106,7 @@ func TestACLEndpoint_GetPolicy(t *testing.T) { } func TestACLEndpoint_GetPolicy_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -185,7 +186,7 @@ func TestACLEndpoint_GetPolicy_Blocking(t *testing.T) { } func TestACLEndpoint_GetPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -225,7 +226,7 @@ func TestACLEndpoint_GetPolicies(t *testing.T) { } func TestACLEndpoint_GetPolicies_TokenSubset(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -266,7 +267,7 @@ func TestACLEndpoint_GetPolicies_TokenSubset(t *testing.T) { } func TestACLEndpoint_GetPolicies_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -346,8 +347,8 @@ func TestACLEndpoint_GetPolicies_Blocking(t *testing.T) { } func TestACLEndpoint_ListPolicies(t *testing.T) { + ci.Parallel(t) 
assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -417,7 +418,7 @@ func TestACLEndpoint_ListPolicies(t *testing.T) { // unauthenticated ListPolicies returns anonymous policy if one // exists, otherwise, empty func TestACLEndpoint_ListPolicies_Unauthenticated(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -466,7 +467,7 @@ func TestACLEndpoint_ListPolicies_Unauthenticated(t *testing.T) { } func TestACLEndpoint_ListPolicies_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -527,7 +528,7 @@ func TestACLEndpoint_ListPolicies_Blocking(t *testing.T) { } func TestACLEndpoint_DeletePolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -554,7 +555,7 @@ func TestACLEndpoint_DeletePolicies(t *testing.T) { } func TestACLEndpoint_UpsertPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -585,7 +586,7 @@ func TestACLEndpoint_UpsertPolicies(t *testing.T) { } func TestACLEndpoint_UpsertPolicies_Invalid(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -613,7 +614,7 @@ func TestACLEndpoint_UpsertPolicies_Invalid(t *testing.T) { } func TestACLEndpoint_GetToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -659,7 +660,7 @@ func TestACLEndpoint_GetToken(t *testing.T) { } func TestACLEndpoint_GetToken_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -739,7 +740,7 @@ func TestACLEndpoint_GetToken_Blocking(t *testing.T) { } func TestACLEndpoint_GetTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer 
cleanupS1() @@ -778,7 +779,7 @@ func TestACLEndpoint_GetTokens(t *testing.T) { } func TestACLEndpoint_GetTokens_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -858,7 +859,7 @@ func TestACLEndpoint_GetTokens_Blocking(t *testing.T) { } func TestACLEndpoint_ListTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -920,7 +921,7 @@ func TestACLEndpoint_ListTokens(t *testing.T) { } func TestACLEndpoint_ListTokens_PaginationFiltering(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.ACLEnabled = true }) @@ -1108,7 +1109,7 @@ func TestACLEndpoint_ListTokens_PaginationFiltering(t *testing.T) { } func TestACLEndpoint_ListTokens_Order(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.ACLEnabled = true @@ -1200,7 +1201,7 @@ func TestACLEndpoint_ListTokens_Order(t *testing.T) { } func TestACLEndpoint_ListTokens_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1261,7 +1262,7 @@ func TestACLEndpoint_ListTokens_Blocking(t *testing.T) { } func TestACLEndpoint_DeleteTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1288,7 +1289,7 @@ func TestACLEndpoint_DeleteTokens(t *testing.T) { } func TestACLEndpoint_DeleteTokens_WithNonexistentToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -1315,7 +1316,7 @@ func TestACLEndpoint_DeleteTokens_WithNonexistentToken(t *testing.T) { } func TestACLEndpoint_Bootstrap(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.ACLEnabled = true }) @@ -1350,7 +1351,7 @@ func TestACLEndpoint_Bootstrap(t *testing.T) { } func TestACLEndpoint_Bootstrap_Reset(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) dir := tmpDir(t) defer os.RemoveAll(dir) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -1412,7 +1413,7 @@ func TestACLEndpoint_Bootstrap_Reset(t *testing.T) { } func TestACLEndpoint_UpsertTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1469,7 +1470,7 @@ func TestACLEndpoint_UpsertTokens(t *testing.T) { } func TestACLEndpoint_UpsertTokens_Invalid(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1497,7 +1498,7 @@ func TestACLEndpoint_UpsertTokens_Invalid(t *testing.T) { } func TestACLEndpoint_ResolveToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -1529,7 +1530,7 @@ func TestACLEndpoint_ResolveToken(t *testing.T) { } func TestACLEndpoint_OneTimeToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() diff --git a/nomad/acl_test.go b/nomad/acl_test.go index 43337295b..867150639 100644 --- a/nomad/acl_test.go +++ b/nomad/acl_test.go @@ -5,6 +5,7 @@ import ( lru "github.com/hashicorp/golang-lru" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -14,7 +15,7 @@ import ( ) func TestResolveACLToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create mock state store and cache state := state.TestStateStore(t) @@ -93,7 +94,7 @@ func TestResolveACLToken(t *testing.T) { } func TestResolveACLToken_LeaderToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -109,7 +110,7 @@ func TestResolveACLToken_LeaderToken(t *testing.T) { } func TestResolveSecretToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, 
cleanupS1 := TestACLServer(t, nil) defer cleanupS1() diff --git a/nomad/alloc_endpoint_test.go b/nomad/alloc_endpoint_test.go index 9efdc06bb..259a6311d 100644 --- a/nomad/alloc_endpoint_test.go +++ b/nomad/alloc_endpoint_test.go @@ -6,19 +6,19 @@ import ( "time" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAllocEndpoint_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -92,7 +92,7 @@ func TestAllocEndpoint_List(t *testing.T) { } func TestAllocEndpoint_List_PaginationFiltering(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -313,7 +313,7 @@ func TestAllocEndpoint_List_PaginationFiltering(t *testing.T) { } func TestAllocEndpoint_List_order(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -401,7 +401,7 @@ func TestAllocEndpoint_List_order(t *testing.T) { } func TestAllocEndpoint_List_Fields(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -502,7 +502,7 @@ func TestAllocEndpoint_List_Fields(t *testing.T) { } func TestAllocEndpoint_List_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -559,7 +559,7 @@ func TestAllocEndpoint_List_ACL(t *testing.T) { } func TestAllocEndpoint_List_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -637,7 +637,7 @@ 
func TestAllocEndpoint_List_Blocking(t *testing.T) { // TestAllocEndpoint_List_AllNamespaces_OSS asserts that server // returns all allocations across namespaces. func TestAllocEndpoint_List_AllNamespaces_OSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -718,7 +718,7 @@ func TestAllocEndpoint_List_AllNamespaces_OSS(t *testing.T) { } func TestAllocEndpoint_GetAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -759,7 +759,7 @@ func TestAllocEndpoint_GetAlloc(t *testing.T) { } func TestAllocEndpoint_GetAlloc_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -870,7 +870,7 @@ func TestAllocEndpoint_GetAlloc_ACL(t *testing.T) { } func TestAllocEndpoint_GetAlloc_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -926,7 +926,7 @@ func TestAllocEndpoint_GetAlloc_Blocking(t *testing.T) { } func TestAllocEndpoint_GetAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -974,7 +974,7 @@ func TestAllocEndpoint_GetAllocs(t *testing.T) { } func TestAllocEndpoint_GetAllocs_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1030,7 +1030,7 @@ func TestAllocEndpoint_GetAllocs_Blocking(t *testing.T) { } func TestAllocEndpoint_UpdateDesiredTransition(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, _, cleanupS1 := TestACLServer(t, nil) @@ -1114,7 +1114,7 @@ func TestAllocEndpoint_UpdateDesiredTransition(t *testing.T) { } func TestAllocEndpoint_Stop_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, _, cleanupS1 := TestACLServer(t, nil) @@ -1176,7 +1176,7 @@ func TestAllocEndpoint_Stop_ACL(t *testing.T) { } func TestAllocEndpoint_List_AllNamespaces_ACL_OSS(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() diff --git a/nomad/autopilot_test.go b/nomad/autopilot_test.go index 6e8dae543..8be118a58 100644 --- a/nomad/autopilot_test.go +++ b/nomad/autopilot_test.go @@ -1,13 +1,13 @@ package nomad import ( + "fmt" "testing" "time" - "fmt" - "github.com/hashicorp/consul/agent/consul/autopilot" "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/hashicorp/raft" "github.com/hashicorp/serf/serf" @@ -67,7 +67,7 @@ func wantRaft(servers []*Server) error { } func TestAutopilot_CleanupDeadServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("raft_v2", func(t *testing.T) { testCleanupDeadServer(t, 2) }) t.Run("raft_v3", func(t *testing.T) { testCleanupDeadServer(t, 3) }) } @@ -143,7 +143,7 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) { } func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := func(c *Config) { c.BootstrapExpect = 5 @@ -193,7 +193,7 @@ func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) { } func TestAutopilot_RollingUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := func(c *Config) { c.BootstrapExpect = 3 @@ -270,7 +270,7 @@ func TestAutopilot_RollingUpdate(t *testing.T) { func TestAutopilot_CleanupStaleRaftServer(t *testing.T) { t.Skip("TestAutopilot_CleanupDeadServer is very flaky, removing it for now") - t.Parallel() + ci.Parallel(t) conf := func(c *Config) { c.BootstrapExpect = 3 @@ -319,7 +319,7 @@ func TestAutopilot_CleanupStaleRaftServer(t *testing.T) { } func TestAutopilot_PromoteNonVoter(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = 3 diff --git a/nomad/blocked_evals_stats_test.go b/nomad/blocked_evals_stats_test.go index 235e451c1..7e6fcd525 100644 --- a/nomad/blocked_evals_stats_test.go +++ 
b/nomad/blocked_evals_stats_test.go @@ -8,6 +8,7 @@ import ( "testing/quick" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" ) @@ -98,7 +99,7 @@ func clearTimestampFromBlockedResourceStats(b *BlockedResourcesStats) { // TestBlockedEvalsStats_BlockedResources generates random evals and processes // them using the expected code paths and a manual check of the expeceted result. func TestBlockedEvalsStats_BlockedResources(t *testing.T) { - t.Parallel() + ci.Parallel(t) blocked, _ := testBlockedEvals(t) // evalHistory stores all evals generated during the test. diff --git a/nomad/blocked_evals_test.go b/nomad/blocked_evals_test.go index 244308fba..cb96a6f89 100644 --- a/nomad/blocked_evals_test.go +++ b/nomad/blocked_evals_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -21,7 +22,7 @@ func testBlockedEvals(t *testing.T) (*BlockedEvals, *EvalBroker) { } func TestBlockedEvals_Block_Disabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -40,7 +41,7 @@ func TestBlockedEvals_Block_Disabled(t *testing.T) { } func TestBlockedEvals_Block_SameJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -60,7 +61,7 @@ func TestBlockedEvals_Block_SameJob(t *testing.T) { } func TestBlockedEvals_Block_Quota(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -78,7 +79,7 @@ func TestBlockedEvals_Block_Quota(t *testing.T) { } func TestBlockedEvals_Block_PriorUnblocks(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -101,7 +102,7 @@ func TestBlockedEvals_Block_PriorUnblocks(t *testing.T) { } func 
TestBlockedEvals_GetDuplicates(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -163,7 +164,7 @@ func TestBlockedEvals_GetDuplicates(t *testing.T) { } func TestBlockedEvals_UnblockEscaped(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -210,7 +211,7 @@ func requireBlockedEvalsEnqueued(t *testing.T, blocked *BlockedEvals, broker *Ev } func TestBlockedEvals_UnblockEligible(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -231,7 +232,7 @@ func TestBlockedEvals_UnblockEligible(t *testing.T) { } func TestBlockedEvals_UnblockIneligible(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -275,7 +276,7 @@ func TestBlockedEvals_UnblockIneligible(t *testing.T) { } func TestBlockedEvals_UnblockUnknown(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -298,7 +299,7 @@ func TestBlockedEvals_UnblockUnknown(t *testing.T) { } func TestBlockedEvals_UnblockEligible_Quota(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -319,7 +320,7 @@ func TestBlockedEvals_UnblockEligible_Quota(t *testing.T) { } func TestBlockedEvals_UnblockIneligible_Quota(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -363,7 +364,7 @@ func TestBlockedEvals_UnblockIneligible_Quota(t *testing.T) { } func TestBlockedEvals_Reblock(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -404,7 +405,7 @@ func TestBlockedEvals_Reblock(t *testing.T) { // Test the block case in which the eval should be immediately unblocked since // it is escaped and old func TestBlockedEvals_Block_ImmediateUnblock_Escaped(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -432,7 +433,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_Escaped(t *testing.T) { // there is an unblock on an unseen class that occurred while it was in the // scheduler func TestBlockedEvals_Block_ImmediateUnblock_UnseenClass_After(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -460,7 +461,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_UnseenClass_After(t *testing.T) { // there is an unblock on an unseen class that occurred before it was in the // scheduler func TestBlockedEvals_Block_ImmediateUnblock_UnseenClass_Before(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -485,7 +486,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_UnseenClass_Before(t *testing.T) { // Test the block case in which the eval should be immediately unblocked since // it a class it is eligible for has been unblocked func TestBlockedEvals_Block_ImmediateUnblock_SeenClass(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -512,7 +513,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_SeenClass(t *testing.T) { // Test the block case in which the eval should be immediately unblocked since // it a quota has changed that it is using func TestBlockedEvals_Block_ImmediateUnblock_Quota(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -538,7 +539,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_Quota(t *testing.T) { } func TestBlockedEvals_UnblockFailed(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -584,7 +585,7 @@ func TestBlockedEvals_UnblockFailed(t *testing.T) { } func TestBlockedEvals_Untrack(t *testing.T) { - t.Parallel() + ci.Parallel(t) require 
:= require.New(t) blocked, _ := testBlockedEvals(t) @@ -612,7 +613,7 @@ func TestBlockedEvals_Untrack(t *testing.T) { } func TestBlockedEvals_Untrack_Quota(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -640,7 +641,7 @@ func TestBlockedEvals_Untrack_Quota(t *testing.T) { } func TestBlockedEvals_UnblockNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -670,7 +671,7 @@ func TestBlockedEvals_UnblockNode(t *testing.T) { } func TestBlockedEvals_SystemUntrack(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -699,7 +700,7 @@ func TestBlockedEvals_SystemUntrack(t *testing.T) { } func TestBlockedEvals_SystemDisableFlush(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) diff --git a/nomad/client_agent_endpoint_test.go b/nomad/client_agent_endpoint_test.go index e3c11ee83..2b7578600 100644 --- a/nomad/client_agent_endpoint_test.go +++ b/nomad/client_agent_endpoint_test.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" "github.com/hashicorp/nomad/client/config" sframer "github.com/hashicorp/nomad/client/lib/streamframer" @@ -27,7 +28,7 @@ import ( ) func TestMonitor_Monitor_Remote_Client(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // start server and client @@ -124,7 +125,7 @@ OUTER: } func TestMonitor_Monitor_RemoteServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) foreignRegion := "foo" // start servers @@ -325,7 +326,7 @@ func TestMonitor_Monitor_RemoteServer(t *testing.T) { } func TestMonitor_MonitorServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // start server @@ -419,7 +420,7 @@ OUTER: } func 
TestMonitor_Monitor_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // start server @@ -524,7 +525,7 @@ func TestMonitor_Monitor_ACL(t *testing.T) { } func TestAgentProfile_RemoteClient(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // start server and client @@ -648,7 +649,7 @@ func TestAgentProfile_RemoteRegion(t *testing.T) { } func TestAgentProfile_Server(t *testing.T) { - + ci.Parallel(t) // start servers s1, cleanup := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 @@ -750,7 +751,7 @@ func TestAgentProfile_Server(t *testing.T) { } func TestAgentProfile_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // start server @@ -809,7 +810,7 @@ func TestAgentProfile_ACL(t *testing.T) { } func TestAgentHost_Server(t *testing.T) { - t.Parallel() + ci.Parallel(t) // start servers s1, cleanup := TestServer(t, func(c *Config) { @@ -932,7 +933,7 @@ func TestAgentHost_Server(t *testing.T) { } func TestAgentHost_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // start server s, root, cleanupS := TestACLServer(t, nil) @@ -989,7 +990,7 @@ func TestAgentHost_ACL(t *testing.T) { } func TestAgentHost_ACLDebugRequired(t *testing.T) { - t.Parallel() + ci.Parallel(t) // start server s, cleanupS := TestServer(t, func(c *Config) { diff --git a/nomad/client_alloc_endpoint_test.go b/nomad/client_alloc_endpoint_test.go index 24916904e..68cbfe276 100644 --- a/nomad/client_alloc_endpoint_test.go +++ b/nomad/client_alloc_endpoint_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" "github.com/hashicorp/nomad/client/config" cstructs "github.com/hashicorp/nomad/client/structs" @@ -25,7 +26,7 @@ import ( ) func TestClientAllocations_GarbageCollectAll_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := 
require.New(t) // Start a server and client @@ -65,7 +66,7 @@ func TestClientAllocations_GarbageCollectAll_Local(t *testing.T) { } func TestClientAllocations_GarbageCollectAll_Local_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server @@ -125,7 +126,7 @@ func TestClientAllocations_GarbageCollectAll_Local_ACL(t *testing.T) { } func TestClientAllocations_GarbageCollectAll_NoNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -148,7 +149,7 @@ func TestClientAllocations_GarbageCollectAll_NoNode(t *testing.T) { } func TestClientAllocations_GarbageCollectAll_OldNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and fake an old client @@ -181,7 +182,7 @@ func TestClientAllocations_GarbageCollectAll_OldNode(t *testing.T) { } func TestClientAllocations_GarbageCollectAll_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -241,7 +242,7 @@ func TestClientAllocations_GarbageCollectAll_Remote(t *testing.T) { } func TestClientAllocations_GarbageCollect_OldNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and fake an old client @@ -281,7 +282,7 @@ func TestClientAllocations_GarbageCollect_OldNode(t *testing.T) { } func TestClientAllocations_GarbageCollect_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -363,7 +364,7 @@ func TestClientAllocations_GarbageCollect_Local(t *testing.T) { } func TestClientAllocations_GarbageCollect_Local_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := TestACLServer(t, nil) @@ -429,7 +430,7 @@ func TestClientAllocations_GarbageCollect_Local_ACL(t *testing.T) { } func TestClientAllocations_GarbageCollect_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a 
server and client @@ -532,7 +533,7 @@ func TestClientAllocations_GarbageCollect_Remote(t *testing.T) { } func TestClientAllocations_Stats_OldNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and fake an old client @@ -571,7 +572,7 @@ func TestClientAllocations_Stats_OldNode(t *testing.T) { } func TestClientAllocations_Stats_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -653,7 +654,7 @@ func TestClientAllocations_Stats_Local(t *testing.T) { } func TestClientAllocations_Stats_Local_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := TestACLServer(t, nil) @@ -719,7 +720,7 @@ func TestClientAllocations_Stats_Local_ACL(t *testing.T) { } func TestClientAllocations_Stats_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -810,7 +811,7 @@ func TestClientAllocations_Stats_Remote(t *testing.T) { } func TestClientAllocations_Restart_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -916,7 +917,7 @@ func TestClientAllocations_Restart_Local(t *testing.T) { } func TestClientAllocations_Restart_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1009,6 +1010,8 @@ func TestClientAllocations_Restart_Remote(t *testing.T) { } func TestClientAllocations_Restart_ACL(t *testing.T) { + ci.Parallel(t) + // Start a server s, root, cleanupS := TestACLServer(t, nil) defer cleanupS() @@ -1075,7 +1078,7 @@ func TestClientAllocations_Restart_ACL(t *testing.T) { // TestAlloc_ExecStreaming asserts that exec task requests are forwarded // to appropriate server or remote regions func TestAlloc_ExecStreaming(t *testing.T) { - t.Parallel() + ci.Parallel(t) ////// Nomad clusters topology - not specific to test localServer, cleanupLS := TestServer(t, func(c *Config) { 
diff --git a/nomad/client_csi_endpoint_test.go b/nomad/client_csi_endpoint_test.go index 45c288f9a..9291b2fcf 100644 --- a/nomad/client_csi_endpoint_test.go +++ b/nomad/client_csi_endpoint_test.go @@ -8,6 +8,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" "github.com/hashicorp/nomad/client/config" cstructs "github.com/hashicorp/nomad/client/structs" @@ -97,7 +98,7 @@ func (c *MockClientCSI) NodeDetachVolume(req *cstructs.ClientCSINodeDetachVolume } func TestClientCSIController_AttachVolume_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -113,7 +114,7 @@ func TestClientCSIController_AttachVolume_Local(t *testing.T) { } func TestClientCSIController_AttachVolume_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -129,7 +130,7 @@ func TestClientCSIController_AttachVolume_Forwarded(t *testing.T) { } func TestClientCSIController_DetachVolume_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -145,7 +146,7 @@ func TestClientCSIController_DetachVolume_Local(t *testing.T) { } func TestClientCSIController_DetachVolume_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -161,7 +162,7 @@ func TestClientCSIController_DetachVolume_Forwarded(t *testing.T) { } func TestClientCSIController_ValidateVolume_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -178,7 +179,7 @@ func TestClientCSIController_ValidateVolume_Local(t *testing.T) { } func TestClientCSIController_ValidateVolume_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := 
require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -195,7 +196,7 @@ func TestClientCSIController_ValidateVolume_Forwarded(t *testing.T) { } func TestClientCSIController_CreateVolume_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -211,7 +212,7 @@ func TestClientCSIController_CreateVolume_Local(t *testing.T) { } func TestClientCSIController_CreateVolume_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -227,7 +228,7 @@ func TestClientCSIController_CreateVolume_Forwarded(t *testing.T) { } func TestClientCSIController_DeleteVolume_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -244,7 +245,7 @@ func TestClientCSIController_DeleteVolume_Local(t *testing.T) { } func TestClientCSIController_DeleteVolume_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -261,7 +262,7 @@ func TestClientCSIController_DeleteVolume_Forwarded(t *testing.T) { } func TestClientCSIController_ListVolumes_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -277,7 +278,7 @@ func TestClientCSIController_ListVolumes_Local(t *testing.T) { } func TestClientCSIController_ListVolumes_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -293,7 +294,7 @@ func TestClientCSIController_ListVolumes_Forwarded(t *testing.T) { } func TestClientCSIController_CreateSnapshot_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -309,7 +310,7 @@ func TestClientCSIController_CreateSnapshot_Local(t *testing.T) { } func 
TestClientCSIController_CreateSnapshot_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -325,7 +326,7 @@ func TestClientCSIController_CreateSnapshot_Forwarded(t *testing.T) { } func TestClientCSIController_DeleteSnapshot_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -342,7 +343,7 @@ func TestClientCSIController_DeleteSnapshot_Local(t *testing.T) { } func TestClientCSIController_DeleteSnapshot_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -359,7 +360,7 @@ func TestClientCSIController_DeleteSnapshot_Forwarded(t *testing.T) { } func TestClientCSIController_ListSnapshots_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -375,7 +376,7 @@ func TestClientCSIController_ListSnapshots_Local(t *testing.T) { } func TestClientCSIController_ListSnapshots_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -391,7 +392,7 @@ func TestClientCSIController_ListSnapshots_Forwarded(t *testing.T) { } func TestClientCSI_NodeForControllerPlugin(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) {}) testutil.WaitForLeader(t, srv.RPC) defer shutdown() diff --git a/nomad/client_fs_endpoint_test.go b/nomad/client_fs_endpoint_test.go index fc5a68e48..709e0d77b 100644 --- a/nomad/client_fs_endpoint_test.go +++ b/nomad/client_fs_endpoint_test.go @@ -11,6 +11,7 @@ import ( codec "github.com/hashicorp/go-msgpack/codec" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" "github.com/hashicorp/nomad/client/config" cstructs 
"github.com/hashicorp/nomad/client/structs" @@ -22,7 +23,7 @@ import ( ) func TestClientFS_List_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -106,7 +107,7 @@ func TestClientFS_List_Local(t *testing.T) { } func TestClientFS_List_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := TestACLServer(t, nil) @@ -173,7 +174,7 @@ func TestClientFS_List_ACL(t *testing.T) { } func TestClientFS_List_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -267,7 +268,7 @@ func TestClientFS_List_Remote(t *testing.T) { } func TestClientFS_Stat_OldNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server @@ -298,7 +299,7 @@ func TestClientFS_Stat_OldNode(t *testing.T) { } func TestClientFS_Stat_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -382,7 +383,7 @@ func TestClientFS_Stat_Local(t *testing.T) { } func TestClientFS_Stat_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := TestACLServer(t, nil) @@ -449,7 +450,7 @@ func TestClientFS_Stat_ACL(t *testing.T) { } func TestClientFS_Stat_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -543,7 +544,7 @@ func TestClientFS_Stat_Remote(t *testing.T) { } func TestClientFS_Streaming_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -614,7 +615,7 @@ OUTER: } func TestClientFS_Streaming_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := TestACLServer(t, nil) @@ -730,7 +731,7 @@ func TestClientFS_Streaming_ACL(t *testing.T) { } func TestClientFS_Streaming_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -862,7 +863,7 
@@ OUTER: } func TestClientFS_Streaming_Local_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1000,7 +1001,7 @@ OUTER: } func TestClientFS_Streaming_Remote_Server(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1148,7 +1149,7 @@ OUTER: } func TestClientFS_Streaming_Remote_Region(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1292,7 +1293,7 @@ OUTER: } func TestClientFS_Logs_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1363,7 +1364,7 @@ OUTER: } func TestClientFS_Logs_OldNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server @@ -1443,7 +1444,7 @@ OUTER: } func TestClientFS_Logs_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := TestACLServer(t, nil) @@ -1559,7 +1560,7 @@ func TestClientFS_Logs_ACL(t *testing.T) { } func TestClientFS_Logs_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1692,7 +1693,7 @@ OUTER: } func TestClientFS_Logs_Local_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1831,7 +1832,7 @@ OUTER: } func TestClientFS_Logs_Remote_Server(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1980,7 +1981,7 @@ OUTER: } func TestClientFS_Logs_Remote_Region(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client diff --git a/nomad/client_rpc_test.go b/nomad/client_rpc_test.go index a65344028..efb3ce658 100644 --- a/nomad/client_rpc_test.go +++ b/nomad/client_rpc_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" 
"github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/uuid" @@ -28,7 +29,7 @@ func (n namedConnWrapper) LocalAddr() net.Addr { } func TestServer_removeNodeConn_differentAddrs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -85,7 +86,7 @@ func TestServer_removeNodeConn_differentAddrs(t *testing.T) { } func TestServerWithNodeConn_NoPath(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -107,7 +108,7 @@ func TestServerWithNodeConn_NoPath(t *testing.T) { } func TestServerWithNodeConn_NoPath_Region(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -121,7 +122,7 @@ func TestServerWithNodeConn_NoPath_Region(t *testing.T) { } func TestServerWithNodeConn_Path(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -149,7 +150,7 @@ func TestServerWithNodeConn_Path(t *testing.T) { } func TestServerWithNodeConn_Path_Region(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -175,7 +176,7 @@ func TestServerWithNodeConn_Path_Region(t *testing.T) { } func TestServerWithNodeConn_Path_Newest(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -211,7 +212,7 @@ func TestServerWithNodeConn_Path_Newest(t *testing.T) { } func TestServerWithNodeConn_PathAndErr(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -247,7 +248,7 @@ func TestServerWithNodeConn_PathAndErr(t *testing.T) { } func TestServerWithNodeConn_NoPathAndErr(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -281,7 +282,7 @@ func 
TestServerWithNodeConn_NoPathAndErr(t *testing.T) { } func TestNodeStreamingRpc_badEndpoint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() diff --git a/nomad/client_stats_endpoint_test.go b/nomad/client_stats_endpoint_test.go index f36c838d2..de6da4bec 100644 --- a/nomad/client_stats_endpoint_test.go +++ b/nomad/client_stats_endpoint_test.go @@ -6,6 +6,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" "github.com/hashicorp/nomad/client/config" cstructs "github.com/hashicorp/nomad/client/structs" @@ -17,7 +18,7 @@ import ( ) func TestClientStats_Stats_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -58,7 +59,7 @@ func TestClientStats_Stats_Local(t *testing.T) { } func TestClientStats_Stats_Local_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server @@ -118,7 +119,7 @@ func TestClientStats_Stats_Local_ACL(t *testing.T) { } func TestClientStats_Stats_NoNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -142,7 +143,7 @@ func TestClientStats_Stats_NoNode(t *testing.T) { } func TestClientStats_Stats_OldNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server @@ -169,7 +170,7 @@ func TestClientStats_Stats_OldNode(t *testing.T) { } func TestClientStats_Stats_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client diff --git a/nomad/consul_oss_test.go b/nomad/consul_oss_test.go index 520467d71..c1cd375b4 100644 --- a/nomad/consul_oss_test.go +++ b/nomad/consul_oss_test.go @@ -8,6 +8,7 @@ import ( "errors" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" 
"github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -15,13 +16,12 @@ import ( ) func TestConsulACLsAPI_CheckPermissions_oss(t *testing.T) { + ci.Parallel(t) // In Nomad OSS, CheckPermissions will only receive "" as input for the // namespace parameter - as the ConsulUsage map from namespace to usages will // always contain one key - the empty string. - t.Parallel() - try := func(t *testing.T, namespace string, usage *structs.ConsulUsage, secretID string, exp error) { logger := testlog.HCLogger(t) aclAPI := consul.NewMockACLsAPI(logger) diff --git a/nomad/consul_policy_oss_test.go b/nomad/consul_policy_oss_test.go index 464bfcf09..9936c4fe9 100644 --- a/nomad/consul_policy_oss_test.go +++ b/nomad/consul_policy_oss_test.go @@ -7,13 +7,14 @@ import ( "testing" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper/testlog" "github.com/stretchr/testify/require" ) func TestConsulACLsAPI_hasSufficientPolicy_oss(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, namespace, task string, token *api.ACLToken, exp bool) { logger := testlog.HCLogger(t) diff --git a/nomad/consul_policy_test.go b/nomad/consul_policy_test.go index 150780d15..c0e648399 100644 --- a/nomad/consul_policy_test.go +++ b/nomad/consul_policy_test.go @@ -4,12 +4,13 @@ import ( "testing" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/stretchr/testify/require" ) func TestConsulPolicy_ParseConsulPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, text string, expPolicy *ConsulPolicy, expErr string) { policy, err := parseConsulPolicy(text) @@ -103,7 +104,7 @@ namespace "foo" { } func TestConsulACLsAPI_allowsServiceWrite(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, matches bool, namespace, task string, cp *ConsulPolicy, exp 
bool) { // If matches is false, the implication is that the consul acl token is in @@ -342,6 +343,8 @@ func TestConsulACLsAPI_allowsServiceWrite(t *testing.T) { } func TestConsulPolicy_isManagementToken(t *testing.T) { + ci.Parallel(t) + aclsAPI := new(consulACLsAPI) t.Run("nil", func(t *testing.T) { @@ -394,6 +397,8 @@ func TestConsulPolicy_isManagementToken(t *testing.T) { } func TestConsulPolicy_namespaceCheck(t *testing.T) { + ci.Parallel(t) + withoutNS := &api.ACLToken{Namespace: ""} withDefault := &api.ACLToken{Namespace: "default"} withOther := &api.ACLToken{Namespace: "other"} @@ -455,6 +460,8 @@ func TestConsulPolicy_namespaceCheck(t *testing.T) { } func TestConsulPolicy_allowKeystoreRead(t *testing.T) { + ci.Parallel(t) + t.Run("empty", func(t *testing.T) { require.False(t, new(ConsulPolicy).allowsKeystoreRead(true, "default")) }) diff --git a/nomad/consul_test.go b/nomad/consul_test.go index f1561d651..5c6b5a834 100644 --- a/nomad/consul_test.go +++ b/nomad/consul_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" @@ -21,7 +22,7 @@ var _ ConsulACLsAPI = (*mockConsulACLsAPI)(nil) var _ ConsulConfigsAPI = (*consulConfigsAPI)(nil) func TestConsulConfigsAPI_SetCE(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, expect error, f func(ConsulConfigsAPI) error) { logger := testlog.HCLogger(t) @@ -140,7 +141,7 @@ func (m *mockConsulACLsAPI) storeForRevocation(accessors []*structs.SITokenAcces } func TestConsulACLsAPI_CreateToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, expErr error) { logger := testlog.HCLogger(t) @@ -182,7 +183,7 @@ func TestConsulACLsAPI_CreateToken(t *testing.T) { } func TestConsulACLsAPI_RevokeTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) setup := func(t *testing.T, exp error) (context.Context, ConsulACLsAPI, 
*structs.SIToken) { logger := testlog.HCLogger(t) @@ -237,7 +238,7 @@ func TestConsulACLsAPI_RevokeTokens(t *testing.T) { } func TestConsulACLsAPI_MarkForRevocation(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) aclAPI := consul.NewMockACLsAPI(logger) @@ -266,7 +267,7 @@ func TestConsulACLsAPI_MarkForRevocation(t *testing.T) { } func TestConsulACLsAPI_bgRetryRevoke(t *testing.T) { - t.Parallel() + ci.Parallel(t) // manually create so the bg daemon does not run, letting us explicitly // call and test bgRetryRevoke @@ -327,7 +328,7 @@ func TestConsulACLsAPI_bgRetryRevoke(t *testing.T) { } func TestConsulACLsAPI_Stop(t *testing.T) { - t.Parallel() + ci.Parallel(t) setup := func(t *testing.T) *consulACLsAPI { logger := testlog.HCLogger(t) diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 095975a31..120c058d0 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -7,6 +7,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -17,7 +18,7 @@ import ( ) func TestCoreScheduler_EvalGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -111,7 +112,7 @@ func TestCoreScheduler_EvalGC(t *testing.T) { // Tests GC behavior on allocations being rescheduled func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -216,7 +217,7 @@ func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { // Tests GC behavior on stopped job with reschedulable allocs func TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -292,7 +293,7 @@ func 
TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) { // An EvalGC should never reap a batch job that has not been stopped func TestCoreScheduler_EvalGC_Batch(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -396,7 +397,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { // An EvalGC should reap allocations from jobs with an older modify index func TestCoreScheduler_EvalGC_Batch_OldVersion(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -519,7 +520,7 @@ func TestCoreScheduler_EvalGC_Batch_OldVersion(t *testing.T) { // An EvalGC should reap a batch job that has been stopped func TestCoreScheduler_EvalGC_BatchStopped(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -616,7 +617,7 @@ func TestCoreScheduler_EvalGC_BatchStopped(t *testing.T) { } func TestCoreScheduler_EvalGC_Partial(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -733,7 +734,7 @@ func TestCoreScheduler_EvalGC_Partial(t *testing.T) { } func TestCoreScheduler_EvalGC_Force(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, withAcl := range []bool{false, true} { t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) { require := require.New(t) @@ -817,7 +818,7 @@ func TestCoreScheduler_EvalGC_Force(t *testing.T) { } func TestCoreScheduler_NodeGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, withAcl := range []bool{false, true} { t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) { var server *Server @@ -874,7 +875,7 @@ func TestCoreScheduler_NodeGC(t *testing.T) { } func TestCoreScheduler_NodeGC_TerminalAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -930,7 +931,7 @@ func TestCoreScheduler_NodeGC_TerminalAllocs(t *testing.T) { } func TestCoreScheduler_NodeGC_RunningAllocs(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -988,7 +989,7 @@ func TestCoreScheduler_NodeGC_RunningAllocs(t *testing.T) { } func TestCoreScheduler_NodeGC_Force(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1032,7 +1033,7 @@ func TestCoreScheduler_NodeGC_Force(t *testing.T) { } func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1156,7 +1157,7 @@ func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) { } func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1302,7 +1303,7 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { // This test ensures that batch jobs are GC'd in one shot, meaning it all // allocs/evals and job or nothing func TestCoreScheduler_JobGC_OneShot(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1415,7 +1416,7 @@ func TestCoreScheduler_JobGC_OneShot(t *testing.T) { // This test ensures that stopped jobs are GCd func TestCoreScheduler_JobGC_Stopped(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1516,7 +1517,7 @@ func TestCoreScheduler_JobGC_Stopped(t *testing.T) { } func TestCoreScheduler_JobGC_Force(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, withAcl := range []bool{false, true} { t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) { var server *Server @@ -1588,7 +1589,7 @@ func TestCoreScheduler_JobGC_Force(t *testing.T) { // This test ensures parameterized jobs only get gc'd when stopped func TestCoreScheduler_JobGC_Parameterized(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1668,7 +1669,7 @@ func 
TestCoreScheduler_JobGC_Parameterized(t *testing.T) { // This test ensures periodic jobs don't get GCd until they are stopped func TestCoreScheduler_JobGC_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1742,7 +1743,7 @@ func TestCoreScheduler_JobGC_Periodic(t *testing.T) { } func TestCoreScheduler_DeploymentGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1793,7 +1794,7 @@ func TestCoreScheduler_DeploymentGC(t *testing.T) { } func TestCoreScheduler_DeploymentGC_Force(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, withAcl := range []bool{false, true} { t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) { var server *Server @@ -1839,7 +1840,7 @@ func TestCoreScheduler_DeploymentGC_Force(t *testing.T) { } func TestCoreScheduler_PartitionEvalReap(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1882,7 +1883,7 @@ func TestCoreScheduler_PartitionEvalReap(t *testing.T) { } func TestCoreScheduler_PartitionDeploymentReap(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1919,7 +1920,7 @@ func TestCoreScheduler_PartitionDeploymentReap(t *testing.T) { } func TestCoreScheduler_PartitionJobReap(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -2197,7 +2198,7 @@ func TestAllocation_GCEligible(t *testing.T) { } func TestCoreScheduler_CSIPluginGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, cleanupSRV := TestServer(t, nil) defer cleanupSRV() @@ -2251,7 +2252,7 @@ func TestCoreScheduler_CSIPluginGC(t *testing.T) { } func TestCoreScheduler_CSIVolumeClaimGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv, shutdown := TestServer(t, func(c *Config) { @@ -2402,7 +2403,7 @@ func TestCoreScheduler_CSIVolumeClaimGC(t *testing.T) { } 
func TestCoreScheduler_CSIBadState_ClaimGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv, shutdown := TestServer(t, func(c *Config) { @@ -2442,7 +2443,7 @@ func TestCoreScheduler_CSIBadState_ClaimGC(t *testing.T) { } func TestCoreScheduler_FailLoop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv, cleanupSrv := TestServer(t, func(c *Config) { diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index c8591e38e..6e91c4e78 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -7,6 +7,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" cconfig "github.com/hashicorp/nomad/client/config" cstructs "github.com/hashicorp/nomad/client/structs" @@ -19,7 +20,7 @@ import ( ) func TestCSIVolumeEndpoint_Get(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -65,7 +66,7 @@ func TestCSIVolumeEndpoint_Get(t *testing.T) { } func TestCSIVolumeEndpoint_Get_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -116,7 +117,7 @@ func TestCSIVolumeEndpoint_Get_ACL(t *testing.T) { } func TestCSIVolumeEndpoint_Register(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -216,7 +217,7 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { // are honored only if the volume exists, the mode is permitted, and the volume // is schedulable according to its count of claims. 
func TestCSIVolumeEndpoint_Claim(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -372,7 +373,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { // TestCSIVolumeEndpoint_ClaimWithController exercises the VolumeClaim RPC // when a controller is required. func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.ACLEnabled = true c.NumSchedulers = 0 // Prevent automatic dequeue @@ -458,7 +459,7 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { } func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) defer shutdown() testutil.WaitForLeader(t, srv.RPC) @@ -590,7 +591,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { } func TestCSIVolumeEndpoint_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -672,7 +673,7 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { } func TestCSIVolumeEndpoint_ListAllNamespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -754,7 +755,7 @@ func TestCSIVolumeEndpoint_ListAllNamespaces(t *testing.T) { } func TestCSIVolumeEndpoint_List_PaginationFiltering(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -948,7 +949,7 @@ func TestCSIVolumeEndpoint_List_PaginationFiltering(t *testing.T) { } func TestCSIVolumeEndpoint_Create(t *testing.T) { - t.Parallel() + ci.Parallel(t) var err error srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1093,7 +1094,7 @@ func TestCSIVolumeEndpoint_Create(t 
*testing.T) { } func TestCSIVolumeEndpoint_Delete(t *testing.T) { - t.Parallel() + ci.Parallel(t) var err error srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1207,7 +1208,7 @@ func TestCSIVolumeEndpoint_Delete(t *testing.T) { } func TestCSIVolumeEndpoint_ListExternal(t *testing.T) { - t.Parallel() + ci.Parallel(t) var err error srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1312,7 +1313,7 @@ func TestCSIVolumeEndpoint_ListExternal(t *testing.T) { } func TestCSIVolumeEndpoint_CreateSnapshot(t *testing.T) { - t.Parallel() + ci.Parallel(t) var err error srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1411,7 +1412,7 @@ func TestCSIVolumeEndpoint_CreateSnapshot(t *testing.T) { } func TestCSIVolumeEndpoint_DeleteSnapshot(t *testing.T) { - t.Parallel() + ci.Parallel(t) var err error srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1491,7 +1492,7 @@ func TestCSIVolumeEndpoint_DeleteSnapshot(t *testing.T) { } func TestCSIVolumeEndpoint_ListSnapshots(t *testing.T) { - t.Parallel() + ci.Parallel(t) var err error srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1587,7 +1588,7 @@ func TestCSIVolumeEndpoint_ListSnapshots(t *testing.T) { } func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -1659,7 +1660,7 @@ func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { } func TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, nil) defer shutdown() testutil.WaitForLeader(t, srv.RPC) @@ -1736,7 +1737,7 @@ func TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { } func 
TestCSIPluginEndpoint_DeleteViaGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -1817,6 +1818,8 @@ func TestCSIPluginEndpoint_DeleteViaGC(t *testing.T) { } func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { + ci.Parallel(t) + srv, shutdown := TestServer(t, func(c *Config) {}) defer shutdown() testutil.WaitForLeader(t, srv.RPC) diff --git a/nomad/deployment_endpoint_test.go b/nomad/deployment_endpoint_test.go index 08982aeda..e58c62da3 100644 --- a/nomad/deployment_endpoint_test.go +++ b/nomad/deployment_endpoint_test.go @@ -7,6 +7,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -17,7 +18,7 @@ import ( ) func TestDeploymentEndpoint_GetDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -49,7 +50,7 @@ func TestDeploymentEndpoint_GetDeployment(t *testing.T) { } func TestDeploymentEndpoint_GetDeployment_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -103,7 +104,7 @@ func TestDeploymentEndpoint_GetDeployment_ACL(t *testing.T) { } func TestDeploymentEndpoint_GetDeployment_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -153,7 +154,7 @@ func TestDeploymentEndpoint_GetDeployment_Blocking(t *testing.T) { } func TestDeploymentEndpoint_Fail(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -203,7 +204,7 @@ func TestDeploymentEndpoint_Fail(t *testing.T) { } func TestDeploymentEndpoint_Fail_ACL(t *testing.T) { - t.Parallel() + 
ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -279,7 +280,7 @@ func TestDeploymentEndpoint_Fail_ACL(t *testing.T) { } func TestDeploymentEndpoint_Fail_Rollback(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -357,7 +358,7 @@ func TestDeploymentEndpoint_Fail_Rollback(t *testing.T) { } func TestDeploymentEndpoint_Pause(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -400,7 +401,7 @@ func TestDeploymentEndpoint_Pause(t *testing.T) { } func TestDeploymentEndpoint_Pause_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -469,7 +470,7 @@ func TestDeploymentEndpoint_Pause_ACL(t *testing.T) { } func TestDeploymentEndpoint_Promote(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -534,7 +535,7 @@ func TestDeploymentEndpoint_Promote(t *testing.T) { } func TestDeploymentEndpoint_Promote_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -625,7 +626,7 @@ func TestDeploymentEndpoint_Promote_ACL(t *testing.T) { } func TestDeploymentEndpoint_SetAllocHealth(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -693,7 +694,7 @@ func TestDeploymentEndpoint_SetAllocHealth(t *testing.T) { } func TestDeploymentEndpoint_SetAllocHealth_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -787,7 +788,7 @@ func 
TestDeploymentEndpoint_SetAllocHealth_ACL(t *testing.T) { } func TestDeploymentEndpoint_SetAllocHealth_Rollback(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -877,7 +878,7 @@ func TestDeploymentEndpoint_SetAllocHealth_Rollback(t *testing.T) { // tests rollback upon alloc health failure to job with identical spec does not succeed func TestDeploymentEndpoint_SetAllocHealth_NoRollback(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -964,7 +965,7 @@ func TestDeploymentEndpoint_SetAllocHealth_NoRollback(t *testing.T) { } func TestDeploymentEndpoint_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1033,7 +1034,7 @@ func TestDeploymentEndpoint_List(t *testing.T) { } func TestDeploymentEndpoint_List_order(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1121,7 +1122,7 @@ func TestDeploymentEndpoint_List_order(t *testing.T) { } func TestDeploymentEndpoint_List_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1190,7 +1191,7 @@ func TestDeploymentEndpoint_List_ACL(t *testing.T) { } func TestDeploymentEndpoint_List_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1248,7 +1249,7 @@ func TestDeploymentEndpoint_List_Blocking(t *testing.T) { } func TestDeploymentEndpoint_List_Pagination(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -1464,7 +1465,7 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { } func TestDeploymentEndpoint_Allocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer 
cleanupS1() @@ -1502,7 +1503,7 @@ func TestDeploymentEndpoint_Allocations(t *testing.T) { } func TestDeploymentEndpoint_Allocations_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1577,7 +1578,7 @@ func TestDeploymentEndpoint_Allocations_ACL(t *testing.T) { } func TestDeploymentEndpoint_Allocations_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1645,7 +1646,7 @@ func TestDeploymentEndpoint_Allocations_Blocking(t *testing.T) { } func TestDeploymentEndpoint_Reap(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() diff --git a/nomad/deploymentwatcher/deployments_watcher_test.go b/nomad/deploymentwatcher/deployments_watcher_test.go index 50835b74c..467ffcca2 100644 --- a/nomad/deploymentwatcher/deployments_watcher_test.go +++ b/nomad/deploymentwatcher/deployments_watcher_test.go @@ -6,6 +6,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" @@ -29,7 +30,7 @@ func defaultTestDeploymentWatcher(t *testing.T) (*Watcher, *mockBackend) { // Tests that the watcher properly watches for deployments and reconciles them func TestWatcher_WatchDeployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -84,7 +85,7 @@ func TestWatcher_WatchDeployments(t *testing.T) { // Tests that calls against an unknown deployment fail func TestWatcher_UnknownDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -141,7 +142,7 @@ func TestWatcher_UnknownDeployment(t *testing.T) { // Test setting an unknown allocation's health func TestWatcher_SetAllocHealth_Unknown(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) assert := assert.New(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -186,7 +187,7 @@ func TestWatcher_SetAllocHealth_Unknown(t *testing.T) { // Test setting allocation health func TestWatcher_SetAllocHealth_Healthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -231,7 +232,7 @@ func TestWatcher_SetAllocHealth_Healthy(t *testing.T) { // Test setting allocation unhealthy func TestWatcher_SetAllocHealth_Unhealthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -283,7 +284,7 @@ func TestWatcher_SetAllocHealth_Unhealthy(t *testing.T) { // Test setting allocation unhealthy and that there should be a rollback func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -350,7 +351,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { // Test setting allocation unhealthy on job with identical spec and there should be no rollback func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -415,7 +416,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { // Test promoting a deployment func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -475,7 +476,7 @@ func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { // Test promoting a deployment with unhealthy canaries func TestWatcher_PromoteDeployment_UnhealthyCanaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -531,7 +532,7 @@ func TestWatcher_PromoteDeployment_UnhealthyCanaries(t *testing.T) 
{ } func TestWatcher_AutoPromoteDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) w, m := defaultTestDeploymentWatcher(t) now := time.Now() @@ -697,7 +698,7 @@ func TestWatcher_AutoPromoteDeployment(t *testing.T) { // Test pausing a deployment that is running func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -739,7 +740,7 @@ func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { // Test pausing a deployment that is paused func TestWatcher_PauseDeployment_Pause_Paused(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -782,7 +783,7 @@ func TestWatcher_PauseDeployment_Pause_Paused(t *testing.T) { // Test unpausing a deployment that is paused func TestWatcher_PauseDeployment_Unpause_Paused(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -823,7 +824,7 @@ func TestWatcher_PauseDeployment_Unpause_Paused(t *testing.T) { // Test unpausing a deployment that is running func TestWatcher_PauseDeployment_Unpause_Running(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -863,7 +864,7 @@ func TestWatcher_PauseDeployment_Unpause_Running(t *testing.T) { // Test failing a deployment that is running func TestWatcher_FailDeployment_Running(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -903,7 +904,7 @@ func TestWatcher_FailDeployment_Running(t *testing.T) { // Tests that the watcher properly watches for allocation changes and takes the // proper actions func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1023,7 +1024,7 @@ func 
TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { } func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1099,7 +1100,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { // Test that progress deadline handling works when there are multiple groups func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1205,7 +1206,7 @@ func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { // Test that we will allow the progress deadline to be reached when the canaries // are healthy but we haven't promoted func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1287,7 +1288,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { // Test that a promoted deployment with alloc healthy updates create // evals to move the deployment forward func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1376,7 +1377,7 @@ func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { } func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) mtype := structs.MsgTypeTestSetup @@ -1584,7 +1585,7 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { // Test scenario where deployment initially has no progress deadline // After the deployment is updated, a failed alloc's DesiredTransition should be set func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { - t.Parallel() + 
ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1646,7 +1647,7 @@ func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { // Tests that the watcher fails rollback when the spec hasn't changed func TestDeploymentWatcher_RollbackFailed(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1756,7 +1757,7 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) { // Test allocation updates and evaluation creation is batched between watchers func TestWatcher_BatchAllocUpdates(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Second) diff --git a/nomad/drainer/drain_heap_test.go b/nomad/drainer/drain_heap_test.go index 930801d92..22ed77d96 100644 --- a/nomad/drainer/drain_heap_test.go +++ b/nomad/drainer/drain_heap_test.go @@ -5,19 +5,20 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" ) func TestDeadlineHeap_Interface(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) h := NewDeadlineHeap(context.Background(), 1*time.Second) require.Implements((*DrainDeadlineNotifier)(nil), h) } func TestDeadlineHeap_WatchAndGet(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) h := NewDeadlineHeap(context.Background(), 1*time.Second) @@ -39,7 +40,7 @@ func TestDeadlineHeap_WatchAndGet(t *testing.T) { } func TestDeadlineHeap_WatchThenUpdateAndGet(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) h := NewDeadlineHeap(context.Background(), 1*time.Second) @@ -66,7 +67,7 @@ func TestDeadlineHeap_WatchThenUpdateAndGet(t *testing.T) { } func TestDeadlineHeap_MultiwatchAndDelete(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) h := NewDeadlineHeap(context.Background(), 
1*time.Second) @@ -94,7 +95,7 @@ func TestDeadlineHeap_MultiwatchAndDelete(t *testing.T) { } func TestDeadlineHeap_WatchCoalesce(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) h := NewDeadlineHeap(context.Background(), 100*time.Millisecond) @@ -150,7 +151,7 @@ func TestDeadlineHeap_WatchCoalesce(t *testing.T) { } func TestDeadlineHeap_MultipleForce(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) h := NewDeadlineHeap(context.Background(), 1*time.Second) diff --git a/nomad/drainer/drainer_util_test.go b/nomad/drainer/drainer_util_test.go index d18a22d1b..75fc531ff 100644 --- a/nomad/drainer/drainer_util_test.go +++ b/nomad/drainer/drainer_util_test.go @@ -3,12 +3,13 @@ package drainer import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) func TestDrainer_PartitionAllocDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Set the max ids per reap to something lower. maxIdsPerTxn := 2 @@ -32,7 +33,7 @@ func TestDrainer_PartitionAllocDrain(t *testing.T) { } func TestDrainer_PartitionIds(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Set the max ids per reap to something lower. 
diff --git a/nomad/drainer/draining_node_test.go b/nomad/drainer/draining_node_test.go index b508fd702..b93efcb77 100644 --- a/nomad/drainer/draining_node_test.go +++ b/nomad/drainer/draining_node_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -44,6 +45,8 @@ func assertDrainingNode(t *testing.T, dn *drainingNode, isDone bool, remaining, } func TestDrainingNode_Table(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string isDone bool @@ -206,7 +209,7 @@ func TestDrainingNode_Table(t *testing.T) { for _, tc := range cases { tc := tc t.Run(tc.name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) dn := testDrainingNode(t) tc.setup(t, dn) assertDrainingNode(t, dn, tc.isDone, tc.remaining, tc.running) diff --git a/nomad/drainer/watch_jobs_test.go b/nomad/drainer/watch_jobs_test.go index f15f4e0ed..192fb1124 100644 --- a/nomad/drainer/watch_jobs_test.go +++ b/nomad/drainer/watch_jobs_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" @@ -47,6 +48,8 @@ func testDrainingJobWatcher(t *testing.T, state *state.StateStore) (*drainingJob // TestDrainingJobWatcher_Interface is a compile-time assertion that we // implement the intended interface. func TestDrainingJobWatcher_Interface(t *testing.T) { + ci.Parallel(t) + w, cancel := testDrainingJobWatcher(t, state.TestStateStore(t)) cancel() var _ DrainingJobWatcher = w @@ -99,7 +102,7 @@ func assertJobWatcherOps(t *testing.T, jw DrainingJobWatcher, drained, migrated // TestDrainingJobWatcher_DrainJobs asserts DrainingJobWatcher batches // allocation changes from multiple jobs. 
func TestDrainingJobWatcher_DrainJobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := state.TestStateStore(t) @@ -326,6 +329,8 @@ type handleTaskGroupTestCase struct { } func TestHandeTaskGroup_Table(t *testing.T) { + ci.Parallel(t) + cases := []handleTaskGroupTestCase{ { // All allocs on draining node @@ -543,7 +548,8 @@ func TestHandeTaskGroup_Table(t *testing.T) { } func testHandleTaskGroup(t *testing.T, tc handleTaskGroupTestCase) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) assert := assert.New(t) @@ -599,7 +605,7 @@ func testHandleTaskGroup(t *testing.T, tc handleTaskGroupTestCase) { } func TestHandleTaskGroup_Migrations(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a draining node @@ -668,7 +674,7 @@ func TestHandleTaskGroup_Migrations(t *testing.T) { // This test asserts that handle task group works when an allocation is on a // garbage collected node func TestHandleTaskGroup_GarbageCollectedNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a draining node diff --git a/nomad/drainer/watch_nodes_test.go b/nomad/drainer/watch_nodes_test.go index 6484c1750..f1154c460 100644 --- a/nomad/drainer/watch_nodes_test.go +++ b/nomad/drainer/watch_nodes_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -25,14 +26,14 @@ func testNodeDrainWatcher(t *testing.T) (*nodeDrainWatcher, *state.StateStore, * } func TestNodeDrainWatcher_Interface(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, _, _ := testNodeDrainWatcher(t) require.Implements((*DrainingNodeWatcher)(nil), w) } func TestNodeDrainWatcher_AddDraining(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) _, state, m := testNodeDrainWatcher(t) @@ -62,7 +63,7 @@ func 
TestNodeDrainWatcher_AddDraining(t *testing.T) { } func TestNodeDrainWatcher_Remove(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) _, state, m := testNodeDrainWatcher(t) @@ -100,7 +101,7 @@ func TestNodeDrainWatcher_Remove(t *testing.T) { } func TestNodeDrainWatcher_Remove_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) _, state, m := testNodeDrainWatcher(t) @@ -138,7 +139,7 @@ func TestNodeDrainWatcher_Remove_Nonexistent(t *testing.T) { } func TestNodeDrainWatcher_Update(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) _, state, m := testNodeDrainWatcher(t) diff --git a/nomad/drainer_int_test.go b/nomad/drainer_int_test.go index cb8aca2ea..5c23ac891 100644 --- a/nomad/drainer_int_test.go +++ b/nomad/drainer_int_test.go @@ -10,6 +10,7 @@ import ( log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" @@ -120,7 +121,7 @@ func getNodeAllocsImpl(nodeID string) func(ws memdb.WatchSet, state *state.State } func TestDrainer_Simple_ServiceOnly(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -225,7 +226,7 @@ func TestDrainer_Simple_ServiceOnly(t *testing.T) { } func TestDrainer_Simple_ServiceOnly_Deadline(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -322,7 +323,7 @@ func TestDrainer_Simple_ServiceOnly_Deadline(t *testing.T) { } func TestDrainer_DrainEmptyNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -373,7 +374,7 @@ func TestDrainer_DrainEmptyNode(t *testing.T) { } func TestDrainer_AllTypes_Deadline(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, 
nil) @@ -540,7 +541,7 @@ func TestDrainer_AllTypes_Deadline(t *testing.T) { // Test that drain is unset when batch jobs naturally finish func TestDrainer_AllTypes_NoDeadline(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -707,7 +708,7 @@ func TestDrainer_AllTypes_NoDeadline(t *testing.T) { } func TestDrainer_AllTypes_Deadline_GarbageCollectedNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -883,7 +884,7 @@ func TestDrainer_AllTypes_Deadline_GarbageCollectedNode(t *testing.T) { // TestDrainer_MultipleNSes_ServiceOnly asserts that all jobs on an alloc, even // when they belong to different namespaces and share the same ID func TestDrainer_MultipleNSes_ServiceOnly(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -1011,7 +1012,7 @@ func TestDrainer_MultipleNSes_ServiceOnly(t *testing.T) { // Test that transitions to force drain work. func TestDrainer_Batch_TransitionToForce(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, inf := range []bool{true, false} { name := "Infinite" diff --git a/nomad/eval_broker_test.go b/nomad/eval_broker_test.go index af1f9a88f..3b0988eae 100644 --- a/nomad/eval_broker_test.go +++ b/nomad/eval_broker_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -52,7 +53,7 @@ func testBrokerFromConfig(t *testing.T, c *Config) *EvalBroker { } func TestEvalBroker_Enqueue_Dequeue_Nack_Ack(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) // Enqueue, but broker is disabled! 
@@ -228,7 +229,7 @@ func TestEvalBroker_Enqueue_Dequeue_Nack_Ack(t *testing.T) { } func TestEvalBroker_Nack_Delay(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) // Enqueue, but broker is disabled! @@ -386,7 +387,7 @@ func TestEvalBroker_Nack_Delay(t *testing.T) { } func TestEvalBroker_Serialize_DuplicateJobID(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -625,7 +626,7 @@ func TestEvalBroker_Serialize_DuplicateJobID(t *testing.T) { } func TestEvalBroker_Enqueue_Disable(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) // Enqueue @@ -650,7 +651,7 @@ func TestEvalBroker_Enqueue_Disable(t *testing.T) { } func TestEvalBroker_Enqueue_Disable_Delay(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) baseEval := mock.Eval() b.SetEnabled(true) @@ -708,7 +709,7 @@ func TestEvalBroker_Enqueue_Disable_Delay(t *testing.T) { } func TestEvalBroker_Dequeue_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -729,7 +730,7 @@ func TestEvalBroker_Dequeue_Timeout(t *testing.T) { } func TestEvalBroker_Dequeue_Empty_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -773,7 +774,7 @@ func TestEvalBroker_Dequeue_Empty_Timeout(t *testing.T) { // Ensure higher priority dequeued first func TestEvalBroker_Dequeue_Priority(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -807,7 +808,7 @@ func TestEvalBroker_Dequeue_Priority(t *testing.T) { // Ensure FIFO at fixed priority func TestEvalBroker_Dequeue_FIFO(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) NUM := 100 @@ -829,7 +830,7 @@ func TestEvalBroker_Dequeue_FIFO(t *testing.T) { // Ensure fairness between schedulers func TestEvalBroker_Dequeue_Fairness(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) NUM := 1000 @@ 
-871,7 +872,7 @@ func TestEvalBroker_Dequeue_Fairness(t *testing.T) { // Ensure we get unblocked func TestEvalBroker_Dequeue_Blocked(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -928,7 +929,7 @@ func TestEvalBroker_Dequeue_Blocked(t *testing.T) { // Ensure we nack in a timely manner func TestEvalBroker_Nack_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 5*time.Millisecond) b.SetEnabled(true) @@ -964,7 +965,7 @@ func TestEvalBroker_Nack_Timeout(t *testing.T) { // Ensure we nack in a timely manner func TestEvalBroker_Nack_TimeoutReset(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 50*time.Millisecond) b.SetEnabled(true) @@ -1005,7 +1006,7 @@ func TestEvalBroker_Nack_TimeoutReset(t *testing.T) { } func TestEvalBroker_PauseResumeNackTimeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 50*time.Millisecond) b.SetEnabled(true) @@ -1065,7 +1066,7 @@ func TestEvalBroker_PauseResumeNackTimeout(t *testing.T) { } func TestEvalBroker_DeliveryLimit(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1155,7 +1156,7 @@ func TestEvalBroker_DeliveryLimit(t *testing.T) { } func TestEvalBroker_AckAtDeliveryLimit(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1199,7 +1200,7 @@ func TestEvalBroker_AckAtDeliveryLimit(t *testing.T) { // TestEvalBroker_Wait asserts delayed evaluations cannot be dequeued until // their wait duration has elapsed. 
func TestEvalBroker_Wait(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1247,7 +1248,7 @@ func TestEvalBroker_Wait(t *testing.T) { // Ensure that delayed evaluations work as expected func TestEvalBroker_WaitUntil(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1293,7 +1294,7 @@ func TestEvalBroker_WaitUntil(t *testing.T) { // Ensure that priority is taken into account when enqueueing many evaluations. func TestEvalBroker_EnqueueAll_Dequeue_Fair(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1355,7 +1356,7 @@ func TestEvalBroker_EnqueueAll_Dequeue_Fair(t *testing.T) { } func TestEvalBroker_EnqueueAll_Requeue_Ack(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1412,7 +1413,7 @@ func TestEvalBroker_EnqueueAll_Requeue_Ack(t *testing.T) { } func TestEvalBroker_EnqueueAll_Requeue_Nack(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1465,7 +1466,7 @@ func TestEvalBroker_EnqueueAll_Requeue_Nack(t *testing.T) { } func TestEvalBroker_NamespacedJobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) diff --git a/nomad/eval_endpoint_test.go b/nomad/eval_endpoint_test.go index 6a2bf4575..500b3502c 100644 --- a/nomad/eval_endpoint_test.go +++ b/nomad/eval_endpoint_test.go @@ -10,6 +10,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -20,7 +21,7 @@ import ( ) func TestEvalEndpoint_GetEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -62,7 +63,7 @@ func TestEvalEndpoint_GetEval(t *testing.T) 
{ } func TestEvalEndpoint_GetEval_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -123,7 +124,7 @@ func TestEvalEndpoint_GetEval_ACL(t *testing.T) { } func TestEvalEndpoint_GetEval_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -202,7 +203,7 @@ func TestEvalEndpoint_GetEval_Blocking(t *testing.T) { } func TestEvalEndpoint_Dequeue(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -247,7 +248,7 @@ func TestEvalEndpoint_Dequeue(t *testing.T) { // TestEvalEndpoint_Dequeue_WaitIndex_Snapshot asserts that an eval's wait // index will be equal to the highest eval modify index in the state store. func TestEvalEndpoint_Dequeue_WaitIndex_Snapshot(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -298,7 +299,7 @@ func TestEvalEndpoint_Dequeue_WaitIndex_Snapshot(t *testing.T) { // indexes in the state store. This can happen if Dequeue receives an eval that // has not yet been applied from the Raft log to the local node's state store. 
func TestEvalEndpoint_Dequeue_WaitIndex_Eval(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -337,7 +338,7 @@ func TestEvalEndpoint_Dequeue_WaitIndex_Eval(t *testing.T) { func TestEvalEndpoint_Dequeue_UpdateWaitIndex(t *testing.T) { // test enqueuing an eval, updating a plan result for the same eval and de-queueing the eval - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -403,7 +404,7 @@ func TestEvalEndpoint_Dequeue_UpdateWaitIndex(t *testing.T) { } func TestEvalEndpoint_Dequeue_Version_Mismatch(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -430,7 +431,7 @@ func TestEvalEndpoint_Dequeue_Version_Mismatch(t *testing.T) { } func TestEvalEndpoint_Ack(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -471,7 +472,7 @@ func TestEvalEndpoint_Ack(t *testing.T) { } func TestEvalEndpoint_Nack(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { // Disable all of the schedulers so we can manually dequeue @@ -525,7 +526,7 @@ func TestEvalEndpoint_Nack(t *testing.T) { } func TestEvalEndpoint_Update(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -574,7 +575,7 @@ func TestEvalEndpoint_Update(t *testing.T) { } func TestEvalEndpoint_Create(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -627,7 +628,7 @@ func TestEvalEndpoint_Create(t *testing.T) { } func TestEvalEndpoint_Reap(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -663,7 +664,7 @@ func TestEvalEndpoint_Reap(t *testing.T) { } 
func TestEvalEndpoint_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -718,7 +719,7 @@ func TestEvalEndpoint_List(t *testing.T) { } func TestEvalEndpoint_List_order(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -806,7 +807,7 @@ func TestEvalEndpoint_List_order(t *testing.T) { } func TestEvalEndpoint_ListAllNamespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -841,7 +842,7 @@ func TestEvalEndpoint_ListAllNamespaces(t *testing.T) { } func TestEvalEndpoint_List_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -907,7 +908,7 @@ func TestEvalEndpoint_List_ACL(t *testing.T) { } func TestEvalEndpoint_List_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -974,7 +975,7 @@ func TestEvalEndpoint_List_Blocking(t *testing.T) { } func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -1299,7 +1300,7 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { } func TestEvalEndpoint_Allocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1337,7 +1338,7 @@ func TestEvalEndpoint_Allocations(t *testing.T) { } func TestEvalEndpoint_Allocations_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1402,7 +1403,7 @@ func TestEvalEndpoint_Allocations_ACL(t *testing.T) { } func TestEvalEndpoint_Allocations_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1458,7 +1459,7 @@ func TestEvalEndpoint_Allocations_Blocking(t *testing.T) { } func 
TestEvalEndpoint_Reblock_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1495,7 +1496,7 @@ func TestEvalEndpoint_Reblock_Nonexistent(t *testing.T) { } func TestEvalEndpoint_Reblock_NonBlocked(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1538,7 +1539,7 @@ func TestEvalEndpoint_Reblock_NonBlocked(t *testing.T) { } func TestEvalEndpoint_Reblock(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue diff --git a/nomad/event_endpoint_test.go b/nomad/event_endpoint_test.go index a314444c9..31dddfa7e 100644 --- a/nomad/event_endpoint_test.go +++ b/nomad/event_endpoint_test.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/stream" "github.com/hashicorp/nomad/nomad/structs" @@ -23,7 +24,7 @@ import ( ) func TestEventStream(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.EnableEventBroker = true @@ -127,7 +128,7 @@ OUTER: // TestEventStream_StreamErr asserts an error is returned when an event publisher // closes its subscriptions func TestEventStream_StreamErr(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.EnableEventBroker = true @@ -209,7 +210,7 @@ OUTER: // TestEventStream_RegionForward tests event streaming from one server // to another in a different region func TestEventStream_RegionForward(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.EnableEventBroker = true @@ -307,7 +308,7 @@ OUTER: } func 
TestEventStream_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // start server @@ -507,7 +508,7 @@ func TestEventStream_ACL(t *testing.T) { // TestEventStream_ACL_Update_Close_Stream asserts that an active subscription // is closed after the token is no longer valid func TestEventStream_ACL_Update_Close_Stream(t *testing.T) { - t.Parallel() + ci.Parallel(t) // start server s1, root, cleanupS := TestACLServer(t, nil) diff --git a/nomad/fsm_test.go b/nomad/fsm_test.go index 42b3a7e25..292a43f26 100644 --- a/nomad/fsm_test.go +++ b/nomad/fsm_test.go @@ -11,11 +11,7 @@ import ( "github.com/google/go-cmp/cmp" memdb "github.com/hashicorp/go-memdb" - "github.com/hashicorp/raft" - "github.com/kr/pretty" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" @@ -24,6 +20,10 @@ import ( "github.com/hashicorp/nomad/nomad/stream" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" + "github.com/hashicorp/raft" + "github.com/kr/pretty" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type MockSink struct { @@ -81,7 +81,7 @@ func makeLog(buf []byte) *raft.Log { } func TestFSM_UpsertNodeEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) state := fsm.State() @@ -125,7 +125,7 @@ func TestFSM_UpsertNodeEvents(t *testing.T) { } func TestFSM_UpsertNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.blockedEvals.SetEnabled(true) @@ -182,7 +182,7 @@ func TestFSM_UpsertNode(t *testing.T) { } func TestFSM_UpsertNode_Canonicalize(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) @@ -209,7 +209,7 @@ func TestFSM_UpsertNode_Canonicalize(t *testing.T) { } func TestFSM_UpsertNode_Canonicalize_Ineligible(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) @@ -236,7 +236,7 @@ func TestFSM_UpsertNode_Canonicalize_Ineligible(t *testing.T) { } func TestFSM_DeregisterNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) node := mock.Node() @@ -278,7 +278,7 @@ func TestFSM_DeregisterNode(t *testing.T) { } func TestFSM_UpdateNodeStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) fsm.blockedEvals.SetEnabled(true) @@ -335,7 +335,7 @@ func TestFSM_UpdateNodeStatus(t *testing.T) { } func TestFSM_BatchUpdateNodeDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) @@ -384,7 +384,7 @@ func TestFSM_BatchUpdateNodeDrain(t *testing.T) { } func TestFSM_UpdateNodeDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) @@ -427,7 +427,7 @@ func TestFSM_UpdateNodeDrain(t *testing.T) { } func TestFSM_UpdateNodeEligibility(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) @@ -497,7 +497,7 @@ func TestFSM_UpdateNodeEligibility(t *testing.T) { } func TestFSM_UpdateNodeEligibility_Unblock(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) @@ -551,7 +551,7 @@ func TestFSM_UpdateNodeEligibility_Unblock(t *testing.T) { } func TestFSM_RegisterJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) job := mock.PeriodicJob() @@ -607,7 +607,7 @@ func TestFSM_RegisterJob(t *testing.T) { } func TestFSM_RegisterPeriodicJob_NonLeader(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) // Disable the dispatcher @@ -666,7 +666,7 @@ func TestFSM_RegisterPeriodicJob_NonLeader(t *testing.T) { } func TestFSM_RegisterJob_BadNamespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) job := mock.Job() @@ -706,7 +706,7 @@ func TestFSM_RegisterJob_BadNamespace(t *testing.T) { } func 
TestFSM_DeregisterJob_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) job := mock.Job() @@ -729,7 +729,7 @@ func TestFSM_DeregisterJob_Error(t *testing.T) { } func TestFSM_DeregisterJob_Purge(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) job := mock.PeriodicJob() @@ -796,7 +796,7 @@ func TestFSM_DeregisterJob_Purge(t *testing.T) { } func TestFSM_DeregisterJob_NoPurge(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) job := mock.PeriodicJob() @@ -866,7 +866,7 @@ func TestFSM_DeregisterJob_NoPurge(t *testing.T) { } func TestFSM_BatchDeregisterJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) @@ -944,7 +944,7 @@ func TestFSM_BatchDeregisterJob(t *testing.T) { } func TestFSM_UpdateEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) @@ -982,7 +982,7 @@ func TestFSM_UpdateEval(t *testing.T) { } func TestFSM_UpdateEval_Blocked(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) fsm.blockedEvals.SetEnabled(true) @@ -1031,7 +1031,7 @@ func TestFSM_UpdateEval_Blocked(t *testing.T) { } func TestFSM_UpdateEval_Untrack(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) fsm.blockedEvals.SetEnabled(true) @@ -1086,7 +1086,7 @@ func TestFSM_UpdateEval_Untrack(t *testing.T) { } func TestFSM_UpdateEval_NoUntrack(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) fsm.blockedEvals.SetEnabled(true) @@ -1143,7 +1143,7 @@ func TestFSM_UpdateEval_NoUntrack(t *testing.T) { } func TestFSM_DeleteEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) eval := mock.Eval() @@ -1185,7 +1185,7 @@ func TestFSM_DeleteEval(t *testing.T) { } func TestFSM_UpsertAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) alloc := mock.Alloc() @@ -1244,7 +1244,7 @@ func 
TestFSM_UpsertAllocs(t *testing.T) { } func TestFSM_UpsertAllocs_SharedJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) alloc := mock.Alloc() @@ -1317,7 +1317,7 @@ func TestFSM_UpsertAllocs_SharedJob(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestFSM_UpsertAllocs_StrippedResources(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) alloc := mock.Alloc() @@ -1398,7 +1398,7 @@ func TestFSM_UpsertAllocs_StrippedResources(t *testing.T) { // TestFSM_UpsertAllocs_Canonicalize asserts that allocations are Canonicalized // to handle logs emited by servers running old versions func TestFSM_UpsertAllocs_Canonicalize(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) alloc := mock.Alloc() @@ -1435,7 +1435,7 @@ func TestFSM_UpsertAllocs_Canonicalize(t *testing.T) { } func TestFSM_UpdateAllocFromClient_Unblock(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.blockedEvals.SetEnabled(true) state := fsm.State() @@ -1520,7 +1520,7 @@ func TestFSM_UpdateAllocFromClient_Unblock(t *testing.T) { } func TestFSM_UpdateAllocFromClient(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) state := fsm.State() require := require.New(t) @@ -1568,7 +1568,7 @@ func TestFSM_UpdateAllocFromClient(t *testing.T) { } func TestFSM_UpdateAllocDesiredTransition(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) state := fsm.State() require := require.New(t) @@ -1625,7 +1625,7 @@ func TestFSM_UpdateAllocDesiredTransition(t *testing.T) { } func TestFSM_UpsertVaultAccessor(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.blockedEvals.SetEnabled(true) @@ -1675,7 +1675,7 @@ func TestFSM_UpsertVaultAccessor(t *testing.T) { } func TestFSM_DeregisterVaultAccessor(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.blockedEvals.SetEnabled(true) @@ -1718,7 +1718,7 @@ func TestFSM_DeregisterVaultAccessor(t *testing.T) { } func TestFSM_UpsertSITokenAccessor(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) fsm := testFSM(t) @@ -1753,7 +1753,7 @@ func TestFSM_UpsertSITokenAccessor(t *testing.T) { } func TestFSM_DeregisterSITokenAccessor(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) fsm := testFSM(t) @@ -1791,7 +1791,7 @@ func TestFSM_DeregisterSITokenAccessor(t *testing.T) { } func TestFSM_ApplyPlanResults(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) // Create the request and create a deployment @@ -1934,7 +1934,7 @@ func TestFSM_ApplyPlanResults(t *testing.T) { } func TestFSM_DeploymentStatusUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) state := fsm.State() @@ -2003,7 +2003,7 @@ func TestFSM_DeploymentStatusUpdate(t *testing.T) { } func TestFSM_JobStabilityUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) state := fsm.State() @@ -2044,7 +2044,7 @@ func TestFSM_JobStabilityUpdate(t *testing.T) { } func TestFSM_DeploymentPromotion(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) state := fsm.State() @@ -2149,7 +2149,7 @@ func TestFSM_DeploymentPromotion(t *testing.T) { } func TestFSM_DeploymentAllocHealth(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) state := fsm.State() @@ -2256,7 +2256,7 @@ func TestFSM_DeploymentAllocHealth(t *testing.T) { } func TestFSM_DeleteDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) state := fsm.State() @@ -2291,7 +2291,7 @@ func TestFSM_DeleteDeployment(t *testing.T) { } func TestFSM_UpsertACLPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) policy := mock.ACLPolicy() @@ -2316,7 +2316,7 @@ func TestFSM_UpsertACLPolicies(t *testing.T) { } func TestFSM_DeleteACLPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) 
policy := mock.ACLPolicy() @@ -2344,7 +2344,7 @@ func TestFSM_DeleteACLPolicies(t *testing.T) { } func TestFSM_BootstrapACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) token := mock.ACLToken() @@ -2389,7 +2389,7 @@ func TestFSM_BootstrapACLTokens(t *testing.T) { } func TestFSM_UpsertACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) token := mock.ACLToken() @@ -2414,7 +2414,7 @@ func TestFSM_UpsertACLTokens(t *testing.T) { } func TestFSM_DeleteACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) token := mock.ACLToken() @@ -2481,7 +2481,7 @@ func testSnapshotRestore(t *testing.T, fsm *nomadFSM) *nomadFSM { } func TestFSM_SnapshotRestore_Nodes(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2498,7 +2498,7 @@ func TestFSM_SnapshotRestore_Nodes(t *testing.T) { } func TestFSM_SnapshotRestore_Jobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2522,7 +2522,7 @@ func TestFSM_SnapshotRestore_Jobs(t *testing.T) { } func TestFSM_SnapshotRestore_Evals(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2546,7 +2546,7 @@ func TestFSM_SnapshotRestore_Evals(t *testing.T) { } func TestFSM_SnapshotRestore_Allocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2572,7 +2572,7 @@ func TestFSM_SnapshotRestore_Allocs(t *testing.T) { } func TestFSM_SnapshotRestore_Allocs_Canonicalize(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2599,7 +2599,7 @@ func TestFSM_SnapshotRestore_Allocs_Canonicalize(t *testing.T) { } func TestFSM_SnapshotRestore_Indexes(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2620,7 +2620,7 @@ func TestFSM_SnapshotRestore_Indexes(t 
*testing.T) { } func TestFSM_SnapshotRestore_TimeTable(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) @@ -2642,7 +2642,7 @@ func TestFSM_SnapshotRestore_TimeTable(t *testing.T) { } func TestFSM_SnapshotRestore_PeriodicLaunches(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2677,7 +2677,7 @@ func TestFSM_SnapshotRestore_PeriodicLaunches(t *testing.T) { } func TestFSM_SnapshotRestore_JobSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2705,7 +2705,7 @@ func TestFSM_SnapshotRestore_JobSummary(t *testing.T) { } func TestFSM_SnapshotRestore_VaultAccessors(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2728,7 +2728,7 @@ func TestFSM_SnapshotRestore_VaultAccessors(t *testing.T) { } func TestFSM_SnapshotRestore_JobVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2756,7 +2756,7 @@ func TestFSM_SnapshotRestore_JobVersions(t *testing.T) { } func TestFSM_SnapshotRestore_Deployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2786,7 +2786,7 @@ func TestFSM_SnapshotRestore_Deployments(t *testing.T) { } func TestFSM_SnapshotRestore_ACLPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2805,7 +2805,7 @@ func TestFSM_SnapshotRestore_ACLPolicy(t *testing.T) { } func TestFSM_SnapshotRestore_ACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2824,7 +2824,7 @@ func TestFSM_SnapshotRestore_ACLTokens(t *testing.T) { } func TestFSM_SnapshotRestore_SchedulerConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2847,7 +2847,7 @@ func 
TestFSM_SnapshotRestore_SchedulerConfiguration(t *testing.T) { } func TestFSM_SnapshotRestore_ClusterMetadata(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) state := fsm.State() @@ -2866,7 +2866,7 @@ func TestFSM_SnapshotRestore_ClusterMetadata(t *testing.T) { } func TestFSM_ReconcileSummaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2946,7 +2946,7 @@ func TestFSM_ReconcileSummaries(t *testing.T) { // COMPAT: Remove in 0.11 func TestFSM_ReconcileParentJobSummary(t *testing.T) { // This test exercises code to handle https://github.com/hashicorp/nomad/issues/3886 - t.Parallel() + ci.Parallel(t) require := require.New(t) // Add some state @@ -3016,7 +3016,7 @@ func TestFSM_ReconcileParentJobSummary(t *testing.T) { } func TestFSM_LeakedDeployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Add some state @@ -3034,7 +3034,7 @@ func TestFSM_LeakedDeployments(t *testing.T) { } func TestFSM_Autopilot(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) // Set the autopilot config using a request. 
@@ -3097,7 +3097,7 @@ func TestFSM_Autopilot(t *testing.T) { } func TestFSM_SchedulerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) require := require.New(t) @@ -3146,7 +3146,7 @@ func TestFSM_SchedulerConfig(t *testing.T) { } func TestFSM_ClusterMetadata(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) fsm := testFSM(t) @@ -3186,7 +3186,7 @@ func TestFSM_ClusterMetadata(t *testing.T) { func TestFSM_UpsertNamespaces(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) ns1 := mock.Namespace() @@ -3211,7 +3211,7 @@ func TestFSM_UpsertNamespaces(t *testing.T) { func TestFSM_DeleteNamespaces(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) ns1 := mock.Namespace() @@ -3237,7 +3237,7 @@ func TestFSM_DeleteNamespaces(t *testing.T) { } func TestFSM_SnapshotRestore_Namespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -3260,7 +3260,7 @@ func TestFSM_SnapshotRestore_Namespaces(t *testing.T) { } func TestFSM_ACLEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { desc string @@ -3408,7 +3408,7 @@ func TestFSM_ACLEvents(t *testing.T) { // TestFSM_EventBroker_JobRegisterFSMEvents asserts that only a single job // register event is emitted when registering a job func TestFSM_EventBroker_JobRegisterFSMEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) job := mock.Job() diff --git a/nomad/heartbeat_test.go b/nomad/heartbeat_test.go index a7f9223f9..ec7aaa165 100644 --- a/nomad/heartbeat_test.go +++ b/nomad/heartbeat_test.go @@ -7,6 +7,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -14,7 +15,7 @@ import ( ) func 
TestHeartbeat_InitializeHeartbeatTimers(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -41,7 +42,7 @@ func TestHeartbeat_InitializeHeartbeatTimers(t *testing.T) { } func TestHeartbeat_ResetHeartbeatTimer(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -64,7 +65,7 @@ func TestHeartbeat_ResetHeartbeatTimer(t *testing.T) { } func TestHeartbeat_ResetHeartbeatTimer_Nonleader(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -81,7 +82,7 @@ func TestHeartbeat_ResetHeartbeatTimer_Nonleader(t *testing.T) { } func TestHeartbeat_ResetHeartbeatTimerLocked(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -103,7 +104,7 @@ func TestHeartbeat_ResetHeartbeatTimerLocked(t *testing.T) { } func TestHeartbeat_ResetHeartbeatTimerLocked_Renew(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -143,7 +144,7 @@ func TestHeartbeat_ResetHeartbeatTimerLocked_Renew(t *testing.T) { } func TestHeartbeat_InvalidateHeartbeat(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -168,7 +169,7 @@ func TestHeartbeat_InvalidateHeartbeat(t *testing.T) { } func TestHeartbeat_ClearHeartbeatTimer(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -189,7 +190,7 @@ func TestHeartbeat_ClearHeartbeatTimer(t *testing.T) { } func TestHeartbeat_ClearAllHeartbeatTimers(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -212,7 +213,7 @@ func TestHeartbeat_ClearAllHeartbeatTimers(t *testing.T) { } func TestHeartbeat_Server_HeartbeatTTL_Failover(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { 
c.BootstrapExpect = 3 diff --git a/nomad/job_endpoint_hook_connect_test.go b/nomad/job_endpoint_hook_connect_test.go index 75253ce13..ed6b28e95 100644 --- a/nomad/job_endpoint_hook_connect_test.go +++ b/nomad/job_endpoint_hook_connect_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" @@ -13,7 +14,7 @@ import ( ) func TestJobEndpointConnect_isSidecarForService(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { task *structs.Task @@ -54,7 +55,7 @@ func TestJobEndpointConnect_isSidecarForService(t *testing.T) { } func TestJobEndpointConnect_groupConnectHook(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Test that connect-proxy task is inserted for backend service job := mock.Job() @@ -113,7 +114,7 @@ func TestJobEndpointConnect_groupConnectHook(t *testing.T) { } func TestJobEndpointConnect_groupConnectHook_IngressGateway_BridgeNetwork(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Test that the connect ingress gateway task is inserted if a gateway service // exists and since this is a bridge network, will rewrite the default gateway proxy @@ -146,7 +147,7 @@ func TestJobEndpointConnect_groupConnectHook_IngressGateway_BridgeNetwork(t *tes } func TestJobEndpointConnect_groupConnectHook_IngressGateway_HostNetwork(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Test that the connect ingress gateway task is inserted if a gateway service // exists. In host network mode, the default values are used. 
@@ -178,7 +179,7 @@ func TestJobEndpointConnect_groupConnectHook_IngressGateway_HostNetwork(t *testi } func TestJobEndpointConnect_groupConnectHook_IngressGateway_CustomTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Test that the connect gateway task is inserted if a gateway service exists // and since this is a bridge network, will rewrite the default gateway proxy @@ -247,7 +248,7 @@ func TestJobEndpointConnect_groupConnectHook_IngressGateway_CustomTask(t *testin } func TestJobEndpointConnect_groupConnectHook_TerminatingGateway(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Tests that the connect terminating gateway task is inserted if a gateway // service exists and since this is a bridge network, will rewrite the default @@ -280,7 +281,7 @@ func TestJobEndpointConnect_groupConnectHook_TerminatingGateway(t *testing.T) { } func TestJobEndpointConnect_groupConnectHook_MeshGateway(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Test that the connect mesh gateway task is inserted if a gateway service // exists and since this is a bridge network, will rewrite the default gateway @@ -326,7 +327,7 @@ func TestJobEndpointConnect_groupConnectHook_MeshGateway(t *testing.T) { // // See https://github.com/hashicorp/nomad/issues/6853 func TestJobEndpointConnect_ConnectInterpolation(t *testing.T) { - t.Parallel() + ci.Parallel(t) server := &Server{logger: testlog.HCLogger(t)} jobEndpoint := NewJobEndpoints(server) @@ -342,7 +343,7 @@ func TestJobEndpointConnect_ConnectInterpolation(t *testing.T) { } func TestJobEndpointConnect_groupConnectSidecarValidate(t *testing.T) { - t.Parallel() + ci.Parallel(t) // network validation @@ -457,6 +458,8 @@ func TestJobEndpointConnect_groupConnectSidecarValidate(t *testing.T) { } func TestJobEndpointConnect_groupConnectUpstreamsValidate(t *testing.T) { + ci.Parallel(t) + t.Run("no connect services", func(t *testing.T) { err := groupConnectUpstreamsValidate("group", []*structs.Service{{Name: "s1"}, {Name: "s2"}}) @@ -543,7 
+546,7 @@ func TestJobEndpointConnect_groupConnectUpstreamsValidate(t *testing.T) { } func TestJobEndpointConnect_getNamedTaskForNativeService(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("named exists", func(t *testing.T) { task, err := getNamedTaskForNativeService(&structs.TaskGroup{ @@ -583,7 +586,7 @@ func TestJobEndpointConnect_getNamedTaskForNativeService(t *testing.T) { } func TestJobEndpointConnect_groupConnectGatewayValidate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("no group network", func(t *testing.T) { err := groupConnectGatewayValidate(&structs.TaskGroup{ @@ -605,7 +608,7 @@ func TestJobEndpointConnect_groupConnectGatewayValidate(t *testing.T) { } func TestJobEndpointConnect_newConnectGatewayTask_host(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("ingress", func(t *testing.T) { task := newConnectGatewayTask(structs.ConnectIngressPrefix, "foo", true) @@ -627,14 +630,14 @@ func TestJobEndpointConnect_newConnectGatewayTask_host(t *testing.T) { } func TestJobEndpointConnect_newConnectGatewayTask_bridge(t *testing.T) { - t.Parallel() + ci.Parallel(t) task := newConnectGatewayTask(structs.ConnectIngressPrefix, "service1", false) require.NotContains(t, task.Config, "network_mode") } func TestJobEndpointConnect_hasGatewayTaskForService(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("no gateway task", func(t *testing.T) { result := hasGatewayTaskForService(&structs.TaskGroup{ @@ -682,7 +685,7 @@ func TestJobEndpointConnect_hasGatewayTaskForService(t *testing.T) { } func TestJobEndpointConnect_gatewayProxyIsDefault(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { result := gatewayProxyIsDefault(nil) @@ -725,7 +728,7 @@ func TestJobEndpointConnect_gatewayProxyIsDefault(t *testing.T) { } func TestJobEndpointConnect_gatewayBindAddressesForBridge(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { @@ -792,7 +795,7 @@ func 
TestJobEndpointConnect_gatewayBindAddressesForBridge(t *testing.T) { } func TestJobEndpointConnect_gatewayProxy(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { result := gatewayProxy(nil, "bridge") @@ -986,5 +989,4 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { EnvoyGatewayBindAddresses: nil, }, result) }) - } diff --git a/nomad/job_endpoint_hook_expose_check_test.go b/nomad/job_endpoint_hook_expose_check_test.go index f3a2cbe3a..8e71883ba 100644 --- a/nomad/job_endpoint_hook_expose_check_test.go +++ b/nomad/job_endpoint_hook_expose_check_test.go @@ -3,18 +3,19 @@ package nomad import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) func TestJobExposeCheckHook_Name(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Equal(t, "expose-check", new(jobExposeCheckHook).Name()) } func TestJobExposeCheckHook_tgUsesExposeCheck(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("no check.expose", func(t *testing.T) { require.False(t, tgUsesExposeCheck(&structs.TaskGroup{ @@ -40,7 +41,7 @@ func TestJobExposeCheckHook_tgUsesExposeCheck(t *testing.T) { } func TestJobExposeCheckHook_tgValidateUseOfBridgeMode(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1 := &structs.Service{ Name: "s1", @@ -88,7 +89,7 @@ func TestJobExposeCheckHook_tgValidateUseOfBridgeMode(t *testing.T) { } func TestJobExposeCheckHook_tgValidateUseOfCheckExpose(t *testing.T) { - t.Parallel() + ci.Parallel(t) withCustomProxyTask := &structs.Service{ Name: "s1", @@ -138,6 +139,8 @@ func TestJobExposeCheckHook_tgValidateUseOfCheckExpose(t *testing.T) { } func TestJobExposeCheckHook_Validate(t *testing.T) { + ci.Parallel(t) + s1 := &structs.Service{ Name: "s1", Checks: []*structs.ServiceCheck{{ @@ -224,7 +227,7 @@ func TestJobExposeCheckHook_Validate(t *testing.T) { } func TestJobExposeCheckHook_exposePathForCheck(t *testing.T) { - t.Parallel() + ci.Parallel(t) const checkIdx = 
0 @@ -314,7 +317,7 @@ func TestJobExposeCheckHook_exposePathForCheck(t *testing.T) { Name: "group1", Services: []*structs.Service{s}, Networks: structs.Networks{{ - Mode: "bridge", + Mode: "bridge", DynamicPorts: []structs.Port{ // service declares "sPort", but does not exist }, @@ -400,7 +403,7 @@ func TestJobExposeCheckHook_exposePathForCheck(t *testing.T) { } func TestJobExposeCheckHook_containsExposePath(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("contains path", func(t *testing.T) { require.True(t, containsExposePath([]structs.ConsulExposePath{{ @@ -442,7 +445,7 @@ func TestJobExposeCheckHook_containsExposePath(t *testing.T) { } func TestJobExposeCheckHook_serviceExposeConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("proxy is nil", func(t *testing.T) { require.NotNil(t, serviceExposeConfig(&structs.Service{ @@ -521,7 +524,7 @@ func TestJobExposeCheckHook_serviceExposeConfig(t *testing.T) { } func TestJobExposeCheckHook_checkIsExposable(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("grpc", func(t *testing.T) { require.True(t, checkIsExposable(&structs.ServiceCheck{ @@ -561,7 +564,7 @@ func TestJobExposeCheckHook_checkIsExposable(t *testing.T) { } func TestJobExposeCheckHook_Mutate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("typical", func(t *testing.T) { result, warnings, err := new(jobExposeCheckHook).Mutate(&structs.Job{ diff --git a/nomad/job_endpoint_oss_test.go b/nomad/job_endpoint_oss_test.go index e90e7cb82..304422d81 100644 --- a/nomad/job_endpoint_oss_test.go +++ b/nomad/job_endpoint_oss_test.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" @@ -21,7 +22,7 @@ import ( // submission fails allow_unauthenticated is false, and either an invalid or no // operator Consul token is provided. 
func TestJobEndpoint_Register_Connect_AllowUnauthenticatedFalse_oss(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index 8cb936399..3cf238b35 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -10,21 +10,21 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" - "github.com/hashicorp/raft" - "github.com/kr/pretty" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" + "github.com/hashicorp/raft" + "github.com/kr/pretty" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestJobEndpoint_Register(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -110,7 +110,7 @@ func TestJobEndpoint_Register(t *testing.T) { } func TestJobEndpoint_Register_PreserveCounts(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -170,7 +170,7 @@ func TestJobEndpoint_Register_PreserveCounts(t *testing.T) { } func TestJobEndpoint_Register_EvalPriority(t *testing.T) { - t.Parallel() + ci.Parallel(t) requireAssert := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -202,7 +202,7 @@ func TestJobEndpoint_Register_EvalPriority(t *testing.T) { } func TestJobEndpoint_Register_Connect(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -271,7 +271,7 @@ func 
TestJobEndpoint_Register_Connect(t *testing.T) { } func TestJobEndpoint_Register_ConnectIngressGateway_minimum(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -346,7 +346,7 @@ func TestJobEndpoint_Register_ConnectIngressGateway_minimum(t *testing.T) { } func TestJobEndpoint_Register_ConnectIngressGateway_full(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -473,7 +473,7 @@ func TestJobEndpoint_Register_ConnectIngressGateway_full(t *testing.T) { } func TestJobEndpoint_Register_ConnectExposeCheck(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -587,7 +587,7 @@ func TestJobEndpoint_Register_ConnectExposeCheck(t *testing.T) { } func TestJobEndpoint_Register_ConnectWithSidecarTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -684,7 +684,7 @@ func TestJobEndpoint_Register_ConnectWithSidecarTask(t *testing.T) { } func TestJobEndpoint_Register_Connect_ValidatesWithoutSidecarTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -735,7 +735,7 @@ func TestJobEndpoint_Register_Connect_ValidatesWithoutSidecarTask(t *testing.T) } func TestJobEndpoint_Register_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -899,7 +899,7 @@ func TestJobEndpoint_Register_ACL(t *testing.T) { } func TestJobEndpoint_Register_InvalidNamespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -939,7 +939,7 @@ func TestJobEndpoint_Register_InvalidNamespace(t *testing.T) { } func 
TestJobEndpoint_Register_Payload(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -973,7 +973,7 @@ func TestJobEndpoint_Register_Payload(t *testing.T) { } func TestJobEndpoint_Register_Existing(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1096,7 +1096,7 @@ func TestJobEndpoint_Register_Existing(t *testing.T) { } func TestJobEndpoint_Register_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1149,7 +1149,7 @@ func TestJobEndpoint_Register_Periodic(t *testing.T) { } func TestJobEndpoint_Register_ParameterizedJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1197,7 +1197,7 @@ func TestJobEndpoint_Register_ParameterizedJob(t *testing.T) { } func TestJobEndpoint_Register_Dispatched(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -1227,7 +1227,7 @@ func TestJobEndpoint_Register_Dispatched(t *testing.T) { } func TestJobEndpoint_Register_EnforceIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1359,7 +1359,7 @@ func TestJobEndpoint_Register_EnforceIndex(t *testing.T) { // TestJobEndpoint_Register_Vault_Disabled asserts that submitting a job that // uses Vault when Vault is *disabled* results in an error. 
func TestJobEndpoint_Register_Vault_Disabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1396,7 +1396,7 @@ func TestJobEndpoint_Register_Vault_Disabled(t *testing.T) { // with a Vault policy but without a Vault token is *succeeds* if // allow_unauthenticated=true. func TestJobEndpoint_Register_Vault_AllowUnauthenticated(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1453,7 +1453,7 @@ func TestJobEndpoint_Register_Vault_AllowUnauthenticated(t *testing.T) { // submitters can specify their own Vault constraint to override the // automatically injected one. func TestJobEndpoint_Register_Vault_OverrideConstraint(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1510,7 +1510,7 @@ func TestJobEndpoint_Register_Vault_OverrideConstraint(t *testing.T) { } func TestJobEndpoint_Register_Vault_NoToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1551,7 +1551,7 @@ func TestJobEndpoint_Register_Vault_NoToken(t *testing.T) { } func TestJobEndpoint_Register_Vault_Policies(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1693,7 +1693,7 @@ func TestJobEndpoint_Register_Vault_Policies(t *testing.T) { } func TestJobEndpoint_Register_Vault_MultiNamespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1747,7 +1747,7 @@ func TestJobEndpoint_Register_Vault_MultiNamespaces(t *testing.T) { // TestJobEndpoint_Register_SemverConstraint asserts that semver ordering is // used when 
evaluating semver constraints. func TestJobEndpoint_Register_SemverConstraint(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1829,7 +1829,7 @@ func TestJobEndpoint_Register_SemverConstraint(t *testing.T) { // TestJobEndpoint_Register_EvalCreation_Modern asserts that job register creates an eval // atomically with the registration func TestJobEndpoint_Register_EvalCreation_Modern(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1950,7 +1950,7 @@ func TestJobEndpoint_Register_EvalCreation_Modern(t *testing.T) { // TestJobEndpoint_Register_EvalCreation_Legacy asserts that job register creates an eval // atomically with the registration, but handle legacy clients by adding a new eval update func TestJobEndpoint_Register_EvalCreation_Legacy(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 @@ -2092,7 +2092,7 @@ func TestJobEndpoint_Register_EvalCreation_Legacy(t *testing.T) { } func TestJobEndpoint_Register_ValidateMemoryMax(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -2172,7 +2172,7 @@ func evalUpdateFromRaft(t *testing.T, s *Server, evalID string) *structs.Evaluat } func TestJobEndpoint_Register_ACL_Namespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -2249,7 +2249,7 @@ func TestJobEndpoint_Register_ACL_Namespace(t *testing.T) { } func TestJobRegister_ACL_RejectedBySchedulerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -2343,7 +2343,7 @@ func TestJobRegister_ACL_RejectedBySchedulerConfig(t *testing.T) { } 
func TestJobEndpoint_Revert(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -2513,7 +2513,7 @@ func TestJobEndpoint_Revert(t *testing.T) { } func TestJobEndpoint_Revert_Vault_NoToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -2613,7 +2613,7 @@ func TestJobEndpoint_Revert_Vault_NoToken(t *testing.T) { // TestJobEndpoint_Revert_Vault_Policies asserts that job revert uses the // revert request's Vault token when authorizing policies. func TestJobEndpoint_Revert_Vault_Policies(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -2726,7 +2726,7 @@ func TestJobEndpoint_Revert_Vault_Policies(t *testing.T) { } func TestJobEndpoint_Revert_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -2791,7 +2791,7 @@ func TestJobEndpoint_Revert_ACL(t *testing.T) { } func TestJobEndpoint_Stable(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -2855,7 +2855,7 @@ func TestJobEndpoint_Stable(t *testing.T) { } func TestJobEndpoint_Stable_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -2922,7 +2922,7 @@ func TestJobEndpoint_Stable_ACL(t *testing.T) { } func TestJobEndpoint_Evaluate(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3008,7 +3008,7 @@ func TestJobEndpoint_Evaluate(t *testing.T) { } func TestJobEndpoint_ForceRescheduleEvaluate(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, 
cleanupS1 := TestServer(t, func(c *Config) { @@ -3086,7 +3086,7 @@ func TestJobEndpoint_ForceRescheduleEvaluate(t *testing.T) { } func TestJobEndpoint_Evaluate_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -3160,7 +3160,7 @@ func TestJobEndpoint_Evaluate_ACL(t *testing.T) { } func TestJobEndpoint_Evaluate_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3204,7 +3204,7 @@ func TestJobEndpoint_Evaluate_Periodic(t *testing.T) { } func TestJobEndpoint_Evaluate_ParameterizedJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3249,7 +3249,7 @@ func TestJobEndpoint_Evaluate_ParameterizedJob(t *testing.T) { } func TestJobEndpoint_Deregister(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -3340,7 +3340,7 @@ func TestJobEndpoint_Deregister(t *testing.T) { } func TestJobEndpoint_Deregister_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -3425,7 +3425,7 @@ func TestJobEndpoint_Deregister_ACL(t *testing.T) { } func TestJobEndpoint_Deregister_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3492,7 +3492,7 @@ func TestJobEndpoint_Deregister_Nonexistent(t *testing.T) { } func TestJobEndpoint_Deregister_EvalPriority(t *testing.T) { - t.Parallel() + ci.Parallel(t) requireAssert := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -3534,7 +3534,7 @@ func TestJobEndpoint_Deregister_EvalPriority(t *testing.T) { } func TestJobEndpoint_Deregister_Periodic(t *testing.T) { - t.Parallel() + 
ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3593,7 +3593,7 @@ func TestJobEndpoint_Deregister_Periodic(t *testing.T) { } func TestJobEndpoint_Deregister_ParameterizedJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3655,7 +3655,7 @@ func TestJobEndpoint_Deregister_ParameterizedJob(t *testing.T) { // TestJobEndpoint_Deregister_EvalCreation_Modern asserts that job deregister creates an eval // atomically with the registration func TestJobEndpoint_Deregister_EvalCreation_Modern(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3736,7 +3736,7 @@ func TestJobEndpoint_Deregister_EvalCreation_Modern(t *testing.T) { // creates an eval atomically with the registration, but handle legacy clients // by adding a new eval update func TestJobEndpoint_Deregister_EvalCreation_Legacy(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 @@ -3832,7 +3832,7 @@ func TestJobEndpoint_Deregister_EvalCreation_Legacy(t *testing.T) { } func TestJobEndpoint_Deregister_NoShutdownDelay(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -3923,7 +3923,7 @@ func TestJobEndpoint_Deregister_NoShutdownDelay(t *testing.T) { } func TestJobEndpoint_BatchDeregister(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -4016,7 +4016,7 @@ func TestJobEndpoint_BatchDeregister(t *testing.T) { } func TestJobEndpoint_BatchDeregister_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -4085,7 +4085,7 @@ func 
TestJobEndpoint_BatchDeregister_ACL(t *testing.T) { } func TestJobEndpoint_Deregister_Priority(t *testing.T) { - t.Parallel() + ci.Parallel(t) requireAssertion := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -4135,7 +4135,7 @@ func TestJobEndpoint_Deregister_Priority(t *testing.T) { } func TestJobEndpoint_GetJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -4213,7 +4213,7 @@ func TestJobEndpoint_GetJob(t *testing.T) { } func TestJobEndpoint_GetJob_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -4271,7 +4271,7 @@ func TestJobEndpoint_GetJob_ACL(t *testing.T) { } func TestJobEndpoint_GetJob_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -4348,7 +4348,7 @@ func TestJobEndpoint_GetJob_Blocking(t *testing.T) { } func TestJobEndpoint_GetJobVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -4421,7 +4421,7 @@ func TestJobEndpoint_GetJobVersions(t *testing.T) { } func TestJobEndpoint_GetJobVersions_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -4488,7 +4488,7 @@ func TestJobEndpoint_GetJobVersions_ACL(t *testing.T) { } func TestJobEndpoint_GetJobVersions_Diff(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -4585,7 +4585,7 @@ func TestJobEndpoint_GetJobVersions_Diff(t *testing.T) { } func TestJobEndpoint_GetJobVersions_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -4671,7 +4671,7 @@ func TestJobEndpoint_GetJobVersions_Blocking(t *testing.T) { } func TestJobEndpoint_GetJobSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { 
c.NumSchedulers = 0 // Prevent automatic dequeue @@ -4733,7 +4733,7 @@ func TestJobEndpoint_GetJobSummary(t *testing.T) { } func TestJobEndpoint_Summary_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -4820,7 +4820,7 @@ func TestJobEndpoint_Summary_ACL(t *testing.T) { } func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -4914,7 +4914,7 @@ func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) { } func TestJobEndpoint_ListJobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -4963,7 +4963,7 @@ func TestJobEndpoint_ListJobs(t *testing.T) { // returns all jobs across namespace. // func TestJobEndpoint_ListJobs_AllNamespaces_OSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5025,7 +5025,7 @@ func TestJobEndpoint_ListJobs_AllNamespaces_OSS(t *testing.T) { } func TestJobEndpoint_ListJobs_WithACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -5085,7 +5085,7 @@ func TestJobEndpoint_ListJobs_WithACL(t *testing.T) { } func TestJobEndpoint_ListJobs_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5152,7 +5152,7 @@ func TestJobEndpoint_ListJobs_Blocking(t *testing.T) { } func TestJobEndpoint_ListJobs_PaginationFiltering(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -5330,7 +5330,7 @@ func TestJobEndpoint_ListJobs_PaginationFiltering(t *testing.T) { } func TestJobEndpoint_Allocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5371,7 +5371,7 @@ func 
TestJobEndpoint_Allocations(t *testing.T) { } func TestJobEndpoint_Allocations_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -5433,7 +5433,7 @@ func TestJobEndpoint_Allocations_ACL(t *testing.T) { } func TestJobEndpoint_Allocations_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5493,7 +5493,7 @@ func TestJobEndpoint_Allocations_Blocking(t *testing.T) { // TestJobEndpoint_Allocations_NoJobID asserts not setting a JobID in the // request returns an error. func TestJobEndpoint_Allocations_NoJobID(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5515,7 +5515,7 @@ func TestJobEndpoint_Allocations_NoJobID(t *testing.T) { } func TestJobEndpoint_Evaluations(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5554,7 +5554,7 @@ func TestJobEndpoint_Evaluations(t *testing.T) { } func TestJobEndpoint_Evaluations_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -5614,7 +5614,7 @@ func TestJobEndpoint_Evaluations_ACL(t *testing.T) { } func TestJobEndpoint_Evaluations_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5670,7 +5670,7 @@ func TestJobEndpoint_Evaluations_Blocking(t *testing.T) { } func TestJobEndpoint_Deployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5707,7 +5707,7 @@ func TestJobEndpoint_Deployments(t *testing.T) { } func TestJobEndpoint_Deployments_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -5771,7 +5771,7 @@ func TestJobEndpoint_Deployments_ACL(t *testing.T) { } func TestJobEndpoint_Deployments_Blocking(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5818,7 +5818,7 @@ func TestJobEndpoint_Deployments_Blocking(t *testing.T) { } func TestJobEndpoint_LatestDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5857,7 +5857,7 @@ func TestJobEndpoint_LatestDeployment(t *testing.T) { } func TestJobEndpoint_LatestDeployment_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -5926,7 +5926,7 @@ func TestJobEndpoint_LatestDeployment_ACL(t *testing.T) { } func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5974,7 +5974,7 @@ func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) { } func TestJobEndpoint_Plan_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -6008,7 +6008,7 @@ func TestJobEndpoint_Plan_ACL(t *testing.T) { } func TestJobEndpoint_Plan_WithDiff(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -6068,7 +6068,7 @@ func TestJobEndpoint_Plan_WithDiff(t *testing.T) { } func TestJobEndpoint_Plan_NoDiff(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -6130,7 +6130,7 @@ func TestJobEndpoint_Plan_NoDiff(t *testing.T) { // TestJobEndpoint_Plan_Scaling asserts that the plan endpoint handles // jobs with scaling stanza func TestJobEndpoint_Plan_Scaling(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -6164,7 +6164,7 @@ func TestJobEndpoint_Plan_Scaling(t *testing.T) { } func 
TestJobEndpoint_ImplicitConstraints_Vault(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -6234,7 +6234,7 @@ func TestJobEndpoint_ImplicitConstraints_Vault(t *testing.T) { } func TestJobEndpoint_ValidateJob_ConsulConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -6324,7 +6324,7 @@ func TestJobEndpoint_ValidateJob_ConsulConnect(t *testing.T) { } func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -6397,7 +6397,7 @@ func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) { } func TestJobEndpoint_ValidateJobUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) old := mock.Job() new := mock.Job() @@ -6441,7 +6441,7 @@ func TestJobEndpoint_ValidateJobUpdate(t *testing.T) { } func TestJobEndpoint_ValidateJobUpdate_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -6477,7 +6477,7 @@ func TestJobEndpoint_ValidateJobUpdate_ACL(t *testing.T) { } func TestJobEndpoint_Dispatch_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -6554,7 +6554,7 @@ func TestJobEndpoint_Dispatch_ACL(t *testing.T) { } func TestJobEndpoint_Dispatch(t *testing.T) { - t.Parallel() + ci.Parallel(t) // No requirements d1 := mock.BatchJob() @@ -6901,7 +6901,7 @@ func TestJobEndpoint_Dispatch(t *testing.T) { // TestJobEndpoint_Dispatch_JobChildrenSummary asserts that the job summary is updated // appropriately as its dispatched/children jobs status are updated. 
func TestJobEndpoint_Dispatch_JobChildrenSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -7010,7 +7010,7 @@ func TestJobEndpoint_Dispatch_JobChildrenSummary(t *testing.T) { } func TestJobEndpoint_Dispatch_ACL_RejectedBySchedulerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -7099,7 +7099,7 @@ func TestJobEndpoint_Dispatch_ACL_RejectedBySchedulerConfig(t *testing.T) { } func TestJobEndpoint_Scale(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7146,7 +7146,7 @@ func TestJobEndpoint_Scale(t *testing.T) { } func TestJobEndpoint_Scale_DeploymentBlocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7225,7 +7225,7 @@ func TestJobEndpoint_Scale_DeploymentBlocking(t *testing.T) { } func TestJobEndpoint_Scale_InformationalEventsShouldNotBeBlocked(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7302,7 +7302,7 @@ func TestJobEndpoint_Scale_InformationalEventsShouldNotBeBlocked(t *testing.T) { } func TestJobEndpoint_Scale_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -7387,7 +7387,7 @@ func TestJobEndpoint_Scale_ACL(t *testing.T) { } func TestJobEndpoint_Scale_ACL_RejectedBySchedulerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -7478,7 +7478,7 @@ func TestJobEndpoint_Scale_ACL_RejectedBySchedulerConfig(t *testing.T) { } func TestJobEndpoint_Scale_Invalid(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7529,7 +7529,7 @@ func 
TestJobEndpoint_Scale_Invalid(t *testing.T) { } func TestJobEndpoint_Scale_OutOfBounds(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7572,7 +7572,7 @@ func TestJobEndpoint_Scale_OutOfBounds(t *testing.T) { } func TestJobEndpoint_Scale_NoEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7637,7 +7637,7 @@ func TestJobEndpoint_Scale_NoEval(t *testing.T) { } func TestJobEndpoint_Scale_Priority(t *testing.T) { - t.Parallel() + ci.Parallel(t) requireAssertion := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7687,7 +7687,7 @@ func TestJobEndpoint_Scale_Priority(t *testing.T) { } func TestJobEndpoint_InvalidCount(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7717,7 +7717,7 @@ func TestJobEndpoint_InvalidCount(t *testing.T) { } func TestJobEndpoint_GetScaleStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7833,7 +7833,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { } func TestJobEndpoint_GetScaleStatus_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) diff --git a/nomad/job_endpoint_validators_test.go b/nomad/job_endpoint_validators_test.go index f84fd90e2..de8acaa90 100644 --- a/nomad/job_endpoint_validators_test.go +++ b/nomad/job_endpoint_validators_test.go @@ -3,6 +3,7 @@ package nomad import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -10,13 +11,13 @@ import ( ) func TestJobNamespaceConstraintCheckHook_Name(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Equal(t, "namespace-constraint-check", new(jobNamespaceConstraintCheckHook).Name()) } func 
TestJobNamespaceConstraintCheckHook_taskValidateDriver(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { description string @@ -85,7 +86,7 @@ func TestJobNamespaceConstraintCheckHook_taskValidateDriver(t *testing.T) { } func TestJobNamespaceConstraintCheckHook_validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) diff --git a/nomad/leader_test.go b/nomad/leader_test.go index 07f854d09..b244273b0 100644 --- a/nomad/leader_test.go +++ b/nomad/leader_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-version" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -24,6 +25,8 @@ import ( ) func TestLeader_LeftServer(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 }) @@ -86,6 +89,8 @@ func TestLeader_LeftServer(t *testing.T) { } func TestLeader_LeftLeader(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 }) @@ -132,6 +137,8 @@ func TestLeader_LeftLeader(t *testing.T) { } func TestLeader_MultiBootstrap(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -162,6 +169,8 @@ func TestLeader_MultiBootstrap(t *testing.T) { } func TestLeader_PlanQueue_Reset(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 }) @@ -218,6 +227,8 @@ func TestLeader_PlanQueue_Reset(t *testing.T) { } func TestLeader_EvalBroker_Reset(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -276,6 +287,8 @@ func TestLeader_EvalBroker_Reset(t *testing.T) { } func TestLeader_PeriodicDispatcher_Restore_Adds(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := 
TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -366,6 +379,8 @@ func TestLeader_PeriodicDispatcher_Restore_Adds(t *testing.T) { } func TestLeader_PeriodicDispatcher_Restore_NoEvals(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -422,6 +437,8 @@ func TestLeader_PeriodicDispatcher_Restore_NoEvals(t *testing.T) { } func TestLeader_PeriodicDispatcher_Restore_Evals(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -479,6 +496,8 @@ func TestLeader_PeriodicDispatcher_Restore_Evals(t *testing.T) { } func TestLeader_PeriodicDispatch(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 c.EvalGCInterval = 5 * time.Millisecond @@ -499,6 +518,8 @@ func TestLeader_PeriodicDispatch(t *testing.T) { } func TestLeader_ReapFailedEval(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 c.EvalDeliveryLimit = 1 @@ -577,6 +598,8 @@ func TestLeader_ReapFailedEval(t *testing.T) { } func TestLeader_ReapDuplicateEval(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -607,6 +630,8 @@ func TestLeader_ReapDuplicateEval(t *testing.T) { } func TestLeader_revokeVaultAccessorsOnRestore(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -635,7 +660,7 @@ func TestLeader_revokeVaultAccessorsOnRestore(t *testing.T) { } func TestLeader_revokeSITokenAccessorsOnRestore(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -667,7 +692,7 @@ func TestLeader_revokeSITokenAccessorsOnRestore(t *testing.T) { } func TestLeader_ClusterID(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -683,7 +708,7 @@ func TestLeader_ClusterID(t 
*testing.T) { } func TestLeader_ClusterID_upgradePath(t *testing.T) { - t.Parallel() + ci.Parallel(t) before := version.Must(version.NewVersion("0.10.1")).String() after := minClusterIDVersion.String() @@ -791,7 +816,7 @@ func TestLeader_ClusterID_upgradePath(t *testing.T) { } func TestLeader_ClusterID_noUpgrade(t *testing.T) { - t.Parallel() + ci.Parallel(t) type server struct { s *Server @@ -856,7 +881,7 @@ func agreeClusterID(t *testing.T, servers []*Server) { } func TestLeader_ReplicateACLPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.Region = "region1" @@ -893,7 +918,7 @@ func TestLeader_ReplicateACLPolicies(t *testing.T) { } func TestLeader_DiffACLPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := state.TestStateStore(t) @@ -925,7 +950,7 @@ func TestLeader_DiffACLPolicies(t *testing.T) { } func TestLeader_ReplicateACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.Region = "region1" @@ -963,7 +988,7 @@ func TestLeader_ReplicateACLTokens(t *testing.T) { } func TestLeader_DiffACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := state.TestStateStore(t) @@ -1001,7 +1026,7 @@ func TestLeader_DiffACLTokens(t *testing.T) { } func TestLeader_UpgradeRaftVersion(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.Datacenter = "dc1" @@ -1091,6 +1116,8 @@ func TestLeader_UpgradeRaftVersion(t *testing.T) { } func TestLeader_Reelection(t *testing.T) { + ci.Parallel(t) + raftProtocols := []int{1, 2, 3} for _, p := range raftProtocols { t.Run(fmt.Sprintf("Leader Election - Protocol version %d", p), func(t *testing.T) { @@ -1158,7 +1185,7 @@ func leaderElectionTest(t *testing.T, raftProtocol raft.ProtocolVersion) { } func TestLeader_RollRaftServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { 
c.RaftConfig.ProtocolVersion = 2 @@ -1284,6 +1311,8 @@ func TestLeader_RollRaftServer(t *testing.T) { } func TestLeader_RevokeLeadership_MultipleTimes(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) @@ -1300,6 +1329,8 @@ func TestLeader_RevokeLeadership_MultipleTimes(t *testing.T) { } func TestLeader_TransitionsUpdateConsistencyRead(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) @@ -1321,6 +1352,8 @@ func TestLeader_TransitionsUpdateConsistencyRead(t *testing.T) { // TestLeader_PausingWorkers asserts that scheduling workers are paused // (and unpaused) upon leader elections (and step downs). func TestLeader_PausingWorkers(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 12 }) @@ -1359,7 +1392,7 @@ func TestLeader_PausingWorkers(t *testing.T) { // This verifies that removing the server and adding it back with a uuid works // even if the server's address stays the same. 
func TestServer_ReconcileMember(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a three node cluster s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -1457,7 +1490,7 @@ func TestServer_ReconcileMember(t *testing.T) { } func TestLeader_ReplicateNamespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.Region = "region1" @@ -1504,7 +1537,7 @@ func TestLeader_ReplicateNamespaces(t *testing.T) { } func TestLeader_DiffNamespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := state.TestStateStore(t) diff --git a/nomad/namespace_endpoint_test.go b/nomad/namespace_endpoint_test.go index eec1c50bd..f56e4cc8e 100644 --- a/nomad/namespace_endpoint_test.go +++ b/nomad/namespace_endpoint_test.go @@ -7,6 +7,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -15,8 +16,8 @@ import ( ) func TestNamespaceEndpoint_GetNamespace(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -44,8 +45,8 @@ func TestNamespaceEndpoint_GetNamespace(t *testing.T) { } func TestNamespaceEndpoint_GetNamespace_ACL(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -105,8 +106,8 @@ func TestNamespaceEndpoint_GetNamespace_ACL(t *testing.T) { } func TestNamespaceEndpoint_GetNamespace_Blocking(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() state := s1.fsm.State() @@ -164,8 +165,8 @@ func TestNamespaceEndpoint_GetNamespace_Blocking(t *testing.T) { } func TestNamespaceEndpoint_GetNamespaces(t *testing.T) { + 
ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -190,8 +191,8 @@ func TestNamespaceEndpoint_GetNamespaces(t *testing.T) { } func TestNamespaceEndpoint_GetNamespaces_ACL(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -239,8 +240,8 @@ func TestNamespaceEndpoint_GetNamespaces_ACL(t *testing.T) { } func TestNamespaceEndpoint_GetNamespaces_Blocking(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() state := s1.fsm.State() @@ -298,8 +299,8 @@ func TestNamespaceEndpoint_GetNamespaces_Blocking(t *testing.T) { } func TestNamespaceEndpoint_List(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -336,8 +337,8 @@ func TestNamespaceEndpoint_List(t *testing.T) { } func TestNamespaceEndpoint_List_ACL(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -410,8 +411,8 @@ func TestNamespaceEndpoint_List_ACL(t *testing.T) { } func TestNamespaceEndpoint_List_Blocking(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() state := s1.fsm.State() @@ -460,8 +461,8 @@ func TestNamespaceEndpoint_List_Blocking(t *testing.T) { } func TestNamespaceEndpoint_DeleteNamespaces(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -483,8 +484,8 @@ func TestNamespaceEndpoint_DeleteNamespaces(t *testing.T) { } func TestNamespaceEndpoint_DeleteNamespaces_NonTerminal_Local(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() 
s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -513,8 +514,8 @@ func TestNamespaceEndpoint_DeleteNamespaces_NonTerminal_Local(t *testing.T) { } func TestNamespaceEndpoint_DeleteNamespaces_NonTerminal_Federated_ACL(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.Region = "region1" c.AuthoritativeRegion = "region1" @@ -574,8 +575,8 @@ func TestNamespaceEndpoint_DeleteNamespaces_NonTerminal_Federated_ACL(t *testing } func TestNamespaceEndpoint_DeleteNamespaces_ACL(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -649,8 +650,8 @@ func TestNamespaceEndpoint_DeleteNamespaces_ACL(t *testing.T) { } func TestNamespaceEndpoint_DeleteNamespaces_Default(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -666,8 +667,8 @@ func TestNamespaceEndpoint_DeleteNamespaces_Default(t *testing.T) { } func TestNamespaceEndpoint_UpsertNamespaces(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -697,8 +698,8 @@ func TestNamespaceEndpoint_UpsertNamespaces(t *testing.T) { } func TestNamespaceEndpoint_UpsertNamespaces_ACL(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index c91dafc1a..8d1cd12cc 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -11,12 +11,8 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" - vapi "github.com/hashicorp/vault/api" - "github.com/kr/pretty" - 
"github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" @@ -24,10 +20,14 @@ import ( "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" + vapi "github.com/hashicorp/vault/api" + "github.com/kr/pretty" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestClientEndpoint_Register(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -90,7 +90,7 @@ func TestClientEndpoint_Register(t *testing.T) { // forwarded RPCs. This is essential otherwise we will think a Yamux session to // a Nomad server is actually the session to the node. func TestClientEndpoint_Register_NodeConn_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -181,7 +181,7 @@ func TestClientEndpoint_Register_NodeConn_Forwarded(t *testing.T) { } func TestClientEndpoint_Register_SecretMismatch(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -211,7 +211,7 @@ func TestClientEndpoint_Register_SecretMismatch(t *testing.T) { // Test the deprecated single node deregistration path func TestClientEndpoint_DeregisterOne(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -257,7 +257,7 @@ func TestClientEndpoint_DeregisterOne(t *testing.T) { } func TestClientEndpoint_Deregister_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -323,7 +323,7 @@ func TestClientEndpoint_Deregister_ACL(t *testing.T) { } func TestClientEndpoint_Deregister_Vault(t *testing.T) { - t.Parallel() + 
ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -385,7 +385,7 @@ func TestClientEndpoint_Deregister_Vault(t *testing.T) { } func TestClientEndpoint_UpdateStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -465,7 +465,7 @@ func TestClientEndpoint_UpdateStatus(t *testing.T) { } func TestClientEndpoint_UpdateStatus_Vault(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -524,7 +524,7 @@ func TestClientEndpoint_UpdateStatus_Vault(t *testing.T) { } func TestClientEndpoint_UpdateStatus_HeartbeatRecovery(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -575,7 +575,7 @@ func TestClientEndpoint_UpdateStatus_HeartbeatRecovery(t *testing.T) { } func TestClientEndpoint_Register_GetEvals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -670,7 +670,7 @@ func TestClientEndpoint_Register_GetEvals(t *testing.T) { } func TestClientEndpoint_UpdateStatus_GetEvals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -754,7 +754,7 @@ func TestClientEndpoint_UpdateStatus_GetEvals(t *testing.T) { } func TestClientEndpoint_UpdateStatus_HeartbeatOnly(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 @@ -832,7 +832,7 @@ func TestClientEndpoint_UpdateStatus_HeartbeatOnly(t *testing.T) { } func TestClientEndpoint_UpdateStatus_HeartbeatOnly_Advertise(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) advAddr := "127.0.1.1:1234" @@ -875,7 +875,7 @@ func TestClientEndpoint_UpdateStatus_HeartbeatOnly_Advertise(t *testing.T) { // * an evaluation is created when the node becomes eligible // * drain metadata is properly persisted in Node.LastDrain func TestClientEndpoint_UpdateDrain(t *testing.T) 
{ - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -992,7 +992,7 @@ func TestClientEndpoint_UpdateDrain(t *testing.T) { // is properly persisted in Node.LastDrain as the node drain is updated and // completes. func TestClientEndpoint_UpdatedDrainAndCompleted(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -1099,7 +1099,7 @@ func TestClientEndpoint_UpdatedDrainAndCompleted(t *testing.T) { // persisted in Node.LastDrain when calls to Node.UpdateDrain() don't affect // the drain status. func TestClientEndpoint_UpdatedDrainNoop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -1174,7 +1174,7 @@ func TestClientEndpoint_UpdatedDrainNoop(t *testing.T) { // node.write ACLs, and that token accessor ID is properly persisted in // Node.LastDrain.AccessorID func TestClientEndpoint_UpdateDrain_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1243,7 +1243,7 @@ func TestClientEndpoint_UpdateDrain_ACL(t *testing.T) { // This test ensures that Nomad marks client state of allocations which are in // pending/running state to lost when a node is marked as down. 
func TestClientEndpoint_Drain_Down(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1374,7 +1374,7 @@ func TestClientEndpoint_Drain_Down(t *testing.T) { } func TestClientEndpoint_UpdateEligibility(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -1432,7 +1432,7 @@ func TestClientEndpoint_UpdateEligibility(t *testing.T) { } func TestClientEndpoint_UpdateEligibility_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1488,7 +1488,7 @@ func TestClientEndpoint_UpdateEligibility_ACL(t *testing.T) { } func TestClientEndpoint_GetNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1555,7 +1555,7 @@ func TestClientEndpoint_GetNode(t *testing.T) { } func TestClientEndpoint_GetNode_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1619,7 +1619,7 @@ func TestClientEndpoint_GetNode_ACL(t *testing.T) { } func TestClientEndpoint_GetNode_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1722,7 +1722,7 @@ func TestClientEndpoint_GetNode_Blocking(t *testing.T) { } func TestClientEndpoint_GetAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1785,7 +1785,7 @@ func TestClientEndpoint_GetAllocs(t *testing.T) { } func TestClientEndpoint_GetAllocs_ACL_Basic(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1860,7 +1860,7 @@ func TestClientEndpoint_GetAllocs_ACL_Basic(t *testing.T) { } func TestClientEndpoint_GetAllocs_ACL_Namespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -1956,7 +1956,7 @@ 
func TestClientEndpoint_GetAllocs_ACL_Namespaces(t *testing.T) { } func TestClientEndpoint_GetClientAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -2036,7 +2036,7 @@ func TestClientEndpoint_GetClientAllocs(t *testing.T) { } func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -2158,7 +2158,7 @@ func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) { } func TestClientEndpoint_GetClientAllocs_Blocking_GC(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -2235,7 +2235,7 @@ func TestClientEndpoint_GetClientAllocs_Blocking_GC(t *testing.T) { // A MigrateToken should not be created if an allocation shares the same node // with its previous allocation func TestClientEndpoint_GetClientAllocs_WithoutMigrateTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -2288,7 +2288,7 @@ func TestClientEndpoint_GetClientAllocs_WithoutMigrateTokens(t *testing.T) { } func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -2381,7 +2381,7 @@ func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) { } func TestClientEndpoint_UpdateAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { // Disabling scheduling in this test so that we can @@ -2479,7 +2479,7 @@ func TestClientEndpoint_UpdateAlloc(t *testing.T) { } func TestClientEndpoint_BatchUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -2537,7 +2537,7 @@ func TestClientEndpoint_BatchUpdate(t *testing.T) { } func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) 
defer cleanupS1() @@ -2623,7 +2623,7 @@ func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) { } func TestClientEndpoint_CreateNodeEvals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -2708,7 +2708,7 @@ func TestClientEndpoint_CreateNodeEvals(t *testing.T) { // TestClientEndpoint_CreateNodeEvals_MultipleNSes asserts that evals are made // for all jobs across namespaces func TestClientEndpoint_CreateNodeEvals_MultipleNSes(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -2767,7 +2767,7 @@ func TestClientEndpoint_CreateNodeEvals_MultipleNSes(t *testing.T) { } func TestClientEndpoint_Evaluate(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -2855,7 +2855,7 @@ func TestClientEndpoint_Evaluate(t *testing.T) { } func TestClientEndpoint_Evaluate_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -2914,7 +2914,7 @@ func TestClientEndpoint_Evaluate_ACL(t *testing.T) { } func TestClientEndpoint_ListNodes(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -2986,7 +2986,7 @@ func TestClientEndpoint_ListNodes(t *testing.T) { } func TestClientEndpoint_ListNodes_Fields(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -3023,7 +3023,7 @@ func TestClientEndpoint_ListNodes_Fields(t *testing.T) { } func TestClientEndpoint_ListNodes_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -3078,7 +3078,7 @@ func TestClientEndpoint_ListNodes_ACL(t *testing.T) { } func TestClientEndpoint_ListNodes_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -3210,7 +3210,7 @@ func 
TestClientEndpoint_ListNodes_Blocking(t *testing.T) { } func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -3292,7 +3292,7 @@ func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) { } func TestClientEndpoint_DeriveVaultToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -3385,7 +3385,7 @@ func TestClientEndpoint_DeriveVaultToken(t *testing.T) { } func TestClientEndpoint_DeriveVaultToken_VaultError(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -3443,7 +3443,7 @@ func TestClientEndpoint_DeriveVaultToken_VaultError(t *testing.T) { } func TestClientEndpoint_taskUsesConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, task *structs.Task, exp bool) { result := taskUsesConnect(task) @@ -3471,7 +3471,7 @@ func TestClientEndpoint_taskUsesConnect(t *testing.T) { } func TestClientEndpoint_tasksNotUsingConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) taskGroup := &structs.TaskGroup{ Name: "testgroup", @@ -3523,7 +3523,7 @@ func mutateConnectJob(t *testing.T, job *structs.Job) { } func TestClientEndpoint_DeriveSIToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) s1, cleanupS1 := TestServer(t, nil) // already sets consul mocks @@ -3576,7 +3576,7 @@ func TestClientEndpoint_DeriveSIToken(t *testing.T) { } func TestClientEndpoint_DeriveSIToken_ConsulError(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -3624,7 +3624,7 @@ func TestClientEndpoint_DeriveSIToken_ConsulError(t *testing.T) { } func TestClientEndpoint_EmitEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -3663,6 +3663,8 @@ func TestClientEndpoint_EmitEvents(t *testing.T) { } func 
TestClientEndpoint_ShouldCreateNodeEval(t *testing.T) { + ci.Parallel(t) + t.Run("spurious changes don't require eval", func(t *testing.T) { n1 := mock.Node() n2 := n1.Copy() diff --git a/nomad/operator_endpoint_test.go b/nomad/operator_endpoint_test.go index 47c632351..48ad4d6ae 100644 --- a/nomad/operator_endpoint_test.go +++ b/nomad/operator_endpoint_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/freeport" "github.com/hashicorp/nomad/helper/snapshot" @@ -30,7 +31,7 @@ import ( ) func TestOperator_RaftGetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -74,7 +75,7 @@ func TestOperator_RaftGetConfiguration(t *testing.T) { } func TestOperator_RaftGetConfiguration_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -138,7 +139,7 @@ func TestOperator_RaftGetConfiguration_ACL(t *testing.T) { } func TestOperator_RaftRemovePeerByAddress(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = raft.ProtocolVersion(2) @@ -200,7 +201,7 @@ func TestOperator_RaftRemovePeerByAddress(t *testing.T) { } func TestOperator_RaftRemovePeerByAddress_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = raft.ProtocolVersion(2) @@ -255,7 +256,7 @@ func TestOperator_RaftRemovePeerByAddress_ACL(t *testing.T) { } func TestOperator_RaftRemovePeerByID(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = 3 @@ -317,7 +318,7 @@ func TestOperator_RaftRemovePeerByID(t *testing.T) { } func 
TestOperator_RaftRemovePeerByID_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = 3 @@ -371,7 +372,7 @@ func TestOperator_RaftRemovePeerByID_ACL(t *testing.T) { } func TestOperator_SchedulerGetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.Build = "0.9.0+unittest" @@ -395,7 +396,7 @@ func TestOperator_SchedulerGetConfiguration(t *testing.T) { } func TestOperator_SchedulerSetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.Build = "0.9.0+unittest" @@ -437,7 +438,7 @@ func TestOperator_SchedulerSetConfiguration(t *testing.T) { } func TestOperator_SchedulerGetConfiguration_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = 3 @@ -484,7 +485,7 @@ func TestOperator_SchedulerGetConfiguration_ACL(t *testing.T) { } func TestOperator_SchedulerSetConfiguration_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = 3 @@ -535,7 +536,7 @@ func TestOperator_SchedulerSetConfiguration_ACL(t *testing.T) { } func TestOperator_SnapshotSave(t *testing.T) { - t.Parallel() + ci.Parallel(t) ////// Nomad clusters topology - not specific to test dir, err := ioutil.TempDir("", "nomadtest-operator-") @@ -642,7 +643,7 @@ func TestOperator_SnapshotSave(t *testing.T) { } func TestOperator_SnapshotSave_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) ////// Nomad clusters topology - not specific to test dir, err := ioutil.TempDir("", "nomadtest-operator-") @@ -718,6 +719,8 @@ func TestOperator_SnapshotSave_ACL(t *testing.T) { } func TestOperator_SnapshotRestore(t *testing.T) { + ci.Parallel(t) + targets := []string{"leader", "non_leader", "remote_region"} for _, c := range targets { @@ -881,7 +884,7 @@ 
func testRestoreSnapshot(t *testing.T, req *structs.SnapshotRestoreRequest, snap } func TestOperator_SnapshotRestore_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir, err := ioutil.TempDir("", "nomadtest-operator-") require.NoError(t, err) diff --git a/nomad/periodic_endpoint_test.go b/nomad/periodic_endpoint_test.go index 5f2a5ed8e..2fb2a38cb 100644 --- a/nomad/periodic_endpoint_test.go +++ b/nomad/periodic_endpoint_test.go @@ -6,6 +6,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -13,7 +14,7 @@ import ( ) func TestPeriodicEndpoint_Force(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -64,7 +65,7 @@ func TestPeriodicEndpoint_Force(t *testing.T) { } func TestPeriodicEndpoint_Force_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -163,7 +164,7 @@ func TestPeriodicEndpoint_Force_ACL(t *testing.T) { } func TestPeriodicEndpoint_Force_NonPeriodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue diff --git a/nomad/periodic_test.go b/nomad/periodic_test.go index 605032cd3..862a2c5e7 100644 --- a/nomad/periodic_test.go +++ b/nomad/periodic_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -119,7 +120,7 @@ func testPeriodicJob(times ...time.Time) *structs.Job { // TestPeriodicDispatch_SetEnabled test that setting enabled twice is a no-op. 
// This tests the reported issue: https://github.com/hashicorp/nomad/issues/2829 func TestPeriodicDispatch_SetEnabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) // SetEnabled has been called once but do it again. @@ -142,7 +143,7 @@ func TestPeriodicDispatch_SetEnabled(t *testing.T) { } func TestPeriodicDispatch_Add_NonPeriodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) job := mock.Job() if err := p.Add(job); err != nil { @@ -156,7 +157,7 @@ func TestPeriodicDispatch_Add_NonPeriodic(t *testing.T) { } func TestPeriodicDispatch_Add_Periodic_Parameterized(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) job := mock.PeriodicJob() job.ParameterizedJob = &structs.ParameterizedJobConfig{} @@ -171,7 +172,7 @@ func TestPeriodicDispatch_Add_Periodic_Parameterized(t *testing.T) { } func TestPeriodicDispatch_Add_Periodic_Stopped(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) job := mock.PeriodicJob() job.Stop = true @@ -186,7 +187,7 @@ func TestPeriodicDispatch_Add_Periodic_Stopped(t *testing.T) { } func TestPeriodicDispatch_Add_UpdateJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) job := mock.PeriodicJob() err := p.Add(job) @@ -208,8 +209,8 @@ func TestPeriodicDispatch_Add_UpdateJob(t *testing.T) { } func TestPeriodicDispatch_Add_Remove_Namespaced(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() p, _ := testPeriodicDispatcher(t) job := mock.PeriodicJob() job2 := mock.PeriodicJob() @@ -226,7 +227,7 @@ func TestPeriodicDispatch_Add_Remove_Namespaced(t *testing.T) { } func TestPeriodicDispatch_Add_RemoveJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) job := mock.PeriodicJob() if err := p.Add(job); err != nil { @@ -251,7 +252,7 @@ func TestPeriodicDispatch_Add_RemoveJob(t *testing.T) { } func TestPeriodicDispatch_Add_TriggersUpdate(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create a job that won't be evaluated for a while. @@ -294,7 +295,7 @@ func TestPeriodicDispatch_Add_TriggersUpdate(t *testing.T) { } func TestPeriodicDispatch_Remove_Untracked(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) if err := p.Remove("ns", "foo"); err != nil { t.Fatalf("Remove failed %v; expected a no-op", err) @@ -302,7 +303,7 @@ func TestPeriodicDispatch_Remove_Untracked(t *testing.T) { } func TestPeriodicDispatch_Remove_Tracked(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) job := mock.PeriodicJob() @@ -326,7 +327,7 @@ func TestPeriodicDispatch_Remove_Tracked(t *testing.T) { } func TestPeriodicDispatch_Remove_TriggersUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) // Create a job that will be evaluated soon. @@ -356,7 +357,7 @@ func TestPeriodicDispatch_Remove_TriggersUpdate(t *testing.T) { } func TestPeriodicDispatch_ForceRun_Untracked(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) if _, err := p.ForceRun("ns", "foo"); err == nil { @@ -365,7 +366,7 @@ func TestPeriodicDispatch_ForceRun_Untracked(t *testing.T) { } func TestPeriodicDispatch_ForceRun_Tracked(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create a job that won't be evaluated for a while. @@ -394,7 +395,7 @@ func TestPeriodicDispatch_ForceRun_Tracked(t *testing.T) { } func TestPeriodicDispatch_Run_DisallowOverlaps(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create a job that will trigger two launches but disallows overlapping. @@ -424,7 +425,7 @@ func TestPeriodicDispatch_Run_DisallowOverlaps(t *testing.T) { } func TestPeriodicDispatch_Run_Multiple(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create a job that will be launched twice. 
@@ -456,7 +457,7 @@ func TestPeriodicDispatch_Run_Multiple(t *testing.T) { } func TestPeriodicDispatch_Run_SameTime(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create two job that will be launched at the same time. @@ -494,7 +495,7 @@ func TestPeriodicDispatch_Run_SameTime(t *testing.T) { } func TestPeriodicDispatch_Run_SameID_Different_Namespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create two job that will be launched at the same time. @@ -541,7 +542,7 @@ func TestPeriodicDispatch_Run_SameID_Different_Namespace(t *testing.T) { // some after each other and some invalid times, and ensures the correct // behavior. func TestPeriodicDispatch_Complex(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create some jobs launching at different times. @@ -625,7 +626,7 @@ func shuffle(jobs []*structs.Job) { } func TestPeriodicHeap_Order(t *testing.T) { - t.Parallel() + ci.Parallel(t) h := NewPeriodicHeap() j1 := mock.PeriodicJob() j2 := mock.PeriodicJob() @@ -663,7 +664,7 @@ func deriveChildJob(parent *structs.Job) *structs.Job { } func TestPeriodicDispatch_RunningChildren_NoEvals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -687,7 +688,7 @@ func TestPeriodicDispatch_RunningChildren_NoEvals(t *testing.T) { } func TestPeriodicDispatch_RunningChildren_ActiveEvals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -724,7 +725,7 @@ func TestPeriodicDispatch_RunningChildren_ActiveEvals(t *testing.T) { } func TestPeriodicDispatch_RunningChildren_ActiveAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -772,7 +773,7 @@ func TestPeriodicDispatch_RunningChildren_ActiveAllocs(t *testing.T) { // TestPeriodicDispatch_JobEmptyStatus asserts that dispatched // job will always has an empty status 
func TestPeriodicDispatch_JobEmptyStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) job := testPeriodicJob(time.Now().Add(1 * time.Second)) diff --git a/nomad/plan_apply_pool_test.go b/nomad/plan_apply_pool_test.go index 4743dc25e..1f88a5ccc 100644 --- a/nomad/plan_apply_pool_test.go +++ b/nomad/plan_apply_pool_test.go @@ -3,12 +3,13 @@ package nomad import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" ) func TestEvaluatePool(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) @@ -41,7 +42,7 @@ func TestEvaluatePool(t *testing.T) { } func TestEvaluatePool_Resize(t *testing.T) { - t.Parallel() + ci.Parallel(t) pool := NewEvaluatePool(1, 4) defer pool.Shutdown() if n := pool.Size(); n != 1 { diff --git a/nomad/plan_apply_test.go b/nomad/plan_apply_test.go index 7550baf41..984708010 100644 --- a/nomad/plan_apply_test.go +++ b/nomad/plan_apply_test.go @@ -6,6 +6,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -65,7 +66,7 @@ func testRegisterJob(t *testing.T, s *Server, j *structs.Job) { // COMPAT 0.11: Tests the older unoptimized code path for applyPlan func TestPlanApply_applyPlan(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -239,7 +240,7 @@ func TestPlanApply_applyPlan(t *testing.T) { // Verifies that applyPlan properly updates the constituent objects in MemDB, // when the plan contains normalized allocs. 
func TestPlanApply_applyPlanWithNormalizedAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.Build = "0.9.2" @@ -390,7 +391,7 @@ func TestPlanApply_applyPlanWithNormalizedAllocs(t *testing.T) { } func TestPlanApply_EvalPlan_Simple(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) @@ -434,7 +435,7 @@ func TestPlanApply_EvalPlan_Simple(t *testing.T) { } func TestPlanApply_EvalPlan_Preemption(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() node.NodeResources = &structs.NodeResources{ @@ -548,7 +549,7 @@ func TestPlanApply_EvalPlan_Preemption(t *testing.T) { } func TestPlanApply_EvalPlan_Partial(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) @@ -606,7 +607,7 @@ func TestPlanApply_EvalPlan_Partial(t *testing.T) { } func TestPlanApply_EvalPlan_Partial_AllAtOnce(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) @@ -657,7 +658,7 @@ func TestPlanApply_EvalPlan_Partial_AllAtOnce(t *testing.T) { } func TestPlanApply_EvalNodePlan_Simple(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) @@ -684,7 +685,7 @@ func TestPlanApply_EvalNodePlan_Simple(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeNotReady(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() node.Status = structs.NodeStatusInit @@ -712,7 +713,7 @@ func TestPlanApply_EvalNodePlan_NodeNotReady(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.DrainNode() 
state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) @@ -739,7 +740,7 @@ func TestPlanApply_EvalNodePlan_NodeDrain(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeNotExist(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) snap, _ := state.Snapshot() @@ -765,7 +766,7 @@ func TestPlanApply_EvalNodePlan_NodeNotExist(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeFull(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() state := testStateStore(t) node := mock.Node() @@ -802,7 +803,7 @@ func TestPlanApply_EvalNodePlan_NodeFull(t *testing.T) { // Test that we detect device oversubscription func TestPlanApply_EvalNodePlan_NodeFull_Device(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.Alloc() state := testStateStore(t) @@ -855,7 +856,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_Device(t *testing.T) { } func TestPlanApply_EvalNodePlan_UpdateExisting(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() state := testStateStore(t) node := mock.Node() @@ -887,7 +888,7 @@ func TestPlanApply_EvalNodePlan_UpdateExisting(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeFull_Evict(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() state := testStateStore(t) node := mock.Node() @@ -925,7 +926,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_Evict(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeFull_AllocEvict(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() state := testStateStore(t) node := mock.Node() @@ -958,7 +959,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_AllocEvict(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeDown_EvictOnly(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() state := testStateStore(t) node := mock.Node() diff --git a/nomad/plan_endpoint_test.go b/nomad/plan_endpoint_test.go index a3fb596a6..8c02c2ba9 100644 --- a/nomad/plan_endpoint_test.go +++ 
b/nomad/plan_endpoint_test.go @@ -5,6 +5,7 @@ import ( "time" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -12,7 +13,7 @@ import ( ) func TestPlanEndpoint_Submit(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -54,7 +55,7 @@ func TestPlanEndpoint_Submit(t *testing.T) { // TestPlanEndpoint_Submit_Bad asserts that the Plan.Submit endpoint rejects // bad data with an error instead of panicking. func TestPlanEndpoint_Submit_Bad(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 diff --git a/nomad/plan_normalization_test.go b/nomad/plan_normalization_test.go index ba427d423..6dbe18b22 100644 --- a/nomad/plan_normalization_test.go +++ b/nomad/plan_normalization_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" @@ -17,6 +18,8 @@ import ( // Whenever this test is changed, care should be taken to ensure the older msgpack size // is recalculated when new fields are introduced in ApplyPlanResultsRequest func TestPlanNormalize(t *testing.T) { + ci.Parallel(t) + // This size was calculated using the older ApplyPlanResultsRequest format, in which allocations // didn't use OmitEmpty and only the job was normalized in the stopped and preempted allocs. 
// The newer format uses OmitEmpty and uses a minimal set of fields for the diff of the diff --git a/nomad/plan_queue_test.go b/nomad/plan_queue_test.go index 933bd1f39..42877846b 100644 --- a/nomad/plan_queue_test.go +++ b/nomad/plan_queue_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" ) @@ -17,7 +18,7 @@ func testPlanQueue(t *testing.T) *PlanQueue { } func TestPlanQueue_Enqueue_Dequeue(t *testing.T) { - t.Parallel() + ci.Parallel(t) pq := testPlanQueue(t) if pq.Enabled() { t.Fatalf("should not be enabled") @@ -84,7 +85,7 @@ func TestPlanQueue_Enqueue_Dequeue(t *testing.T) { } func TestPlanQueue_Enqueue_Disable(t *testing.T) { - t.Parallel() + ci.Parallel(t) pq := testPlanQueue(t) // Enqueue @@ -115,7 +116,7 @@ func TestPlanQueue_Enqueue_Disable(t *testing.T) { } func TestPlanQueue_Dequeue_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) pq := testPlanQueue(t) pq.SetEnabled(true) @@ -137,7 +138,7 @@ func TestPlanQueue_Dequeue_Timeout(t *testing.T) { // Ensure higher priority dequeued first func TestPlanQueue_Dequeue_Priority(t *testing.T) { - t.Parallel() + ci.Parallel(t) pq := testPlanQueue(t) pq.SetEnabled(true) @@ -171,7 +172,7 @@ func TestPlanQueue_Dequeue_Priority(t *testing.T) { // Ensure FIFO at fixed priority func TestPlanQueue_Dequeue_FIFO(t *testing.T) { - t.Parallel() + ci.Parallel(t) pq := testPlanQueue(t) pq.SetEnabled(true) diff --git a/nomad/regions_endpoint_test.go b/nomad/regions_endpoint_test.go index 7f3e216de..97c7d1b04 100644 --- a/nomad/regions_endpoint_test.go +++ b/nomad/regions_endpoint_test.go @@ -5,12 +5,13 @@ import ( "testing" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" ) func TestRegionList(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make the servers s1, cleanupS1 := TestServer(t, 
func(c *Config) { diff --git a/nomad/rpc_test.go b/nomad/rpc_test.go index bd738f279..d7c0dc910 100644 --- a/nomad/rpc_test.go +++ b/nomad/rpc_test.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/pool" "github.com/hashicorp/nomad/helper/testlog" @@ -47,7 +48,7 @@ func rpcClient(t *testing.T, s *Server) rpc.ClientCodec { } func TestRPC_forwardLeader(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 @@ -89,7 +90,7 @@ func TestRPC_forwardLeader(t *testing.T) { } func TestRPC_WaitForConsistentReads(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS2 := TestServer(t, func(c *Config) { c.RPCHoldTimeout = 20 * time.Millisecond @@ -131,7 +132,7 @@ func TestRPC_WaitForConsistentReads(t *testing.T) { } func TestRPC_forwardRegion(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -156,7 +157,7 @@ func TestRPC_forwardRegion(t *testing.T) { } func TestRPC_getServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -182,7 +183,7 @@ func TestRPC_getServer(t *testing.T) { } func TestRPC_PlaintextRPCSucceedsWhenInUpgradeMode(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -225,7 +226,7 @@ func TestRPC_PlaintextRPCSucceedsWhenInUpgradeMode(t *testing.T) { } func TestRPC_PlaintextRPCFailsWhenNotInUpgradeMode(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -262,7 +263,7 @@ func TestRPC_PlaintextRPCFailsWhenNotInUpgradeMode(t *testing.T) { } func TestRPC_streamingRpcConn_badMethod(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -292,7 +293,7 @@ func 
TestRPC_streamingRpcConn_badMethod(t *testing.T) { } func TestRPC_streamingRpcConn_badMethod_TLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) const ( @@ -352,7 +353,7 @@ func TestRPC_streamingRpcConn_badMethod_TLS(t *testing.T) { } func TestRPC_streamingRpcConn_goodMethod_Plaintext(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) dir := tmpDir(t) defer os.RemoveAll(dir) @@ -404,7 +405,7 @@ func TestRPC_streamingRpcConn_goodMethod_Plaintext(t *testing.T) { } func TestRPC_streamingRpcConn_goodMethod_TLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) const ( @@ -484,7 +485,7 @@ func TestRPC_streamingRpcConn_goodMethod_TLS(t *testing.T) { // switch the conn pool to establishing v2 connections and we can deprecate this // test. func TestRPC_handleMultiplexV2(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s, cleanupS := TestServer(t, nil) @@ -544,7 +545,7 @@ func TestRPC_handleMultiplexV2(t *testing.T) { // TestRPC_TLS_in_TLS asserts that trying to nest TLS connections fails. func TestRPC_TLS_in_TLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../helper/tlsutil/testdata/ca.pem" @@ -604,7 +605,7 @@ func TestRPC_TLS_in_TLS(t *testing.T) { // // Invalid limits are tested in command/agent/agent_test.go func TestRPC_Limits_OK(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../helper/tlsutil/testdata/ca.pem" @@ -847,7 +848,7 @@ func TestRPC_Limits_OK(t *testing.T) { tc := cases[i] name := fmt.Sprintf("%d-tls-%t-timeout-%s-limit-%v", i, tc.tls, tc.timeout, tc.limit) t.Run(name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) if tc.limit >= maxConns { t.Fatalf("test fixture failure: cannot assert limit (%d) >= max (%d)", tc.limit, maxConns) @@ -898,7 +899,7 @@ func TestRPC_Limits_OK(t *testing.T) { // the overall connection limit to prevent DOS via server-routed streaming API // calls. 
func TestRPC_Limits_Streaming(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanup := TestServer(t, func(c *Config) { limits := config.DefaultLimits() @@ -1019,7 +1020,7 @@ func TestRPC_Limits_Streaming(t *testing.T) { } func TestRPC_TLS_Enforcement_Raft(t *testing.T) { - t.Parallel() + ci.Parallel(t) defer func() { //TODO Avoid panics from logging during shutdown @@ -1102,7 +1103,7 @@ func TestRPC_TLS_Enforcement_Raft(t *testing.T) { } func TestRPC_TLS_Enforcement_RPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) defer func() { //TODO Avoid panics from logging during shutdown diff --git a/nomad/scaling_endpoint_test.go b/nomad/scaling_endpoint_test.go index 6dabf3087..673d3be14 100644 --- a/nomad/scaling_endpoint_test.go +++ b/nomad/scaling_endpoint_test.go @@ -5,17 +5,18 @@ import ( "time" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestScalingEndpoint_StaleReadSupport(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) list := &structs.ScalingPolicyListRequest{} assert.True(list.IsRead()) @@ -24,7 +25,7 @@ func TestScalingEndpoint_StaleReadSupport(t *testing.T) { } func TestScalingEndpoint_GetPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -63,7 +64,7 @@ func TestScalingEndpoint_GetPolicy(t *testing.T) { } func TestScalingEndpoint_GetPolicy_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -138,7 +139,7 @@ func TestScalingEndpoint_GetPolicy_ACL(t *testing.T) { } func TestScalingEndpoint_ListPolicies(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -230,7 +231,7 @@ func TestScalingEndpoint_ListPolicies(t *testing.T) { } func TestScalingEndpoint_ListPolicies_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -309,7 +310,7 @@ func TestScalingEndpoint_ListPolicies_ACL(t *testing.T) { } func TestScalingEndpoint_ListPolicies_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) diff --git a/nomad/search_endpoint_test.go b/nomad/search_endpoint_test.go index cbd634b3e..071520ca5 100644 --- a/nomad/search_endpoint_test.go +++ b/nomad/search_endpoint_test.go @@ -8,6 +8,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -37,7 +38,7 @@ func mockAlloc() *structs.Allocation { } func TestSearch_PrefixSearch_Job(t *testing.T) { - t.Parallel() + ci.Parallel(t) prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970" @@ -70,7 +71,7 @@ func TestSearch_PrefixSearch_Job(t *testing.T) { } func TestSearch_PrefixSearch_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) jobID := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970" @@ -179,7 +180,7 @@ func TestSearch_PrefixSearch_ACL(t *testing.T) { } func TestSearch_PrefixSearch_All_JobWithHyphen(t *testing.T) { - t.Parallel() + ci.Parallel(t) prefix := "example-test-------" // Assert that a job with more than 4 hyphens works @@ -221,7 +222,7 @@ func TestSearch_PrefixSearch_All_JobWithHyphen(t *testing.T) { } func TestSearch_PrefixSearch_All_LongJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) prefix := strings.Repeat("a", 100) @@ -261,7 +262,7 @@ func TestSearch_PrefixSearch_All_LongJob(t *testing.T) { // truncate should limit results to 20 func 
TestSearch_PrefixSearch_Truncate(t *testing.T) { - t.Parallel() + ci.Parallel(t) prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970" @@ -294,7 +295,7 @@ func TestSearch_PrefixSearch_Truncate(t *testing.T) { } func TestSearch_PrefixSearch_AllWithJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970" @@ -330,7 +331,7 @@ func TestSearch_PrefixSearch_AllWithJob(t *testing.T) { } func TestSearch_PrefixSearch_Evals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -363,7 +364,7 @@ func TestSearch_PrefixSearch_Evals(t *testing.T) { } func TestSearch_PrefixSearch_Allocation(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -400,7 +401,7 @@ func TestSearch_PrefixSearch_Allocation(t *testing.T) { } func TestSearch_PrefixSearch_All_UUID(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -443,7 +444,7 @@ func TestSearch_PrefixSearch_All_UUID(t *testing.T) { } func TestSearch_PrefixSearch_Node(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -480,7 +481,7 @@ func TestSearch_PrefixSearch_Node(t *testing.T) { } func TestSearch_PrefixSearch_Deployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -512,7 +513,7 @@ func TestSearch_PrefixSearch_Deployment(t *testing.T) { } func TestSearch_PrefixSearch_AllContext(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -553,7 +554,7 @@ func TestSearch_PrefixSearch_AllContext(t *testing.T) { // Tests that the top 20 matches are returned when no prefix is set func TestSearch_PrefixSearch_NoPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970" @@ 
-585,7 +586,7 @@ func TestSearch_PrefixSearch_NoPrefix(t *testing.T) { // Tests that the zero matches are returned when a prefix has no matching // results func TestSearch_PrefixSearch_NoMatches(t *testing.T) { - t.Parallel() + ci.Parallel(t) prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970" @@ -614,7 +615,7 @@ func TestSearch_PrefixSearch_NoMatches(t *testing.T) { // Prefixes can only be looked up if their length is a power of two. For // prefixes which are an odd length, use the length-1 characters. func TestSearch_PrefixSearch_RoundDownToEven(t *testing.T) { - t.Parallel() + ci.Parallel(t) id1 := "aaafaaaa-e8f7-fd38-c855-ab94ceb89" id2 := "aaafeaaa-e8f7-fd38-c855-ab94ceb89" @@ -646,7 +647,7 @@ func TestSearch_PrefixSearch_RoundDownToEven(t *testing.T) { } func TestSearch_PrefixSearch_MultiRegion(t *testing.T) { - t.Parallel() + ci.Parallel(t) jobName := "exampleexample" @@ -687,7 +688,7 @@ func TestSearch_PrefixSearch_MultiRegion(t *testing.T) { } func TestSearch_PrefixSearch_CSIPlugin(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -718,7 +719,7 @@ func TestSearch_PrefixSearch_CSIPlugin(t *testing.T) { } func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -755,7 +756,7 @@ func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { } func TestSearch_PrefixSearch_Namespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanup := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -787,7 +788,7 @@ func TestSearch_PrefixSearch_Namespace(t *testing.T) { } func TestSearch_PrefixSearch_Namespace_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, root, cleanup := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -914,7 +915,7 @@ func TestSearch_PrefixSearch_Namespace_ACL(t *testing.T) { } func TestSearch_PrefixSearch_ScalingPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, 
cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -952,7 +953,7 @@ func TestSearch_PrefixSearch_ScalingPolicy(t *testing.T) { } func TestSearch_FuzzySearch_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, root, cleanupS := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1057,7 +1058,7 @@ func TestSearch_FuzzySearch_ACL(t *testing.T) { } func TestSearch_FuzzySearch_NotEnabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1085,7 +1086,7 @@ func TestSearch_FuzzySearch_NotEnabled(t *testing.T) { } func TestSearch_FuzzySearch_ShortText(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1113,7 +1114,7 @@ func TestSearch_FuzzySearch_ShortText(t *testing.T) { } func TestSearch_FuzzySearch_TruncateLimitQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1146,7 +1147,7 @@ func TestSearch_FuzzySearch_TruncateLimitQuery(t *testing.T) { } func TestSearch_FuzzySearch_TruncateLimitResults(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1181,7 +1182,7 @@ func TestSearch_FuzzySearch_TruncateLimitResults(t *testing.T) { } func TestSearch_FuzzySearch_Evals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1213,7 +1214,7 @@ func TestSearch_FuzzySearch_Evals(t *testing.T) { } func TestSearch_FuzzySearch_Allocation(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1248,7 +1249,7 @@ func TestSearch_FuzzySearch_Allocation(t *testing.T) { } func TestSearch_FuzzySearch_Node(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1280,7 +1281,7 @@ func TestSearch_FuzzySearch_Node(t 
*testing.T) { } func TestSearch_FuzzySearch_Deployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1310,7 +1311,7 @@ func TestSearch_FuzzySearch_Deployment(t *testing.T) { } func TestSearch_FuzzySearch_CSIPlugin(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1338,7 +1339,7 @@ func TestSearch_FuzzySearch_CSIPlugin(t *testing.T) { } func TestSearch_FuzzySearch_CSIVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1373,7 +1374,7 @@ func TestSearch_FuzzySearch_CSIVolume(t *testing.T) { } func TestSearch_FuzzySearch_Namespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanup := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1403,7 +1404,7 @@ func TestSearch_FuzzySearch_Namespace(t *testing.T) { } func TestSearch_FuzzySearch_Namespace_caseInsensitive(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanup := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1434,7 +1435,7 @@ func TestSearch_FuzzySearch_Namespace_caseInsensitive(t *testing.T) { } func TestSearch_FuzzySearch_ScalingPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1471,7 +1472,7 @@ func TestSearch_FuzzySearch_ScalingPolicy(t *testing.T) { } func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, root, cleanup := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1585,7 +1586,7 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { } func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, root, cleanupS := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1879,7 +1880,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { } func TestSearch_FuzzySearch_Job(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -2025,6 +2026,8 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { } func TestSearch_FuzzySearch_fuzzyIndex(t *testing.T) { + ci.Parallel(t) + for _, tc := range []struct { name, text string exp int diff --git a/nomad/serf_test.go b/nomad/serf_test.go index 7444368b1..938360196 100644 --- a/nomad/serf_test.go +++ b/nomad/serf_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/hashicorp/raft" "github.com/hashicorp/serf/serf" @@ -17,7 +18,7 @@ import ( ) func TestNomad_JoinPeer(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -59,7 +60,7 @@ func TestNomad_JoinPeer(t *testing.T) { } func TestNomad_RemovePeer(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -99,7 +100,7 @@ func TestNomad_RemovePeer(t *testing.T) { } func TestNomad_ReapPeer(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir := tmpDir(t) defer os.RemoveAll(dir) @@ -195,7 +196,7 @@ func TestNomad_ReapPeer(t *testing.T) { } func TestNomad_BootstrapExpect(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir := tmpDir(t) defer os.RemoveAll(dir) @@ -272,7 +273,7 @@ func TestNomad_BootstrapExpect(t *testing.T) { } func TestNomad_BootstrapExpect_NonVoter(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir := t.TempDir() @@ -343,7 +344,7 @@ func TestNomad_BootstrapExpect_NonVoter(t *testing.T) { } func TestNomad_BadExpect(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 @@ -386,7 +387,7 @@ func TestNomad_BadExpect(t *testing.T) { // TestNomad_NonBootstraping_ShouldntBootstap asserts that if BootstrapExpect is zero, // the server shouldn't bootstrap func TestNomad_NonBootstraping_ShouldntBootstap(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir := 
tmpDir(t) defer os.RemoveAll(dir) diff --git a/nomad/server_test.go b/nomad/server_test.go index 5872f3ceb..db1b1091e 100644 --- a/nomad/server_test.go +++ b/nomad/server_test.go @@ -11,6 +11,7 @@ import ( "time" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -31,7 +32,7 @@ func tmpDir(t *testing.T) string { } func TestServer_RPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -43,7 +44,7 @@ func TestServer_RPC(t *testing.T) { } func TestServer_RPC_TLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../helper/tlsutil/testdata/ca.pem" @@ -109,7 +110,7 @@ func TestServer_RPC_TLS(t *testing.T) { } func TestServer_RPC_MixedTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../helper/tlsutil/testdata/ca.pem" @@ -178,7 +179,7 @@ func TestServer_RPC_MixedTLS(t *testing.T) { } func TestServer_Regions(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make the servers s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -211,7 +212,7 @@ func TestServer_Regions(t *testing.T) { } func TestServer_Reload_Vault(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.Region = "global" @@ -243,7 +244,7 @@ func connectionReset(msg string) bool { // Tests that the server will successfully reload its network connections, // upgrading from plaintext to TLS if the server's TLS configuration changes. func TestServer_Reload_TLSConnections_PlaintextToTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -292,7 +293,7 @@ func TestServer_Reload_TLSConnections_PlaintextToTLS(t *testing.T) { // Tests that the server will successfully reload its network connections, // downgrading from TLS to plaintext if the server's TLS configuration changes. 
func TestServer_Reload_TLSConnections_TLSToPlaintext_RPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -339,7 +340,7 @@ func TestServer_Reload_TLSConnections_TLSToPlaintext_RPC(t *testing.T) { // Tests that the server will successfully reload its network connections, // downgrading only RPC connections func TestServer_Reload_TLSConnections_TLSToPlaintext_OnlyRPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -393,7 +394,7 @@ func TestServer_Reload_TLSConnections_TLSToPlaintext_OnlyRPC(t *testing.T) { // Tests that the server will successfully reload its network connections, // upgrading only RPC connections func TestServer_Reload_TLSConnections_PlaintextToTLS_OnlyRPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -449,7 +450,7 @@ func TestServer_Reload_TLSConnections_PlaintextToTLS_OnlyRPC(t *testing.T) { // Test that Raft connections are reloaded as expected when a Nomad server is // upgraded from plaintext to TLS func TestServer_Reload_TLSConnections_Raft(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -529,7 +530,7 @@ func TestServer_Reload_TLSConnections_Raft(t *testing.T) { } func TestServer_InvalidSchedulers(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Set the config to not have the core scheduler @@ -553,7 +554,7 @@ func TestServer_InvalidSchedulers(t *testing.T) { } func TestServer_RPCNameAndRegionValidation(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, tc := range []struct { name string region string @@ -580,7 +581,7 @@ func TestServer_RPCNameAndRegionValidation(t *testing.T) { } func TestServer_ReloadSchedulers_NumSchedulers(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 8 @@ -598,7 +599,7 @@ func TestServer_ReloadSchedulers_NumSchedulers(t *testing.T) { } func 
TestServer_ReloadSchedulers_EnabledSchedulers(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.EnabledSchedulers = []string{structs.JobTypeCore, structs.JobTypeSystem} @@ -618,7 +619,7 @@ func TestServer_ReloadSchedulers_EnabledSchedulers(t *testing.T) { } func TestServer_ReloadSchedulers_InvalidSchedulers(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Set the config to not have the core scheduler config := DefaultConfig() diff --git a/nomad/state/autopilot_test.go b/nomad/state/autopilot_test.go index f1805e0f3..9379f9ba8 100644 --- a/nomad/state/autopilot_test.go +++ b/nomad/state/autopilot_test.go @@ -5,10 +5,13 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" ) func TestStateStore_Autopilot(t *testing.T) { + ci.Parallel(t) + s := testStateStore(t) expected := &structs.AutopilotConfig{ @@ -39,6 +42,8 @@ func TestStateStore_Autopilot(t *testing.T) { } func TestStateStore_AutopilotCAS(t *testing.T) { + ci.Parallel(t) + s := testStateStore(t) expected := &structs.AutopilotConfig{ diff --git a/nomad/state/deployment_events_test.go b/nomad/state/deployment_events_test.go index 223812449..7bda62063 100644 --- a/nomad/state/deployment_events_test.go +++ b/nomad/state/deployment_events_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/stream" "github.com/hashicorp/nomad/nomad/structs" @@ -12,7 +13,7 @@ import ( ) func TestDeploymentEventFromChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() diff --git a/nomad/state/events_test.go b/nomad/state/events_test.go index 078ba43ce..61ffa65b4 100644 --- a/nomad/state/events_test.go +++ b/nomad/state/events_test.go @@ -5,6 +5,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" 
"github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -15,7 +16,7 @@ import ( // TestEventFromChange_SingleEventPerTable ensures that only a single event is // created per table per memdb.Change func TestEventFromChange_SingleEventPerTable(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -42,7 +43,7 @@ func TestEventFromChange_SingleEventPerTable(t *testing.T) { } func TestEventFromChange_ACLTokenSecretID(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -95,7 +96,7 @@ func TestEventFromChange_ACLTokenSecretID(t *testing.T) { } func TestEventsFromChanges_DeploymentUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -141,7 +142,7 @@ func TestEventsFromChanges_DeploymentUpdate(t *testing.T) { } func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -218,7 +219,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { } func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -314,7 +315,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { } func TestEventsFromChanges_UpsertNodeEventsType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -355,7 +356,7 @@ func TestEventsFromChanges_UpsertNodeEventsType(t *testing.T) { } func TestEventsFromChanges_NodeUpdateStatusRequest(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer 
s.StopEventBroker() @@ -386,7 +387,7 @@ func TestEventsFromChanges_NodeUpdateStatusRequest(t *testing.T) { } func TestEventsFromChanges_EvalUpdateRequestType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -420,7 +421,7 @@ func TestEventsFromChanges_EvalUpdateRequestType(t *testing.T) { } func TestEventsFromChanges_ApplyPlanResultsRequestType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -481,7 +482,7 @@ func TestEventsFromChanges_ApplyPlanResultsRequestType(t *testing.T) { } func TestEventsFromChanges_BatchNodeUpdateDrainRequestType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -536,7 +537,7 @@ func TestEventsFromChanges_BatchNodeUpdateDrainRequestType(t *testing.T) { } func TestEventsFromChanges_NodeUpdateEligibilityRequestType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -576,7 +577,7 @@ func TestEventsFromChanges_NodeUpdateEligibilityRequestType(t *testing.T) { } func TestEventsFromChanges_AllocUpdateDesiredTransitionRequestType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -647,7 +648,7 @@ func TestEventsFromChanges_JobDeregisterRequestType(t *testing.T) { } func TestEventsFromChanges_WithDeletion(t *testing.T) { - t.Parallel() + ci.Parallel(t) changes := Changes{ Index: uint64(1), @@ -673,7 +674,7 @@ func TestEventsFromChanges_WithDeletion(t *testing.T) { } func TestEventsFromChanges_WithNodeDeregistration(t *testing.T) { - t.Parallel() + ci.Parallel(t) before := &structs.Node{ ID: "some-id", @@ -712,6 +713,8 @@ func TestEventsFromChanges_WithNodeDeregistration(t *testing.T) { } func TestNodeEventsFromChanges(t *testing.T) { + ci.Parallel(t) + 
cases := []struct { Name string MsgType structs.MessageType @@ -904,7 +907,7 @@ func TestNodeEventsFromChanges(t *testing.T) { } func TestNodeDrainEventFromChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() diff --git a/nomad/state/paginator/filter_test.go b/nomad/state/paginator/filter_test.go index d94f49a57..20e94bb95 100644 --- a/nomad/state/paginator/filter_test.go +++ b/nomad/state/paginator/filter_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/hashicorp/go-bexpr" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -12,7 +13,7 @@ import ( ) func TestGenericFilter(t *testing.T) { - t.Parallel() + ci.Parallel(t) ids := []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"} filters := []Filter{GenericFilter{ @@ -45,7 +46,7 @@ func TestGenericFilter(t *testing.T) { } func TestNamespaceFilter(t *testing.T) { - t.Parallel() + ci.Parallel(t) mocks := []*mockObject{ {namespace: "default"}, diff --git a/nomad/state/paginator/paginator_test.go b/nomad/state/paginator/paginator_test.go index e3678da53..2d7daa2a2 100644 --- a/nomad/state/paginator/paginator_test.go +++ b/nomad/state/paginator/paginator_test.go @@ -4,13 +4,13 @@ import ( "errors" "testing" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" + "github.com/stretchr/testify/require" ) func TestPaginator(t *testing.T) { - t.Parallel() + ci.Parallel(t) ids := []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"} cases := []struct { diff --git a/nomad/state/paginator/tokenizer_test.go b/nomad/state/paginator/tokenizer_test.go index c74fe8a67..174f1f1d3 100644 --- a/nomad/state/paginator/tokenizer_test.go +++ b/nomad/state/paginator/tokenizer_test.go @@ -4,11 +4,14 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" 
"github.com/hashicorp/nomad/nomad/mock" "github.com/stretchr/testify/require" ) func TestStructsTokenizer(t *testing.T) { + ci.Parallel(t) + j := mock.Job() cases := []struct { diff --git a/nomad/state/schema_test.go b/nomad/state/schema_test.go index f5b1b620f..2131ffa5e 100644 --- a/nomad/state/schema_test.go +++ b/nomad/state/schema_test.go @@ -4,11 +4,14 @@ import ( "testing" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/stretchr/testify/require" ) func TestStateStoreSchema(t *testing.T) { + ci.Parallel(t) + schema := stateStoreSchema() _, err := memdb.NewMemDB(schema) if err != nil { @@ -17,6 +20,8 @@ func TestStateStoreSchema(t *testing.T) { } func TestState_singleRecord(t *testing.T) { + ci.Parallel(t) + require := require.New(t) const ( @@ -87,6 +92,8 @@ func TestState_singleRecord(t *testing.T) { } func TestState_ScalingPolicyTargetFieldIndex_FromObject(t *testing.T) { + ci.Parallel(t) + require := require.New(t) policy := mock.ScalingPolicy() diff --git a/nomad/state/state_store_restore_test.go b/nomad/state/state_store_restore_test.go index a69f2c620..7c4e18e85 100644 --- a/nomad/state/state_store_restore_test.go +++ b/nomad/state/state_store_restore_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,7 +15,7 @@ import ( ) func TestStateStore_RestoreNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() @@ -42,7 +43,7 @@ func TestStateStore_RestoreNode(t *testing.T) { } func TestStateStore_RestoreJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -70,7 +71,7 @@ func TestStateStore_RestoreJob(t *testing.T) { } func TestStateStore_RestorePeriodicLaunch(t *testing.T) { - t.Parallel() + ci.Parallel(t) state 
:= testStateStore(t) job := mock.Job() @@ -107,7 +108,7 @@ func TestStateStore_RestorePeriodicLaunch(t *testing.T) { } func TestStateStore_RestoreJobVersion(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -139,7 +140,7 @@ func TestStateStore_RestoreJobVersion(t *testing.T) { } func TestStateStore_RestoreDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) d := mock.Deployment() @@ -171,7 +172,7 @@ func TestStateStore_RestoreDeployment(t *testing.T) { } func TestStateStore_RestoreJobSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -207,7 +208,7 @@ func TestStateStore_RestoreJobSummary(t *testing.T) { } func TestStateStore_RestoreCSIPlugin(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -227,7 +228,7 @@ func TestStateStore_RestoreCSIPlugin(t *testing.T) { } func TestStateStore_RestoreCSIVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -248,7 +249,7 @@ func TestStateStore_RestoreCSIVolume(t *testing.T) { } func TestStateStore_RestoreIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -276,7 +277,7 @@ func TestStateStore_RestoreIndex(t *testing.T) { } func TestStateStore_RestoreEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) eval := mock.Eval() @@ -304,7 +305,7 @@ func TestStateStore_RestoreEval(t *testing.T) { } func TestStateStore_RestoreAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -337,7 +338,7 @@ func TestStateStore_RestoreAlloc(t *testing.T) { } func TestStateStore_RestoreVaultAccessor(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) a := mock.VaultAccessor() @@ -369,7 +370,7 @@ func TestStateStore_RestoreVaultAccessor(t *testing.T) { } func 
TestStateStore_RestoreSITokenAccessor(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) state := testStateStore(t) @@ -393,7 +394,7 @@ func TestStateStore_RestoreSITokenAccessor(t *testing.T) { } func TestStateStore_RestoreACLPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) policy := mock.ACLPolicy() @@ -418,7 +419,7 @@ func TestStateStore_RestoreACLPolicy(t *testing.T) { } func TestStateStore_RestoreACLToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) token := mock.ACLToken() @@ -465,7 +466,7 @@ func TestStateStore_ClusterMetadataRestore(t *testing.T) { } func TestStateStore_RestoreScalingPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -485,7 +486,7 @@ func TestStateStore_RestoreScalingPolicy(t *testing.T) { } func TestStateStore_RestoreScalingEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -515,7 +516,7 @@ func TestStateStore_RestoreScalingEvents(t *testing.T) { } func TestStateStore_RestoreSchedulerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) schedConfig := &structs.SchedulerConfiguration{ diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 441344a28..5f1c8e49c 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -10,14 +10,14 @@ import ( "time" "github.com/hashicorp/go-memdb" - "github.com/kr/pretty" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" + "github.com/kr/pretty" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func testStateStore(t *testing.T) *StateStore { @@ -25,7 +25,7 @@ func 
testStateStore(t *testing.T) *StateStore { } func TestStateStore_Blocking_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) expected := fmt.Errorf("test error") errFn := func(memdb.WatchSet, *StateStore) (interface{}, uint64, error) { @@ -39,7 +39,7 @@ func TestStateStore_Blocking_Error(t *testing.T) { } func TestStateStore_Blocking_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) noopFn := func(memdb.WatchSet, *StateStore) (interface{}, uint64, error) { return nil, 5, nil @@ -57,7 +57,7 @@ func TestStateStore_Blocking_Timeout(t *testing.T) { } func TestStateStore_Blocking_MinQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) node := mock.Node() count := 0 @@ -99,7 +99,7 @@ func TestStateStore_Blocking_MinQuery(t *testing.T) { // 1) The job is denormalized // 2) Allocations are created func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -154,7 +154,7 @@ func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing // 2) Allocations are denormalized and updated with the diff // That stopped allocs Job is unmodified func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -249,7 +249,7 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) { // This test checks that the deployment is created and allocations count towards // the deployment func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -356,7 +356,7 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { // 1) Preempted allocations in plan results are updated // 2) Evals are inserted for preempted jobs func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) 
state := testStateStore(t) @@ -437,7 +437,7 @@ func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { // This test checks that deployment updates are applied correctly func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) // Create a job that applies to all @@ -520,7 +520,7 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { } func TestStateStore_UpsertDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) deployment := mock.Deployment() @@ -565,7 +565,7 @@ func TestStateStore_UpsertDeployment(t *testing.T) { // Tests that deployments of older create index and same job id are not returned func TestStateStore_OldDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -603,7 +603,7 @@ func TestStateStore_OldDeployment(t *testing.T) { } func TestStateStore_DeleteDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) d1 := mock.Deployment() @@ -656,7 +656,7 @@ func TestStateStore_DeleteDeployment(t *testing.T) { } func TestStateStore_Deployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var deployments []*structs.Deployment @@ -687,7 +687,7 @@ func TestStateStore_Deployments(t *testing.T) { } func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) deploy := mock.Deployment() @@ -773,7 +773,7 @@ func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { } func TestStateStore_UpsertNode_Node(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -817,7 +817,7 @@ func TestStateStore_UpsertNode_Node(t *testing.T) { } func TestStateStore_DeleteNode_Node(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -862,7 +862,7 @@ func TestStateStore_DeleteNode_Node(t *testing.T) { } func 
TestStateStore_UpdateNodeStatus_Node(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -900,7 +900,7 @@ func TestStateStore_UpdateNodeStatus_Node(t *testing.T) { } func TestStateStore_BatchUpdateNodeDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -962,7 +962,7 @@ func TestStateStore_BatchUpdateNodeDrain(t *testing.T) { } func TestStateStore_UpdateNodeDrain_Node(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -1007,7 +1007,7 @@ func TestStateStore_UpdateNodeDrain_Node(t *testing.T) { } func TestStateStore_AddSingleNodeEvent(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -1051,7 +1051,7 @@ func TestStateStore_AddSingleNodeEvent(t *testing.T) { // To prevent stale node events from accumulating, we limit the number of // stored node events to 10. func TestStateStore_NodeEvents_RetentionWindow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -1100,7 +1100,7 @@ func TestStateStore_NodeEvents_RetentionWindow(t *testing.T) { } func TestStateStore_UpdateNodeDrain_ResetEligiblity(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -1154,7 +1154,7 @@ func TestStateStore_UpdateNodeDrain_ResetEligiblity(t *testing.T) { } func TestStateStore_UpdateNodeEligibility(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -1210,7 +1210,7 @@ func TestStateStore_UpdateNodeEligibility(t *testing.T) { } func TestStateStore_Nodes(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var nodes []*structs.Node @@ -1254,7 +1254,7 @@ func TestStateStore_Nodes(t *testing.T) { } func TestStateStore_NodesByIDPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := 
mock.Node() @@ -1342,7 +1342,7 @@ func TestStateStore_NodesByIDPrefix(t *testing.T) { } func TestStateStore_UpsertJob_Job(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -1422,7 +1422,7 @@ func TestStateStore_UpsertJob_Job(t *testing.T) { } func TestStateStore_UpdateUpsertJob_Job(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -1527,7 +1527,7 @@ func TestStateStore_UpdateUpsertJob_Job(t *testing.T) { } func TestStateStore_UpdateUpsertJob_PeriodicJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.PeriodicJob() @@ -1583,7 +1583,7 @@ func TestStateStore_UpdateUpsertJob_PeriodicJob(t *testing.T) { } func TestStateStore_UpsertJob_BadNamespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) state := testStateStore(t) @@ -1602,7 +1602,7 @@ func TestStateStore_UpsertJob_BadNamespace(t *testing.T) { // Upsert a job that is the child of a parent job and ensures its summary gets // updated. 
func TestStateStore_UpsertJob_ChildJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -1647,7 +1647,7 @@ func TestStateStore_UpsertJob_ChildJob(t *testing.T) { } func TestStateStore_UpdateUpsertJob_JobVersion(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -1738,7 +1738,7 @@ func TestStateStore_UpdateUpsertJob_JobVersion(t *testing.T) { } func TestStateStore_DeleteJob_Job(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -1819,7 +1819,7 @@ func TestStateStore_DeleteJob_Job(t *testing.T) { } func TestStateStore_DeleteJobTxn_BatchDeletes(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -1890,7 +1890,7 @@ func TestStateStore_DeleteJobTxn_BatchDeletes(t *testing.T) { } func TestStateStore_DeleteJob_MultipleVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) assert := assert.New(t) @@ -1947,7 +1947,7 @@ func TestStateStore_DeleteJob_MultipleVersions(t *testing.T) { } func TestStateStore_DeleteJob_ChildJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -2001,7 +2001,7 @@ func TestStateStore_DeleteJob_ChildJob(t *testing.T) { } func TestStateStore_Jobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var jobs []*structs.Job @@ -2043,7 +2043,7 @@ func TestStateStore_Jobs(t *testing.T) { } func TestStateStore_JobVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var jobs []*structs.Job @@ -2085,7 +2085,7 @@ func TestStateStore_JobVersions(t *testing.T) { } func TestStateStore_JobsByIDPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -2169,7 +2169,7 @@ func TestStateStore_JobsByIDPrefix(t *testing.T) { } func TestStateStore_JobsByPeriodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var periodic, nonPeriodic []*structs.Job @@ -2241,7 
+2241,7 @@ func TestStateStore_JobsByPeriodic(t *testing.T) { } func TestStateStore_JobsByScheduler(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var serviceJobs []*structs.Job @@ -2315,7 +2315,7 @@ func TestStateStore_JobsByScheduler(t *testing.T) { } func TestStateStore_JobsByGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) gc, nonGc := make(map[string]struct{}), make(map[string]struct{}) @@ -2389,7 +2389,7 @@ func TestStateStore_JobsByGC(t *testing.T) { } func TestStateStore_UpsertPeriodicLaunch(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -2444,7 +2444,7 @@ func TestStateStore_UpsertPeriodicLaunch(t *testing.T) { } func TestStateStore_UpdateUpsertPeriodicLaunch(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -2509,7 +2509,7 @@ func TestStateStore_UpdateUpsertPeriodicLaunch(t *testing.T) { } func TestStateStore_DeletePeriodicLaunch(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -2563,7 +2563,7 @@ func TestStateStore_DeletePeriodicLaunch(t *testing.T) { } func TestStateStore_PeriodicLaunches(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var launches []*structs.PeriodicLaunch @@ -2839,7 +2839,7 @@ func TestStateStore_CSIVolume(t *testing.T) { } func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { - t.Parallel() + ci.Parallel(t) store := testStateStore(t) plugID := "foo" @@ -3209,7 +3209,7 @@ func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { } func TestStateStore_Indexes(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() @@ -3254,7 +3254,7 @@ func TestStateStore_Indexes(t *testing.T) { } func TestStateStore_LatestIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -3278,7 +3278,7 @@ func TestStateStore_LatestIndex(t *testing.T) { } func 
TestStateStore_UpsertEvals_Eval(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) eval := mock.Eval() @@ -3322,7 +3322,7 @@ func TestStateStore_UpsertEvals_Eval(t *testing.T) { } func TestStateStore_UpsertEvals_CancelBlocked(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -3401,7 +3401,7 @@ func TestStateStore_UpsertEvals_CancelBlocked(t *testing.T) { } func TestStateStore_Update_UpsertEvals_Eval(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) eval := mock.Eval() @@ -3468,7 +3468,7 @@ func TestStateStore_Update_UpsertEvals_Eval(t *testing.T) { } func TestStateStore_UpsertEvals_Eval_ChildJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -3559,7 +3559,7 @@ func TestStateStore_UpsertEvals_Eval_ChildJob(t *testing.T) { } func TestStateStore_DeleteEval_Eval(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) eval1 := mock.Eval() @@ -3693,7 +3693,7 @@ func TestStateStore_DeleteEval_Eval(t *testing.T) { } func TestStateStore_DeleteEval_ChildJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -3764,7 +3764,7 @@ func TestStateStore_DeleteEval_ChildJob(t *testing.T) { } func TestStateStore_EvalsByJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -3802,7 +3802,7 @@ func TestStateStore_EvalsByJob(t *testing.T) { } func TestStateStore_Evals(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var evals []*structs.Evaluation @@ -3845,7 +3845,7 @@ func TestStateStore_Evals(t *testing.T) { } func TestStateStore_EvalsByIDPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var evals []*structs.Evaluation @@ -3938,7 +3938,7 @@ func TestStateStore_EvalsByIDPrefix(t *testing.T) { } func TestStateStore_UpdateAllocsFromClient(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) parent := mock.Job() @@ -4028,7 +4028,7 @@ 
func TestStateStore_UpdateAllocsFromClient(t *testing.T) { } func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc1 := mock.Alloc() @@ -4164,7 +4164,7 @@ func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { } func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -4235,7 +4235,7 @@ func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { } func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -4279,7 +4279,7 @@ func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { // This tests that the deployment state is merged correctly func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -4320,7 +4320,7 @@ func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { } func TestStateStore_UpsertAlloc_Alloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -4395,7 +4395,7 @@ func TestStateStore_UpsertAlloc_Alloc(t *testing.T) { } func TestStateStore_UpsertAlloc_Deployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -4449,7 +4449,7 @@ func TestStateStore_UpsertAlloc_Deployment(t *testing.T) { // Testing to ensure we keep issue // https://github.com/hashicorp/nomad/issues/2583 fixed func TestStateStore_UpsertAlloc_No_Job(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -4462,7 +4462,7 @@ func TestStateStore_UpsertAlloc_No_Job(t *testing.T) { } func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -4505,7 +4505,7 
@@ func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) { } func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -4606,7 +4606,7 @@ func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { // This test ensures that the state store will mark the clients status as lost // when set rather than preferring the existing status. func TestStateStore_UpdateAlloc_Lost(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -4643,7 +4643,7 @@ func TestStateStore_UpdateAlloc_Lost(t *testing.T) { // associated with it. This will happen when a job is stopped by an user which // has non-terminal allocations on clients func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -4687,7 +4687,7 @@ func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { } func TestStateStore_UpdateAllocDesiredTransition(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -4755,7 +4755,7 @@ func TestStateStore_UpdateAllocDesiredTransition(t *testing.T) { } func TestStateStore_JobSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -4879,7 +4879,7 @@ func TestStateStore_JobSummary(t *testing.T) { } func TestStateStore_ReconcileJobSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -4970,7 +4970,7 @@ func TestStateStore_ReconcileJobSummary(t *testing.T) { } func TestStateStore_ReconcileParentJobSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -5060,7 +5060,7 @@ func TestStateStore_ReconcileParentJobSummary(t *testing.T) { } func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -5111,7 +5111,7 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) 
{ } func TestStateStore_EvictAlloc_Alloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -5150,7 +5150,7 @@ func TestStateStore_EvictAlloc_Alloc(t *testing.T) { } func TestStateStore_AllocsByNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs []*structs.Allocation @@ -5189,7 +5189,7 @@ func TestStateStore_AllocsByNode(t *testing.T) { } func TestStateStore_AllocsByNodeTerminal(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs, term, nonterm []*structs.Allocation @@ -5248,7 +5248,7 @@ func TestStateStore_AllocsByNodeTerminal(t *testing.T) { } func TestStateStore_AllocsByJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs []*structs.Allocation @@ -5287,7 +5287,7 @@ func TestStateStore_AllocsByJob(t *testing.T) { } func TestStateStore_AllocsForRegisteredJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs []*structs.Allocation @@ -5352,7 +5352,7 @@ func TestStateStore_AllocsForRegisteredJob(t *testing.T) { } func TestStateStore_AllocsByIDPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs []*structs.Allocation @@ -5451,7 +5451,7 @@ func TestStateStore_AllocsByIDPrefix(t *testing.T) { } func TestStateStore_Allocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs []*structs.Allocation @@ -5497,7 +5497,7 @@ func TestStateStore_Allocs(t *testing.T) { } func TestStateStore_Allocs_PrevAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs []*structs.Allocation @@ -5551,7 +5551,7 @@ func TestStateStore_Allocs_PrevAlloc(t *testing.T) { } func TestStateStore_SetJobStatus_ForceStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) index := uint64(0) state := testStateStore(t) @@ -5587,7 +5587,7 @@ func TestStateStore_SetJobStatus_ForceStatus(t *testing.T) { } func 
TestStateStore_SetJobStatus_NoOp(t *testing.T) { - t.Parallel() + ci.Parallel(t) index := uint64(0) state := testStateStore(t) @@ -5618,7 +5618,7 @@ func TestStateStore_SetJobStatus_NoOp(t *testing.T) { } func TestStateStore_SetJobStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) txn := state.db.WriteTxn(uint64(0)) @@ -5653,7 +5653,7 @@ func TestStateStore_SetJobStatus(t *testing.T) { } func TestStateStore_GetJobStatus_NoEvalsOrAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) job := mock.Job() state := testStateStore(t) @@ -5669,7 +5669,7 @@ func TestStateStore_GetJobStatus_NoEvalsOrAllocs(t *testing.T) { } func TestStateStore_GetJobStatus_NoEvalsOrAllocs_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) job := mock.PeriodicJob() state := testStateStore(t) @@ -5685,7 +5685,7 @@ func TestStateStore_GetJobStatus_NoEvalsOrAllocs_Periodic(t *testing.T) { } func TestStateStore_GetJobStatus_NoEvalsOrAllocs_EvalDelete(t *testing.T) { - t.Parallel() + ci.Parallel(t) job := mock.Job() state := testStateStore(t) @@ -5701,7 +5701,7 @@ func TestStateStore_GetJobStatus_NoEvalsOrAllocs_EvalDelete(t *testing.T) { } func TestStateStore_GetJobStatus_DeadEvalsAndAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -5735,7 +5735,7 @@ func TestStateStore_GetJobStatus_DeadEvalsAndAllocs(t *testing.T) { } func TestStateStore_GetJobStatus_RunningAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -5761,7 +5761,7 @@ func TestStateStore_GetJobStatus_RunningAlloc(t *testing.T) { } func TestStateStore_GetJobStatus_PeriodicJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.PeriodicJob() @@ -5789,7 +5789,7 @@ func TestStateStore_GetJobStatus_PeriodicJob(t *testing.T) { } func TestStateStore_GetJobStatus_ParameterizedJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() 
@@ -5818,7 +5818,7 @@ func TestStateStore_GetJobStatus_ParameterizedJob(t *testing.T) { } func TestStateStore_SetJobStatus_PendingEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -5845,7 +5845,7 @@ func TestStateStore_SetJobStatus_PendingEval(t *testing.T) { // TestStateStore_SetJobStatus_SystemJob asserts that system jobs are still // considered running until explicitly stopped. func TestStateStore_SetJobStatus_SystemJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.SystemJob() @@ -5882,7 +5882,7 @@ func TestStateStore_SetJobStatus_SystemJob(t *testing.T) { } func TestStateJobSummary_UpdateJobCount(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -6013,7 +6013,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { } func TestJobSummary_UpdateClientStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -6089,7 +6089,7 @@ func TestJobSummary_UpdateClientStatus(t *testing.T) { // Test that nonexistent deployment can't be updated func TestStateStore_UpsertDeploymentStatusUpdate_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6108,7 +6108,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Nonexistent(t *testing.T) { // Test that terminal deployment can't be updated func TestStateStore_UpsertDeploymentStatusUpdate_Terminal(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6136,7 +6136,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Terminal(t *testing.T) { // Test that a non terminal deployment is updated and that a job and eval are // created. 
func TestStateStore_UpsertDeploymentStatusUpdate_NonTerminal(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6198,7 +6198,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_NonTerminal(t *testing.T) { // Test that when a deployment is updated to successful the job is updated to // stable func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6255,7 +6255,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) { } func TestStateStore_UpdateJobStability(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6291,7 +6291,7 @@ func TestStateStore_UpdateJobStability(t *testing.T) { // Test that nonexistent deployment can't be promoted func TestStateStore_UpsertDeploymentPromotion_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6310,7 +6310,7 @@ func TestStateStore_UpsertDeploymentPromotion_Nonexistent(t *testing.T) { // Test that terminal deployment can't be updated func TestStateStore_UpsertDeploymentPromotion_Terminal(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6337,7 +6337,7 @@ func TestStateStore_UpsertDeploymentPromotion_Terminal(t *testing.T) { // Test promoting unhealthy canaries in a deployment. func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) require := require.New(t) @@ -6386,7 +6386,7 @@ func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { // Test promoting a deployment with no canaries func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) require := require.New(t) @@ -6415,7 +6415,7 @@ func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { // Test promoting all canaries in a deployment. 
func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6514,7 +6514,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { // Test promoting a subset of canaries in a deployment. func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -6617,7 +6617,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { // Test that allocation health can't be set against a nonexistent deployment func TestStateStore_UpsertDeploymentAllocHealth_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6636,7 +6636,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_Nonexistent(t *testing.T) { // Test that allocation health can't be set against a terminal deployment func TestStateStore_UpsertDeploymentAllocHealth_Terminal(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6663,7 +6663,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_Terminal(t *testing.T) { // Test that allocation health can't be set against a nonexistent alloc func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6688,7 +6688,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_Nonexistent(t *testing. 
// Test that a deployments PlacedCanaries is properly updated func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6758,7 +6758,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { } func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6792,7 +6792,7 @@ func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { // Test that allocation health can't be set for an alloc with mismatched // deployment ids func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_MismatchDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6828,7 +6828,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_MismatchDeployment(t *t // Test that allocation health is properly set func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6936,7 +6936,7 @@ func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { } func TestStateStore_UpsertVaultAccessors(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) a := mock.VaultAccessor() @@ -7017,7 +7017,7 @@ func TestStateStore_UpsertVaultAccessors(t *testing.T) { } func TestStateStore_DeleteVaultAccessors(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) a1 := mock.VaultAccessor() @@ -7073,7 +7073,7 @@ func TestStateStore_DeleteVaultAccessors(t *testing.T) { } func TestStateStore_VaultAccessorsByAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -7121,7 +7121,7 @@ func TestStateStore_VaultAccessorsByAlloc(t *testing.T) { } func TestStateStore_VaultAccessorsByNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() @@ -7169,7 +7169,7 @@ func TestStateStore_VaultAccessorsByNode(t *testing.T) { } func 
TestStateStore_UpsertSITokenAccessors(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) state := testStateStore(t) @@ -7222,7 +7222,7 @@ func TestStateStore_UpsertSITokenAccessors(t *testing.T) { } func TestStateStore_DeleteSITokenAccessors(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) state := testStateStore(t) @@ -7263,7 +7263,7 @@ func TestStateStore_DeleteSITokenAccessors(t *testing.T) { } func TestStateStore_SITokenAccessorsByAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) state := testStateStore(t) @@ -7301,7 +7301,7 @@ func TestStateStore_SITokenAccessorsByAlloc(t *testing.T) { } func TestStateStore_SITokenAccessorsByNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) state := testStateStore(t) @@ -7340,7 +7340,7 @@ func TestStateStore_SITokenAccessorsByNode(t *testing.T) { } func TestStateStore_UpsertACLPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) policy := mock.ACLPolicy() @@ -7402,7 +7402,7 @@ func TestStateStore_UpsertACLPolicy(t *testing.T) { } func TestStateStore_DeleteACLPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) policy := mock.ACLPolicy() @@ -7469,7 +7469,7 @@ func TestStateStore_DeleteACLPolicy(t *testing.T) { } func TestStateStore_ACLPolicyByNamePrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) names := []string{ @@ -7518,7 +7518,7 @@ func TestStateStore_ACLPolicyByNamePrefix(t *testing.T) { } func TestStateStore_BootstrapACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) tk1 := mock.ACLToken() @@ -7602,7 +7602,7 @@ func TestStateStore_BootstrapACLTokens(t *testing.T) { } func TestStateStore_UpsertACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) tk1 := mock.ACLToken() @@ -7672,7 +7672,7 @@ func TestStateStore_UpsertACLTokens(t *testing.T) { } func TestStateStore_DeleteACLTokens(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) tk1 := mock.ACLToken() @@ -7739,7 +7739,7 @@ func TestStateStore_DeleteACLTokens(t *testing.T) { } func TestStateStore_ACLTokenByAccessorIDPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) prefixes := []string{ @@ -7806,7 +7806,7 @@ func TestStateStore_ACLTokenByAccessorIDPrefix(t *testing.T) { } func TestStateStore_ACLTokensByGlobal(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) tk1 := mock.ACLToken() @@ -7858,7 +7858,7 @@ func TestStateStore_ACLTokensByGlobal(t *testing.T) { } func TestStateStore_OneTimeTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) index := uint64(100) state := testStateStore(t) @@ -8009,7 +8009,7 @@ func TestStateStore_ClusterMetadata(t *testing.T) { } func TestStateStore_UpsertScalingPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -8092,7 +8092,7 @@ func TestStateStore_UpsertScalingPolicy(t *testing.T) { } func TestStateStore_UpsertScalingPolicy_Namespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) otherNamespace := "not-default-namespace" @@ -8142,7 +8142,7 @@ func TestStateStore_UpsertScalingPolicy_Namespace(t *testing.T) { } func TestStateStore_UpsertScalingPolicy_Namespace_PrefixBug(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ns1 := "name" @@ -8197,7 +8197,7 @@ func TestStateStore_UpsertScalingPolicy_Namespace_PrefixBug(t *testing.T) { // Subsequent updates of the job should preserve the ID for the scaling policy // associated with a given target. func TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8242,7 +8242,7 @@ func TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex(t *testing.T) { // Updating the scaling policy for a job should update the index table and fire the watch. 
// This test is the converse of TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex func TestStateStore_UpsertJob_UpdateScalingPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8286,7 +8286,7 @@ func TestStateStore_UpsertJob_UpdateScalingPolicy(t *testing.T) { } func TestStateStore_DeleteScalingPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8341,7 +8341,7 @@ func TestStateStore_DeleteScalingPolicies(t *testing.T) { } func TestStateStore_StopJob_DeleteScalingPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8388,7 +8388,7 @@ func TestStateStore_StopJob_DeleteScalingPolicies(t *testing.T) { } func TestStateStore_UnstopJob_UpsertScalingPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8431,7 +8431,7 @@ func TestStateStore_UnstopJob_UpsertScalingPolicies(t *testing.T) { } func TestStateStore_DeleteJob_DeleteScalingPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8462,7 +8462,7 @@ func TestStateStore_DeleteJob_DeleteScalingPolicies(t *testing.T) { } func TestStateStore_DeleteJob_DeleteScalingPoliciesPrefixBug(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8496,7 +8496,7 @@ func TestStateStore_DeleteJob_DeleteScalingPoliciesPrefixBug(t *testing.T) { // will not cause the scaling_policy table index to increase, on either job // registration or deletion. 
func TestStateStore_DeleteJob_ScalingPolicyIndexNoop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8524,7 +8524,7 @@ func TestStateStore_DeleteJob_ScalingPolicyIndexNoop(t *testing.T) { } func TestStateStore_ScalingPoliciesByType(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8581,7 +8581,7 @@ func TestStateStore_ScalingPoliciesByType(t *testing.T) { } func TestStateStore_ScalingPoliciesByTypePrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8654,7 +8654,7 @@ func TestStateStore_ScalingPoliciesByTypePrefix(t *testing.T) { } func TestStateStore_ScalingPoliciesByJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8718,7 +8718,7 @@ func TestStateStore_ScalingPoliciesByJob(t *testing.T) { } func TestStateStore_ScalingPoliciesByJob_PrefixBug(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8757,7 +8757,7 @@ func TestStateStore_ScalingPoliciesByJob_PrefixBug(t *testing.T) { } func TestStateStore_ScalingPolicyByTargetAndType(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8799,7 +8799,7 @@ func TestStateStore_ScalingPolicyByTargetAndType(t *testing.T) { } func TestStateStore_UpsertScalingEvent(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -8868,7 +8868,7 @@ func TestStateStore_UpsertScalingEvent(t *testing.T) { } func TestStateStore_UpsertScalingEvent_LimitAndOrder(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -8935,7 +8935,7 @@ func TestStateStore_UpsertScalingEvent_LimitAndOrder(t *testing.T) { } func TestStateStore_Abandon(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := testStateStore(t) abandonCh := s.AbandonCh() @@ -8949,7 +8949,7 @@ func TestStateStore_Abandon(t *testing.T) { // Verifies that an error is returned when an allocation doesn't exist in the state 
store. func TestStateSnapshot_DenormalizeAllocationDiffSlice_AllocDoesNotExist(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -8977,7 +8977,7 @@ func TestStateSnapshot_DenormalizeAllocationDiffSlice_AllocDoesNotExist(t *testi // TestStateStore_SnapshotMinIndex_OK asserts StateStore.SnapshotMinIndex blocks // until the StateStore's latest index is >= the requested index. func TestStateStore_SnapshotMinIndex_OK(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := testStateStore(t) index, err := s.LatestIndex() @@ -9054,7 +9054,7 @@ func TestStateStore_SnapshotMinIndex_OK(t *testing.T) { // TestStateStore_SnapshotMinIndex_Timeout asserts StateStore.SnapshotMinIndex // returns an error if the desired index is not reached within the deadline. func TestStateStore_SnapshotMinIndex_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := testStateStore(t) index, err := s.LatestIndex() diff --git a/nomad/stats_fetcher_test.go b/nomad/stats_fetcher_test.go index 3c508c73a..36362d604 100644 --- a/nomad/stats_fetcher_test.go +++ b/nomad/stats_fetcher_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" ) func TestStatsFetcher(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := func(c *Config) { c.Region = "region-a" diff --git a/nomad/status_endpoint_test.go b/nomad/status_endpoint_test.go index acae7235a..8f5e95287 100644 --- a/nomad/status_endpoint_test.go +++ b/nomad/status_endpoint_test.go @@ -5,6 +5,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,7 +15,7 @@ import ( ) func TestStatusPing(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -28,7 +29,7 @@ func TestStatusPing(t 
*testing.T) { } func TestStatusLeader(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -51,7 +52,7 @@ func TestStatusLeader(t *testing.T) { } func TestStatusPeers(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -73,7 +74,7 @@ func TestStatusPeers(t *testing.T) { } func TestStatusMembers(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -93,7 +94,7 @@ func TestStatusMembers(t *testing.T) { } func TestStatusMembers_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -147,7 +148,7 @@ func TestStatusMembers_ACL(t *testing.T) { } func TestStatus_HasClientConn(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() diff --git a/nomad/stream/event_broker_test.go b/nomad/stream/event_broker_test.go index a136031cb..ea7457aa1 100644 --- a/nomad/stream/event_broker_test.go +++ b/nomad/stream/event_broker_test.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -15,6 +16,8 @@ import ( ) func TestEventBroker_PublishChangesAndSubscribe(t *testing.T) { + ci.Parallel(t) + subscription := &SubscribeRequest{ Topics: map[structs.Topic][]string{ "Test": {"sub-key"}, @@ -66,6 +69,8 @@ func TestEventBroker_PublishChangesAndSubscribe(t *testing.T) { } func TestEventBroker_ShutdownClosesSubscriptions(t *testing.T) { + ci.Parallel(t) + ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -95,6 +100,8 @@ func TestEventBroker_ShutdownClosesSubscriptions(t *testing.T) { // the subscriptions should still be handled indeppendtly of each other when // unssubscribing. 
func TestEventBroker_EmptyReqToken_DistinctSubscriptions(t *testing.T) { + ci.Parallel(t) + ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -117,6 +124,8 @@ func TestEventBroker_EmptyReqToken_DistinctSubscriptions(t *testing.T) { } func TestEventBroker_handleACLUpdates_TokenDeleted(t *testing.T) { + ci.Parallel(t) + ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -176,6 +185,8 @@ func (p *fakeACLTokenProvider) ACLPolicyByName(ws memdb.WatchSet, policyName str } func TestEventBroker_handleACLUpdates_policyupdated(t *testing.T) { + ci.Parallel(t) + ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) diff --git a/nomad/stream/event_buffer_test.go b/nomad/stream/event_buffer_test.go index 3b52927fe..f9ec7bd22 100644 --- a/nomad/stream/event_buffer_test.go +++ b/nomad/stream/event_buffer_test.go @@ -7,13 +7,15 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestEventBufferFuzz(t *testing.T) { + ci.Parallel(t) + nReaders := 1000 nMessages := 1000 @@ -85,6 +87,8 @@ func TestEventBufferFuzz(t *testing.T) { } func TestEventBuffer_Slow_Reader(t *testing.T) { + ci.Parallel(t) + b := newEventBuffer(10) for i := 1; i < 11; i++ { @@ -116,6 +120,8 @@ func TestEventBuffer_Slow_Reader(t *testing.T) { } func TestEventBuffer_Size(t *testing.T) { + ci.Parallel(t) + b := newEventBuffer(100) for i := 0; i < 10; i++ { @@ -129,6 +135,8 @@ func TestEventBuffer_Size(t *testing.T) { } func TestEventBuffer_MaxSize(t *testing.T) { + ci.Parallel(t) + b := newEventBuffer(10) var events []structs.Event @@ -144,6 +152,8 @@ func TestEventBuffer_MaxSize(t *testing.T) { // are removed, the event buffer should advance its head down to the last message // and insert a placeholder sentinel value. 
func TestEventBuffer_Emptying_Buffer(t *testing.T) { + ci.Parallel(t) + b := newEventBuffer(10) for i := 0; i < 10; i++ { @@ -184,6 +194,8 @@ func TestEventBuffer_Emptying_Buffer(t *testing.T) { } func TestEventBuffer_StartAt_CurrentIdx_Past_Start(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string req uint64 diff --git a/nomad/stream/ndjson_test.go b/nomad/stream/ndjson_test.go index 95bc2b23a..5da4f08cd 100644 --- a/nomad/stream/ndjson_test.go +++ b/nomad/stream/ndjson_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -13,7 +14,7 @@ type testObj struct { } func TestJsonStream(t *testing.T) { - t.Parallel() + ci.Parallel(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -42,7 +43,7 @@ func TestJsonStream(t *testing.T) { } func TestJson_Send_After_Stop(t *testing.T) { - t.Parallel() + ci.Parallel(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -57,7 +58,7 @@ func TestJson_Send_After_Stop(t *testing.T) { } func TestJson_HeartBeat(t *testing.T) { - t.Parallel() + ci.Parallel(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/nomad/stream/subscription_test.go b/nomad/stream/subscription_test.go index d1d40f350..d7bb9be36 100644 --- a/nomad/stream/subscription_test.go +++ b/nomad/stream/subscription_test.go @@ -3,12 +3,14 @@ package stream import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" - "github.com/stretchr/testify/require" ) func TestFilter_AllTopics(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One"}, structs.Event{Topic: "Test", Key: "Two"}) @@ -22,6 +24,8 @@ func TestFilter_AllTopics(t *testing.T) { } func TestFilter_AllKeys(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", 
Key: "One"}, structs.Event{Topic: "Test", Key: "Two"}) @@ -35,6 +39,8 @@ func TestFilter_AllKeys(t *testing.T) { } func TestFilter_PartialMatch_Topic(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One"}, structs.Event{Topic: "Test", Key: "Two"}, structs.Event{Topic: "Exclude", Key: "Two"}) @@ -51,6 +57,8 @@ func TestFilter_PartialMatch_Topic(t *testing.T) { } func TestFilter_Match_TopicAll_SpecificKey(t *testing.T) { + ci.Parallel(t) + events := []structs.Event{ {Topic: "Match", Key: "Two"}, {Topic: "NoMatch", Key: "One"}, @@ -72,6 +80,8 @@ func TestFilter_Match_TopicAll_SpecificKey(t *testing.T) { } func TestFilter_Match_TopicAll_SpecificKey_Plus(t *testing.T) { + ci.Parallel(t) + events := []structs.Event{ {Topic: "FirstTwo", Key: "Two"}, {Topic: "Test", Key: "One"}, @@ -95,6 +105,8 @@ func TestFilter_Match_TopicAll_SpecificKey_Plus(t *testing.T) { } func TestFilter_PartialMatch_Key(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One"}, structs.Event{Topic: "Test", Key: "Two"}) @@ -111,6 +123,8 @@ func TestFilter_PartialMatch_Key(t *testing.T) { } func TestFilter_NoMatch(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One"}, structs.Event{Topic: "Test", Key: "Two"}) @@ -128,6 +142,8 @@ func TestFilter_NoMatch(t *testing.T) { } func TestFilter_Namespace(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One", Namespace: "foo"}, structs.Event{Topic: "Test", Key: "Two"}, structs.Event{Topic: "Test", Key: "Two", Namespace: "bar"}) @@ -148,6 +164,8 @@ func TestFilter_Namespace(t *testing.T) { } func TestFilter_NamespaceAll(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: 
"Test", Key: "One", Namespace: "foo"}, @@ -171,6 +189,8 @@ func TestFilter_NamespaceAll(t *testing.T) { } func TestFilter_FilterKeys(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One", FilterKeys: []string{"extra-key"}}, structs.Event{Topic: "Test", Key: "Two"}, structs.Event{Topic: "Test", Key: "Two"}) diff --git a/nomad/structs/batch_future_test.go b/nomad/structs/batch_future_test.go index 52ff12563..84693fe59 100644 --- a/nomad/structs/batch_future_test.go +++ b/nomad/structs/batch_future_test.go @@ -4,10 +4,12 @@ import ( "fmt" "testing" "time" + + "github.com/hashicorp/nomad/ci" ) func TestBatchFuture(t *testing.T) { - t.Parallel() + ci.Parallel(t) bf := NewBatchFuture() // Async respond to the future diff --git a/nomad/structs/bitmap_test.go b/nomad/structs/bitmap_test.go index 42b2c635e..e28d831d6 100644 --- a/nomad/structs/bitmap_test.go +++ b/nomad/structs/bitmap_test.go @@ -3,9 +3,13 @@ package structs import ( "reflect" "testing" + + "github.com/hashicorp/nomad/ci" ) func TestBitmap(t *testing.T) { + ci.Parallel(t) + // Check invalid sizes _, err := NewBitmap(0) if err == nil { diff --git a/nomad/structs/config/audit_test.go b/nomad/structs/config/audit_test.go index 4efc3bbfa..7cd9d930a 100644 --- a/nomad/structs/config/audit_test.go +++ b/nomad/structs/config/audit_test.go @@ -4,11 +4,14 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/stretchr/testify/require" ) func TestAuditConfig_Merge(t *testing.T) { + ci.Parallel(t) + c1 := &AuditConfig{ Enabled: helper.BoolToPtr(true), Sinks: []*AuditSink{ diff --git a/nomad/structs/config/autopilot_test.go b/nomad/structs/config/autopilot_test.go index e379ff8de..053bfe51f 100644 --- a/nomad/structs/config/autopilot_test.go +++ b/nomad/structs/config/autopilot_test.go @@ -4,9 +4,13 @@ import ( "reflect" "testing" "time" + + "github.com/hashicorp/nomad/ci" ) func 
TestAutopilotConfig_Merge(t *testing.T) { + ci.Parallel(t) + trueValue, falseValue := true, false c1 := &AutopilotConfig{ diff --git a/nomad/structs/config/consul_test.go b/nomad/structs/config/consul_test.go index d2243e418..e66bae2cf 100644 --- a/nomad/structs/config/consul_test.go +++ b/nomad/structs/config/consul_test.go @@ -11,6 +11,7 @@ import ( consulapi "github.com/hashicorp/consul/api" sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -31,6 +32,8 @@ func TestMain(m *testing.M) { } func TestConsulConfig_Merge(t *testing.T) { + ci.Parallel(t) + yes, no := true, false c1 := &ConsulConfig{ @@ -121,7 +124,7 @@ func TestConsulConfig_Merge(t *testing.T) { // TestConsulConfig_Defaults asserts Consul defaults are copied from their // upstream API package defaults. func TestConsulConfig_Defaults(t *testing.T) { - t.Parallel() + ci.Parallel(t) nomadDef := DefaultConsulConfig() consulDef := consulapi.DefaultConfig() @@ -136,7 +139,7 @@ func TestConsulConfig_Defaults(t *testing.T) { // TestConsulConfig_Exec asserts Consul defaults use env vars when they are // set by forking a subprocess. 
func TestConsulConfig_Exec(t *testing.T) { - t.Parallel() + ci.Parallel(t) self, err := os.Executable() if err != nil { @@ -171,7 +174,7 @@ func TestConsulConfig_Exec(t *testing.T) { } func TestConsulConfig_IpTemplateParse(t *testing.T) { - t.Parallel() + ci.Parallel(t) privateIps, err := sockaddr.GetPrivateIP() require.NoError(t, err) @@ -182,16 +185,16 @@ func TestConsulConfig_IpTemplateParse(t *testing.T) { tmpl string expectedOut string expectErr bool - } { - { name: "string address keeps working", tmpl: "10.0.1.0:8500", expectedOut: "10.0.1.0:8500", expectErr: false }, - { name: "single ip sock-addr template", tmpl: "{{ GetPrivateIP }}:8500", expectedOut: privateIp+":8500", expectErr: false }, - { name: "multi ip sock-addr template", tmpl: "{{ GetPrivateIPs }}:8500", expectedOut: "", expectErr: true }, + }{ + {name: "string address keeps working", tmpl: "10.0.1.0:8500", expectedOut: "10.0.1.0:8500", expectErr: false}, + {name: "single ip sock-addr template", tmpl: "{{ GetPrivateIP }}:8500", expectedOut: privateIp + ":8500", expectErr: false}, + {name: "multi ip sock-addr template", tmpl: "{{ GetPrivateIPs }}:8500", expectedOut: "", expectErr: true}, } for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := ConsulConfig{ Addr: tc.tmpl, } diff --git a/nomad/structs/config/limits_test.go b/nomad/structs/config/limits_test.go index e4bd9d598..7a4082f3d 100644 --- a/nomad/structs/config/limits_test.go +++ b/nomad/structs/config/limits_test.go @@ -4,13 +4,14 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/stretchr/testify/require" ) // TestLimits_Defaults asserts the default limits are valid. 
func TestLimits_Defaults(t *testing.T) { - t.Parallel() + ci.Parallel(t) l := DefaultLimits() d, err := time.ParseDuration(l.HTTPSHandshakeTimeout) @@ -24,7 +25,7 @@ func TestLimits_Defaults(t *testing.T) { // TestLimits_Copy asserts Limits structs are deep copied. func TestLimits_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) o := DefaultLimits() c := o.Copy() @@ -52,7 +53,7 @@ func TestLimits_Copy(t *testing.T) { // TestLimits_Merge asserts non-zero fields from the method argument take // precedence over the existing limits. func TestLimits_Merge(t *testing.T) { - t.Parallel() + ci.Parallel(t) l := Limits{} o := DefaultLimits() diff --git a/nomad/structs/config/plugins_test.go b/nomad/structs/config/plugins_test.go index e0e98d108..e380ba622 100644 --- a/nomad/structs/config/plugins_test.go +++ b/nomad/structs/config/plugins_test.go @@ -4,11 +4,12 @@ import ( "sort" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestPluginConfig_Merge(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) a := &PluginConfig{ Name: "foo", @@ -56,7 +57,7 @@ func TestPluginConfig_Merge(t *testing.T) { } func TestPluginConfigSet_Merge(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) a := &PluginConfig{ diff --git a/nomad/structs/config/tls_test.go b/nomad/structs/config/tls_test.go index b57b4fa25..59bcfed18 100644 --- a/nomad/structs/config/tls_test.go +++ b/nomad/structs/config/tls_test.go @@ -3,11 +3,14 @@ package config import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestTLSConfig_Merge(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) a := &TLSConfig{ CAFile: "test-ca-file", @@ -31,6 +34,8 @@ func TestTLSConfig_Merge(t *testing.T) { } func TestTLS_CertificateInfoIsEqual_TrueWhenEmpty(t *testing.T) { + ci.Parallel(t) + require := require.New(t) a := &TLSConfig{} b := &TLSConfig{} @@ -40,6 +45,8 
@@ func TestTLS_CertificateInfoIsEqual_TrueWhenEmpty(t *testing.T) { } func TestTLS_CertificateInfoIsEqual_FalseWhenUnequal(t *testing.T) { + ci.Parallel(t) + require := require.New(t) const ( cafile = "../../../helper/tlsutil/testdata/ca.pem" @@ -143,6 +150,8 @@ func TestTLS_CertificateInfoIsEqual_FalseWhenUnequal(t *testing.T) { // Certificate info should be equal when the CA file, certificate file, and key // file all are equal func TestTLS_CertificateInfoIsEqual_TrueWhenEqual(t *testing.T) { + ci.Parallel(t) + require := require.New(t) const ( cafile = "../../../helper/tlsutil/testdata/ca.pem" @@ -167,6 +176,8 @@ func TestTLS_CertificateInfoIsEqual_TrueWhenEqual(t *testing.T) { } func TestTLS_Copy(t *testing.T) { + ci.Parallel(t) + require := require.New(t) const ( cafile = "../../../helper/tlsutil/testdata/ca.pem" @@ -192,6 +203,8 @@ func TestTLS_Copy(t *testing.T) { // GetKeyLoader should always return an initialized KeyLoader for a TLSConfig // object func TestTLS_GetKeyloader(t *testing.T) { + ci.Parallel(t) + require := require.New(t) a := &TLSConfig{} require.NotNil(a.GetKeyLoader()) diff --git a/nomad/structs/config/ui_test.go b/nomad/structs/config/ui_test.go index af040b519..d310403b5 100644 --- a/nomad/structs/config/ui_test.go +++ b/nomad/structs/config/ui_test.go @@ -3,10 +3,12 @@ package config import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestUIConfig_Merge(t *testing.T) { + ci.Parallel(t) fullConfig := &UIConfig{ Enabled: true, @@ -69,7 +71,7 @@ func TestUIConfig_Merge(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) result := tc.left.Merge(tc.right) require.Equal(t, tc.expect, result) }) diff --git a/nomad/structs/config/vault_test.go b/nomad/structs/config/vault_test.go index e48b6ef02..c4eda801c 100644 --- a/nomad/structs/config/vault_test.go +++ b/nomad/structs/config/vault_test.go @@ -4,10 +4,13 @@ import ( "reflect" 
"testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestVaultConfig_Merge(t *testing.T) { + ci.Parallel(t) + trueValue, falseValue := true, false c1 := &VaultConfig{ Enabled: &falseValue, @@ -61,6 +64,8 @@ func TestVaultConfig_Merge(t *testing.T) { } func TestVaultConfig_IsEqual(t *testing.T) { + ci.Parallel(t) + require := require.New(t) trueValue, falseValue := true, false diff --git a/nomad/structs/connect_test.go b/nomad/structs/connect_test.go index 385716fe8..11f93e894 100644 --- a/nomad/structs/connect_test.go +++ b/nomad/structs/connect_test.go @@ -3,10 +3,13 @@ package structs import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestTaskKind_IsAnyConnectGateway(t *testing.T) { + ci.Parallel(t) + t.Run("gateways", func(t *testing.T) { require.True(t, NewTaskKind(ConnectIngressPrefix, "foo").IsAnyConnectGateway()) require.True(t, NewTaskKind(ConnectTerminatingPrefix, "foo").IsAnyConnectGateway()) diff --git a/nomad/structs/consul_oss_test.go b/nomad/structs/consul_oss_test.go index de3359c34..a1cabf60b 100644 --- a/nomad/structs/consul_oss_test.go +++ b/nomad/structs/consul_oss_test.go @@ -6,11 +6,12 @@ package structs import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestJob_ConfigEntries(t *testing.T) { - t.Parallel() + ci.Parallel(t) ingress := &ConsulConnect{ Gateway: &ConsulGateway{ diff --git a/nomad/structs/consul_test.go b/nomad/structs/consul_test.go index 43801c933..66e15584c 100644 --- a/nomad/structs/consul_test.go +++ b/nomad/structs/consul_test.go @@ -3,10 +3,13 @@ package structs import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestConsul_Copy(t *testing.T) { + ci.Parallel(t) + t.Run("nil", func(t *testing.T) { result := (*Consul)(nil).Copy() require.Nil(t, result) @@ -21,6 +24,8 @@ func TestConsul_Copy(t *testing.T) { } func TestConsul_Equals(t *testing.T) { + 
ci.Parallel(t) + t.Run("nil and nil", func(t *testing.T) { result := (*Consul)(nil).Equals((*Consul)(nil)) require.True(t, result) @@ -43,6 +48,8 @@ func TestConsul_Equals(t *testing.T) { } func TestConsul_Validate(t *testing.T) { + ci.Parallel(t) + t.Run("empty ns", func(t *testing.T) { result := (&Consul{Namespace: ""}).Validate() require.Nil(t, result) diff --git a/nomad/structs/csi_test.go b/nomad/structs/csi_test.go index 8f84d6226..9ef5a7f8b 100644 --- a/nomad/structs/csi_test.go +++ b/nomad/structs/csi_test.go @@ -5,11 +5,14 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) // TestCSIVolumeClaim ensures that a volume claim workflows work as expected. func TestCSIVolumeClaim(t *testing.T) { + ci.Parallel(t) + vol := NewCSIVolume("vol0", 0) vol.Schedulable = true vol.AccessMode = CSIVolumeAccessModeUnknown @@ -187,6 +190,8 @@ func TestCSIVolumeClaim(t *testing.T) { // // COMPAT(1.3.0): safe to remove this test, but not the code, for 1.3.0 func TestCSIVolumeClaim_CompatOldClaims(t *testing.T) { + ci.Parallel(t) + vol := NewCSIVolume("vol0", 0) vol.Schedulable = true vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter @@ -283,6 +288,8 @@ func TestCSIVolumeClaim_CompatOldClaims(t *testing.T) { // // COMPAT(1.3.0): safe to remove this test, but not the code, for 1.3.0 func TestCSIVolumeClaim_CompatNewClaimsOK(t *testing.T) { + ci.Parallel(t) + vol := NewCSIVolume("vol0", 0) vol.Schedulable = true vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter @@ -388,6 +395,8 @@ func TestCSIVolumeClaim_CompatNewClaimsOK(t *testing.T) { // // COMPAT(1.3.0): safe to remove this test, but not the code, for 1.3.0 func TestCSIVolumeClaim_CompatNewClaimsNoUpgrade(t *testing.T) { + ci.Parallel(t) + vol := NewCSIVolume("vol0", 0) vol.Schedulable = true vol.AccessMode = CSIVolumeAccessModeMultiNodeReader @@ -471,6 +480,7 @@ func TestCSIVolumeClaim_CompatNewClaimsNoUpgrade(t *testing.T) { } func TestVolume_Copy(t 
*testing.T) { + ci.Parallel(t) a1 := MockAlloc() a2 := MockAlloc() @@ -555,6 +565,8 @@ func TestVolume_Copy(t *testing.T) { } func TestCSIVolume_Validate(t *testing.T) { + ci.Parallel(t) + vol := &CSIVolume{ ID: "test", PluginID: "test", @@ -570,6 +582,7 @@ func TestCSIVolume_Validate(t *testing.T) { } func TestCSIVolume_Merge(t *testing.T) { + ci.Parallel(t) testCases := []struct { name string @@ -739,6 +752,8 @@ func TestCSIVolume_Merge(t *testing.T) { } func TestCSIPluginJobs(t *testing.T) { + ci.Parallel(t) + plug := NewCSIPlugin("foo", 1000) controller := &Job{ ID: "job", @@ -789,6 +804,8 @@ func TestCSIPluginJobs(t *testing.T) { } func TestCSIPluginCleanup(t *testing.T) { + ci.Parallel(t) + plug := NewCSIPlugin("foo", 1000) plug.AddPlugin("n0", &CSIInfo{ PluginID: "foo", @@ -824,6 +841,8 @@ func TestCSIPluginCleanup(t *testing.T) { } func TestDeleteNodeForType_Controller(t *testing.T) { + ci.Parallel(t) + info := &CSIInfo{ PluginID: "foo", AllocID: "a0", @@ -847,6 +866,8 @@ func TestDeleteNodeForType_Controller(t *testing.T) { } func TestDeleteNodeForType_NilController(t *testing.T) { + ci.Parallel(t) + plug := NewCSIPlugin("foo", 1000) plug.Controllers["n0"] = nil @@ -861,6 +882,8 @@ func TestDeleteNodeForType_NilController(t *testing.T) { } func TestDeleteNodeForType_Node(t *testing.T) { + ci.Parallel(t) + info := &CSIInfo{ PluginID: "foo", AllocID: "a0", @@ -884,6 +907,8 @@ func TestDeleteNodeForType_Node(t *testing.T) { } func TestDeleteNodeForType_NilNode(t *testing.T) { + ci.Parallel(t) + plug := NewCSIPlugin("foo", 1000) plug.Nodes["n0"] = nil @@ -898,6 +923,8 @@ func TestDeleteNodeForType_NilNode(t *testing.T) { } func TestDeleteNodeForType_Monolith(t *testing.T) { + ci.Parallel(t) + controllerInfo := &CSIInfo{ PluginID: "foo", AllocID: "a0", @@ -940,6 +967,8 @@ func TestDeleteNodeForType_Monolith(t *testing.T) { } func TestDeleteNodeForType_Monolith_NilController(t *testing.T) { + ci.Parallel(t) + plug := NewCSIPlugin("foo", 1000) 
plug.Controllers["n0"] = nil @@ -972,6 +1001,8 @@ func TestDeleteNodeForType_Monolith_NilController(t *testing.T) { } func TestDeleteNodeForType_Monolith_NilNode(t *testing.T) { + ci.Parallel(t) + plug := NewCSIPlugin("foo", 1000) plug.Nodes["n0"] = nil diff --git a/nomad/structs/devices_test.go b/nomad/structs/devices_test.go index 013b6fcec..8d43a45ad 100644 --- a/nomad/structs/devices_test.go +++ b/nomad/structs/devices_test.go @@ -3,6 +3,7 @@ package structs import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" psstructs "github.com/hashicorp/nomad/plugins/shared/structs" "github.com/stretchr/testify/require" @@ -54,6 +55,8 @@ func devNode() *Node { // Make sure that the device accounter works even if the node has no devices func TestDeviceAccounter_AddAllocs_NoDeviceNode(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := MockNode() d := NewDeviceAccounter(n) @@ -71,6 +74,8 @@ func TestDeviceAccounter_AddAllocs_NoDeviceNode(t *testing.T) { // Add allocs to a node with a device func TestDeviceAccounter_AddAllocs(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := devNode() d := NewDeviceAccounter(n) @@ -109,6 +114,8 @@ func TestDeviceAccounter_AddAllocs(t *testing.T) { // operate on previous allocs even if the device has changed to unhealthy and we // don't track it func TestDeviceAccounter_AddAllocs_UnknownID(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := devNode() d := NewDeviceAccounter(n) @@ -137,6 +144,8 @@ func TestDeviceAccounter_AddAllocs_UnknownID(t *testing.T) { // Test that collision detection works func TestDeviceAccounter_AddAllocs_Collision(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := devNode() d := NewDeviceAccounter(n) @@ -155,6 +164,8 @@ func TestDeviceAccounter_AddAllocs_Collision(t *testing.T) { // Make sure that the device allocator works even if the node has no devices func TestDeviceAccounter_AddReserved_NoDeviceNode(t *testing.T) { 
+ ci.Parallel(t) + require := require.New(t) n := MockNode() d := NewDeviceAccounter(n) @@ -166,6 +177,8 @@ func TestDeviceAccounter_AddReserved_NoDeviceNode(t *testing.T) { // Add reserved to a node with a device func TestDeviceAccounter_AddReserved(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := devNode() d := NewDeviceAccounter(n) @@ -197,6 +210,8 @@ func TestDeviceAccounter_AddReserved(t *testing.T) { // Test that collision detection works func TestDeviceAccounter_AddReserved_Collision(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := devNode() d := NewDeviceAccounter(n) diff --git a/nomad/structs/diff_test.go b/nomad/structs/diff_test.go index ef950b90d..e5409b17c 100644 --- a/nomad/structs/diff_test.go +++ b/nomad/structs/diff_test.go @@ -5,11 +5,14 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/stretchr/testify/require" ) func TestJobDiff(t *testing.T) { + ci.Parallel(t) + cases := []struct { Old, New *Job Expected *JobDiff @@ -1340,6 +1343,8 @@ func TestJobDiff(t *testing.T) { } func TestTaskGroupDiff(t *testing.T) { + ci.Parallel(t) + cases := []struct { TestCase string Old, New *TaskGroup @@ -3918,6 +3923,8 @@ func TestTaskGroupDiff(t *testing.T) { } func TestTaskDiff(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Old, New *Task @@ -7257,6 +7264,8 @@ func TestTaskDiff(t *testing.T) { } func TestServicesDiff(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Old, New []*Service diff --git a/nomad/structs/errors_test.go b/nomad/structs/errors_test.go index 08e5fb716..0fb67753d 100644 --- a/nomad/structs/errors_test.go +++ b/nomad/structs/errors_test.go @@ -4,10 +4,13 @@ import ( "errors" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" ) func TestRPCCodedErrors(t *testing.T) { + ci.Parallel(t) + cases := []struct { err error code int diff --git a/nomad/structs/funcs_test.go 
b/nomad/structs/funcs_test.go index 24211c67a..a36a36c58 100644 --- a/nomad/structs/funcs_test.go +++ b/nomad/structs/funcs_test.go @@ -7,12 +7,15 @@ import ( "testing" lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestRemoveAllocs(t *testing.T) { + ci.Parallel(t) + l := []*Allocation{ {ID: "foo"}, {ID: "bar"}, @@ -30,6 +33,8 @@ func TestRemoveAllocs(t *testing.T) { } func TestFilterTerminalAllocs(t *testing.T) { + ci.Parallel(t) + l := []*Allocation{ { ID: "bar", @@ -81,6 +86,8 @@ func TestFilterTerminalAllocs(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestAllocsFit_PortsOvercommitted_Old(t *testing.T) { + ci.Parallel(t) + n := &Node{ Resources: &Resources{ Networks: []*NetworkResource{ @@ -137,6 +144,8 @@ func TestAllocsFit_PortsOvercommitted_Old(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestAllocsFit_Old(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := &Node{ @@ -200,6 +209,8 @@ func TestAllocsFit_Old(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestAllocsFit_TerminalAlloc_Old(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := &Node{ @@ -264,6 +275,8 @@ func TestAllocsFit_TerminalAlloc_Old(t *testing.T) { } func TestAllocsFit(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := &Node{ @@ -405,6 +418,8 @@ func TestAllocsFit(t *testing.T) { } func TestAllocsFit_TerminalAlloc(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := &Node{ @@ -488,6 +503,8 @@ func TestAllocsFit_TerminalAlloc(t *testing.T) { // Tests that AllocsFit detects device collisions func TestAllocsFit_Devices(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := MockNvidiaNode() @@ -555,6 +572,8 @@ func TestAllocsFit_Devices(t *testing.T) { // TestAllocsFit_MemoryOversubscription asserts that only reserved memory is // used for capacity func 
TestAllocsFit_MemoryOversubscription(t *testing.T) { + ci.Parallel(t) + n := &Node{ NodeResources: &NodeResources{ Cpu: NodeCpuResources{ @@ -609,6 +628,8 @@ func TestAllocsFit_MemoryOversubscription(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestScoreFitBinPack_Old(t *testing.T) { + ci.Parallel(t) + node := &Node{} node.Resources = &Resources{ CPU: 4096, @@ -669,6 +690,8 @@ func TestScoreFitBinPack_Old(t *testing.T) { } func TestScoreFitBinPack(t *testing.T) { + ci.Parallel(t) + node := &Node{} node.NodeResources = &NodeResources{ Cpu: NodeCpuResources{ @@ -738,6 +761,8 @@ func TestScoreFitBinPack(t *testing.T) { } func TestACLPolicyListHash(t *testing.T) { + ci.Parallel(t) + h1 := ACLPolicyListHash(nil) assert.NotEqual(t, "", h1) @@ -784,6 +809,8 @@ func TestACLPolicyListHash(t *testing.T) { } func TestCompileACLObject(t *testing.T) { + ci.Parallel(t) + p1 := &ACLPolicy{ Name: fmt.Sprintf("policy-%s", uuid.Generate()), Description: "Super cool policy!", @@ -843,6 +870,8 @@ func TestCompileACLObject(t *testing.T) { // TestGenerateMigrateToken asserts the migrate token is valid for use in HTTP // headers and CompareMigrateToken works as expected. func TestGenerateMigrateToken(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) allocID := uuid.Generate() nodeSecret := uuid.Generate() @@ -863,6 +892,8 @@ func TestGenerateMigrateToken(t *testing.T) { } func TestMergeMultierrorWarnings(t *testing.T) { + ci.Parallel(t) + var errs []error // empty @@ -883,6 +914,8 @@ func TestMergeMultierrorWarnings(t *testing.T) { // TestParsePortRanges asserts ParsePortRanges errors on invalid port ranges. 
func TestParsePortRanges(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string spec string diff --git a/nomad/structs/network_test.go b/nomad/structs/network_test.go index 277f36f1a..ccb2900c1 100644 --- a/nomad/structs/network_test.go +++ b/nomad/structs/network_test.go @@ -6,10 +6,13 @@ import ( "reflect" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestNetworkIndex_Copy(t *testing.T) { + ci.Parallel(t) + n := &Node{ NodeResources: &NodeResources{ Networks: []*NetworkResource{ @@ -123,6 +126,7 @@ func TestNetworkIndex_Copy(t *testing.T) { func TestNetworkIndex_Overcommitted(t *testing.T) { t.Skip() + ci.Parallel(t) idx := NewNetworkIndex() // Consume some network @@ -165,6 +169,8 @@ func TestNetworkIndex_Overcommitted(t *testing.T) { } func TestNetworkIndex_SetNode(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() n := &Node{ NodeResources: &NodeResources{ @@ -200,6 +206,8 @@ func TestNetworkIndex_SetNode(t *testing.T) { } func TestNetworkIndex_AddAllocs(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() allocs := []*Allocation{ { @@ -255,6 +263,8 @@ func TestNetworkIndex_AddAllocs(t *testing.T) { } func TestNetworkIndex_AddReserved(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() reserved := &NetworkResource{ @@ -288,6 +298,8 @@ func TestNetworkIndex_AddReserved(t *testing.T) { // XXX Reserving ports doesn't work when yielding from a CIDR block. This is // okay for now since we do not actually fingerprint CIDR blocks. 
func TestNetworkIndex_yieldIP(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() n := &Node{ NodeResources: &NodeResources{ @@ -316,6 +328,7 @@ func TestNetworkIndex_yieldIP(t *testing.T) { } func TestNetworkIndex_AssignNetwork(t *testing.T) { + ci.Parallel(t) idx := NewNetworkIndex() n := &Node{ NodeResources: &NodeResources{ @@ -419,6 +432,7 @@ func TestNetworkIndex_AssignNetwork(t *testing.T) { // This test ensures that even with a small domain of available ports we are // able to make a dynamic port allocation. func TestNetworkIndex_AssignNetwork_Dynamic_Contention(t *testing.T) { + ci.Parallel(t) // Create a node that only has one free port idx := NewNetworkIndex() @@ -465,6 +479,8 @@ func TestNetworkIndex_AssignNetwork_Dynamic_Contention(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestNetworkIndex_SetNode_Old(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() n := &Node{ Resources: &Resources{ @@ -508,6 +524,8 @@ func TestNetworkIndex_SetNode_Old(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestNetworkIndex_AddAllocs_Old(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() allocs := []*Allocation{ { @@ -560,6 +578,8 @@ func TestNetworkIndex_AddAllocs_Old(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestNetworkIndex_yieldIP_Old(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() n := &Node{ Resources: &Resources{ @@ -599,6 +619,8 @@ func TestNetworkIndex_yieldIP_Old(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestNetworkIndex_AssignNetwork_Old(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() n := &Node{ Resources: &Resources{ @@ -735,6 +757,7 @@ func TestNetworkIndex_AssignNetwork_Old(t *testing.T) { // This test ensures that even with a small domain of available ports we are // able to make a dynamic port allocation. 
func TestNetworkIndex_AssignNetwork_Dynamic_Contention_Old(t *testing.T) { + ci.Parallel(t) // Create a node that only has one free port idx := NewNetworkIndex() @@ -787,6 +810,8 @@ func TestNetworkIndex_AssignNetwork_Dynamic_Contention_Old(t *testing.T) { } func TestIntContains(t *testing.T) { + ci.Parallel(t) + l := []int{1, 2, 10, 20} if isPortReserved(l, 50) { t.Fatalf("bad") diff --git a/nomad/structs/node_class_test.go b/nomad/structs/node_class_test.go index 1c7ffc3f6..77faca36b 100644 --- a/nomad/structs/node_class_test.go +++ b/nomad/structs/node_class_test.go @@ -4,6 +4,7 @@ import ( "reflect" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" psstructs "github.com/hashicorp/nomad/plugins/shared/structs" "github.com/stretchr/testify/require" @@ -52,6 +53,8 @@ func testNode() *Node { } func TestNode_ComputedClass(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Create a node and gets it computed class @@ -81,6 +84,8 @@ func TestNode_ComputedClass(t *testing.T) { } func TestNode_ComputedClass_Ignore(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Create a node and gets it computed class @@ -98,6 +103,8 @@ func TestNode_ComputedClass_Ignore(t *testing.T) { } func TestNode_ComputedClass_Device_Attr(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Create a node and gets it computed class @@ -122,6 +129,8 @@ func TestNode_ComputedClass_Device_Attr(t *testing.T) { } func TestNode_ComputedClass_Attr(t *testing.T) { + ci.Parallel(t) + // Create a node and gets it computed class n := testNode() if err := n.ComputeClass(); err != nil { @@ -168,6 +177,8 @@ func TestNode_ComputedClass_Attr(t *testing.T) { } func TestNode_ComputedClass_Meta(t *testing.T) { + ci.Parallel(t) + // Create a node and gets it computed class n := testNode() if err := n.ComputeClass(); err != nil { @@ -205,6 +216,8 @@ func TestNode_ComputedClass_Meta(t *testing.T) { } func TestNode_EscapedConstraints(t *testing.T) 
{ + ci.Parallel(t) + // Non-escaped constraints ne1 := &Constraint{ LTarget: "${attr.kernel.name}", diff --git a/nomad/structs/node_test.go b/nomad/structs/node_test.go index aee21accb..6c829dca3 100644 --- a/nomad/structs/node_test.go +++ b/nomad/structs/node_test.go @@ -3,10 +3,13 @@ package structs import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestDriverInfoEquals(t *testing.T) { + ci.Parallel(t) + require := require.New(t) var driverInfoTest = []struct { input []*DriverInfo diff --git a/nomad/structs/services_test.go b/nomad/structs/services_test.go index 9375366d7..6965f2421 100644 --- a/nomad/structs/services_test.go +++ b/nomad/structs/services_test.go @@ -4,12 +4,13 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/stretchr/testify/require" ) func TestServiceCheck_Hash(t *testing.T) { - t.Parallel() + ci.Parallel(t) original := &ServiceCheck{ Name: "check", @@ -53,7 +54,7 @@ func TestServiceCheck_Hash(t *testing.T) { } func TestServiceCheck_validate_PassingTypes(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("valid", func(t *testing.T) { for _, checkType := range []string{"tcp", "http", "grpc"} { @@ -83,7 +84,7 @@ func TestServiceCheck_validate_PassingTypes(t *testing.T) { } func TestServiceCheck_validate_FailingTypes(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("valid", func(t *testing.T) { for _, checkType := range []string{"tcp", "http", "grpc"} { @@ -114,7 +115,7 @@ func TestServiceCheck_validate_FailingTypes(t *testing.T) { } func TestServiceCheck_validate_PassFailZero_on_scripts(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("invalid", func(t *testing.T) { err := (&ServiceCheck{ @@ -131,7 +132,7 @@ func TestServiceCheck_validate_PassFailZero_on_scripts(t *testing.T) { } func TestServiceCheck_validate_OnUpdate_CheckRestart_Conflict(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("invalid", func(t *testing.T) { 
err := (&ServiceCheck{ @@ -186,7 +187,7 @@ func TestServiceCheck_validate_OnUpdate_CheckRestart_Conflict(t *testing.T) { } func TestService_Hash(t *testing.T) { - t.Parallel() + ci.Parallel(t) original := &Service{ Name: "myService", @@ -293,7 +294,7 @@ func TestService_Hash(t *testing.T) { } func TestConsulConnect_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) c := &ConsulConnect{} @@ -312,7 +313,7 @@ func TestConsulConnect_Validate(t *testing.T) { } func TestConsulConnect_CopyEquals(t *testing.T) { - t.Parallel() + ci.Parallel(t) c := &ConsulConnect{ SidecarService: &ConsulSidecarService{ @@ -349,7 +350,7 @@ func TestConsulConnect_CopyEquals(t *testing.T) { } func TestConsulConnect_GatewayProxy_CopyEquals(t *testing.T) { - t.Parallel() + ci.Parallel(t) c := &ConsulGatewayProxy{ ConnectTimeout: helper.TimeToPtr(1 * time.Second), @@ -366,7 +367,7 @@ func TestConsulConnect_GatewayProxy_CopyEquals(t *testing.T) { } func TestSidecarTask_MergeIntoTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) task := MockJob().TaskGroups[0].Tasks[0] sTask := &SidecarTask{ @@ -422,7 +423,7 @@ func TestSidecarTask_MergeIntoTask(t *testing.T) { } func TestSidecarTask_Equals(t *testing.T) { - t.Parallel() + ci.Parallel(t) original := &SidecarTask{ Name: "sidecar-task-1", @@ -501,7 +502,7 @@ func TestSidecarTask_Equals(t *testing.T) { } func TestConsulUpstream_upstreamEquals(t *testing.T) { - t.Parallel() + ci.Parallel(t) up := func(name string, port int) ConsulUpstream { return ConsulUpstream{ @@ -542,7 +543,7 @@ func TestConsulUpstream_upstreamEquals(t *testing.T) { } func TestConsulExposePath_exposePathsEqual(t *testing.T) { - t.Parallel() + ci.Parallel(t) expose := func(path, protocol, listen string, local int) ConsulExposePath { return ConsulExposePath{ @@ -579,7 +580,7 @@ func TestConsulExposePath_exposePathsEqual(t *testing.T) { } func TestConsulExposeConfig_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, (*ConsulExposeConfig)(nil).Copy()) 
require.Equal(t, &ConsulExposeConfig{ @@ -594,7 +595,7 @@ func TestConsulExposeConfig_Copy(t *testing.T) { } func TestConsulExposeConfig_Equals(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.True(t, (*ConsulExposeConfig)(nil).Equals(nil)) require.True(t, (&ConsulExposeConfig{ @@ -609,7 +610,7 @@ func TestConsulExposeConfig_Equals(t *testing.T) { } func TestConsulSidecarService_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { s := (*ConsulSidecarService)(nil) @@ -700,6 +701,8 @@ var ( ) func TestConsulGateway_Prefix(t *testing.T) { + ci.Parallel(t) + t.Run("ingress", func(t *testing.T) { result := (&ConsulGateway{Ingress: new(ConsulIngressConfigEntry)}).Prefix() require.Equal(t, ConnectIngressPrefix, result) @@ -717,7 +720,7 @@ func TestConsulGateway_Prefix(t *testing.T) { } func TestConsulGateway_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { g := (*ConsulGateway)(nil) @@ -748,7 +751,7 @@ func TestConsulGateway_Copy(t *testing.T) { } func TestConsulGateway_Equals_mesh(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { a := (*ConsulGateway)(nil) @@ -764,7 +767,7 @@ func TestConsulGateway_Equals_mesh(t *testing.T) { } func TestConsulGateway_Equals_ingress(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { a := (*ConsulGateway)(nil) @@ -858,7 +861,7 @@ func TestConsulGateway_Equals_ingress(t *testing.T) { } func TestConsulGateway_Equals_terminating(t *testing.T) { - t.Parallel() + ci.Parallel(t) original := consulTerminatingGateway1.Copy() @@ -911,7 +914,7 @@ func TestConsulGateway_Equals_terminating(t *testing.T) { } func TestConsulGateway_ingressServicesEqual(t *testing.T) { - t.Parallel() + ci.Parallel(t) igs1 := []*ConsulIngressService{{ Name: "service1", @@ -942,7 +945,7 @@ func TestConsulGateway_ingressServicesEqual(t *testing.T) { } func TestConsulGateway_ingressListenersEqual(t *testing.T) { - t.Parallel() + 
ci.Parallel(t) ils1 := []*ConsulIngressListener{{ Port: 2000, @@ -969,7 +972,7 @@ func TestConsulGateway_ingressListenersEqual(t *testing.T) { } func TestConsulGateway_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("bad proxy", func(t *testing.T) { err := (&ConsulGateway{ @@ -1037,7 +1040,7 @@ func TestConsulGateway_Validate(t *testing.T) { } func TestConsulGatewayBindAddress_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("no address", func(t *testing.T) { err := (&ConsulGatewayBindAddress{ @@ -1065,7 +1068,7 @@ func TestConsulGatewayBindAddress_Validate(t *testing.T) { } func TestConsulGatewayProxy_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("no timeout", func(t *testing.T) { err := (&ConsulGatewayProxy{ @@ -1117,7 +1120,7 @@ func TestConsulGatewayProxy_Validate(t *testing.T) { } func TestConsulIngressService_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("invalid name", func(t *testing.T) { err := (&ConsulIngressService{ @@ -1172,7 +1175,7 @@ func TestConsulIngressService_Validate(t *testing.T) { } func TestConsulIngressListener_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("invalid port", func(t *testing.T) { err := (&ConsulIngressListener{ @@ -1229,7 +1232,7 @@ func TestConsulIngressListener_Validate(t *testing.T) { } func TestConsulIngressConfigEntry_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("no listeners", func(t *testing.T) { err := (&ConsulIngressConfigEntry{}).Validate() @@ -1264,7 +1267,7 @@ func TestConsulIngressConfigEntry_Validate(t *testing.T) { } func TestConsulLinkedService_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { err := (*ConsulLinkedService)(nil).Validate() @@ -1349,7 +1352,7 @@ func TestConsulLinkedService_Validate(t *testing.T) { } func TestConsulLinkedService_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, (*ConsulLinkedService)(nil).Copy()) require.Equal(t, 
&ConsulLinkedService{ @@ -1368,7 +1371,7 @@ func TestConsulLinkedService_Copy(t *testing.T) { } func TestConsulLinkedService_linkedServicesEqual(t *testing.T) { - t.Parallel() + ci.Parallel(t) services := []*ConsulLinkedService{{ Name: "service1", @@ -1399,7 +1402,7 @@ func TestConsulLinkedService_linkedServicesEqual(t *testing.T) { } func TestConsulTerminatingConfigEntry_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { err := (*ConsulTerminatingConfigEntry)(nil).Validate() @@ -1433,7 +1436,7 @@ func TestConsulTerminatingConfigEntry_Validate(t *testing.T) { } func TestConsulMeshGateway_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, (*ConsulMeshGateway)(nil)) require.Equal(t, &ConsulMeshGateway{ @@ -1444,7 +1447,7 @@ func TestConsulMeshGateway_Copy(t *testing.T) { } func TestConsulMeshGateway_Equals(t *testing.T) { - t.Parallel() + ci.Parallel(t) c := &ConsulMeshGateway{Mode: "local"} require.False(t, c.Equals(nil)) @@ -1455,7 +1458,7 @@ func TestConsulMeshGateway_Equals(t *testing.T) { } func TestConsulMeshGateway_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { err := (*ConsulMeshGateway)(nil).Validate() diff --git a/nomad/structs/structs_periodic_test.go b/nomad/structs/structs_periodic_test.go index de795296c..f828bced4 100644 --- a/nomad/structs/structs_periodic_test.go +++ b/nomad/structs/structs_periodic_test.go @@ -6,11 +6,14 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestPeriodicConfig_DSTChange_Transitions(t *testing.T) { + ci.Parallel(t) + locName := "America/Los_Angeles" loc, err := time.LoadLocation(locName) require.NoError(t, err) @@ -221,6 +224,8 @@ func TestPeriodicConfig_DSTChange_Transitions(t *testing.T) { } func TestPeriodConfig_DSTSprintForward_Property(t *testing.T) { + ci.Parallel(t) + locName := "America/Los_Angeles" loc, err := 
time.LoadLocation(locName) require.NoError(t, err) diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index 40b343910..b7a1e7a01 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" @@ -19,6 +20,8 @@ import ( ) func TestJob_Validate(t *testing.T) { + ci.Parallel(t) + j := &Job{} err := j.Validate() requireErrors(t, err, @@ -98,6 +101,8 @@ func TestJob_Validate(t *testing.T) { } func TestJob_ValidateScaling(t *testing.T) { + ci.Parallel(t) + require := require.New(t) p := &ScalingPolicy{ @@ -142,6 +147,8 @@ func TestJob_ValidateScaling(t *testing.T) { } func TestJob_ValidateNullChar(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) // job id should not allow null characters @@ -166,6 +173,8 @@ func TestJob_ValidateNullChar(t *testing.T) { } func TestJob_Warnings(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Job *Job @@ -270,6 +279,8 @@ func TestJob_Warnings(t *testing.T) { } func TestJob_SpecChanged(t *testing.T) { + ci.Parallel(t) + // Get a base test job base := testJob() @@ -412,6 +423,8 @@ func testJob() *Job { } func TestJob_Copy(t *testing.T) { + ci.Parallel(t) + j := testJob() c := j.Copy() if !reflect.DeepEqual(j, c) { @@ -420,6 +433,8 @@ func TestJob_Copy(t *testing.T) { } func TestJob_IsPeriodic(t *testing.T) { + ci.Parallel(t) + j := &Job{ Type: JobTypeService, Periodic: &PeriodicConfig{ @@ -439,6 +454,8 @@ func TestJob_IsPeriodic(t *testing.T) { } func TestJob_IsPeriodicActive(t *testing.T) { + ci.Parallel(t) + cases := []struct { job *Job active bool @@ -549,6 +566,8 @@ func TestJob_SystemJob_Validate(t *testing.T) { } func TestJob_VaultPolicies(t *testing.T) { + ci.Parallel(t) + j0 := &Job{} e0 := make(map[string]map[string]*Vault, 0) @@ -632,7 +651,7 @@ func 
TestJob_VaultPolicies(t *testing.T) { } func TestJob_ConnectTasks(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) j0 := &Job{ @@ -697,6 +716,8 @@ func TestJob_ConnectTasks(t *testing.T) { } func TestJob_RequiredSignals(t *testing.T) { + ci.Parallel(t) + j0 := &Job{} e0 := make(map[string]map[string][]string, 0) @@ -809,6 +830,8 @@ func TestJob_RequiredSignals(t *testing.T) { // test new Equal comparisons for components of Jobs func TestJob_PartEqual(t *testing.T) { + ci.Parallel(t) + ns := &Networks{} require.True(t, ns.Equals(&Networks{})) @@ -854,7 +877,7 @@ func TestJob_PartEqual(t *testing.T) { } func TestTask_UsesConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("normal task", func(t *testing.T) { task := testJob().TaskGroups[0].Tasks[0] @@ -900,7 +923,7 @@ func TestTask_UsesConnect(t *testing.T) { } func TestTaskGroup_UsesConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, tg *TaskGroup, exp bool) { result := tg.UsesConnect() @@ -948,6 +971,8 @@ func TestTaskGroup_UsesConnect(t *testing.T) { } func TestTaskGroup_Validate(t *testing.T) { + ci.Parallel(t) + j := testJob() tg := &TaskGroup{ Count: -1, @@ -1228,6 +1253,8 @@ func TestTaskGroup_Validate(t *testing.T) { } func TestTaskGroupNetwork_Validate(t *testing.T) { + ci.Parallel(t) + cases := []struct { TG *TaskGroup ErrContains string @@ -1489,6 +1516,8 @@ func TestTaskGroupNetwork_Validate(t *testing.T) { } func TestTask_Validate(t *testing.T) { + ci.Parallel(t) + task := &Task{} ephemeralDisk := DefaultEphemeralDisk() err := task.Validate(ephemeralDisk, JobTypeBatch, nil, nil) @@ -1534,6 +1563,8 @@ func TestTask_Validate(t *testing.T) { } func TestTask_Validate_Resources(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string res *Resources @@ -1628,6 +1659,8 @@ func TestTask_Validate_Resources(t *testing.T) { } func TestNetworkResource_Copy(t *testing.T) { + ci.Parallel(t) + testCases := []struct { inputNetworkResource 
*NetworkResource name string @@ -1687,6 +1720,8 @@ func TestNetworkResource_Copy(t *testing.T) { } func TestTask_Validate_Services(t *testing.T) { + ci.Parallel(t) + s1 := &Service{ Name: "service-name", PortLabel: "bar", @@ -1785,6 +1820,8 @@ func TestTask_Validate_Services(t *testing.T) { } func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) { + ci.Parallel(t) + ephemeralDisk := DefaultEphemeralDisk() getTask := func(s *Service) *Task { task := &Task{ @@ -1846,6 +1883,8 @@ func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) { } func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) { + ci.Parallel(t) + ephemeralDisk := DefaultEphemeralDisk() getTask := func(s *Service) *Task { return &Task{ @@ -1899,6 +1938,7 @@ func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) { } func TestTask_Validate_Service_Check(t *testing.T) { + ci.Parallel(t) invalidCheck := ServiceCheck{ Name: "check-name", @@ -2006,6 +2046,8 @@ func TestTask_Validate_Service_Check(t *testing.T) { // TestTask_Validate_Service_Check_AddressMode asserts that checks do not // inherit address mode but do inherit ports. 
func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { + ci.Parallel(t) + getTask := func(s *Service) (*Task, *TaskGroup) { return &Task{ Services: []*Service{s}, @@ -2184,7 +2226,7 @@ func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { } func TestTask_Validate_Service_Check_GRPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Bad (no port) invalidGRPC := &ServiceCheck{ Type: ServiceCheckGRPC, @@ -2210,7 +2252,7 @@ func TestTask_Validate_Service_Check_GRPC(t *testing.T) { } func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) { - t.Parallel() + ci.Parallel(t) invalidCheckRestart := &CheckRestart{ Limit: -1, Grace: -1, @@ -2229,6 +2271,8 @@ func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) { } func TestTask_Validate_ConnectProxyKind(t *testing.T) { + ci.Parallel(t) + ephemeralDisk := DefaultEphemeralDisk() getTask := func(kind TaskKind, leader bool) *Task { task := &Task{ @@ -2336,6 +2380,8 @@ func TestTask_Validate_ConnectProxyKind(t *testing.T) { } func TestTask_Validate_LogConfig(t *testing.T) { + ci.Parallel(t) + task := &Task{ LogConfig: DefaultLogConfig(), } @@ -2348,6 +2394,8 @@ func TestTask_Validate_LogConfig(t *testing.T) { } func TestLogConfig_Equals(t *testing.T) { + ci.Parallel(t) + t.Run("both nil", func(t *testing.T) { a := (*LogConfig)(nil) b := (*LogConfig)(nil) @@ -2380,6 +2428,8 @@ func TestLogConfig_Equals(t *testing.T) { } func TestTask_Validate_CSIPluginConfig(t *testing.T) { + ci.Parallel(t) + table := []struct { name string pc *TaskCSIPluginConfig @@ -2426,6 +2476,7 @@ func TestTask_Validate_CSIPluginConfig(t *testing.T) { } func TestTask_Validate_Template(t *testing.T) { + ci.Parallel(t) bad := &Template{} task := &Task{ @@ -2471,6 +2522,8 @@ func TestTask_Validate_Template(t *testing.T) { } func TestTemplate_Validate(t *testing.T) { + ci.Parallel(t) + cases := []struct { Tmpl *Template Fail bool @@ -2613,6 +2666,8 @@ func TestTemplate_Validate(t *testing.T) { } func 
TestTaskWaitConfig_Equals(t *testing.T) { + ci.Parallel(t) + testCases := []struct { name string config *WaitConfig @@ -2662,6 +2717,8 @@ func TestTaskWaitConfig_Equals(t *testing.T) { } func TestConstraint_Validate(t *testing.T) { + ci.Parallel(t) + c := &Constraint{} err := c.Validate() require.Error(t, err, "Missing constraint operand") @@ -2734,6 +2791,8 @@ func TestConstraint_Validate(t *testing.T) { } func TestAffinity_Validate(t *testing.T) { + ci.Parallel(t) + type tc struct { affinity *Affinity err error @@ -2820,6 +2879,8 @@ func TestAffinity_Validate(t *testing.T) { } func TestUpdateStrategy_Validate(t *testing.T) { + ci.Parallel(t) + u := &UpdateStrategy{ MaxParallel: -1, HealthCheck: "foo", @@ -2844,6 +2905,8 @@ func TestUpdateStrategy_Validate(t *testing.T) { } func TestResource_NetIndex(t *testing.T) { + ci.Parallel(t) + r := &Resources{ Networks: []*NetworkResource{ {Device: "eth0"}, @@ -2863,6 +2926,8 @@ func TestResource_NetIndex(t *testing.T) { } func TestResource_Add(t *testing.T) { + ci.Parallel(t) + r1 := &Resources{ CPU: 2000, MemoryMB: 2048, @@ -2909,6 +2974,8 @@ func TestResource_Add(t *testing.T) { } func TestResource_Add_Network(t *testing.T) { + ci.Parallel(t) + r1 := &Resources{} r2 := &Resources{ Networks: []*NetworkResource{ @@ -2945,6 +3012,8 @@ func TestResource_Add_Network(t *testing.T) { } func TestComparableResources_Subtract(t *testing.T) { + ci.Parallel(t) + r1 := &ComparableResources{ Flattened: AllocatedTaskResources{ Cpu: AllocatedCpuResources{ @@ -3020,6 +3089,8 @@ func TestComparableResources_Subtract(t *testing.T) { } func TestMemoryResources_Add(t *testing.T) { + ci.Parallel(t) + r := &AllocatedMemoryResources{} // adding plain no max @@ -3043,6 +3114,8 @@ func TestMemoryResources_Add(t *testing.T) { } func TestNodeNetworkResource_Copy(t *testing.T) { + ci.Parallel(t) + netResource := &NodeNetworkResource{ Mode: "host", Device: "eth0", @@ -3073,6 +3146,8 @@ func TestNodeNetworkResource_Copy(t *testing.T) { } func 
TestEncodeDecode(t *testing.T) { + ci.Parallel(t) + type FooRequest struct { Foo string Bar int @@ -3117,6 +3192,8 @@ func BenchmarkEncodeDecode(b *testing.B) { } func TestInvalidServiceCheck(t *testing.T) { + ci.Parallel(t) + s := Service{ Name: "service-name", PortLabel: "bar", @@ -3227,6 +3304,8 @@ func TestInvalidServiceCheck(t *testing.T) { } func TestDistinctCheckID(t *testing.T) { + ci.Parallel(t) + c1 := ServiceCheck{ Name: "web-health", Type: "http", @@ -3261,6 +3340,8 @@ func TestDistinctCheckID(t *testing.T) { } func TestService_Canonicalize(t *testing.T) { + ci.Parallel(t) + job := "example" taskGroup := "cache" task := "redis" @@ -3295,6 +3376,8 @@ func TestService_Canonicalize(t *testing.T) { } func TestService_Validate(t *testing.T) { + ci.Parallel(t) + s := Service{ Name: "testservice", } @@ -3320,6 +3403,8 @@ func TestService_Validate(t *testing.T) { } func TestService_Equals(t *testing.T) { + ci.Parallel(t) + s := Service{ Name: "testservice", } @@ -3365,6 +3450,8 @@ func TestService_Equals(t *testing.T) { } func TestJob_ExpandServiceNames(t *testing.T) { + ci.Parallel(t) + j := &Job{ Name: "my-job", TaskGroups: []*TaskGroup{ @@ -3410,6 +3497,8 @@ func TestJob_ExpandServiceNames(t *testing.T) { } func TestJob_CombinedTaskMeta(t *testing.T) { + ci.Parallel(t) + j := &Job{ Meta: map[string]string{ "job_test": "job", @@ -3455,6 +3544,8 @@ func TestJob_CombinedTaskMeta(t *testing.T) { } func TestPeriodicConfig_EnabledInvalid(t *testing.T) { + ci.Parallel(t) + // Create a config that is enabled but with no interval specified. 
p := &PeriodicConfig{Enabled: true} if err := p.Validate(); err == nil { @@ -3481,6 +3572,8 @@ func TestPeriodicConfig_EnabledInvalid(t *testing.T) { } func TestPeriodicConfig_InvalidCron(t *testing.T) { + ci.Parallel(t) + specs := []string{"foo", "* *", "@foo"} for _, spec := range specs { p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} @@ -3492,6 +3585,8 @@ func TestPeriodicConfig_InvalidCron(t *testing.T) { } func TestPeriodicConfig_ValidCron(t *testing.T) { + ci.Parallel(t) + specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"} for _, spec := range specs { p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} @@ -3503,6 +3598,8 @@ func TestPeriodicConfig_ValidCron(t *testing.T) { } func TestPeriodicConfig_NextCron(t *testing.T) { + ci.Parallel(t) + from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC) cases := []struct { @@ -3543,6 +3640,8 @@ func TestPeriodicConfig_NextCron(t *testing.T) { } func TestPeriodicConfig_ValidTimeZone(t *testing.T) { + ci.Parallel(t) + zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"} for _, zone := range zones { p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone} @@ -3554,6 +3653,8 @@ func TestPeriodicConfig_ValidTimeZone(t *testing.T) { } func TestPeriodicConfig_DST(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // On Sun, Mar 12, 2:00 am 2017: +1 hour UTC @@ -3583,6 +3684,8 @@ func TestPeriodicConfig_DST(t *testing.T) { } func TestTaskLifecycleConfig_Validate(t *testing.T) { + ci.Parallel(t) + testCases := []struct { name string tlc *TaskLifecycleConfig @@ -3628,6 +3731,8 @@ func TestTaskLifecycleConfig_Validate(t *testing.T) { } func TestRestartPolicy_Validate(t *testing.T) { + ci.Parallel(t) + // Policy with acceptable restart options passes p := &RestartPolicy{ Mode: RestartPolicyModeFail, @@ -3682,6 +3787,8 @@ func TestRestartPolicy_Validate(t *testing.T) { } func 
TestReschedulePolicy_Validate(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string ReschedulePolicy *ReschedulePolicy @@ -3873,6 +3980,8 @@ func TestReschedulePolicy_Validate(t *testing.T) { } func TestAllocation_Index(t *testing.T) { + ci.Parallel(t) + a1 := Allocation{ Name: "example.cache[1]", TaskGroup: "cache", @@ -3892,6 +4001,8 @@ func TestAllocation_Index(t *testing.T) { } func TestTaskArtifact_Validate_Source(t *testing.T) { + ci.Parallel(t) + valid := &TaskArtifact{GetterSource: "google.com"} if err := valid.Validate(); err != nil { t.Fatalf("unexpected error: %v", err) @@ -3899,6 +4010,8 @@ func TestTaskArtifact_Validate_Source(t *testing.T) { } func TestTaskArtifact_Validate_Dest(t *testing.T) { + ci.Parallel(t) + valid := &TaskArtifact{GetterSource: "google.com"} if err := valid.Validate(); err != nil { t.Fatalf("unexpected error: %v", err) @@ -3923,7 +4036,7 @@ func TestTaskArtifact_Validate_Dest(t *testing.T) { // TestTaskArtifact_Hash asserts an artifact's hash changes when any of the // fields change. 
func TestTaskArtifact_Hash(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []TaskArtifact{ {}, @@ -4008,6 +4121,8 @@ func TestTaskArtifact_Hash(t *testing.T) { } func TestAllocation_ShouldMigrate(t *testing.T) { + ci.Parallel(t) + alloc := Allocation{ PreviousAllocation: "123", TaskGroup: "foo", @@ -4103,6 +4218,8 @@ func TestAllocation_ShouldMigrate(t *testing.T) { } func TestTaskArtifact_Validate_Checksum(t *testing.T) { + ci.Parallel(t) + cases := []struct { Input *TaskArtifact Err bool @@ -4155,7 +4272,7 @@ func TestTaskArtifact_Validate_Checksum(t *testing.T) { } func TestPlan_NormalizeAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) plan := &Plan{ NodeUpdate: make(map[string][]*Allocation), NodePreemptions: make(map[string][]*Allocation), @@ -4186,7 +4303,7 @@ func TestPlan_NormalizeAllocations(t *testing.T) { } func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { - t.Parallel() + ci.Parallel(t) plan := &Plan{ NodeUpdate: make(map[string][]*Allocation), } @@ -4215,7 +4332,7 @@ func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { } func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { - t.Parallel() + ci.Parallel(t) plan := &Plan{ NodePreemptions: make(map[string][]*Allocation), } @@ -4240,7 +4357,7 @@ func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { } func TestAllocation_MsgPackTags(t *testing.T) { - t.Parallel() + ci.Parallel(t) planType := reflect.TypeOf(Allocation{}) msgPackTags, _ := planType.FieldByName("_struct") @@ -4249,7 +4366,7 @@ func TestAllocation_MsgPackTags(t *testing.T) { } func TestEvaluation_MsgPackTags(t *testing.T) { - t.Parallel() + ci.Parallel(t) planType := reflect.TypeOf(Evaluation{}) msgPackTags, _ := planType.FieldByName("_struct") @@ -4258,6 +4375,8 @@ func TestEvaluation_MsgPackTags(t *testing.T) { } func TestAllocation_Terminated(t *testing.T) { + ci.Parallel(t) + type desiredState struct { ClientStatus string 
DesiredStatus string @@ -4297,6 +4416,8 @@ func TestAllocation_Terminated(t *testing.T) { } func TestAllocation_ShouldReschedule(t *testing.T) { + ci.Parallel(t) + type testCase struct { Desc string FailTime time.Time @@ -4433,6 +4554,8 @@ func TestAllocation_ShouldReschedule(t *testing.T) { } func TestAllocation_LastEventTime(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string taskState map[string]*TaskState @@ -4495,6 +4618,8 @@ func TestAllocation_LastEventTime(t *testing.T) { } func TestAllocation_NextDelay(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string reschedulePolicy *ReschedulePolicy @@ -4978,6 +5103,8 @@ func TestAllocation_NextDelay(t *testing.T) { } func TestAllocation_WaitClientStop(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string stop time.Duration @@ -5037,6 +5164,8 @@ func TestAllocation_WaitClientStop(t *testing.T) { } func TestAllocation_Canonicalize_Old(t *testing.T) { + ci.Parallel(t) + alloc := MockAlloc() alloc.AllocatedResources = nil alloc.TaskResources = map[string]*Resources{ @@ -5090,6 +5219,8 @@ func TestAllocation_Canonicalize_Old(t *testing.T) { // TestAllocation_Canonicalize_New asserts that an alloc with latest // schema isn't modified with Canonicalize func TestAllocation_Canonicalize_New(t *testing.T) { + ci.Parallel(t) + alloc := MockAlloc() copy := alloc.Copy() @@ -5098,6 +5229,8 @@ func TestAllocation_Canonicalize_New(t *testing.T) { } func TestRescheduleTracker_Copy(t *testing.T) { + ci.Parallel(t) + type testCase struct { original *RescheduleTracker expected *RescheduleTracker @@ -5125,6 +5258,8 @@ func TestRescheduleTracker_Copy(t *testing.T) { } func TestVault_Validate(t *testing.T) { + ci.Parallel(t) + v := &Vault{ Env: true, ChangeMode: VaultChangeModeNoop, @@ -5151,6 +5286,8 @@ func TestVault_Validate(t *testing.T) { } func TestParameterizedJobConfig_Validate(t *testing.T) { + ci.Parallel(t) + d := &ParameterizedJobConfig{ Payload: "foo", } @@ -5169,6 +5306,8 @@ 
func TestParameterizedJobConfig_Validate(t *testing.T) { } func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) { + ci.Parallel(t) + job := testJob() job.ParameterizedJob = &ParameterizedJobConfig{ Payload: DispatchPayloadOptional, @@ -5181,6 +5320,8 @@ func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) { } func TestJobConfig_Validate_StopAferClientDisconnect(t *testing.T) { + ci.Parallel(t) + // Setup a system Job with stop_after_client_disconnect set, which is invalid job := testJob() job.Type = JobTypeSystem @@ -5208,6 +5349,8 @@ func TestJobConfig_Validate_StopAferClientDisconnect(t *testing.T) { } func TestParameterizedJobConfig_Canonicalize(t *testing.T) { + ci.Parallel(t) + d := &ParameterizedJobConfig{} d.Canonicalize() if d.Payload != DispatchPayloadOptional { @@ -5216,6 +5359,8 @@ func TestParameterizedJobConfig_Canonicalize(t *testing.T) { } func TestDispatchPayloadConfig_Validate(t *testing.T) { + ci.Parallel(t) + d := &DispatchPayloadConfig{ File: "foo", } @@ -5239,6 +5384,8 @@ func TestDispatchPayloadConfig_Validate(t *testing.T) { } func TestScalingPolicy_Canonicalize(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string input *ScalingPolicy @@ -5267,6 +5414,8 @@ func TestScalingPolicy_Canonicalize(t *testing.T) { } func TestScalingPolicy_Validate(t *testing.T) { + ci.Parallel(t) + type testCase struct { name string input *ScalingPolicy @@ -5404,6 +5553,8 @@ func TestScalingPolicy_Validate(t *testing.T) { } func TestIsRecoverable(t *testing.T) { + ci.Parallel(t) + if IsRecoverable(nil) { t.Errorf("nil should not be recoverable") } @@ -5422,6 +5573,8 @@ func TestIsRecoverable(t *testing.T) { } func TestACLTokenValidate(t *testing.T) { + ci.Parallel(t) + tk := &ACLToken{} // Missing a type @@ -5467,6 +5620,8 @@ func TestACLTokenValidate(t *testing.T) { } func TestACLTokenPolicySubset(t *testing.T) { + ci.Parallel(t) + tk := &ACLToken{ Type: ACLClientToken, Policies: []string{"foo", "bar", "baz"}, @@ -5492,6 +5647,8 
@@ func TestACLTokenPolicySubset(t *testing.T) { } func TestACLTokenSetHash(t *testing.T) { + ci.Parallel(t) + tk := &ACLToken{ Name: "foo", Type: ACLClientToken, @@ -5512,6 +5669,8 @@ func TestACLTokenSetHash(t *testing.T) { } func TestACLPolicySetHash(t *testing.T) { + ci.Parallel(t) + ap := &ACLPolicy{ Name: "foo", Description: "great policy", @@ -5531,6 +5690,8 @@ func TestACLPolicySetHash(t *testing.T) { } func TestTaskEventPopulate(t *testing.T) { + ci.Parallel(t) + prepopulatedEvent := NewTaskEvent(TaskSetup) prepopulatedEvent.DisplayMessage = "Hola" testcases := []struct { @@ -5583,6 +5744,8 @@ func TestTaskEventPopulate(t *testing.T) { } func TestNetworkResourcesEquals(t *testing.T) { + ci.Parallel(t) + require := require.New(t) var networkResourcesTest = []struct { input []*NetworkResource @@ -5742,7 +5905,7 @@ func TestNetworkResourcesEquals(t *testing.T) { } func TestNode_Canonicalize(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Make sure the eligiblity is set properly @@ -5762,7 +5925,7 @@ func TestNode_Canonicalize(t *testing.T) { } func TestNode_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) node := &Node{ @@ -5873,6 +6036,8 @@ func TestNode_Copy(t *testing.T) { } func TestNode_Sanitize(t *testing.T) { + ci.Parallel(t) + require := require.New(t) testCases := []*Node{ @@ -5898,6 +6063,8 @@ func TestNode_Sanitize(t *testing.T) { } func TestSpread_Validate(t *testing.T) { + ci.Parallel(t) + type tc struct { spread *Spread err error @@ -6013,6 +6180,8 @@ func TestSpread_Validate(t *testing.T) { } func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) { + ci.Parallel(t) + require := require.New(t) cases := []struct { Input string @@ -6059,6 +6228,8 @@ func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) { } func TestMultiregion_CopyCanonicalize(t *testing.T) { + ci.Parallel(t) + require := require.New(t) emptyOld := &Multiregion{} @@ -6100,6 +6271,8 @@ func 
TestMultiregion_CopyCanonicalize(t *testing.T) { } func TestNodeResources_Copy(t *testing.T) { + ci.Parallel(t) + orig := &NodeResources{ Cpu: NodeCpuResources{ CpuShares: int64(32000), @@ -6146,6 +6319,8 @@ func TestNodeResources_Copy(t *testing.T) { } func TestNodeResources_Merge(t *testing.T) { + ci.Parallel(t) + res := &NodeResources{ Cpu: NodeCpuResources{ CpuShares: int64(32000), @@ -6194,6 +6369,8 @@ func TestNodeResources_Merge(t *testing.T) { } func TestAllocatedResources_Canonicalize(t *testing.T) { + ci.Parallel(t) + cases := map[string]struct { input *AllocatedResources expected *AllocatedResources @@ -6294,6 +6471,8 @@ func TestAllocatedResources_Canonicalize(t *testing.T) { } func TestAllocatedSharedResources_Canonicalize(t *testing.T) { + ci.Parallel(t) + a := &AllocatedSharedResources{ Networks: []*NetworkResource{ { @@ -6334,6 +6513,8 @@ func TestAllocatedSharedResources_Canonicalize(t *testing.T) { } func TestTaskGroup_validateScriptChecksInGroupServices(t *testing.T) { + ci.Parallel(t) + t.Run("service task not set", func(t *testing.T) { tg := &TaskGroup{ Name: "group1", @@ -6400,6 +6581,8 @@ func TestTaskGroup_validateScriptChecksInGroupServices(t *testing.T) { } func TestComparableResources_Superset(t *testing.T) { + ci.Parallel(t) + base := &ComparableResources{ Flattened: AllocatedTaskResources{ Cpu: AllocatedCpuResources{ diff --git a/nomad/system_endpoint_test.go b/nomad/system_endpoint_test.go index ee5eabd46..d0651a121 100644 --- a/nomad/system_endpoint_test.go +++ b/nomad/system_endpoint_test.go @@ -8,6 +8,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -15,7 +16,7 @@ import ( ) func TestSystemEndpoint_GarbageCollect(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, 
nil) defer cleanupS1() @@ -66,7 +67,7 @@ func TestSystemEndpoint_GarbageCollect(t *testing.T) { } func TestSystemEndpoint_GarbageCollect_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -111,7 +112,7 @@ func TestSystemEndpoint_GarbageCollect_ACL(t *testing.T) { } func TestSystemEndpoint_ReconcileSummaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -174,7 +175,7 @@ func TestSystemEndpoint_ReconcileSummaries(t *testing.T) { } func TestSystemEndpoint_ReconcileJobSummaries_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() diff --git a/nomad/timetable_test.go b/nomad/timetable_test.go index b5377fbf3..eeacd08f0 100644 --- a/nomad/timetable_test.go +++ b/nomad/timetable_test.go @@ -9,11 +9,12 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" ) func TestTimeTable(t *testing.T) { - t.Parallel() + ci.Parallel(t) tt := NewTimeTable(time.Second, time.Minute) index := tt.NearestIndex(time.Now()) @@ -90,7 +91,7 @@ func TestTimeTable(t *testing.T) { } func TestTimeTable_SerializeDeserialize(t *testing.T) { - t.Parallel() + ci.Parallel(t) tt := NewTimeTable(time.Second, time.Minute) // Witness some data @@ -132,7 +133,7 @@ func TestTimeTable_SerializeDeserialize(t *testing.T) { } func TestTimeTable_Overflow(t *testing.T) { - t.Parallel() + ci.Parallel(t) tt := NewTimeTable(time.Second, 3*time.Second) // Witness some data diff --git a/nomad/util_test.go b/nomad/util_test.go index 7f36e6850..af7583fd0 100644 --- a/nomad/util_test.go +++ b/nomad/util_test.go @@ -6,13 +6,14 @@ import ( "testing" version "github.com/hashicorp/go-version" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/serf/serf" 
"github.com/stretchr/testify/require" ) func TestIsNomadServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) m := serf.Member{ Name: "foo", Addr: net.IP([]byte{127, 0, 0, 1}), @@ -84,7 +85,7 @@ func TestIsNomadServer(t *testing.T) { } func TestServersMeetMinimumVersionExcludingFailed(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { members []serf.Member @@ -153,7 +154,7 @@ func TestServersMeetMinimumVersionExcludingFailed(t *testing.T) { } func TestServersMeetMinimumVersionIncludingFailed(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { members []serf.Member @@ -206,7 +207,7 @@ func makeMember(version string, status serf.MemberStatus) serf.Member { } func TestShuffleStrings(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Generate input inp := make([]string, 10) for idx := range inp { @@ -242,7 +243,7 @@ func Test_partitionAll(t *testing.T) { } func TestMaxUint64(t *testing.T) { - t.Parallel() + ci.Parallel(t) if maxUint64(1, 2) != 2 { t.Fatalf("bad") } diff --git a/nomad/vault_test.go b/nomad/vault_test.go index 243a5a6b6..86532858e 100644 --- a/nomad/vault_test.go +++ b/nomad/vault_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -153,7 +154,7 @@ func testVaultRoleAndToken(v *testutil.TestVault, t *testing.T, vaultPolicies ma } func TestVaultClient_BadConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := &config.VaultConfig{} logger := testlog.HCLogger(t) @@ -180,7 +181,7 @@ func TestVaultClient_BadConfig(t *testing.T) { // TestVaultClient_WithNamespaceSupport tests that the Vault namespace config, if present, will result in the // namespace header being set on the created Vault client. 
func TestVaultClient_WithNamespaceSupport(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tr := true testNs := "test-namespace" @@ -206,7 +207,7 @@ func TestVaultClient_WithNamespaceSupport(t *testing.T) { // TestVaultClient_WithoutNamespaceSupport tests that the Vault namespace config, if present, will result in the // namespace header being set on the created Vault client. func TestVaultClient_WithoutNamespaceSupport(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tr := true conf := &config.VaultConfig{ @@ -232,7 +233,7 @@ func TestVaultClient_WithoutNamespaceSupport(t *testing.T) { // Test that the Vault Client can establish a connection even if it is started // before Vault is available. func TestVaultClient_EstablishConnection(t *testing.T) { - t.Parallel() + ci.Parallel(t) for i := 10; i >= 0; i-- { v := testutil.NewTestVaultDelayed(t) logger := testlog.HCLogger(t) @@ -286,7 +287,7 @@ func TestVaultClient_EstablishConnection(t *testing.T) { } func TestVaultClient_ValidateRole(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -335,7 +336,7 @@ func TestVaultClient_ValidateRole(t *testing.T) { // TestVaultClient_ValidateRole_Success asserts that a valid token role // gets marked as valid func TestVaultClient_ValidateRole_Success(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -381,7 +382,7 @@ func TestVaultClient_ValidateRole_Success(t *testing.T) { // TestVaultClient_ValidateRole_Deprecated_Success asserts that a valid token // role gets marked as valid, even if it uses deprecated field, period func TestVaultClient_ValidateRole_Deprecated_Success(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -425,7 +426,7 @@ func TestVaultClient_ValidateRole_Deprecated_Success(t *testing.T) { } func TestVaultClient_ValidateRole_NonExistent(t *testing.T) { - t.Parallel() + 
ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -465,7 +466,7 @@ func TestVaultClient_ValidateRole_NonExistent(t *testing.T) { } func TestVaultClient_ValidateToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -519,7 +520,7 @@ func TestVaultClient_ValidateToken(t *testing.T) { } func TestVaultClient_SetActive(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -549,7 +550,7 @@ func TestVaultClient_SetActive(t *testing.T) { // Test that we can update the config and things keep working func TestVaultClient_SetConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -612,7 +613,7 @@ func TestVaultClient_SetConfig(t *testing.T) { // TestVaultClient_SetConfig_Deadlock asserts that calling SetConfig // concurrently with establishConnection does not deadlock. func TestVaultClient_SetConfig_Deadlock(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -643,7 +644,7 @@ func TestVaultClient_SetConfig_Deadlock(t *testing.T) { // Test that we can disable vault func TestVaultClient_SetConfig_Disable(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -677,7 +678,7 @@ func TestVaultClient_SetConfig_Disable(t *testing.T) { } func TestVaultClient_RenewalLoop(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -713,7 +714,7 @@ func TestVaultClient_RenewalLoop(t *testing.T) { } func TestVaultClientRenewUpdatesExpiration(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -752,7 +753,7 @@ func TestVaultClientRenewUpdatesExpiration(t *testing.T) { } func TestVaultClient_StopsAfterPermissionError(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -786,7 +787,7 @@ func TestVaultClient_StopsAfterPermissionError(t 
*testing.T) { }) } func TestVaultClient_LoopsUntilCannotRenew(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -847,7 +848,7 @@ func parseTTLFromLookup(s *vapi.Secret, t *testing.T) int64 { } func TestVaultClient_LookupToken_Invalid(t *testing.T) { - t.Parallel() + ci.Parallel(t) tr := true conf := &config.VaultConfig{ Enabled: &tr, @@ -871,7 +872,7 @@ func TestVaultClient_LookupToken_Invalid(t *testing.T) { } func TestVaultClient_LookupToken_Root(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -933,7 +934,7 @@ func TestVaultClient_LookupToken_Root(t *testing.T) { } func TestVaultClient_LookupToken_Role(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -998,7 +999,7 @@ func TestVaultClient_LookupToken_Role(t *testing.T) { } func TestVaultClient_LookupToken_RateLimit(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1059,7 +1060,7 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) { } func TestVaultClient_CreateToken_Root(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1103,7 +1104,7 @@ func TestVaultClient_CreateToken_Root(t *testing.T) { } func TestVaultClient_CreateToken_Whitelist_Role(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1151,7 +1152,7 @@ func TestVaultClient_CreateToken_Whitelist_Role(t *testing.T) { } func TestVaultClient_CreateToken_Root_Target_Role(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1202,7 +1203,7 @@ func TestVaultClient_CreateToken_Root_Target_Role(t *testing.T) { } func TestVaultClient_CreateToken_Blacklist_Role(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Need to skip if test is 0.6.4 version, err := testutil.VaultVersion() if err != nil { @@ -1261,7 +1262,7 @@ func 
TestVaultClient_CreateToken_Blacklist_Role(t *testing.T) { } func TestVaultClient_CreateToken_Role_InvalidToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1300,7 +1301,7 @@ func TestVaultClient_CreateToken_Role_InvalidToken(t *testing.T) { } func TestVaultClient_CreateToken_Role_Unrecoverable(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1335,7 +1336,7 @@ func TestVaultClient_CreateToken_Role_Unrecoverable(t *testing.T) { } func TestVaultClient_CreateToken_Prestart(t *testing.T) { - t.Parallel() + ci.Parallel(t) vconfig := &config.VaultConfig{ Enabled: helper.BoolToPtr(true), Token: uuid.Generate(), @@ -1395,7 +1396,7 @@ func TestVaultClient_MarkForRevocation(t *testing.T) { } func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) { - t.Parallel() + ci.Parallel(t) vconfig := &config.VaultConfig{ Enabled: helper.BoolToPtr(true), Token: uuid.Generate(), @@ -1441,7 +1442,7 @@ func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) { // TestVaultClient_RevokeTokens_Failures_TTL asserts that // the registered TTL doesn't get extended on retries func TestVaultClient_RevokeTokens_Failures_TTL(t *testing.T) { - t.Parallel() + ci.Parallel(t) vconfig := &config.VaultConfig{ Enabled: helper.BoolToPtr(true), Token: uuid.Generate(), @@ -1485,7 +1486,7 @@ func TestVaultClient_RevokeTokens_Failures_TTL(t *testing.T) { } func TestVaultClient_RevokeTokens_Root(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1550,7 +1551,7 @@ func TestVaultClient_RevokeTokens_Root(t *testing.T) { } func TestVaultClient_RevokeTokens_Role(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1619,7 +1620,7 @@ func TestVaultClient_RevokeTokens_Role(t *testing.T) { // TestVaultClient_RevokeTokens_Idempotent asserts that token revocation // is idempotent, and can cope with cases if token was 
deleted out of band. func TestVaultClient_RevokeTokens_Idempotent(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1699,7 +1700,7 @@ func TestVaultClient_RevokeTokens_Idempotent(t *testing.T) { // TestVaultClient_RevokeDaemon_Bounded asserts that token revocation // batches are bounded in size. func TestVaultClient_RevokeDaemon_Bounded(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1780,6 +1781,8 @@ func waitForConnection(v *vaultClient, t *testing.T) { } func TestVaultClient_nextBackoff(t *testing.T) { + ci.Parallel(t) + simpleCases := []struct { name string initBackoff float64 diff --git a/nomad/volumewatcher/volume_watcher_test.go b/nomad/volumewatcher/volume_watcher_test.go index 4bb4ddae4..5c6b39c8d 100644 --- a/nomad/volumewatcher/volume_watcher_test.go +++ b/nomad/volumewatcher/volume_watcher_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -12,7 +13,7 @@ import ( ) func TestVolumeWatch_Reap(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv := &MockRPCServer{ @@ -81,6 +82,7 @@ func TestVolumeWatch_Reap(t *testing.T) { } func TestVolumeReapBadState(t *testing.T) { + ci.Parallel(t) store := state.TestStateStore(t) err := state.TestBadCSIState(t, store) diff --git a/nomad/volumewatcher/volumes_watcher_test.go b/nomad/volumewatcher/volumes_watcher_test.go index 47f1c970a..c5c900674 100644 --- a/nomad/volumewatcher/volumes_watcher_test.go +++ b/nomad/volumewatcher/volumes_watcher_test.go @@ -5,6 +5,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -15,7 +16,7 @@ import ( // TestVolumeWatch_EnableDisable tests 
the watcher registration logic that needs // to happen during leader step-up/step-down func TestVolumeWatch_EnableDisable(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv := &MockRPCServer{} @@ -55,7 +56,7 @@ func TestVolumeWatch_EnableDisable(t *testing.T) { // TestVolumeWatch_LeadershipTransition tests the correct behavior of // claim reaping across leader step-up/step-down func TestVolumeWatch_LeadershipTransition(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv := &MockRPCServer{} @@ -139,7 +140,7 @@ func TestVolumeWatch_LeadershipTransition(t *testing.T) { // TestVolumeWatch_StartStop tests the start and stop of the watcher when // it receives notifcations and has completed its work func TestVolumeWatch_StartStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv := &MockStatefulRPCServer{} @@ -234,7 +235,7 @@ func TestVolumeWatch_StartStop(t *testing.T) { // TestVolumeWatch_RegisterDeregister tests the start and stop of // watchers around registration func TestVolumeWatch_RegisterDeregister(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv := &MockStatefulRPCServer{} diff --git a/nomad/worker_test.go b/nomad/worker_test.go index 0e872dedd..d8d5f4481 100644 --- a/nomad/worker_test.go +++ b/nomad/worker_test.go @@ -10,6 +10,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" "github.com/hashicorp/nomad/helper/testlog" @@ -64,7 +65,7 @@ func NewTestWorker(shutdownCtx context.Context, srv *Server) *Worker { } func TestWorker_dequeueEvaluation(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -102,7 +103,7 @@ func TestWorker_dequeueEvaluation(t *testing.T) { // Test that the worker picks up the correct wait index when there are multiple // evals for the same job. 
func TestWorker_dequeueEvaluation_SerialJobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -172,7 +173,7 @@ func TestWorker_dequeueEvaluation_SerialJobs(t *testing.T) { } func TestWorker_dequeueEvaluation_paused(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -221,7 +222,7 @@ func TestWorker_dequeueEvaluation_paused(t *testing.T) { } func TestWorker_dequeueEvaluation_shutdown(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -252,7 +253,7 @@ func TestWorker_dequeueEvaluation_shutdown(t *testing.T) { } func TestWorker_Shutdown(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -276,7 +277,7 @@ func TestWorker_Shutdown(t *testing.T) { } func TestWorker_Shutdown_paused(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -303,7 +304,7 @@ func TestWorker_Shutdown_paused(t *testing.T) { } func TestWorker_sendAck(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -352,7 +353,7 @@ func TestWorker_sendAck(t *testing.T) { } func TestWorker_waitForIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -392,7 +393,7 @@ func TestWorker_waitForIndex(t *testing.T) { } func TestWorker_invokeScheduler(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -413,7 +414,7 @@ func TestWorker_invokeScheduler(t *testing.T) { } func TestWorker_SubmitPlan(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -482,7 +483,7 @@ func TestWorker_SubmitPlan(t *testing.T) { } func 
TestWorker_SubmitPlanNormalizedAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -535,7 +536,7 @@ func TestWorker_SubmitPlanNormalizedAllocations(t *testing.T) { } func TestWorker_SubmitPlan_MissingNodeRefresh(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -609,7 +610,7 @@ func TestWorker_SubmitPlan_MissingNodeRefresh(t *testing.T) { } func TestWorker_UpdateEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -660,7 +661,7 @@ func TestWorker_UpdateEval(t *testing.T) { } func TestWorker_CreateEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -712,7 +713,7 @@ func TestWorker_CreateEval(t *testing.T) { } func TestWorker_ReblockEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -805,7 +806,7 @@ func TestWorker_ReblockEval(t *testing.T) { } func TestWorker_Info(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -830,7 +831,7 @@ const ( ) func TestWorker_SetPause(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) srv := &Server{ logger: logger, @@ -869,7 +870,7 @@ func TestWorker_SetPause(t *testing.T) { } func TestWorker_SetPause_OutOfOrderEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) srv := &Server{ logger: logger, diff --git a/plugins/base/plugin_test.go b/plugins/base/plugin_test.go index 19172f009..1f7954684 100644 --- a/plugins/base/plugin_test.go +++ b/plugins/base/plugin_test.go @@ -5,6 +5,7 @@ import ( pb "github.com/golang/protobuf/proto" plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" 
"github.com/hashicorp/nomad/plugins/shared/hclspec" "github.com/stretchr/testify/require" @@ -13,7 +14,7 @@ import ( ) func TestBasePlugin_PluginInfo_GRPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) var ( @@ -79,7 +80,7 @@ func TestBasePlugin_PluginInfo_GRPC(t *testing.T) { } func TestBasePlugin_ConfigSchema(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) mock := &MockPlugin{ @@ -110,7 +111,7 @@ func TestBasePlugin_ConfigSchema(t *testing.T) { } func TestBasePlugin_SetConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) var receivedData []byte diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go index 6554662c7..35abf626a 100644 --- a/plugins/csi/client_test.go +++ b/plugins/csi/client_test.go @@ -9,6 +9,7 @@ import ( csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/protobuf/ptypes/wrappers" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" fake "github.com/hashicorp/nomad/plugins/csi/testing" "github.com/stretchr/testify/require" @@ -41,6 +42,8 @@ func newTestClient(t *testing.T) (*fake.IdentityClient, *fake.ControllerClient, } func TestClient_RPC_PluginProbe(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -99,6 +102,8 @@ func TestClient_RPC_PluginProbe(t *testing.T) { } func TestClient_RPC_PluginInfo(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -152,6 +157,8 @@ func TestClient_RPC_PluginInfo(t *testing.T) { } func TestClient_RPC_PluginGetCapabilities(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -215,6 +222,8 @@ func TestClient_RPC_PluginGetCapabilities(t *testing.T) { } func TestClient_RPC_ControllerGetCapabilities(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -313,6 +322,8 @@ func TestClient_RPC_ControllerGetCapabilities(t *testing.T) { } 
func TestClient_RPC_NodeGetCapabilities(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -371,6 +382,8 @@ func TestClient_RPC_NodeGetCapabilities(t *testing.T) { } func TestClient_RPC_ControllerPublishVolume(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Request *ControllerPublishVolumeRequest @@ -436,6 +449,8 @@ func TestClient_RPC_ControllerPublishVolume(t *testing.T) { } func TestClient_RPC_ControllerUnpublishVolume(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Request *ControllerUnpublishVolumeRequest @@ -482,6 +497,7 @@ func TestClient_RPC_ControllerUnpublishVolume(t *testing.T) { } func TestClient_RPC_ControllerValidateVolume(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -706,6 +722,7 @@ func TestClient_RPC_ControllerValidateVolume(t *testing.T) { } func TestClient_RPC_ControllerCreateVolume(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -833,6 +850,7 @@ func TestClient_RPC_ControllerCreateVolume(t *testing.T) { } func TestClient_RPC_ControllerDeleteVolume(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -875,6 +893,7 @@ func TestClient_RPC_ControllerDeleteVolume(t *testing.T) { } func TestClient_RPC_ControllerListVolume(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -969,6 +988,7 @@ func TestClient_RPC_ControllerListVolume(t *testing.T) { } func TestClient_RPC_ControllerCreateSnapshot(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -1030,6 +1050,7 @@ func TestClient_RPC_ControllerCreateSnapshot(t *testing.T) { } func TestClient_RPC_ControllerDeleteSnapshot(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -1072,6 +1093,7 @@ func TestClient_RPC_ControllerDeleteSnapshot(t *testing.T) { } func TestClient_RPC_ControllerListSnapshots(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -1136,6 +1158,8 @@ func TestClient_RPC_ControllerListSnapshots(t 
*testing.T) { } func TestClient_RPC_NodeStageVolume(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -1177,6 +1201,8 @@ func TestClient_RPC_NodeStageVolume(t *testing.T) { } func TestClient_RPC_NodeUnstageVolume(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -1214,6 +1240,8 @@ func TestClient_RPC_NodeUnstageVolume(t *testing.T) { } func TestClient_RPC_NodePublishVolume(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Request *NodePublishVolumeRequest @@ -1269,6 +1297,8 @@ func TestClient_RPC_NodePublishVolume(t *testing.T) { } } func TestClient_RPC_NodeUnpublishVolume(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ExternalID string diff --git a/plugins/device/plugin_test.go b/plugins/device/plugin_test.go index a07fa329b..52629489f 100644 --- a/plugins/device/plugin_test.go +++ b/plugins/device/plugin_test.go @@ -8,6 +8,7 @@ import ( pb "github.com/golang/protobuf/proto" plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/base" @@ -21,7 +22,7 @@ import ( ) func TestDevicePlugin_PluginInfo(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) var ( @@ -90,7 +91,7 @@ func TestDevicePlugin_PluginInfo(t *testing.T) { } func TestDevicePlugin_ConfigSchema(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) mock := &MockDevicePlugin{ @@ -124,7 +125,7 @@ func TestDevicePlugin_ConfigSchema(t *testing.T) { } func TestDevicePlugin_SetConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) var receivedData []byte @@ -184,7 +185,7 @@ func TestDevicePlugin_SetConfig(t *testing.T) { } func TestDevicePlugin_Fingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) devices1 := []*DeviceGroup{ @@ -288,7 +289,7 @@ func 
TestDevicePlugin_Fingerprint(t *testing.T) { } func TestDevicePlugin_Fingerprint_StreamErr(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ferr := fmt.Errorf("mock fingerprinting failed") @@ -348,7 +349,7 @@ func TestDevicePlugin_Fingerprint_StreamErr(t *testing.T) { } func TestDevicePlugin_Fingerprint_CancelCtx(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) mock := &MockDevicePlugin{ @@ -408,7 +409,7 @@ func TestDevicePlugin_Fingerprint_CancelCtx(t *testing.T) { } func TestDevicePlugin_Reserve(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) reservation := &ContainerReservation{ @@ -464,7 +465,7 @@ func TestDevicePlugin_Reserve(t *testing.T) { } func TestDevicePlugin_Stats(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) devices1 := []*DeviceGroupStats{ @@ -605,7 +606,7 @@ func TestDevicePlugin_Stats(t *testing.T) { } func TestDevicePlugin_Stats_StreamErr(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ferr := fmt.Errorf("mock stats failed") @@ -665,7 +666,7 @@ func TestDevicePlugin_Stats_StreamErr(t *testing.T) { } func TestDevicePlugin_Stats_CancelCtx(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) mock := &MockDevicePlugin{ diff --git a/plugins/drivers/testutils/testing_test.go b/plugins/drivers/testutils/testing_test.go index 0c0c9efe2..cd368a3bd 100644 --- a/plugins/drivers/testutils/testing_test.go +++ b/plugins/drivers/testutils/testing_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" pstructs "github.com/hashicorp/nomad/plugins/shared/structs" @@ -19,6 +20,8 @@ var _ drivers.DriverPlugin = (*MockDriver)(nil) // Very simple test to ensure the test harness works as expected func TestDriverHarness(t *testing.T) { + ci.Parallel(t) + handle := 
&drivers.TaskHandle{Config: &drivers.TaskConfig{Name: "mock"}} d := &MockDriver{ StartTaskF: func(task *drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error) { @@ -38,7 +41,7 @@ type testDriverState struct { } func TestBaseDriver_Fingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fingerprints := []*drivers.Fingerprint{ @@ -100,7 +103,7 @@ func TestBaseDriver_Fingerprint(t *testing.T) { } func TestBaseDriver_RecoverTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // build driver state and encode it into proto msg @@ -130,7 +133,7 @@ func TestBaseDriver_RecoverTask(t *testing.T) { } func TestBaseDriver_StartTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) cfg := &drivers.TaskConfig{ @@ -162,7 +165,7 @@ func TestBaseDriver_StartTask(t *testing.T) { } func TestBaseDriver_WaitTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) result := &drivers.ExitResult{ExitCode: 1, Signal: 9} @@ -200,7 +203,7 @@ func TestBaseDriver_WaitTask(t *testing.T) { } func TestBaseDriver_TaskEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) now := time.Now().UTC().Truncate(time.Millisecond) @@ -263,6 +266,8 @@ func TestBaseDriver_TaskEvents(t *testing.T) { } func TestBaseDriver_Capabilities(t *testing.T) { + ci.Parallel(t) + capabilities := &drivers.Capabilities{ NetIsolationModes: []drivers.NetIsolationMode{ drivers.NetIsolationModeHost, diff --git a/scheduler/annotate_test.go b/scheduler/annotate_test.go index 57c95ce6e..9f651fc70 100644 --- a/scheduler/annotate_test.go +++ b/scheduler/annotate_test.go @@ -4,10 +4,13 @@ import ( "reflect" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" ) func TestAnnotateTaskGroup_Updates(t *testing.T) { + ci.Parallel(t) + annotations := &structs.PlanAnnotations{ DesiredTGUpdates: map[string]*structs.DesiredUpdates{ "foo": { @@ -50,6 +53,8 @@ func 
TestAnnotateTaskGroup_Updates(t *testing.T) { } func TestAnnotateCountChange_NonEdited(t *testing.T) { + ci.Parallel(t) + tg := &structs.TaskGroupDiff{} tgOrig := &structs.TaskGroupDiff{} annotateCountChange(tg) @@ -59,6 +64,8 @@ func TestAnnotateCountChange_NonEdited(t *testing.T) { } func TestAnnotateCountChange(t *testing.T) { + ci.Parallel(t) + up := &structs.FieldDiff{ Type: structs.DiffTypeEdited, Name: "Count", @@ -100,6 +107,8 @@ func TestAnnotateCountChange(t *testing.T) { } func TestAnnotateTask_NonEdited(t *testing.T) { + ci.Parallel(t) + tgd := &structs.TaskGroupDiff{Type: structs.DiffTypeNone} td := &structs.TaskDiff{Type: structs.DiffTypeNone} tdOrig := &structs.TaskDiff{Type: structs.DiffTypeNone} @@ -110,6 +119,8 @@ func TestAnnotateTask_NonEdited(t *testing.T) { } func TestAnnotateTask(t *testing.T) { + ci.Parallel(t) + cases := []struct { Diff *structs.TaskDiff Parent *structs.TaskGroupDiff diff --git a/scheduler/context_test.go b/scheduler/context_test.go index 8187e9cbf..37afc149a 100644 --- a/scheduler/context_test.go +++ b/scheduler/context_test.go @@ -3,6 +3,7 @@ package scheduler import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -27,6 +28,8 @@ func testContext(t testing.TB) (*state.StateStore, *EvalContext) { } func TestEvalContext_ProposedAlloc(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*RankedNode{ { @@ -156,7 +159,7 @@ func TestEvalContext_ProposedAlloc(t *testing.T) { // See https://github.com/hashicorp/nomad/issues/6787 // func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) { - t.Parallel() + ci.Parallel(t) state, ctx := testContext(t) nodes := []*RankedNode{ { @@ -261,6 +264,8 @@ func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) { } func TestEvalEligibility_JobStatus(t *testing.T) { + ci.Parallel(t) + e := NewEvalEligibility() cc := "v1:100" @@ -282,6 
+287,8 @@ func TestEvalEligibility_JobStatus(t *testing.T) { } func TestEvalEligibility_TaskGroupStatus(t *testing.T) { + ci.Parallel(t) + e := NewEvalEligibility() cc := "v1:100" tg := "foo" @@ -304,6 +311,8 @@ func TestEvalEligibility_TaskGroupStatus(t *testing.T) { } func TestEvalEligibility_SetJob(t *testing.T) { + ci.Parallel(t) + e := NewEvalEligibility() ne1 := &structs.Constraint{ LTarget: "${attr.kernel.name}", @@ -349,6 +358,8 @@ func TestEvalEligibility_SetJob(t *testing.T) { } func TestEvalEligibility_GetClasses(t *testing.T) { + ci.Parallel(t) + e := NewEvalEligibility() e.SetJobEligibility(true, "v1:1") e.SetJobEligibility(false, "v1:2") @@ -372,6 +383,8 @@ func TestEvalEligibility_GetClasses(t *testing.T) { require.Equal(t, expClasses, actClasses) } func TestEvalEligibility_GetClasses_JobEligible_TaskGroupIneligible(t *testing.T) { + ci.Parallel(t) + e := NewEvalEligibility() e.SetJobEligibility(true, "v1:1") e.SetTaskGroupEligibility(false, "foo", "v1:1") @@ -395,6 +408,8 @@ func TestEvalEligibility_GetClasses_JobEligible_TaskGroupIneligible(t *testing.T } func TestPortCollisionEvent_Copy(t *testing.T) { + ci.Parallel(t) + ev := &PortCollisionEvent{ Reason: "original", Node: mock.Node(), @@ -425,6 +440,8 @@ func TestPortCollisionEvent_Copy(t *testing.T) { } func TestPortCollisionEvent_Sanitize(t *testing.T) { + ci.Parallel(t) + ev := &PortCollisionEvent{ Reason: "original", Node: mock.Node(), diff --git a/scheduler/device_test.go b/scheduler/device_test.go index 332165c4b..7bf6319bc 100644 --- a/scheduler/device_test.go +++ b/scheduler/device_test.go @@ -3,6 +3,7 @@ package scheduler import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -88,6 +89,8 @@ func collectInstanceIDs(devices ...*structs.NodeDeviceResource) []string { // Test that asking for a device that isn't fully specified works. 
func TestDeviceAllocator_Allocate_GenericRequest(t *testing.T) { + ci.Parallel(t) + require := require.New(t) _, ctx := testContext(t) n := devNode() @@ -109,6 +112,8 @@ func TestDeviceAllocator_Allocate_GenericRequest(t *testing.T) { // Test that asking for a device that is fully specified works. func TestDeviceAllocator_Allocate_FullyQualifiedRequest(t *testing.T) { + ci.Parallel(t) + require := require.New(t) _, ctx := testContext(t) n := devNode() @@ -130,6 +135,8 @@ func TestDeviceAllocator_Allocate_FullyQualifiedRequest(t *testing.T) { // Test that asking for a device with too much count doesn't place func TestDeviceAllocator_Allocate_NotEnoughInstances(t *testing.T) { + ci.Parallel(t) + require := require.New(t) _, ctx := testContext(t) n := devNode() @@ -147,6 +154,8 @@ func TestDeviceAllocator_Allocate_NotEnoughInstances(t *testing.T) { // Test that asking for a device with constraints works func TestDeviceAllocator_Allocate_Constraints(t *testing.T) { + ci.Parallel(t) + n := multipleNvidiaNode() nvidia0 := n.NodeResources.Devices[0] nvidia1 := n.NodeResources.Devices[1] @@ -257,6 +266,8 @@ func TestDeviceAllocator_Allocate_Constraints(t *testing.T) { // Test that asking for a device with affinities works func TestDeviceAllocator_Allocate_Affinities(t *testing.T) { + ci.Parallel(t) + n := multipleNvidiaNode() nvidia0 := n.NodeResources.Devices[0] nvidia1 := n.NodeResources.Devices[1] diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index 1689fd415..16a0ada00 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -15,6 +16,8 @@ import ( ) func TestStaticIterator_Reset(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) var nodes []*structs.Node for i := 0; i < 3; i++ { @@ -46,6 +49,8 @@ func 
TestStaticIterator_Reset(t *testing.T) { } func TestStaticIterator_SetNodes(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) var nodes []*structs.Node for i := 0; i < 3; i++ { @@ -63,6 +68,8 @@ func TestStaticIterator_SetNodes(t *testing.T) { } func TestRandomIterator(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) var nodes []*structs.Node for i := 0; i < 10; i++ { @@ -83,6 +90,8 @@ func TestRandomIterator(t *testing.T) { } func TestHostVolumeChecker(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -165,6 +174,8 @@ func TestHostVolumeChecker(t *testing.T) { } func TestHostVolumeChecker_ReadOnly(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -233,7 +244,7 @@ func TestHostVolumeChecker_ReadOnly(t *testing.T) { } func TestCSIVolumeChecker(t *testing.T) { - t.Parallel() + ci.Parallel(t) state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -494,6 +505,8 @@ func TestCSIVolumeChecker(t *testing.T) { } func TestNetworkChecker(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) node := func(mode string) *structs.Node { @@ -638,6 +651,8 @@ func TestNetworkChecker(t *testing.T) { } func TestNetworkChecker_bridge_upgrade_path(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) t.Run("older client", func(t *testing.T) { @@ -668,6 +683,8 @@ func TestNetworkChecker_bridge_upgrade_path(t *testing.T) { } func TestDriverChecker_DriverInfo(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -717,6 +734,8 @@ func TestDriverChecker_DriverInfo(t *testing.T) { } } func TestDriverChecker_Compatibility(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -768,6 +787,8 @@ func TestDriverChecker_Compatibility(t *testing.T) { } func Test_HealthChecks(t *testing.T) { + ci.Parallel(t) + require := require.New(t) _, ctx := 
testContext(t) @@ -831,6 +852,8 @@ func Test_HealthChecks(t *testing.T) { } func TestConstraintChecker(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -891,6 +914,8 @@ func TestConstraintChecker(t *testing.T) { } func TestResolveConstraintTarget(t *testing.T) { + ci.Parallel(t) + type tcase struct { target string node *structs.Node @@ -966,6 +991,8 @@ func TestResolveConstraintTarget(t *testing.T) { } func TestCheckConstraint(t *testing.T) { + ci.Parallel(t) + type tcase struct { op string lVal, rVal interface{} @@ -1103,6 +1130,8 @@ func TestCheckConstraint(t *testing.T) { } func TestCheckLexicalOrder(t *testing.T) { + ci.Parallel(t) + type tcase struct { op string lVal, rVal interface{} @@ -1143,7 +1172,7 @@ func TestCheckLexicalOrder(t *testing.T) { } func TestCheckVersionConstraint(t *testing.T) { - t.Parallel() + ci.Parallel(t) type tcase struct { lVal, rVal interface{} @@ -1196,7 +1225,7 @@ func TestCheckVersionConstraint(t *testing.T) { } func TestCheckSemverConstraint(t *testing.T) { - t.Parallel() + ci.Parallel(t) type tcase struct { name string @@ -1258,6 +1287,8 @@ func TestCheckSemverConstraint(t *testing.T) { } func TestCheckRegexpConstraint(t *testing.T) { + ci.Parallel(t) + type tcase struct { lVal, rVal interface{} result bool @@ -1295,6 +1326,8 @@ func TestCheckRegexpConstraint(t *testing.T) { // This test puts allocations on the node to test if it detects infeasibility of // nodes correctly and picks the only feasible one func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -1369,6 +1402,8 @@ func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) { } func TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -1420,6 +1455,8 @@ func 
TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount(t *testing.T) { } func TestDistinctHostsIterator_TaskGroupDistinctHosts(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -1488,6 +1525,8 @@ func TestDistinctHostsIterator_TaskGroupDistinctHosts(t *testing.T) { // value to detect if the constraint at the job level properly considers all // task groups. func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -1668,6 +1707,8 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { // detect if the constraint at the job level properly considers all task groups // when the constraint allows a count greater than one func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -1875,6 +1916,8 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { // there is a plan to re-use that for a new allocation, that the next select // won't select that node. 
func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -1957,6 +2000,8 @@ func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testin // test if it detects infeasibility of property values correctly and picks the // only feasible one func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -2034,6 +2079,8 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) { // test if it detects infeasibility of property values correctly and picks the // only feasible one func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -2129,6 +2176,8 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testin // test if it detects infeasibility of property values correctly and picks the // only feasible one when the constraint is at the task group. 
func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -2290,6 +2339,8 @@ func (c *mockFeasibilityChecker) Feasible(*structs.Node) bool { func (c *mockFeasibilityChecker) calls() int { return c.i } func TestFeasibilityWrapper_JobIneligible(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{mock.Node()} static := NewStaticIterator(ctx, nodes) @@ -2308,6 +2359,8 @@ func TestFeasibilityWrapper_JobIneligible(t *testing.T) { } func TestFeasibilityWrapper_JobEscapes(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{mock.Node()} static := NewStaticIterator(ctx, nodes) @@ -2333,6 +2386,8 @@ func TestFeasibilityWrapper_JobEscapes(t *testing.T) { } func TestFeasibilityWrapper_JobAndTg_Eligible(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{mock.Node()} static := NewStaticIterator(ctx, nodes) @@ -2355,6 +2410,8 @@ func TestFeasibilityWrapper_JobAndTg_Eligible(t *testing.T) { } func TestFeasibilityWrapper_JobEligible_TgIneligible(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{mock.Node()} static := NewStaticIterator(ctx, nodes) @@ -2377,6 +2434,8 @@ func TestFeasibilityWrapper_JobEligible_TgIneligible(t *testing.T) { } func TestFeasibilityWrapper_JobEligible_TgEscaped(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{mock.Node()} static := NewStaticIterator(ctx, nodes) @@ -2404,6 +2463,8 @@ func TestFeasibilityWrapper_JobEligible_TgEscaped(t *testing.T) { } func TestSetContainsAny(t *testing.T) { + ci.Parallel(t) + require.True(t, checkSetContainsAny("a", "a")) require.True(t, checkSetContainsAny("a,b", "a")) require.True(t, checkSetContainsAny(" a,b ", "a ")) @@ -2412,6 +2473,8 @@ func TestSetContainsAny(t *testing.T) { } func TestDeviceChecker(t *testing.T) { + ci.Parallel(t) + getTg := 
func(devices ...*structs.RequestedDevice) *structs.TaskGroup { return &structs.TaskGroup{ Name: "example", @@ -2750,6 +2813,8 @@ func TestDeviceChecker(t *testing.T) { } func TestCheckAttributeConstraint(t *testing.T) { + ci.Parallel(t) + type tcase struct { op string lVal, rVal *psstructs.Attribute diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index 713c6a94e..08d1ebc5a 100644 --- a/scheduler/generic_sched_test.go +++ b/scheduler/generic_sched_test.go @@ -8,6 +8,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -18,6 +19,8 @@ import ( ) func TestServiceSched_JobRegister(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -106,6 +109,7 @@ func TestServiceSched_JobRegister(t *testing.T) { } func TestServiceSched_JobRegister_MemoryMaxHonored(t *testing.T) { + ci.Parallel(t) cases := []struct { name string @@ -218,6 +222,8 @@ func TestServiceSched_JobRegister_MemoryMaxHonored(t *testing.T) { } func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -310,6 +316,8 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { } func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -384,6 +392,8 @@ func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { } func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -466,6 +476,8 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { } func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -563,6 +575,8 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { } func 
TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -642,6 +656,8 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { } func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) assert := assert.New(t) @@ -724,6 +740,8 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) // Test job registration with spread configured func TestServiceSched_Spread(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) start := uint8(100) @@ -818,6 +836,8 @@ func TestServiceSched_Spread(t *testing.T) { // Test job registration with even spread across dc func TestServiceSched_EvenSpread(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) h := NewHarness(t) @@ -891,6 +911,8 @@ func TestServiceSched_EvenSpread(t *testing.T) { } func TestServiceSched_JobRegister_Annotate(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -970,6 +992,8 @@ func TestServiceSched_JobRegister_Annotate(t *testing.T) { } func TestServiceSched_JobRegister_CountZero(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1019,6 +1043,8 @@ func TestServiceSched_JobRegister_CountZero(t *testing.T) { } func TestServiceSched_JobRegister_AllocFail(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create NO nodes @@ -1093,6 +1119,8 @@ func TestServiceSched_JobRegister_AllocFail(t *testing.T) { } func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a full node @@ -1191,6 +1219,8 @@ func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) { } func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create one node @@ -1285,6 +1315,8 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { // This test just 
ensures the scheduler handles the eval type to avoid // regressions. func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a job and set the task group count to zero. @@ -1320,6 +1352,8 @@ func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) { } func TestServiceSched_Plan_Partial_Progress(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -1390,6 +1424,8 @@ func TestServiceSched_Plan_Partial_Progress(t *testing.T) { } func TestServiceSched_EvaluateBlockedEval(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a job @@ -1435,6 +1471,8 @@ func TestServiceSched_EvaluateBlockedEval(t *testing.T) { } func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1519,6 +1557,8 @@ func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { } func TestServiceSched_JobModify(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1621,6 +1661,8 @@ func TestServiceSched_JobModify(t *testing.T) { } func TestServiceSched_JobModify_Datacenters(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) require := require.New(t) @@ -1701,6 +1743,8 @@ func TestServiceSched_JobModify_Datacenters(t *testing.T) { // on the node but the node doesn't have enough resources to fit the new count + // 1. This tests that we properly discount the resources of existing allocs. 
func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create one node @@ -1793,6 +1837,8 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { } func TestServiceSched_JobModify_CountZero(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1893,6 +1939,8 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { } func TestServiceSched_JobModify_Rolling(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1999,6 +2047,8 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { // allocations as this allows us to assert that destructive changes are done // first. func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node and clear the reserved resources @@ -2119,6 +2169,8 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { } func TestServiceSched_JobModify_Canaries(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -2243,6 +2295,8 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) { } func TestServiceSched_JobModify_InPlace(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -2395,6 +2449,8 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { // Safe to remove in 0.11.0 as no one should ever be trying to upgrade from 0.8 // to 0.11! func TestServiceSched_JobModify_InPlace08(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create node @@ -2472,6 +2528,8 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { } func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -2584,6 +2642,8 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { // a failing allocation gets rescheduled with a penalty to the old // node, but an updated job doesn't apply the penalty. 
func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) require := require.New(t) @@ -2711,6 +2771,8 @@ func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) { } func TestServiceSched_JobDeregister_Purged(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Generate a fake job with allocations @@ -2778,6 +2840,8 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) { } func TestServiceSched_JobDeregister_Stopped(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) require := require.New(t) @@ -2849,6 +2913,8 @@ func TestServiceSched_JobDeregister_Stopped(t *testing.T) { } func TestServiceSched_NodeDown(t *testing.T) { + ci.Parallel(t) + cases := []struct { desired string client string @@ -2966,6 +3032,8 @@ func TestServiceSched_NodeDown(t *testing.T) { } func TestServiceSched_StopAfterClientDisconnect(t *testing.T) { + ci.Parallel(t) + cases := []struct { stop time.Duration when time.Time @@ -3127,6 +3195,8 @@ func TestServiceSched_StopAfterClientDisconnect(t *testing.T) { } func TestServiceSched_NodeUpdate(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a node @@ -3181,6 +3251,8 @@ func TestServiceSched_NodeUpdate(t *testing.T) { } func TestServiceSched_NodeDrain(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -3262,6 +3334,8 @@ func TestServiceSched_NodeDrain(t *testing.T) { } func TestServiceSched_NodeDrain_Down(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -3374,6 +3448,8 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { } func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -3427,6 +3503,8 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { // TestServiceSched_NodeDrain_TaskHandle asserts that allocations with task // handles have them propagated to replacement allocations when drained. 
func TestServiceSched_NodeDrain_TaskHandle(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) node := mock.Node() @@ -3517,6 +3595,8 @@ func TestServiceSched_NodeDrain_TaskHandle(t *testing.T) { } func TestServiceSched_RetryLimit(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) h.Planner = &RejectPlan{h} @@ -3567,6 +3647,8 @@ func TestServiceSched_RetryLimit(t *testing.T) { } func TestServiceSched_Reschedule_OnceNow(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -3679,6 +3761,8 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { // Tests that alloc reschedulable at a future time creates a follow up eval func TestServiceSched_Reschedule_Later(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) require := require.New(t) // Create some nodes @@ -3767,6 +3851,8 @@ func TestServiceSched_Reschedule_Later(t *testing.T) { } func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -3908,6 +3994,8 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { // Tests that old reschedule attempts are pruned func TestServiceSched_Reschedule_PruneEvents(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -4039,6 +4127,8 @@ func TestServiceSched_Reschedule_PruneEvents(t *testing.T) { // Tests that deployments with failed allocs result in placements as long as the // deployment is running. 
func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { + ci.Parallel(t) + for _, failedDeployment := range []bool{false, true} { t.Run(fmt.Sprintf("Failed Deployment: %v", failedDeployment), func(t *testing.T) { h := NewHarness(t) @@ -4125,6 +4215,8 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { } func TestBatchSched_Run_CompleteAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -4182,6 +4274,8 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) { } func TestBatchSched_Run_FailedAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -4252,6 +4346,8 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) { } func TestBatchSched_Run_LostAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -4339,6 +4435,8 @@ func TestBatchSched_Run_LostAlloc(t *testing.T) { } func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) node := mock.DrainNode() @@ -4391,6 +4489,8 @@ func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) { } func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create two nodes, one that is drained and has a successfully finished @@ -4464,6 +4564,8 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { // This test checks that terminal allocations that receive an in-place updated // are not added to the plan func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -4516,6 +4618,8 @@ func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) { // This test ensures that terminal jobs from older versions are ignored. 
func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -4600,6 +4704,8 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { // This test asserts that an allocation from an old job that is running on a // drained node is cleaned up. func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create two nodes, one that is drained and has a successfully finished @@ -4671,6 +4777,8 @@ func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) { // This test asserts that an allocation from a job that is complete on a // drained node is ignored up. func TestBatchSched_NodeDrain_Complete(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create two nodes, one that is drained and has a successfully finished @@ -4735,6 +4843,8 @@ func TestBatchSched_NodeDrain_Complete(t *testing.T) { // task group's count and that it works even if all the allocs have the same // name. 
func TestBatchSched_ScaleDown_SameName(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -4816,6 +4926,8 @@ func TestBatchSched_ScaleDown_SameName(t *testing.T) { } func TestGenericSched_AllocFit_Lifecycle(t *testing.T) { + ci.Parallel(t) + testCases := []struct { Name string NodeCpu int64 @@ -4934,6 +5046,8 @@ func TestGenericSched_AllocFit_Lifecycle(t *testing.T) { } func TestGenericSched_AllocFit_MemoryOversubscription(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) node := mock.Node() node.NodeResources.Cpu.CpuShares = 10000 @@ -4979,6 +5093,8 @@ func TestGenericSched_AllocFit_MemoryOversubscription(t *testing.T) { } func TestGenericSched_ChainedAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -5068,6 +5184,8 @@ func TestGenericSched_ChainedAlloc(t *testing.T) { } func TestServiceSched_NodeDrain_Sticky(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -5129,6 +5247,8 @@ func TestServiceSched_NodeDrain_Sticky(t *testing.T) { // This test ensures that when a job is stopped, the scheduler properly cancels // an outstanding deployment. func TestServiceSched_CancelDeployment_Stopped(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Generate a fake job @@ -5203,6 +5323,8 @@ func TestServiceSched_CancelDeployment_Stopped(t *testing.T) { // This test ensures that when a job is updated and had an old deployment, the scheduler properly cancels // the deployment. 
func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Generate a fake job @@ -5274,6 +5396,7 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) { // Various table driven tests for carry forward // of past reschedule events func Test_updateRescheduleTracker(t *testing.T) { + ci.Parallel(t) t1 := time.Now().UTC() alloc := mock.Alloc() @@ -5509,6 +5632,8 @@ func Test_updateRescheduleTracker(t *testing.T) { } func TestServiceSched_Preemption(t *testing.T) { + ci.Parallel(t) + require := require.New(t) h := NewHarness(t) @@ -5670,6 +5795,8 @@ func TestServiceSched_Preemption(t *testing.T) { // TestServiceSched_Migrate_NonCanary asserts that when rescheduling // non-canary allocations, a single allocation is migrated func TestServiceSched_Migrate_NonCanary(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) node1 := mock.Node() @@ -5740,6 +5867,8 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { // Canaries should be replaced by canaries, and non-canaries should be replaced // with the latest promoted version. func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) node1 := mock.Node() @@ -5907,6 +6036,8 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { // picks the latest deployment that have either been marked as promoted or is considered // non-destructive so it doesn't use canaries. 
func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // This test tests downgradedJobForPlacement directly to ease testing many different scenarios @@ -6024,6 +6155,8 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { // TestServiceSched_RunningWithNextAllocation asserts that if a running allocation has // NextAllocation Set, the allocation is not ignored and will be stopped func TestServiceSched_RunningWithNextAllocation(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) node1 := mock.Node() @@ -6096,6 +6229,8 @@ func TestServiceSched_RunningWithNextAllocation(t *testing.T) { } func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) require := require.New(t) @@ -6265,7 +6400,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { // TestPropagateTaskState asserts that propagateTaskState only copies state // when the previous allocation is lost or draining. func TestPropagateTaskState(t *testing.T) { - t.Parallel() + ci.Parallel(t) const taskName = "web" taskHandle := &structs.TaskHandle{ diff --git a/scheduler/preemption_test.go b/scheduler/preemption_test.go index 0c3784498..9179405ba 100644 --- a/scheduler/preemption_test.go +++ b/scheduler/preemption_test.go @@ -2,10 +2,10 @@ package scheduler import ( "fmt" + "strconv" "testing" - "strconv" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,6 +14,8 @@ import ( ) func TestResourceDistance(t *testing.T) { + ci.Parallel(t) + resourceAsk := &structs.ComparableResources{ Flattened: structs.AllocatedTaskResources{ Cpu: structs.AllocatedCpuResources{ @@ -142,6 +144,8 @@ func TestResourceDistance(t *testing.T) { } func TestPreemption(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string currentAllocations []*structs.Allocation @@ -1383,6 +1387,8 @@ func TestPreemption(t 
*testing.T) { // TestPreemptionMultiple tests evicting multiple allocations in the same time func TestPreemptionMultiple(t *testing.T) { + ci.Parallel(t) + // The test setup: // * a node with 4 GPUs // * a low priority job with 4 allocs, each is using 1 GPU diff --git a/scheduler/reconcile_test.go b/scheduler/reconcile_test.go index c80252d3b..1a9fc37f7 100644 --- a/scheduler/reconcile_test.go +++ b/scheduler/reconcile_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" @@ -289,6 +290,8 @@ func assertResults(t *testing.T, r *reconcileResults, exp *resultExpectation) { // Tests the reconciler properly handles placements for a job that has no // existing allocations func TestReconciler_Place_NoExisting(t *testing.T) { + ci.Parallel(t) + job := mock.Job() reconciler := NewAllocReconciler( testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, @@ -315,6 +318,8 @@ func TestReconciler_Place_NoExisting(t *testing.T) { // Tests the reconciler properly handles placements for a job that has some // existing allocations func TestReconciler_Place_Existing(t *testing.T) { + ci.Parallel(t) + job := mock.Job() // Create 3 existing allocations @@ -353,6 +358,8 @@ func TestReconciler_Place_Existing(t *testing.T) { // Tests the reconciler properly handles stopping allocations for a job that has // scaled down func TestReconciler_ScaleDown_Partial(t *testing.T) { + ci.Parallel(t) + // Has desired 10 job := mock.Job() @@ -392,6 +399,8 @@ func TestReconciler_ScaleDown_Partial(t *testing.T) { // Tests the reconciler properly handles stopping allocations for a job that has // scaled down to zero desired func TestReconciler_ScaleDown_Zero(t *testing.T) { + ci.Parallel(t) + // Set desired 0 job := mock.Job() job.TaskGroups[0].Count = 0 @@ -431,6 +440,8 @@ func TestReconciler_ScaleDown_Zero(t *testing.T) { // Tests the reconciler 
properly handles stopping allocations for a job that has // scaled down to zero desired where allocs have duplicate names func TestReconciler_ScaleDown_Zero_DuplicateNames(t *testing.T) { + ci.Parallel(t) + // Set desired 0 job := mock.Job() job.TaskGroups[0].Count = 0 @@ -471,6 +482,8 @@ func TestReconciler_ScaleDown_Zero_DuplicateNames(t *testing.T) { // Tests the reconciler properly handles inplace upgrading allocations func TestReconciler_Inplace(t *testing.T) { + ci.Parallel(t) + job := mock.Job() // Create 10 existing allocations @@ -508,6 +521,8 @@ func TestReconciler_Inplace(t *testing.T) { // Tests the reconciler properly handles inplace upgrading allocations while // scaling up func TestReconciler_Inplace_ScaleUp(t *testing.T) { + ci.Parallel(t) + // Set desired 15 job := mock.Job() job.TaskGroups[0].Count = 15 @@ -549,6 +564,8 @@ func TestReconciler_Inplace_ScaleUp(t *testing.T) { // Tests the reconciler properly handles inplace upgrading allocations while // scaling down func TestReconciler_Inplace_ScaleDown(t *testing.T) { + ci.Parallel(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -591,6 +608,8 @@ func TestReconciler_Inplace_ScaleDown(t *testing.T) { // generates the expected placements for any already-running allocations of // that version. 
func TestReconciler_Inplace_Rollback(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Count = 4 job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{ @@ -657,6 +676,8 @@ func TestReconciler_Inplace_Rollback(t *testing.T) { // Tests the reconciler properly handles destructive upgrading allocations func TestReconciler_Destructive(t *testing.T) { + ci.Parallel(t) + job := mock.Job() // Create 10 existing allocations @@ -691,6 +712,8 @@ func TestReconciler_Destructive(t *testing.T) { // Tests the reconciler properly handles destructive upgrading allocations when max_parallel=0 func TestReconciler_DestructiveMaxParallel(t *testing.T) { + ci.Parallel(t) + job := mock.MaxParallelJob() // Create 10 existing allocations @@ -726,6 +749,8 @@ func TestReconciler_DestructiveMaxParallel(t *testing.T) { // Tests the reconciler properly handles destructive upgrading allocations while // scaling up func TestReconciler_Destructive_ScaleUp(t *testing.T) { + ci.Parallel(t) + // Set desired 15 job := mock.Job() job.TaskGroups[0].Count = 15 @@ -766,6 +791,8 @@ func TestReconciler_Destructive_ScaleUp(t *testing.T) { // Tests the reconciler properly handles destructive upgrading allocations while // scaling down func TestReconciler_Destructive_ScaleDown(t *testing.T) { + ci.Parallel(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -805,6 +832,8 @@ func TestReconciler_Destructive_ScaleDown(t *testing.T) { // Tests the reconciler properly handles lost nodes with allocations func TestReconciler_LostNode(t *testing.T) { + ci.Parallel(t) + job := mock.Job() // Create 10 existing allocations @@ -854,6 +883,8 @@ func TestReconciler_LostNode(t *testing.T) { // Tests the reconciler properly handles lost nodes with allocations while // scaling up func TestReconciler_LostNode_ScaleUp(t *testing.T) { + ci.Parallel(t) + // Set desired 15 job := mock.Job() job.TaskGroups[0].Count = 15 @@ -905,6 +936,8 @@ func TestReconciler_LostNode_ScaleUp(t 
*testing.T) { // Tests the reconciler properly handles lost nodes with allocations while // scaling down func TestReconciler_LostNode_ScaleDown(t *testing.T) { + ci.Parallel(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -953,6 +986,8 @@ func TestReconciler_LostNode_ScaleDown(t *testing.T) { // Tests the reconciler properly handles draining nodes with allocations func TestReconciler_DrainNode(t *testing.T) { + ci.Parallel(t) + job := mock.Job() // Create 10 existing allocations @@ -1004,6 +1039,8 @@ func TestReconciler_DrainNode(t *testing.T) { // Tests the reconciler properly handles draining nodes with allocations while // scaling up func TestReconciler_DrainNode_ScaleUp(t *testing.T) { + ci.Parallel(t) + // Set desired 15 job := mock.Job() job.TaskGroups[0].Count = 15 @@ -1058,6 +1095,8 @@ func TestReconciler_DrainNode_ScaleUp(t *testing.T) { // Tests the reconciler properly handles draining nodes with allocations while // scaling down func TestReconciler_DrainNode_ScaleDown(t *testing.T) { + ci.Parallel(t) + // Set desired 8 job := mock.Job() job.TaskGroups[0].Count = 8 @@ -1111,6 +1150,8 @@ func TestReconciler_DrainNode_ScaleDown(t *testing.T) { // Tests the reconciler properly handles a task group being removed func TestReconciler_RemovedTG(t *testing.T) { + ci.Parallel(t) + job := mock.Job() // Create 10 allocations for a tg that no longer exists @@ -1155,6 +1196,8 @@ func TestReconciler_RemovedTG(t *testing.T) { // Tests the reconciler properly handles a job in stopped states func TestReconciler_JobStopped(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.Stop = true @@ -1217,6 +1260,8 @@ func TestReconciler_JobStopped(t *testing.T) { // Tests the reconciler doesn't update allocs in terminal state // when job is stopped or nil func TestReconciler_JobStopped_TerminalAllocs(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.Stop = true @@ -1279,6 +1324,8 @@ func TestReconciler_JobStopped_TerminalAllocs(t *testing.T) { // 
Tests the reconciler properly handles jobs with multiple task groups func TestReconciler_MultiTG(t *testing.T) { + ci.Parallel(t) + job := mock.Job() tg2 := job.TaskGroups[0].Copy() tg2.Name = "foo" @@ -1323,6 +1370,8 @@ func TestReconciler_MultiTG(t *testing.T) { // Tests the reconciler properly handles jobs with multiple task groups with // only one having an update stanza and a deployment already being created func TestReconciler_MultiTG_SingleUpdateStanza(t *testing.T) { + ci.Parallel(t) + job := mock.Job() tg2 := job.TaskGroups[0].Copy() tg2.Name = "foo" @@ -1372,6 +1421,8 @@ func TestReconciler_MultiTG_SingleUpdateStanza(t *testing.T) { // Tests delayed rescheduling of failed batch allocations func TestReconciler_RescheduleLater_Batch(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 4 @@ -1466,6 +1517,8 @@ func TestReconciler_RescheduleLater_Batch(t *testing.T) { // Tests delayed rescheduling of failed batch allocations and batching of allocs // with fail times that are close together func TestReconciler_RescheduleLaterWithBatchedEvals_Batch(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 4 @@ -1553,6 +1606,8 @@ func TestReconciler_RescheduleLaterWithBatchedEvals_Batch(t *testing.T) { // Tests rescheduling failed batch allocations func TestReconciler_RescheduleNow_Batch(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 4 job := mock.Job() @@ -1635,6 +1690,8 @@ func TestReconciler_RescheduleNow_Batch(t *testing.T) { // Tests rescheduling failed service allocations with desired state stop func TestReconciler_RescheduleLater_Service(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -1718,6 +1775,8 @@ func TestReconciler_RescheduleLater_Service(t *testing.T) { // Tests service allocations with client status complete func TestReconciler_Service_ClientStatusComplete(t *testing.T) { + ci.Parallel(t) + // Set desired 5 job := mock.Job() 
job.TaskGroups[0].Count = 5 @@ -1773,6 +1832,8 @@ func TestReconciler_Service_ClientStatusComplete(t *testing.T) { // Tests service job placement with desired stop and client status complete func TestReconciler_Service_DesiredStop_ClientStatusComplete(t *testing.T) { + ci.Parallel(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -1833,6 +1894,8 @@ func TestReconciler_Service_DesiredStop_ClientStatusComplete(t *testing.T) { // Tests rescheduling failed service allocations with desired state stop func TestReconciler_RescheduleNow_Service(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -1914,6 +1977,8 @@ func TestReconciler_RescheduleNow_Service(t *testing.T) { // Tests rescheduling failed service allocations when there's clock drift (upto a second) func TestReconciler_RescheduleNow_WithinAllowedTimeWindow(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -1994,6 +2059,8 @@ func TestReconciler_RescheduleNow_WithinAllowedTimeWindow(t *testing.T) { // Tests rescheduling failed service allocations when the eval ID matches and there's a large clock drift func TestReconciler_RescheduleNow_EvalIDMatch(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -2076,6 +2143,8 @@ func TestReconciler_RescheduleNow_EvalIDMatch(t *testing.T) { // Tests rescheduling failed service allocations when there are canaries func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -2185,6 +2254,8 @@ func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) { // Tests rescheduling failed canary service allocations func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -2311,6 +2382,8 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { // Tests rescheduling failed canary service allocations when one has 
reached its // reschedule limit func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -2438,6 +2511,8 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { // Tests failed service allocations that were already rescheduled won't be rescheduled again func TestReconciler_DontReschedule_PreviouslyRescheduled(t *testing.T) { + ci.Parallel(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -2497,6 +2572,8 @@ func TestReconciler_DontReschedule_PreviouslyRescheduled(t *testing.T) { // Tests the reconciler cancels an old deployment when the job is being stopped func TestReconciler_CancelDeployment_JobStop(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.Stop = true @@ -2595,6 +2672,8 @@ func TestReconciler_CancelDeployment_JobStop(t *testing.T) { // Tests the reconciler cancels an old deployment when the job is updated func TestReconciler_CancelDeployment_JobUpdate(t *testing.T) { + ci.Parallel(t) + // Create a base job job := mock.Job() @@ -2672,6 +2751,8 @@ func TestReconciler_CancelDeployment_JobUpdate(t *testing.T) { // Tests the reconciler creates a deployment and does a rolling upgrade with // destructive changes func TestReconciler_CreateDeployment_RollingUpgrade_Destructive(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -2714,6 +2795,8 @@ func TestReconciler_CreateDeployment_RollingUpgrade_Destructive(t *testing.T) { // Tests the reconciler creates a deployment for inplace updates func TestReconciler_CreateDeployment_RollingUpgrade_Inplace(t *testing.T) { + ci.Parallel(t) + jobOld := mock.Job() job := jobOld.Copy() job.Version++ @@ -2757,6 +2840,8 @@ func TestReconciler_CreateDeployment_RollingUpgrade_Inplace(t *testing.T) { // Tests the reconciler creates a deployment when the job has a newer create index func TestReconciler_CreateDeployment_NewerCreateIndex(t *testing.T) { + 
ci.Parallel(t) + jobOld := mock.Job() job := jobOld.Copy() job.TaskGroups[0].Update = noCanaryUpdate @@ -2804,6 +2889,8 @@ func TestReconciler_CreateDeployment_NewerCreateIndex(t *testing.T) { // Tests the reconciler doesn't creates a deployment if there are no changes func TestReconciler_DontCreateDeployment_NoChanges(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -2842,6 +2929,8 @@ func TestReconciler_DontCreateDeployment_NoChanges(t *testing.T) { // Tests the reconciler doesn't place any more canaries when the deployment is // paused or failed func TestReconciler_PausedOrFailedDeployment_NoMoreCanaries(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -2923,6 +3012,8 @@ func TestReconciler_PausedOrFailedDeployment_NoMoreCanaries(t *testing.T) { // Tests the reconciler doesn't place any more allocs when the deployment is // paused or failed func TestReconciler_PausedOrFailedDeployment_NoMorePlacements(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate job.TaskGroups[0].Count = 15 @@ -2988,6 +3079,8 @@ func TestReconciler_PausedOrFailedDeployment_NoMorePlacements(t *testing.T) { // Tests the reconciler doesn't do any more destructive updates when the // deployment is paused or failed func TestReconciler_PausedOrFailedDeployment_NoMoreDestructiveUpdates(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -3062,6 +3155,8 @@ func TestReconciler_PausedOrFailedDeployment_NoMoreDestructiveUpdates(t *testing // Tests the reconciler handles migrating a canary correctly on a draining node func TestReconciler_DrainNode_Canary(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3135,6 +3230,8 @@ func TestReconciler_DrainNode_Canary(t *testing.T) { // Tests the reconciler handles migrating a canary correctly on a lost node func 
TestReconciler_LostNode_Canary(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3209,6 +3306,8 @@ func TestReconciler_LostNode_Canary(t *testing.T) { // Tests the reconciler handles stopping canaries from older deployments func TestReconciler_StopOldCanaries(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3290,6 +3389,8 @@ func TestReconciler_StopOldCanaries(t *testing.T) { // Tests the reconciler creates new canaries when the job changes func TestReconciler_NewCanaries(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3337,6 +3438,8 @@ func TestReconciler_NewCanaries(t *testing.T) { // Tests the reconciler creates new canaries when the job changes and the // canary count is greater than the task group count func TestReconciler_NewCanaries_CountGreater(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Count = 3 job.TaskGroups[0].Update = canaryUpdate.Copy() @@ -3387,6 +3490,8 @@ func TestReconciler_NewCanaries_CountGreater(t *testing.T) { // Tests the reconciler creates new canaries when the job changes for multiple // task groups func TestReconciler_NewCanaries_MultiTG(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate job.TaskGroups = append(job.TaskGroups, job.TaskGroups[0].Copy()) @@ -3443,6 +3548,8 @@ func TestReconciler_NewCanaries_MultiTG(t *testing.T) { // Tests the reconciler creates new canaries when the job changes and scales up func TestReconciler_NewCanaries_ScaleUp(t *testing.T) { + ci.Parallel(t) + // Scale the job up to 15 job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3492,6 +3599,8 @@ func TestReconciler_NewCanaries_ScaleUp(t *testing.T) { // Tests the reconciler creates new canaries when the job changes and scales // down func TestReconciler_NewCanaries_ScaleDown(t *testing.T) { + ci.Parallel(t) + // Scale the job down to 5 job 
:= mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3542,6 +3651,8 @@ func TestReconciler_NewCanaries_ScaleDown(t *testing.T) { // Tests the reconciler handles filling the names of partially placed canaries func TestReconciler_NewCanaries_FillNames(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = &structs.UpdateStrategy{ Canary: 4, @@ -3611,6 +3722,8 @@ func TestReconciler_NewCanaries_FillNames(t *testing.T) { // Tests the reconciler handles canary promotion by unblocking max_parallel func TestReconciler_PromoteCanaries_Unblock(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3684,6 +3797,8 @@ func TestReconciler_PromoteCanaries_Unblock(t *testing.T) { // Tests the reconciler handles canary promotion when the canary count equals // the total correctly func TestReconciler_PromoteCanaries_CanariesEqualCount(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate job.TaskGroups[0].Count = 2 @@ -3766,6 +3881,8 @@ func TestReconciler_PromoteCanaries_CanariesEqualCount(t *testing.T) { // Tests the reconciler checks the health of placed allocs to determine the // limit func TestReconciler_DeploymentLimit_HealthAccounting(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -3859,6 +3976,8 @@ func TestReconciler_DeploymentLimit_HealthAccounting(t *testing.T) { // Tests the reconciler handles an alloc on a tainted node during a rolling // update func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -3944,6 +4063,8 @@ func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) { // Tests the reconciler handles a failed deployment with allocs on tainted // nodes func TestReconciler_FailedDeployment_TaintedNodes(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4028,6 
+4149,8 @@ func TestReconciler_FailedDeployment_TaintedNodes(t *testing.T) { // Tests the reconciler handles a run after a deployment is complete // successfully. func TestReconciler_CompleteDeployment(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -4080,6 +4203,8 @@ func TestReconciler_CompleteDeployment(t *testing.T) { // nothing left to place even if there are failed allocations that are part of // the deployment. func TestReconciler_MarkDeploymentComplete_FailedAllocations(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4142,6 +4267,8 @@ func TestReconciler_MarkDeploymentComplete_FailedAllocations(t *testing.T) { // Test that a failed deployment cancels non-promoted canaries func TestReconciler_FailedDeployment_CancelCanaries(t *testing.T) { + ci.Parallel(t) + // Create a job with two task groups job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -4236,6 +4363,8 @@ func TestReconciler_FailedDeployment_CancelCanaries(t *testing.T) { // Test that a failed deployment and updated job works func TestReconciler_FailedDeployment_NewJob(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4306,6 +4435,8 @@ func TestReconciler_FailedDeployment_NewJob(t *testing.T) { // Tests the reconciler marks a deployment as complete func TestReconciler_MarkDeploymentComplete(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4363,6 +4494,8 @@ func TestReconciler_MarkDeploymentComplete(t *testing.T) { // Tests the reconciler handles changing a job such that a deployment is created // while doing a scale up but as the second eval. 
func TestReconciler_JobChange_ScaleUp_SecondEval(t *testing.T) { + ci.Parallel(t) + // Scale the job up to 15 job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4424,6 +4557,8 @@ func TestReconciler_JobChange_ScaleUp_SecondEval(t *testing.T) { // Tests the reconciler doesn't stop allocations when doing a rolling upgrade // where the count of the old job allocs is < desired count. func TestReconciler_RollingUpgrade_MissingAllocs(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4470,6 +4605,8 @@ func TestReconciler_RollingUpgrade_MissingAllocs(t *testing.T) { // Tests that the reconciler handles rerunning a batch job in the case that the // allocations are from an older instance of the job. func TestReconciler_Batch_Rerun(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Update = nil @@ -4516,6 +4653,8 @@ func TestReconciler_Batch_Rerun(t *testing.T) { // Test that a failed deployment will not result in rescheduling failed allocations func TestReconciler_FailedDeployment_DontReschedule(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4574,6 +4713,8 @@ func TestReconciler_FailedDeployment_DontReschedule(t *testing.T) { // Test that a running deployment with failed allocs will not result in // rescheduling failed allocations unless they are marked as reschedulable. 
func TestReconciler_DeploymentWithFailedAllocs_DontReschedule(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate tgName := job.TaskGroups[0].Name @@ -4632,6 +4773,8 @@ func TestReconciler_DeploymentWithFailedAllocs_DontReschedule(t *testing.T) { // Test that a failed deployment cancels non-promoted canaries func TestReconciler_FailedDeployment_AutoRevert_CancelCanaries(t *testing.T) { + ci.Parallel(t) + // Create a job job := mock.Job() job.TaskGroups[0].Count = 3 @@ -4728,6 +4871,8 @@ func TestReconciler_FailedDeployment_AutoRevert_CancelCanaries(t *testing.T) { // Test that a successful deployment with failed allocs will result in // rescheduling failed allocations func TestReconciler_SuccessfulDeploymentWithFailedAllocs_Reschedule(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate tgName := job.TaskGroups[0].Name @@ -4782,6 +4927,8 @@ func TestReconciler_SuccessfulDeploymentWithFailedAllocs_Reschedule(t *testing.T // Tests force rescheduling a failed alloc that is past its reschedule limit func TestReconciler_ForceReschedule_Service(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -4858,6 +5005,8 @@ func TestReconciler_ForceReschedule_Service(t *testing.T) { // new allocs should be placed to satisfy the job count, and current allocations are // left unmodified func TestReconciler_RescheduleNot_Service(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -4940,6 +5089,8 @@ func TestReconciler_RescheduleNot_Service(t *testing.T) { // Tests behavior of batch failure with rescheduling policy preventing rescheduling: // current allocations are left unmodified and no follow up func TestReconciler_RescheduleNot_Batch(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 4 job := mock.Job() @@ -5017,5 +5168,4 @@ func TestReconciler_RescheduleNot_Batch(t *testing.T) { }, }, }) - } diff --git 
a/scheduler/reconcile_util_test.go b/scheduler/reconcile_util_test.go index 59772a349..17617cf3c 100644 --- a/scheduler/reconcile_util_test.go +++ b/scheduler/reconcile_util_test.go @@ -3,6 +3,7 @@ package scheduler import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,6 +15,8 @@ import ( // aligned. // Ensure no regression from: https://github.com/hashicorp/nomad/issues/3008 func TestBitmapFrom(t *testing.T) { + ci.Parallel(t) + input := map[string]*structs.Allocation{ "8": { JobID: "foo", @@ -34,6 +37,8 @@ func TestBitmapFrom(t *testing.T) { } func TestAllocSet_filterByTainted(t *testing.T) { + ci.Parallel(t) + require := require.New(t) nodes := map[string]*structs.Node{ diff --git a/scheduler/scheduler_sysbatch_test.go b/scheduler/scheduler_sysbatch_test.go index acee0affd..dcc3d6ea3 100644 --- a/scheduler/scheduler_sysbatch_test.go +++ b/scheduler/scheduler_sysbatch_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -15,6 +16,8 @@ import ( ) func TestSysBatch_JobRegister(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -74,6 +77,8 @@ func TestSysBatch_JobRegister(t *testing.T) { } func TestSysBatch_JobRegister_AddNode_Running(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -149,6 +154,8 @@ func TestSysBatch_JobRegister_AddNode_Running(t *testing.T) { } func TestSysBatch_JobRegister_AddNode_Dead(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -225,6 +232,8 @@ func TestSysBatch_JobRegister_AddNode_Dead(t *testing.T) { } func TestSysBatch_JobModify(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -313,6 +322,8 @@ func TestSysBatch_JobModify(t 
*testing.T) { } func TestSysBatch_JobModify_InPlace(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -385,6 +396,8 @@ func TestSysBatch_JobModify_InPlace(t *testing.T) { } func TestSysBatch_JobDeregister_Purged(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -444,6 +457,8 @@ func TestSysBatch_JobDeregister_Purged(t *testing.T) { } func TestSysBatch_JobDeregister_Stopped(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -505,6 +520,8 @@ func TestSysBatch_JobDeregister_Stopped(t *testing.T) { } func TestSysBatch_NodeDown(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a down node @@ -564,6 +581,8 @@ func TestSysBatch_NodeDown(t *testing.T) { } func TestSysBatch_NodeDrain_Down(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -616,6 +635,8 @@ func TestSysBatch_NodeDrain_Down(t *testing.T) { } func TestSysBatch_NodeDrain(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -671,6 +692,8 @@ func TestSysBatch_NodeDrain(t *testing.T) { } func TestSysBatch_NodeUpdate(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a node @@ -713,6 +736,8 @@ func TestSysBatch_NodeUpdate(t *testing.T) { } func TestSysBatch_RetryLimit(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) h.Planner = &RejectPlan{h} @@ -757,6 +782,8 @@ func TestSysBatch_RetryLimit(t *testing.T) { // count for a task group when allocations can't be created on currently // available nodes because of constraint mismatches. 
func TestSysBatch_Queued_With_Constraints(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) nodes := createNodes(t, h, 3) @@ -802,6 +829,8 @@ func TestSysBatch_Queued_With_Constraints(t *testing.T) { } func TestSysBatch_Queued_With_Constraints_PartialMatch(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // linux machines @@ -850,6 +879,8 @@ func TestSysBatch_Queued_With_Constraints_PartialMatch(t *testing.T) { // should be that the TaskGroup constrained to the newly added node class is // added and that the TaskGroup constrained to the ineligible node is ignored. func TestSysBatch_JobConstraint_AddNode(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create two nodes @@ -995,6 +1026,8 @@ func TestSysBatch_JobConstraint_AddNode(t *testing.T) { // No errors reported when no available nodes prevent placement func TestSysBatch_ExistingAllocNoNodes(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) var node *structs.Node @@ -1074,6 +1107,8 @@ func TestSysBatch_ExistingAllocNoNodes(t *testing.T) { } func TestSysBatch_ConstraintErrors(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) var node *structs.Node @@ -1147,6 +1182,8 @@ func TestSysBatch_ConstraintErrors(t *testing.T) { } func TestSysBatch_ChainedAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1234,6 +1271,8 @@ func TestSysBatch_ChainedAlloc(t *testing.T) { } func TestSysBatch_PlanWithDrainedNode(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register two nodes with two different classes @@ -1314,6 +1353,8 @@ func TestSysBatch_PlanWithDrainedNode(t *testing.T) { } func TestSysBatch_QueuedAllocsMultTG(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register two nodes with two different classes @@ -1370,6 +1411,8 @@ func TestSysBatch_QueuedAllocsMultTG(t *testing.T) { } func TestSysBatch_Preemption(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create nodes @@ -1654,6 +1697,8 @@ func TestSysBatch_Preemption(t *testing.T) { } 
func TestSysBatch_canHandle(t *testing.T) { + ci.Parallel(t) + s := SystemScheduler{sysbatch: true} t.Run("sysbatch register", func(t *testing.T) { require.True(t, s.canHandle(structs.EvalTriggerJobRegister)) diff --git a/scheduler/scheduler_system_test.go b/scheduler/scheduler_system_test.go index fc94a6180..63c4c1869 100644 --- a/scheduler/scheduler_system_test.go +++ b/scheduler/scheduler_system_test.go @@ -8,6 +8,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -16,6 +17,8 @@ import ( ) func TestSystemSched_JobRegister(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -75,6 +78,8 @@ func TestSystemSched_JobRegister(t *testing.T) { } func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -148,6 +153,8 @@ func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) { } func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -217,6 +224,8 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) { } func TestSystemSched_ExhaustResources(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -295,6 +304,8 @@ func TestSystemSched_ExhaustResources(t *testing.T) { } func TestSystemSched_JobRegister_Annotate(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -391,6 +402,8 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) { } func TestSystemSched_JobRegister_AddNode(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -469,6 +482,8 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) { } func TestSystemSched_JobRegister_AllocFail(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create NO nodes @@ -501,6 +516,8 @@ func 
TestSystemSched_JobRegister_AllocFail(t *testing.T) { } func TestSystemSched_JobModify(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -588,6 +605,8 @@ func TestSystemSched_JobModify(t *testing.T) { } func TestSystemSched_JobModify_Rolling(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -686,6 +705,8 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) { } func TestSystemSched_JobModify_InPlace(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -766,6 +787,8 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) { } func TestSystemSched_JobModify_RemoveDC(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -851,6 +874,8 @@ func TestSystemSched_JobModify_RemoveDC(t *testing.T) { } func TestSystemSched_JobDeregister_Purged(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -910,6 +935,8 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) { } func TestSystemSched_JobDeregister_Stopped(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -971,6 +998,8 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) { } func TestSystemSched_NodeDown(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a down node @@ -1030,6 +1059,8 @@ func TestSystemSched_NodeDown(t *testing.T) { } func TestSystemSched_NodeDrain_Down(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -1082,6 +1113,8 @@ func TestSystemSched_NodeDrain_Down(t *testing.T) { } func TestSystemSched_NodeDrain(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -1137,6 +1170,8 @@ func TestSystemSched_NodeDrain(t *testing.T) { } func TestSystemSched_NodeUpdate(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a node @@ -1179,6 +1214,8 @@ func TestSystemSched_NodeUpdate(t *testing.T) { } func TestSystemSched_RetryLimit(t *testing.T) { + ci.Parallel(t) 
+ h := NewHarness(t) h.Planner = &RejectPlan{h} @@ -1223,6 +1260,8 @@ func TestSystemSched_RetryLimit(t *testing.T) { // count for a task group when allocations can't be created on currently // available nodes because of constraint mismatches. func TestSystemSched_Queued_With_Constraints(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a node @@ -1262,6 +1301,8 @@ func TestSystemSched_Queued_With_Constraints(t *testing.T) { // should be that the TaskGroup constrained to the newly added node class is // added and that the TaskGroup constrained to the ineligible node is ignored. func TestSystemSched_JobConstraint_AddNode(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create two nodes @@ -1409,6 +1450,8 @@ func TestSystemSched_JobConstraint_AddNode(t *testing.T) { // No errors reported when no available nodes prevent placement func TestSystemSched_ExistingAllocNoNodes(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) var node *structs.Node @@ -1488,6 +1531,8 @@ func TestSystemSched_ExistingAllocNoNodes(t *testing.T) { // No errors reported when constraints prevent placement func TestSystemSched_ConstraintErrors(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) var node *structs.Node @@ -1559,6 +1604,8 @@ func TestSystemSched_ConstraintErrors(t *testing.T) { } func TestSystemSched_ChainedAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1647,6 +1694,8 @@ func TestSystemSched_ChainedAlloc(t *testing.T) { } func TestSystemSched_PlanWithDrainedNode(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register two nodes with two different classes @@ -1727,6 +1776,8 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) { } func TestSystemSched_QueuedAllocsMultTG(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register two nodes with two different classes @@ -1783,6 +1834,8 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) { } func TestSystemSched_Preemption(t *testing.T) { + 
ci.Parallel(t) + h := NewHarness(t) // Create nodes @@ -2066,6 +2119,8 @@ func TestSystemSched_Preemption(t *testing.T) { } func TestSystemSched_canHandle(t *testing.T) { + ci.Parallel(t) + s := SystemScheduler{sysbatch: false} t.Run("system register", func(t *testing.T) { require.True(t, s.canHandle(structs.EvalTriggerJobRegister)) diff --git a/scheduler/select_test.go b/scheduler/select_test.go index 7625acdff..b553882a6 100644 --- a/scheduler/select_test.go +++ b/scheduler/select_test.go @@ -3,12 +3,15 @@ package scheduler import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) func TestLimitIterator(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*RankedNode{ { @@ -53,6 +56,8 @@ func TestLimitIterator(t *testing.T) { } func TestLimitIterator_ScoreThreshold(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) type testCase struct { desc string @@ -317,6 +322,8 @@ func TestLimitIterator_ScoreThreshold(t *testing.T) { } func TestMaxScoreIterator(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*RankedNode{ { diff --git a/scheduler/spread_test.go b/scheduler/spread_test.go index bf9059ecb..adba6ffa5 100644 --- a/scheduler/spread_test.go +++ b/scheduler/spread_test.go @@ -1,14 +1,14 @@ package scheduler import ( + "fmt" "math" "math/rand" "sort" "testing" "time" - "fmt" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -17,6 +17,8 @@ import ( ) func TestSpreadIterator_SingleAttribute(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) dcs := []string{"dc1", "dc2", "dc1", "dc1"} var nodes []*RankedNode @@ -175,6 +177,8 @@ func TestSpreadIterator_SingleAttribute(t *testing.T) { } func TestSpreadIterator_MultipleAttributes(t *testing.T) { + ci.Parallel(t) + state, ctx := 
testContext(t) dcs := []string{"dc1", "dc2", "dc1", "dc1"} rack := []string{"r1", "r1", "r2", "r2"} @@ -276,6 +280,8 @@ func TestSpreadIterator_MultipleAttributes(t *testing.T) { } func TestSpreadIterator_EvenSpread(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) dcs := []string{"dc1", "dc2", "dc1", "dc2", "dc1", "dc2", "dc2", "dc1", "dc1", "dc1"} var nodes []*RankedNode @@ -464,6 +470,8 @@ func TestSpreadIterator_EvenSpread(t *testing.T) { // Test scenarios where the spread iterator sets maximum penalty (-1.0) func TestSpreadIterator_MaxPenalty(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) var nodes []*RankedNode @@ -551,6 +559,8 @@ func TestSpreadIterator_MaxPenalty(t *testing.T) { } func Test_evenSpreadScoreBoost(t *testing.T) { + ci.Parallel(t) + pset := &propertySet{ existingValues: map[string]uint64{}, proposedValues: map[string]uint64{ @@ -580,7 +590,7 @@ func Test_evenSpreadScoreBoost(t *testing.T) { // can prevent quadratic performance but then we need this test to // verify we have satisfactory spread results. 
func TestSpreadOnLargeCluster(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string nodeCount int @@ -640,7 +650,7 @@ func TestSpreadOnLargeCluster(t *testing.T) { for i := range cases { tc := cases[i] t.Run(tc.name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) h := NewHarness(t) err := upsertNodes(h, tc.nodeCount, tc.racks) require.NoError(t, err) @@ -814,6 +824,7 @@ func validateEqualSpread(h *Harness) error { } func TestSpreadPanicDowngrade(t *testing.T) { + ci.Parallel(t) h := NewHarness(t) diff --git a/scheduler/stack_test.go b/scheduler/stack_test.go index 2f36e0014..458f27f36 100644 --- a/scheduler/stack_test.go +++ b/scheduler/stack_test.go @@ -6,6 +6,7 @@ import ( "runtime" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" @@ -55,6 +56,8 @@ func benchmarkServiceStack_MetaKeyConstraint(b *testing.B, key string, numNodes, } func TestServiceStack_SetNodes(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) stack := NewGenericStack(false, ctx) @@ -82,6 +85,8 @@ func TestServiceStack_SetNodes(t *testing.T) { } func TestServiceStack_SetJob(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) stack := NewGenericStack(false, ctx) @@ -97,6 +102,8 @@ func TestServiceStack_SetJob(t *testing.T) { } func TestServiceStack_Select_Size(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -122,6 +129,8 @@ func TestServiceStack_Select_Size(t *testing.T) { } func TestServiceStack_Select_PreferringNodes(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -166,6 +175,8 @@ func TestServiceStack_Select_PreferringNodes(t *testing.T) { } func TestServiceStack_Select_MetricsReset(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -202,6 +213,8 @@ func 
TestServiceStack_Select_MetricsReset(t *testing.T) { } func TestServiceStack_Select_DriverFilter(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -232,6 +245,8 @@ func TestServiceStack_Select_DriverFilter(t *testing.T) { } func TestServiceStack_Select_CSI(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -308,6 +323,8 @@ func TestServiceStack_Select_CSI(t *testing.T) { } func TestServiceStack_Select_ConstraintFilter(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -348,6 +365,8 @@ func TestServiceStack_Select_ConstraintFilter(t *testing.T) { } func TestServiceStack_Select_BinPack_Overflow(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -391,6 +410,8 @@ func TestServiceStack_Select_BinPack_Overflow(t *testing.T) { } func TestSystemStack_SetNodes(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) stack := NewSystemStack(false, ctx) @@ -413,6 +434,8 @@ func TestSystemStack_SetNodes(t *testing.T) { } func TestSystemStack_SetJob(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) stack := NewSystemStack(false, ctx) @@ -428,6 +451,8 @@ func TestSystemStack_SetJob(t *testing.T) { } func TestSystemStack_Select_Size(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{mock.Node()} stack := NewSystemStack(false, ctx) @@ -451,6 +476,8 @@ func TestSystemStack_Select_Size(t *testing.T) { } func TestSystemStack_Select_MetricsReset(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -487,6 +514,8 @@ func TestSystemStack_Select_MetricsReset(t *testing.T) { } func TestSystemStack_Select_DriverFilter(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -526,6 +555,8 @@ func TestSystemStack_Select_DriverFilter(t *testing.T) { 
} func TestSystemStack_Select_ConstraintFilter(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -567,6 +598,8 @@ func TestSystemStack_Select_ConstraintFilter(t *testing.T) { } func TestSystemStack_Select_BinPack_Overflow(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), diff --git a/scheduler/util_test.go b/scheduler/util_test.go index f03114ba7..fec7d8ad4 100644 --- a/scheduler/util_test.go +++ b/scheduler/util_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" "github.com/hashicorp/nomad/helper" @@ -17,6 +18,8 @@ import ( ) func TestMaterializeTaskGroups(t *testing.T) { + ci.Parallel(t) + job := mock.Job() index := materializeTaskGroups(job) require.Equal(t, 10, len(index)) @@ -35,6 +38,8 @@ func newNode(name string) *structs.Node { } func TestDiffSystemAllocsForNode_Sysbatch_terminal(t *testing.T) { + ci.Parallel(t) + // For a sysbatch job, the scheduler should not re-place an allocation // that has become terminal, unless the job has been updated. 
@@ -99,6 +104,8 @@ func TestDiffSystemAllocsForNode_Sysbatch_terminal(t *testing.T) { } func TestDiffSystemAllocsForNode(t *testing.T) { + ci.Parallel(t) + job := mock.Job() required := materializeTaskGroups(job) @@ -233,6 +240,8 @@ func TestDiffSystemAllocsForNode(t *testing.T) { // Test the desired diff for an updated system job running on a // ineligible node func TestDiffSystemAllocsForNode_ExistingAllocIneligibleNode(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Count = 1 required := materializeTaskGroups(job) @@ -284,6 +293,8 @@ func TestDiffSystemAllocsForNode_ExistingAllocIneligibleNode(t *testing.T) { } func TestDiffSystemAllocs(t *testing.T) { + ci.Parallel(t) + job := mock.SystemJob() drainNode := mock.DrainNode() @@ -391,6 +402,8 @@ func TestDiffSystemAllocs(t *testing.T) { } func TestReadyNodesInDCs(t *testing.T) { + ci.Parallel(t) + state := state.TestStateStore(t) node1 := mock.Node() node2 := mock.Node() @@ -421,6 +434,8 @@ func TestReadyNodesInDCs(t *testing.T) { } func TestRetryMax(t *testing.T) { + ci.Parallel(t) + calls := 0 bad := func() (bool, error) { calls += 1 @@ -454,6 +469,8 @@ func TestRetryMax(t *testing.T) { } func TestTaintedNodes(t *testing.T) { + ci.Parallel(t) + state := state.TestStateStore(t) node1 := mock.Node() node2 := mock.Node() @@ -491,6 +508,8 @@ func TestTaintedNodes(t *testing.T) { } func TestShuffleNodes(t *testing.T) { + ci.Parallel(t) + // Use a large number of nodes to make the probability of shuffling to the // original order very low. 
nodes := []*structs.Node{ @@ -521,6 +540,8 @@ func TestShuffleNodes(t *testing.T) { } func TestTaskUpdatedAffinity(t *testing.T) { + ci.Parallel(t) + j1 := mock.Job() j2 := mock.Job() name := j1.TaskGroups[0].Name @@ -589,6 +610,8 @@ func TestTaskUpdatedAffinity(t *testing.T) { } func TestTaskUpdatedSpread(t *testing.T) { + ci.Parallel(t) + j1 := mock.Job() j2 := mock.Job() name := j1.TaskGroups[0].Name @@ -654,6 +677,8 @@ func TestTaskUpdatedSpread(t *testing.T) { require.False(t, tasksUpdated(j5, j6, name)) } func TestTasksUpdated(t *testing.T) { + ci.Parallel(t) + j1 := mock.Job() j2 := mock.Job() name := j1.TaskGroups[0].Name @@ -789,6 +814,8 @@ func TestTasksUpdated(t *testing.T) { } func TestTasksUpdated_connectServiceUpdated(t *testing.T) { + ci.Parallel(t) + servicesA := []*structs.Service{{ Name: "service1", PortLabel: "1111", @@ -868,7 +895,7 @@ func TestTasksUpdated_connectServiceUpdated(t *testing.T) { } func TestNetworkUpdated(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string a []*structs.NetworkResource @@ -935,6 +962,8 @@ func TestNetworkUpdated(t *testing.T) { } func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) allocs := []allocTuple{ {Alloc: &structs.Allocation{ID: uuid.Generate()}}, @@ -951,6 +980,8 @@ func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) { } func TestEvictAndPlace_LimitEqualToAllocs(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) allocs := []allocTuple{ {Alloc: &structs.Allocation{ID: uuid.Generate()}}, @@ -967,6 +998,8 @@ func TestEvictAndPlace_LimitEqualToAllocs(t *testing.T) { } func TestSetStatus(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) logger := testlog.HCLogger(t) eval := mock.Eval() @@ -1027,6 +1060,8 @@ func TestSetStatus(t *testing.T) { } func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) eval := mock.Eval() job := mock.Job() @@ -1082,6 +1117,8 @@ func 
TestInplaceUpdate_ChangedTaskGroup(t *testing.T) { } func TestInplaceUpdate_AllocatedResources(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) eval := mock.Eval() job := mock.Job() @@ -1139,6 +1176,8 @@ func TestInplaceUpdate_AllocatedResources(t *testing.T) { } func TestInplaceUpdate_NoMatch(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) eval := mock.Eval() job := mock.Job() @@ -1190,6 +1229,8 @@ func TestInplaceUpdate_NoMatch(t *testing.T) { } func TestInplaceUpdate_Success(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) eval := mock.Eval() job := mock.Job() @@ -1279,6 +1320,8 @@ func TestInplaceUpdate_Success(t *testing.T) { } func TestEvictAndPlace_LimitGreaterThanAllocs(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) allocs := []allocTuple{ {Alloc: &structs.Allocation{ID: uuid.Generate()}}, @@ -1295,6 +1338,8 @@ func TestEvictAndPlace_LimitGreaterThanAllocs(t *testing.T) { } func TestTaskGroupConstraints(t *testing.T) { + ci.Parallel(t) + constr := &structs.Constraint{RTarget: "bar"} constr2 := &structs.Constraint{LTarget: "foo"} constr3 := &structs.Constraint{Operand: "<"} @@ -1336,6 +1381,8 @@ func TestTaskGroupConstraints(t *testing.T) { } func TestProgressMade(t *testing.T) { + ci.Parallel(t) + noopPlan := &structs.PlanResult{} require.False(t, progressMade(nil) || progressMade(noopPlan), "no progress plan marked as making progress") @@ -1360,6 +1407,8 @@ func TestProgressMade(t *testing.T) { } func TestDesiredUpdates(t *testing.T) { + ci.Parallel(t) + tg1 := &structs.TaskGroup{Name: "foo"} tg2 := &structs.TaskGroup{Name: "bar"} a2 := &structs.Allocation{TaskGroup: "bar"} @@ -1416,6 +1465,8 @@ func TestDesiredUpdates(t *testing.T) { } func TestUtil_AdjustQueuedAllocations(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) alloc1 := mock.Alloc() alloc2 := mock.Alloc() @@ -1451,6 +1502,8 @@ func TestUtil_AdjustQueuedAllocations(t *testing.T) { } func 
TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) { + ci.Parallel(t) + node := mock.Node() node.Status = structs.NodeStatusDown alloc1 := mock.Alloc() @@ -1503,6 +1556,8 @@ func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) { } func TestUtil_connectUpdated(t *testing.T) { + ci.Parallel(t) + t.Run("both nil", func(t *testing.T) { require.False(t, connectUpdated(nil, nil)) }) @@ -1555,6 +1610,8 @@ func TestUtil_connectUpdated(t *testing.T) { } func TestUtil_connectSidecarServiceUpdated(t *testing.T) { + ci.Parallel(t) + t.Run("both nil", func(t *testing.T) { require.False(t, connectSidecarServiceUpdated(nil, nil)) }) diff --git a/testutil/slow.go b/testutil/slow.go deleted file mode 100644 index 1a8088024..000000000 --- a/testutil/slow.go +++ /dev/null @@ -1,15 +0,0 @@ -package testutil - -import ( - "os" - - testing "github.com/mitchellh/go-testing-interface" -) - -// SkipSlow skips a slow test unless the NOMAD_SLOW_TEST environment variable -// is set. -func SkipSlow(t testing.T) { - if os.Getenv("NOMAD_SLOW_TEST") == "" { - t.Skip("Skipping slow test. Set NOMAD_SLOW_TEST=1 to run.") - } -} From 2b83614a26dc23afaa953c3a78e616ded2564777 Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Wed, 16 Mar 2022 08:38:42 -0500 Subject: [PATCH 2/2] ci: explain why ci runs tests in serial now --- ci/slow.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ci/slow.go b/ci/slow.go index 5becb90b7..f6b8d066a 100644 --- a/ci/slow.go +++ b/ci/slow.go @@ -16,6 +16,9 @@ func SkipSlow(t *testing.T, reason string) { } // Parallel runs t in parallel, unless CI is set to a true value. +// +// In CI (CircleCI / GitHub Actions) we get better performance by running tests +// in serial while not restricting GOMAXPROCS. func Parallel(t *testing.T) { value := os.Getenv("CI") isCI, err := strconv.ParseBool(value)