ci: swap freeport for portal in packages (#15661)

Seth Hoenig 2023-01-03 11:25:20 -06:00 committed by GitHub
parent 9771281ecd
commit 7214e21402
24 changed files with 201 additions and 1041 deletions

ci/ports.go (new file, +20 lines)
View File

@ -0,0 +1,20 @@
package ci

import (
	"fmt"

	"github.com/shoenig/test/portal"
)

// fatalTester provides the Fatalf method that portal expects; it panics,
// since the shared allocator outlives any single test's *testing.T.
type fatalTester struct{}

func (t *fatalTester) Fatalf(msg string, args ...any) {
	panic(fmt.Sprintf(msg, args...))
}

// PortAllocator is used to acquire unused ports for testing real network
// listeners.
var PortAllocator = portal.New(
	new(fatalTester),
	portal.WithAddress("127.0.0.1"),
)
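
For illustration, a hypothetical caller (not part of this commit; imports shown for completeness) grabs however many verified-free ports it needs and uses them directly; unlike freeport, there is no Return step when the test ends:

package ci_test

import (
	"fmt"
	"net"
	"testing"

	"github.com/hashicorp/nomad/ci"
)

func TestExample_TCP(t *testing.T) {
	ports := ci.PortAllocator.Grab(2)

	// Each grabbed port was verified free on 127.0.0.1 at grab time.
	ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", ports[0]))
	if err != nil {
		t.Fatalf("listen on grabbed port: %v", err)
	}
	defer func() { _ = ln.Close() }()

	// ports[1] is available for a second listener; no cleanup or Return
	// call is needed once the test finishes.
}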

View File

@ -12,7 +12,6 @@ import (
"time"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
@ -410,8 +409,7 @@ func TestChecker_Do_TCP(t *testing.T) {
}
}
ports := freeport.MustTake(3)
defer freeport.Return(ports)
ports := ci.PortAllocator.Grab(3)
cases := []struct {
name string

View File

@ -16,7 +16,6 @@ import (
"github.com/hashicorp/nomad/ci"
client "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/helper/pointer"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
@ -660,8 +659,7 @@ func TestConfig_Listener(t *testing.T) {
}
// Works with valid inputs
ports := freeport.MustTake(2)
defer freeport.Return(ports)
ports := ci.PortAllocator.Grab(2)
ln, err := config.Listener("tcp", "127.0.0.1", ports[0])
if err != nil {

View File

@ -15,10 +15,10 @@ import (
metrics "github.com/armon/go-metrics"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/ci"
client "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/fingerprint"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad"
"github.com/hashicorp/nomad/nomad/mock"
@ -269,8 +269,6 @@ func (a *TestAgent) Shutdown() {
}
a.shutdown = true
defer freeport.Return(a.ports)
defer func() {
if a.DataDir != "" {
_ = os.RemoveAll(a.DataDir)
@ -332,7 +330,7 @@ func (a *TestAgent) Client() *api.Client {
// Instead of relying on one set of ports to be sufficient we retry
// starting the agent with different ports on port conflict.
func (a *TestAgent) pickRandomPorts(c *Config) {
ports := freeport.MustTake(3)
ports := ci.PortAllocator.Grab(3)
a.ports = append(a.ports, ports...)
c.Ports.HTTP = ports[0]
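
A condensed sketch of the retry contract this comment describes (hypothetical: pickRandomPorts is real, while the start helper, field names, and loop bounds are assumptions; the backoff mirrors the TestServerErr loop later in this commit):

for i := 10; i >= 0; i-- {
	a.pickRandomPorts(a.Config) // fresh set of ports on every attempt
	if err := a.start(); err == nil {
		return // all listeners bound successfully
	} else if i == 0 {
		a.T.Fatalf("agent failed to start: %v", err)
	}
	// Assume a port conflict: back off briefly, then retry with new ports.
	time.Sleep(time.Duration(rand.Int31n(2000)) * time.Millisecond)
}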

View File

@ -30,8 +30,7 @@ func TestHTTP_Variables(t *testing.T) {
httpTest(t, cb, func(s *TestAgent) {
// These tests are run against the same running server in order to reduce
// the costs of server startup and allow as much parallelization as possible
// given the port reuse issue that we have seen with the current freeport
// the costs of server startup and allow as much parallelization as possible.
t.Run("error_badverb_list", func(t *testing.T) {
req, err := http.NewRequest("LOLWUT", "/v1/vars", nil)
require.NoError(t, err)

View File

@ -14,12 +14,11 @@ Each unit test should meet a few criteria:
- Undo any changes to the environment
- Set environment variables must be unset (use `t.Setenv`)
- Scratch files/dirs must be removed (use `t.TempDir`)
- Consumed ports must be freed (e.g. `TestServer.Cleanup`, `freeport.Return`)
- Able to run in parallel
- All package level `Test*` functions should start with `ci.Parallel`
- Always use dynamic scratch dirs, files
- Always get ports from helpers (`TestServer`, `TestClient`, `TestAgent`, `freeport.Get`)
- Always get ports via `ci.PortAllocator.Grab()`
- Log control
- Logging must go through the `testing.T` (use `helper/testlog.HCLogger`)
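
Putting the checklist together, a conforming test might look like this sketch (illustrative only; Widget is a made-up subject and imports are elided):

func TestWidget_Listen(t *testing.T) {
	ci.Parallel(t)                // parallel-safe from the first line
	dir := t.TempDir()            // scratch dir, removed automatically
	logger := testlog.HCLogger(t) // log output routed through testing.T

	port := ci.PortAllocator.Grab(1)[0]
	ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
	if err != nil {
		t.Fatalf("listen: %v", err)
	}
	defer func() { _ = ln.Close() }()

	logger.Info("widget listening", "dir", dir, "addr", ln.Addr().String())
}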

View File

@ -12,7 +12,6 @@ import (
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/helper/pointer"
tu "github.com/hashicorp/nomad/testutil"
"github.com/stretchr/testify/require"
@ -55,8 +54,7 @@ func TestDockerDriver_PluginConfig_PidsLimit(t *testing.T) {
driver := dh.Impl().(*Driver)
driver.config.PidsLimit = 5
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
cfg.PidsLimit = 7
@ -75,8 +73,7 @@ func TestDockerDriver_PidsLimit(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.PidsLimit = 1
cfg.Command = "/bin/sh"

View File

@ -20,7 +20,6 @@ import (
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/taskenv"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/helper/pluginutils/hclspecutils"
"github.com/hashicorp/nomad/helper/pluginutils/hclutils"
"github.com/hashicorp/nomad/helper/pluginutils/loader"
@ -73,10 +72,9 @@ var (
busyboxLongRunningCmd = []string{"nc", "-l", "-p", "3000", "127.0.0.1"}
)
// Returns a task with a reserved and dynamic port. The ports are returned
// respectively, and should be reclaimed with freeport.Return at the end of a test.
// Returns a task with a reserved and dynamic port.
func dockerTask(t *testing.T) (*drivers.TaskConfig, *TaskConfig, []int) {
ports := freeport.MustTake(2)
ports := ci.PortAllocator.Grab(2)
dockerReserved := ports[0]
dockerDynamic := ports[1]
@ -640,14 +638,11 @@ func TestDockerDriver_StartN(t *testing.T) {
testutil.DockerCompatible(t)
require := require.New(t)
task1, _, ports1 := dockerTask(t)
defer freeport.Return(ports1)
task1, _, _ := dockerTask(t)
task2, _, ports2 := dockerTask(t)
defer freeport.Return(ports2)
task2, _, _ := dockerTask(t)
task3, _, ports3 := dockerTask(t)
defer freeport.Return(ports3)
task3, _, _ := dockerTask(t)
taskList := []*drivers.TaskConfig{task1, task2, task3}
@ -694,22 +689,22 @@ func TestDockerDriver_StartNVersions(t *testing.T) {
testutil.DockerCompatible(t)
require := require.New(t)
task1, cfg1, ports1 := dockerTask(t)
defer freeport.Return(ports1)
task1, cfg1, _ := dockerTask(t)
tcfg1 := newTaskConfig("", []string{"echo", "hello"})
cfg1.Image = tcfg1.Image
cfg1.LoadImage = tcfg1.LoadImage
require.NoError(task1.EncodeConcreteDriverConfig(cfg1))
task2, cfg2, ports2 := dockerTask(t)
defer freeport.Return(ports2)
task2, cfg2, _ := dockerTask(t)
tcfg2 := newTaskConfig("musl", []string{"echo", "hello"})
cfg2.Image = tcfg2.Image
cfg2.LoadImage = tcfg2.LoadImage
require.NoError(task2.EncodeConcreteDriverConfig(cfg2))
task3, cfg3, ports3 := dockerTask(t)
defer freeport.Return(ports3)
task3, cfg3, _ := dockerTask(t)
tcfg3 := newTaskConfig("glibc", []string{"echo", "hello"})
cfg3.Image = tcfg3.Image
cfg3.LoadImage = tcfg3.LoadImage
@ -759,8 +754,7 @@ func TestDockerDriver_Labels(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Labels = map[string]string{
"label1": "value1",
@ -788,8 +782,7 @@ func TestDockerDriver_ExtraLabels(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -823,8 +816,7 @@ func TestDockerDriver_LoggingConfiguration(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -850,9 +842,9 @@ func TestDockerDriver_HealthchecksDisable(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
task, cfg, _ := dockerTask(t)
cfg.Healthchecks.Disable = true
defer freeport.Return(ports)
must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
client, d, handle, cleanup := dockerSetup(t, task, nil)
@ -870,8 +862,7 @@ func TestDockerDriver_ForcePull(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.ForcePull = true
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -894,8 +885,8 @@ func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) {
}
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.LoadImage = ""
cfg.Image = "library/busybox@sha256:58ac43b2cc92c687a32c8be6278e50a063579655fe3090125dcb2af0ff9e1a64"
localDigest := "sha256:8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7"
@ -920,8 +911,8 @@ func TestDockerDriver_SecurityOptUnconfined(t *testing.T) {
}
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.SecurityOpt = []string{"seccomp=unconfined"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -944,8 +935,8 @@ func TestDockerDriver_SecurityOptFromFile(t *testing.T) {
}
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.SecurityOpt = []string{"seccomp=./test-resources/docker/seccomp.json"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -963,8 +954,8 @@ func TestDockerDriver_Runtime(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Runtime = "runc"
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -983,8 +974,8 @@ func TestDockerDriver_Runtime(t *testing.T) {
func TestDockerDriver_CreateContainerConfig(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
opt := map[string]string{"size": "120G"}
cfg.StorageOpt = opt
@ -1007,8 +998,8 @@ func TestDockerDriver_CreateContainerConfig(t *testing.T) {
func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
task.DeviceEnv["NVIDIA_VISIBLE_DEVICES"] = "GPU_UUID_1"
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -1046,8 +1037,8 @@ func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) {
"custom",
}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
for _, runtime := range allowRuntime {
@ -1071,8 +1062,8 @@ func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) {
func TestDockerDriver_CreateContainerConfig_User(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
task.User = "random-user-1"
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -1089,8 +1080,8 @@ func TestDockerDriver_CreateContainerConfig_User(t *testing.T) {
func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
task.AllocID = uuid.Generate()
task.JobName = "redis-demo-job"
@ -1180,8 +1171,7 @@ func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Logging = c.loggingConfig
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -1202,8 +1192,7 @@ func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) {
func TestDockerDriver_CreateContainerConfig_Mounts(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Mounts = []DockerMount{
{
@ -1317,8 +1306,7 @@ func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) {
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
dh := dockerDriverHarness(t, map[string]interface{}{
"allow_runtimes": []string{"runc", "nvidia", "nvidia-runtime-modified-name"},
@ -1408,8 +1396,7 @@ func TestDockerDriver_Capabilities(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
client := newTestDockerClient(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
if len(tc.CapAdd) > 0 {
cfg.CapAdd = tc.CapAdd
@ -1486,8 +1473,8 @@ func TestDockerDriver_DNS(t *testing.T) {
}
for _, c := range cases {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
task.DNS = c.cfg
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -1509,8 +1496,7 @@ func TestDockerDriver_Init(t *testing.T) {
t.Skip("Windows does not support init.")
}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Init = true
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -1550,8 +1536,7 @@ func TestDockerDriver_CPUSetCPUs(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.Name, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.CPUSetCPUs = testCase.CPUSetCPUs
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -1575,8 +1560,7 @@ func TestDockerDriver_MemoryHardLimit(t *testing.T) {
t.Skip("Windows does not support MemoryReservation")
}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.MemoryHardLimit = 300
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -1599,8 +1583,8 @@ func TestDockerDriver_MACAddress(t *testing.T) {
t.Skip("Windows docker does not support setting MacAddress")
}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.MacAddress = "00:16:3e:00:00:00"
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -1618,8 +1602,8 @@ func TestDockerWorkDir(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.WorkDir = "/some/path"
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -1646,7 +1630,6 @@ func TestDockerDriver_PortsNoMap(t *testing.T) {
testutil.DockerCompatible(t)
task, _, ports := dockerTask(t)
defer freeport.Return(ports)
res := ports[0]
dyn := ports[1]
@ -1688,7 +1671,6 @@ func TestDockerDriver_PortsMapping(t *testing.T) {
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
res := ports[0]
dyn := ports[1]
cfg.PortMap = map[string]int{
@ -1737,7 +1719,6 @@ func TestDockerDriver_CreateContainerConfig_Ports(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
hostIP := "127.0.0.1"
if runtime.GOOS == "windows" {
hostIP = ""
@ -1780,7 +1761,6 @@ func TestDockerDriver_CreateContainerConfig_PortsMapping(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
res := ports[0]
dyn := ports[1]
cfg.PortMap = map[string]int{
@ -1816,8 +1796,7 @@ func TestDockerDriver_CleanupContainer(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Command = "echo"
cfg.Args = []string{"hello"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -1854,8 +1833,7 @@ func TestDockerDriver_EnableImageGC(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Command = "echo"
cfg.Args = []string{"hello"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -1921,8 +1899,7 @@ func TestDockerDriver_DisableImageGC(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Command = "echo"
cfg.Args = []string{"hello"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -1984,8 +1961,8 @@ func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Command = "echo"
cfg.Args = []string{"hello"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -2051,8 +2028,8 @@ func TestDockerDriver_Stats(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Command = "sleep"
cfg.Args = []string{"1000"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -2273,8 +2250,8 @@ func TestDockerDriver_Mounts(t *testing.T) {
driver.config.Volumes.Enabled = true
// Build the task
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Command = "sleep"
cfg.Args = []string{"10000"}
cfg.Mounts = c.Mounts
@ -2472,7 +2449,7 @@ func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) {
}
for _, tc := range testCases {
task, cfg, ports := dockerTask(t)
task, cfg, _ := dockerTask(t)
cfg.Devices = tc.deviceConfig
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
d := dockerDriverHarness(t, nil)
@ -2483,7 +2460,6 @@ func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) {
_, _, err := d.StartTask(task)
require.Error(t, err)
require.Contains(t, err.Error(), tc.err.Error())
freeport.Return(ports)
}
}
@ -2509,8 +2485,8 @@ func TestDockerDriver_Device_Success(t *testing.T) {
ContainerPath: containerPath,
}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Devices = []DockerDevice{config}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -2530,8 +2506,8 @@ func TestDockerDriver_Entrypoint(t *testing.T) {
testutil.DockerCompatible(t)
entrypoint := []string{"sh", "-c"}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Entrypoint = entrypoint
cfg.Command = strings.Join(busyboxLongRunningCmd, " ")
cfg.Args = []string{}
@ -2558,8 +2534,8 @@ func TestDockerDriver_ReadonlyRootfs(t *testing.T) {
t.Skip("Windows Docker does not support root filesystem in read-only mode")
}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.ReadonlyRootfs = true
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -2596,8 +2572,8 @@ func TestDockerDriver_VolumeError(t *testing.T) {
ci.Parallel(t)
// setup
_, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
_, cfg, _ := dockerTask(t)
driver := dockerDriverHarness(t, nil)
// assert volume error is recoverable
@ -2611,8 +2587,8 @@ func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
expectedPrefix := "2001:db8:1::242:ac11"
expectedAdvertise := true
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.AdvertiseIPv6Addr = expectedAdvertise
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -2718,8 +2694,8 @@ func TestDockerDriver_CreationIdempotent(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
client := newTestDockerClient(t)
@ -2786,8 +2762,7 @@ func TestDockerDriver_CreationIdempotent(t *testing.T) {
func TestDockerDriver_CreateContainerConfig_CPUHardLimit(t *testing.T) {
ci.Parallel(t)
task, _, ports := dockerTask(t)
defer freeport.Return(ports)
task, _, _ := dockerTask(t)
dh := dockerDriverHarness(t, nil)
driver := dh.Impl().(*Driver)

View File

@ -18,7 +18,6 @@ import (
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/plugins/drivers"
dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
@ -31,8 +30,8 @@ func TestDockerDriver_User(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
task.User = "alice"
cfg.Command = "/bin/sleep"
cfg.Args = []string{"10000"}
@ -148,8 +147,8 @@ func TestDockerDriver_CPUCFSPeriod(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.CPUHardLimit = true
cfg.CPUCFSPeriod = 1000000
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
@ -169,8 +168,8 @@ func TestDockerDriver_Sysctl_Ulimit(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
expectedUlimits := map[string]string{
"nproc": "4242",
"nofile": "2048:4096",
@ -240,19 +239,18 @@ func TestDockerDriver_Sysctl_Ulimit_Errors(t *testing.T) {
}
for _, tc := range testCases {
task, cfg, ports := dockerTask(t)
task, cfg, _ := dockerTask(t)
cfg.Ulimit = tc.ulimitConfig
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
d := dockerDriverHarness(t, nil)
cleanup := d.MkAllocDir(task, true)
defer cleanup()
t.Cleanup(cleanup)
copyImage(t, task.TaskDir(), "busybox.tar")
_, _, err := d.StartTask(task)
require.NotNil(t, err, "Expected non nil error")
require.Contains(t, err.Error(), tc.err.Error())
freeport.Return(ports)
}
}
@ -339,8 +337,7 @@ func TestDockerDriver_BindMountsHonorVolumesEnabledFlag(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.VolumeDriver = c.volumeDriver
cfg.Volumes = c.volumes
@ -366,8 +363,7 @@ func TestDockerDriver_BindMountsHonorVolumesEnabledFlag(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.VolumeDriver = c.volumeDriver
cfg.Volumes = c.volumes
@ -515,8 +511,7 @@ func TestDockerDriver_MountsSerialization(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Mounts = c.passedMounts
task.AllocDir = allocDir
@ -538,8 +533,8 @@ func TestDockerDriver_MountsSerialization(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Mounts = c.passedMounts
task.AllocDir = allocDir
@ -567,8 +562,7 @@ func TestDockerDriver_CreateContainerConfig_MountsCombined(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
task.Devices = []*drivers.DeviceConfig{
{

View File

@ -9,7 +9,6 @@ import (
docker "github.com/fsouza/go-dockerclient"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/stretchr/testify/require"
)
@ -63,8 +62,7 @@ func TestDanglingContainerRemoval(t *testing.T) {
testutil.DockerCompatible(t)
// start two containers: one tracked nomad container, and one unrelated container
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
client, d, handle, cleanup := dockerSetup(t, task, nil)
@ -166,8 +164,7 @@ func TestDanglingContainerRemoval_Stopped(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
_, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
_, cfg, _ := dockerTask(t)
client := newTestDockerClient(t)
container, err := client.CreateContainer(docker.CreateContainerOptions{

View File

@ -1,47 +0,0 @@
//go:build darwin
// +build darwin
package freeport
import (
"fmt"
"os/exec"
"regexp"
"strconv"
)
/*
$ sysctl net.inet.ip.portrange.first net.inet.ip.portrange.last
net.inet.ip.portrange.first: 49152
net.inet.ip.portrange.last: 65535
*/
const (
ephPortFirst = "net.inet.ip.portrange.first"
ephPortLast = "net.inet.ip.portrange.last"
command = "sysctl"
)
var ephPortRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s*$`)
func getEphemeralPortRange() (int, int, error) {
cmd := exec.Command(command, "-n", ephPortFirst, ephPortLast)
out, err := cmd.Output()
if err != nil {
return 0, 0, err
}
val := string(out)
m := ephPortRe.FindStringSubmatch(val)
if m != nil {
min, err1 := strconv.Atoi(m[1])
max, err2 := strconv.Atoi(m[2])
if err1 == nil && err2 == nil {
return min, max, nil
}
}
return 0, 0, fmt.Errorf("unexpected sysctl value %q for keys %q %q", val, ephPortFirst, ephPortLast)
}

View File

@ -1,19 +0,0 @@
//go:build darwin
// +build darwin
package freeport
import (
"testing"
)
func TestGetEphemeralPortRange(t *testing.T) {
min, max, err := getEphemeralPortRange()
if err != nil {
t.Fatalf("err: %v", err)
}
if min <= 0 || max <= 0 || min > max {
t.Fatalf("unexpected values: min=%d, max=%d", min, max)
}
t.Logf("min=%d, max=%d", min, max)
}

View File

@ -1,47 +0,0 @@
//go:build freebsd
// +build freebsd
package freeport
import (
"fmt"
"os/exec"
"regexp"
"strconv"
)
/*
$ sysctl net.inet.ip.portrange.first net.inet.ip.portrange.last
net.inet.ip.portrange.first: 49152
net.inet.ip.portrange.last: 65535
*/
const (
ephPortFirst = "net.inet.ip.portrange.first"
ephPortLast = "net.inet.ip.portrange.last"
command = "sysctl"
)
var ephPortRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s*$`)
func getEphemeralPortRange() (int, int, error) {
cmd := exec.Command(command, "-n", ephPortFirst, ephPortLast)
out, err := cmd.Output()
if err != nil {
return 0, 0, err
}
val := string(out)
m := ephPortRe.FindStringSubmatch(val)
if m != nil {
min, err1 := strconv.Atoi(m[1])
max, err2 := strconv.Atoi(m[2])
if err1 == nil && err2 == nil {
return min, max, nil
}
}
return 0, 0, fmt.Errorf("unexpected sysctl value %q for keys %q %q", val, ephPortFirst, ephPortLast)
}

View File

@ -1,42 +0,0 @@
//go:build linux
// +build linux
package freeport
import (
"fmt"
"os/exec"
"regexp"
"strconv"
)
/*
$ sysctl -n net.ipv4.ip_local_port_range
32768 60999
*/
const ephemeralPortRangeSysctlKey = "net.ipv4.ip_local_port_range"
var ephemeralPortRangePatt = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s*$`)
func getEphemeralPortRange() (int, int, error) {
cmd := exec.Command("sysctl", "-n", ephemeralPortRangeSysctlKey)
out, err := cmd.Output()
if err != nil {
return 0, 0, err
}
val := string(out)
m := ephemeralPortRangePatt.FindStringSubmatch(val)
if m != nil {
min, err1 := strconv.Atoi(m[1])
max, err2 := strconv.Atoi(m[2])
if err1 == nil && err2 == nil {
return min, max, nil
}
}
return 0, 0, fmt.Errorf("unexpected sysctl value %q for key %q", val, ephemeralPortRangeSysctlKey)
}

View File

@ -1,19 +0,0 @@
//go:build linux
// +build linux
package freeport
import (
"testing"
)
func TestGetEphemeralPortRange(t *testing.T) {
min, max, err := getEphemeralPortRange()
if err != nil {
t.Fatalf("err: %v", err)
}
if min <= 0 || max <= 0 || min > max {
t.Fatalf("unexpected values: min=%d, max=%d", min, max)
}
t.Logf("min=%d, max=%d", min, max)
}

View File

@ -1,12 +0,0 @@
//go:build windows
// +build windows
package freeport
// For now we hard-code the Windows ephemeral port range, which is documented by
// Microsoft to be in this range for Vista / Server 2008 and newer.
//
// https://support.microsoft.com/en-us/help/832017/service-overview-and-network-port-requirements-for-windows
func getEphemeralPortRange() (int, int, error) {
return 49152, 65535, nil
}

View File

@ -1,297 +0,0 @@
// Copied from github.com/hashicorp/consul/sdk/freeport
//
// and tweaked for use by Nomad.
package freeport
import (
"container/list"
"fmt"
"math/rand"
"net"
"os"
"runtime"
"sync"
"time"
)
// todo(shoenig)
// There is a conflict between this copy of the updated sdk/freeport package
// and the lib/freeport package that is vendored as of nomad v0.10.x, which
// means we need to be careful to avoid the ports that transitive dependency
// is going to use (i.e. 10,000+). For now, we use the 9XXX port range with
// small blocks which means some tests will have to wait, and we need to be
// very careful not to leak ports.
const (
// blockSize is the size of the allocated port block. ports are given out
// consecutively from that block and after that point in a LRU fashion.
// blockSize = 1500
blockSize = 100 // todo(shoenig) revert once consul dependency is updated
// maxBlocks is the number of available port blocks before exclusions.
// maxBlocks = 30
maxBlocks = 10 // todo(shoenig) revert once consul dependency is updated
// lowPort is the lowest port number that should be used.
// lowPort = 10000
lowPort = 9000 // todo(shoenig) revert once consul dependency is updated
// attempts is how often we try to allocate a port block
// before giving up.
attempts = 10
)
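// Worked out, the reduced values above give lowPort + maxBlocks*blockSize =
// 9000 + 10*100 = 10000, so port blocks span 9000-9999 and stay below the
// 10,000+ range claimed by the vendored lib/freeport copy noted above.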
var (
// effectiveMaxBlocks is the number of available port blocks.
// lowPort + effectiveMaxBlocks * blockSize must be less than 65535.
effectiveMaxBlocks int
// firstPort is the first port of the allocated block.
firstPort int
// lockLn is the system-wide mutex for the port block.
lockLn net.Listener
// mu guards:
// - pendingPorts
// - freePorts
// - total
mu sync.Mutex
// once is used to do the initialization on the first call to retrieve free
// ports
once sync.Once
// condNotEmpty is a condition variable to wait for freePorts to be not
// empty. Linked to 'mu'
condNotEmpty *sync.Cond
// freePorts is a FIFO of all currently free ports. Take from the front,
// and return to the back.
freePorts *list.List
// pendingPorts is a FIFO of recently freed ports that have not yet passed
// the not-in-use check.
pendingPorts *list.List
// total is the total number of available ports in the block for use.
total int
)
// initialize is used to initialize freeport.
func initialize() {
var err error
effectiveMaxBlocks, err = adjustMaxBlocks()
if err != nil {
panic("freeport: ephemeral port range detection failed: " + err.Error())
}
if effectiveMaxBlocks < 0 {
panic("freeport: no blocks of ports available outside of ephemeral range")
}
if lowPort+effectiveMaxBlocks*blockSize > 65535 {
panic("freeport: block size too big or too many blocks requested")
}
rand.Seed(time.Now().UnixNano())
firstPort, lockLn = alloc()
condNotEmpty = sync.NewCond(&mu)
freePorts = list.New()
pendingPorts = list.New()
// fill with all available free ports
for port := firstPort + 1; port < firstPort+blockSize; port++ {
if used := isPortInUse(port); !used {
freePorts.PushBack(port)
}
}
total = freePorts.Len()
go checkFreedPorts()
}
func checkFreedPorts() {
ticker := time.NewTicker(250 * time.Millisecond)
for {
<-ticker.C
checkFreedPortsOnce()
}
}
func checkFreedPortsOnce() {
mu.Lock()
defer mu.Unlock()
pending := pendingPorts.Len()
remove := make([]*list.Element, 0, pending)
for elem := pendingPorts.Front(); elem != nil; elem = elem.Next() {
port := elem.Value.(int)
if used := isPortInUse(port); !used {
freePorts.PushBack(port)
remove = append(remove, elem)
}
}
retained := pending - len(remove)
if retained > 0 {
logf("WARN", "%d out of %d pending ports are still in use; something probably didn't wait around for the port to be closed!", retained, pending)
}
if len(remove) == 0 {
return
}
for _, elem := range remove {
pendingPorts.Remove(elem)
}
condNotEmpty.Broadcast()
}
// adjustMaxBlocks avoids having the allocation ranges overlap the ephemeral
// port range.
func adjustMaxBlocks() (int, error) {
ephemeralPortMin, ephemeralPortMax, err := getEphemeralPortRange()
if err != nil {
return 0, err
}
if ephemeralPortMin <= 0 || ephemeralPortMax <= 0 {
logf("INFO", "ephemeral port range detection not configured for GOOS=%q", runtime.GOOS)
return maxBlocks, nil
}
logf("INFO", "detected ephemeral port range of [%d, %d]", ephemeralPortMin, ephemeralPortMax)
for block := 0; block < maxBlocks; block++ {
min := lowPort + block*blockSize
max := min + blockSize
overlap := intervalOverlap(min, max-1, ephemeralPortMin, ephemeralPortMax)
if overlap {
logf("INFO", "reducing max blocks from %d to %d to avoid the ephemeral port range", maxBlocks, block)
return block, nil
}
}
return maxBlocks, nil
}
// alloc reserves a port block for exclusive use for the lifetime of the
// application. lockLn serves as a system-wide mutex for the port block and is
// implemented as a TCP listener which is bound to the firstPort and which will
// be automatically released when the application terminates.
func alloc() (int, net.Listener) {
for i := 0; i < attempts; i++ {
block := int(rand.Int31n(int32(effectiveMaxBlocks)))
firstPort := lowPort + block*blockSize
ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", firstPort))
if err != nil {
continue
}
// logf("DEBUG", "allocated port block %d (%d-%d)", block, firstPort, firstPort+blockSize-1)
return firstPort, ln
}
panic("freeport: cannot allocate port block")
}
// MustTake is the same as Take except it panics on error.
func MustTake(n int) (ports []int) {
ports, err := Take(n)
if err != nil {
panic(err)
}
return ports
}
// Take returns a list of free ports from the allocated port block. It is safe
// to call this method concurrently. Ports have been tested to be available on
// 127.0.0.1 TCP but there is no guarantee that they will remain free in the
// future.
func Take(n int) (ports []int, err error) {
if n <= 0 {
return nil, fmt.Errorf("freeport: cannot take %d ports", n)
}
mu.Lock()
defer mu.Unlock()
// Reserve a port block
once.Do(initialize)
if n > total {
return nil, fmt.Errorf("freeport: block size too small")
}
for len(ports) < n {
for freePorts.Len() == 0 {
if total == 0 {
return nil, fmt.Errorf("freeport: impossible to satisfy request; there are no actual free ports in the block anymore")
}
condNotEmpty.Wait()
}
elem := freePorts.Front()
freePorts.Remove(elem)
port := elem.Value.(int)
if used := isPortInUse(port); used {
// Something outside of the test suite has stolen this port, possibly
// due to assignment to an ephemeral port, remove it completely.
logf("WARN", "leaked port %d due to theft; removing from circulation", port)
total--
continue
}
ports = append(ports, port)
}
// logf("DEBUG", "free ports: %v", ports)
return ports, nil
}
// Return returns a block of ports back to the general pool. These ports should
// have been returned from a call to Take().
func Return(ports []int) {
if len(ports) == 0 {
return // convenience short circuit for test ergonomics
}
mu.Lock()
defer mu.Unlock()
for _, port := range ports {
if port > firstPort && port < firstPort+blockSize {
pendingPorts.PushBack(port)
}
}
}
func isPortInUse(port int) bool {
ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", port))
if err != nil {
return true
}
_ = ln.Close()
return false
}
func tcpAddr(ip string, port int) *net.TCPAddr {
return &net.TCPAddr{IP: net.ParseIP(ip), Port: port}
}
// intervalOverlap returns true if the doubly-inclusive integer intervals
// represented by [min1, max1] and [min2, max2] overlap.
func intervalOverlap(min1, max1, min2, max2 int) bool {
if min1 > max1 {
logf("WARN", "interval1 is not ordered [%d, %d]", min1, max1)
return false
}
if min2 > max2 {
logf("WARN", "interval2 is not ordered [%d, %d]", min2, max2)
return false
}
return min1 <= max2 && min2 <= max1
}
func logf(severity string, format string, a ...interface{}) {
_, _ = fmt.Fprintf(os.Stderr, "["+severity+"] freeport: "+format+"\n", a...)
}

View File

@ -1,281 +0,0 @@
package freeport
import (
"fmt"
"io"
"net"
"sync"
"testing"
"github.com/hashicorp/consul/sdk/testutil/retry"
)
// reset will reverse the setup from initialize() and then redo it (for tests)
func reset() {
mu.Lock()
defer mu.Unlock()
logf("INFO", "resetting the freeport package state")
effectiveMaxBlocks = 0
firstPort = 0
if lockLn != nil {
lockLn.Close()
lockLn = nil
}
once = sync.Once{}
freePorts = nil
pendingPorts = nil
total = 0
}
// peekFree returns the next port that will be returned by Take to aid in testing.
func peekFree() int {
mu.Lock()
defer mu.Unlock()
return freePorts.Front().Value.(int)
}
// peekAllFree returns all free ports that could be returned by Take to aid in testing.
func peekAllFree() []int {
mu.Lock()
defer mu.Unlock()
var out []int
for elem := freePorts.Front(); elem != nil; elem = elem.Next() {
port := elem.Value.(int)
out = append(out, port)
}
return out
}
// stats returns diagnostic data to aid in testing
func stats() (numTotal, numPending, numFree int) {
mu.Lock()
defer mu.Unlock()
return total, pendingPorts.Len(), freePorts.Len()
}
func TestTakeReturn(t *testing.T) {
// NOTE: for global var reasons this cannot execute in parallel
// ci.Parallel(t)
// Since this test is destructive (i.e. it leaks all ports) it means that
// any other test cases in this package will not function after it runs. To
// help out we reset the global state after we run this test.
defer reset()
// OK: do a simple take/return cycle to trigger the package initialization
func() {
ports, err := Take(1)
if err != nil {
t.Fatalf("err: %v", err)
}
defer Return(ports)
if len(ports) != 1 {
t.Fatalf("expected %d but got %d ports", 1, len(ports))
}
}()
waitForStatsReset := func() (numTotal int) {
t.Helper()
numTotal, numPending, numFree := stats()
if numTotal != numFree+numPending {
t.Fatalf("expected total (%d) and free+pending (%d) ports to match", numTotal, numFree+numPending)
}
retry.Run(t, func(r *retry.R) {
numTotal, numPending, numFree = stats()
if numPending != 0 {
r.Fatalf("pending is still non zero: %d", numPending)
}
if numTotal != numFree {
r.Fatalf("total (%d) does not equal free (%d)", numTotal, numFree)
}
})
return numTotal
}
// Reset
numTotal := waitForStatsReset()
// --------------------
// OK: take the max
func() {
ports, err := Take(numTotal)
if err != nil {
t.Fatalf("err: %v", err)
}
defer Return(ports)
if len(ports) != numTotal {
t.Fatalf("expected %d but got %d ports", numTotal, len(ports))
}
}()
// Reset
numTotal = waitForStatsReset()
expectError := func(expected string, got error) {
t.Helper()
if got == nil {
t.Fatalf("expected error but was nil")
}
if got.Error() != expected {
t.Fatalf("expected error %q but got %q", expected, got.Error())
}
}
// --------------------
// ERROR: take too many ports
func() {
ports, err := Take(numTotal + 1)
defer Return(ports)
expectError("freeport: block size too small", err)
}()
// --------------------
// ERROR: invalid ports request (negative)
func() {
_, err := Take(-1)
expectError("freeport: cannot take -1 ports", err)
}()
// --------------------
// ERROR: invalid ports request (zero)
func() {
_, err := Take(0)
expectError("freeport: cannot take 0 ports", err)
}()
// --------------------
// OK: Steal a port under the covers and let freeport detect the theft and compensate
leakedPort := peekFree()
func() {
leakyListener, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", leakedPort))
if err != nil {
t.Fatalf("err: %v", err)
}
defer leakyListener.Close()
func() {
ports, err := Take(3)
if err != nil {
t.Fatalf("err: %v", err)
}
defer Return(ports)
if len(ports) != 3 {
t.Fatalf("expected %d but got %d ports", 3, len(ports))
}
for _, port := range ports {
if port == leakedPort {
t.Fatalf("did not expect for Take to return the leaked port")
}
}
}()
newNumTotal := waitForStatsReset()
if newNumTotal != numTotal-1 {
t.Fatalf("expected total to drop to %d but got %d", numTotal-1, newNumTotal)
}
numTotal = newNumTotal // update outer variable for later tests
}()
// --------------------
// OK: sequence it so that one Take must wait on another Take to Return.
func() {
mostPorts, err := Take(numTotal - 5)
if err != nil {
t.Fatalf("err: %v", err)
}
type reply struct {
ports []int
err error
}
ch := make(chan reply, 1)
go func() {
ports, err := Take(10)
ch <- reply{ports: ports, err: err}
}()
Return(mostPorts)
r := <-ch
if r.err != nil {
t.Fatalf("err: %v", r.err)
}
defer Return(r.ports)
if len(r.ports) != 10 {
t.Fatalf("expected %d ports but got %d", 10, len(r.ports))
}
}()
// Reset
numTotal = waitForStatsReset()
// --------------------
// ERROR: Now we end on the crazy "Ocean's 11" level port theft where we
// orchestrate a situation where all ports are stolen and we don't find out
// until Take.
func() {
// 1. Grab all of the ports.
allPorts := peekAllFree()
// 2. Leak all of the ports
leaked := make([]io.Closer, 0, len(allPorts))
defer func() {
for _, c := range leaked {
c.Close()
}
}()
for _, port := range allPorts {
ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", port))
if err != nil {
t.Fatalf("err: %v", err)
}
leaked = append(leaked, ln)
}
// 3. Request 1 port which will detect the leaked ports and fail.
_, err := Take(1)
expectError("freeport: impossible to satisfy request; there are no actual free ports in the block anymore", err)
// 4. Wait for the block to zero out.
newNumTotal := waitForStatsReset()
if newNumTotal != 0 {
t.Fatalf("expected total to drop to %d but got %d", 0, newNumTotal)
}
}()
}
func TestIntervalOverlap(t *testing.T) {
cases := []struct {
min1, max1, min2, max2 int
overlap bool
}{
{0, 0, 0, 0, true},
{1, 1, 1, 1, true},
{1, 3, 1, 3, true}, // same
{1, 3, 4, 6, false}, // serial
{1, 4, 3, 6, true}, // inner overlap
{1, 6, 3, 4, true}, // nest
}
for _, tc := range cases {
t.Run(fmt.Sprintf("%d:%d vs %d:%d", tc.min1, tc.max1, tc.min2, tc.max2), func(t *testing.T) {
if tc.overlap != intervalOverlap(tc.min1, tc.max1, tc.min2, tc.max2) { // 1 vs 2
t.Fatalf("expected %v but got %v", tc.overlap, !tc.overlap)
}
if tc.overlap != intervalOverlap(tc.min2, tc.max2, tc.min1, tc.max1) { // 2 vs 1
t.Fatalf("expected %v but got %v", tc.overlap, !tc.overlap)
}
})
}
}

View File

@ -6,9 +6,9 @@ import (
"testing"
"time"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func newTestPool(t *testing.T) *ConnPool {
@ -18,24 +18,19 @@ func newTestPool(t *testing.T) *ConnPool {
}
func TestConnPool_ConnListener(t *testing.T) {
require := require.New(t)
ports := freeport.MustTake(1)
defer freeport.Return(ports)
ports := ci.PortAllocator.Grab(1)
addrStr := fmt.Sprintf("127.0.0.1:%d", ports[0])
addr, err := net.ResolveTCPAddr("tcp", addrStr)
require.Nil(err)
must.NoError(t, err)
exitCh := make(chan struct{})
defer close(exitCh)
go func() {
ln, err := net.Listen("tcp", addrStr)
require.Nil(err)
defer ln.Close()
ln, listenErr := net.Listen("tcp", addrStr)
must.NoError(t, listenErr)
defer func() { _ = ln.Close() }()
conn, _ := ln.Accept()
defer conn.Close()
defer func() { _ = conn.Close() }()
<-exitCh
}()
@ -50,7 +45,7 @@ func TestConnPool_ConnListener(t *testing.T) {
// Make an RPC
_, err = pool.acquire("test", addr)
require.Nil(err)
must.NoError(t, err)
// Assert we get a connection.
select {
@ -60,7 +55,9 @@ func TestConnPool_ConnListener(t *testing.T) {
}
// Test that the channel is closed when the pool shuts down.
require.Nil(pool.Shutdown())
err = pool.Shutdown()
must.NoError(t, err)
_, ok := <-c
require.False(ok)
must.False(t, ok)
}

View File

@ -1568,8 +1568,7 @@ func TestACLEndpoint_UpsertTokens(t *testing.T) {
ci.Parallel(t)
// Each sub-test uses the same server to avoid creating a new one for each
// test. This means some care has to be taken with resource naming, but
// does avoid lots of calls to systems such as freeport.
// test. This means some care has to be taken with resource naming.
testServer, rootACLToken, testServerCleanup := TestACLServer(t, nil)
defer testServerCleanup()
codec := rpcClient(t, testServer)

View File

@ -19,7 +19,6 @@ import (
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/ci"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/helper/snapshot"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/mock"
@ -148,8 +147,7 @@ func TestOperator_RaftRemovePeerByAddress(t *testing.T) {
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
ports := freeport.MustTake(1)
defer freeport.Return(ports)
ports := ci.PortAllocator.Grab(1)
// Try to remove a peer that's not there.
arg := structs.RaftPeerByAddressRequest{
@ -216,8 +214,7 @@ func TestOperator_RaftRemovePeerByAddress_ACL(t *testing.T) {
// Create ACL token
invalidToken := mock.CreatePolicyAndToken(t, state, 1001, "test-invalid", mock.NodePolicy(acl.PolicyWrite))
ports := freeport.MustTake(1)
defer freeport.Return(ports)
ports := ci.PortAllocator.Grab(1)
arg := structs.RaftPeerByAddressRequest{
Address: raft.ServerAddress(fmt.Sprintf("127.0.0.1:%d", ports[0])),
@ -276,8 +273,7 @@ func TestOperator_RaftRemovePeerByID(t *testing.T) {
t.Fatalf("err: %v", err)
}
ports := freeport.MustTake(1)
defer freeport.Return(ports)
ports := ci.PortAllocator.Grab(1)
// Add it manually to Raft.
{
@ -337,8 +333,7 @@ func TestOperator_RaftRemovePeerByID_ACL(t *testing.T) {
}
arg.Region = s1.config.Region
ports := freeport.MustTake(1)
defer freeport.Return(ports)
ports := ci.PortAllocator.Grab(1)
// Add peer manually to Raft.
{

View File

@ -8,8 +8,8 @@ import (
"testing"
"time"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/helper/pluginutils/catalog"
"github.com/hashicorp/nomad/helper/pluginutils/singleton"
"github.com/hashicorp/nomad/helper/testlog"
@ -111,7 +111,7 @@ func TestServerErr(t *testing.T, cb func(*Config)) (*Server, func(), error) {
for i := 10; i >= 0; i-- {
// Get random ports, need to cleanup later
ports := freeport.MustTake(2)
ports := ci.PortAllocator.Grab(2)
config.RPCAddr = &net.TCPAddr{
IP: []byte{127, 0, 0, 1},
@ -132,8 +132,6 @@ func TestServerErr(t *testing.T, cb func(*Config)) (*Server, func(), error) {
if err != nil {
ch <- fmt.Errorf("failed to shutdown server: %w", err)
}
freeport.Return(ports)
}()
select {
@ -146,12 +144,10 @@ func TestServerErr(t *testing.T, cb func(*Config)) (*Server, func(), error) {
}
}, nil
} else if i == 0 {
freeport.Return(ports)
return nil, nil, err
} else {
if server != nil {
_ = server.Shutdown()
freeport.Return(ports)
}
wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
time.Sleep(wait)

View File

@ -23,8 +23,8 @@ import (
"time"
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/discover"
"github.com/hashicorp/nomad/helper/freeport"
testing "github.com/mitchellh/go-testing-interface"
)
@ -98,8 +98,8 @@ type ServerConfigCallback func(c *TestServerConfig)
// defaultServerConfig returns a new TestServerConfig struct
// with all of the listen ports incremented by one.
func defaultServerConfig() (*TestServerConfig, []int) {
ports := freeport.MustTake(3)
func defaultServerConfig() *TestServerConfig {
ports := ci.PortAllocator.Grab(3)
return &TestServerConfig{
NodeName: fmt.Sprintf("node-%d", ports[0]),
DisableCheckpoint: true,
@ -123,7 +123,7 @@ func defaultServerConfig() (*TestServerConfig, []int) {
ACL: &ACLConfig{
Enabled: false,
},
}, ports
}
}
// TestServer is the main server wrapper struct.
@ -132,10 +132,6 @@ type TestServer struct {
Config *TestServerConfig
t testing.T
// ports (if any) that are reserved through freeport that must be returned
// at the end of a test, done when Close() is called.
ports []int
HTTPAddr string
SerfAddr string
HTTPClient *http.Client
@ -169,7 +165,7 @@ func NewTestServer(t testing.T, cb ServerConfigCallback) *TestServer {
}
defer configFile.Close()
nomadConfig, ports := defaultServerConfig()
nomadConfig := defaultServerConfig()
nomadConfig.DataDir = dataDir
if cb != nil {
@ -216,8 +212,6 @@ func NewTestServer(t testing.T, cb ServerConfigCallback) *TestServer {
cmd: cmd,
t: t,
ports: ports,
HTTPAddr: fmt.Sprintf("127.0.0.1:%d", nomadConfig.Ports.HTTP),
SerfAddr: fmt.Sprintf("127.0.0.1:%d", nomadConfig.Ports.Serf),
HTTPClient: client,
@ -240,8 +234,6 @@ func NewTestServer(t testing.T, cb ServerConfigCallback) *TestServer {
// Stop stops the test Nomad server, and removes the Nomad data
// directory once we are done.
func (s *TestServer) Stop() {
defer freeport.Return(s.ports)
defer os.RemoveAll(s.Config.DataDir)
// wait for the process to exit to be sure that the data dir can be

View File

@ -3,12 +3,11 @@ package testutil
import (
"errors"
"fmt"
"math/rand"
"os"
"os/exec"
"time"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/structs/config"
@ -30,10 +29,6 @@ type TestVault struct {
t testing.T
waitCh chan error
// ports (if any) that are reserved through freeport that must be returned
// at the end of a test, done when Stop() is called.
ports []int
Addr string
HTTPAddr string
RootToken string
@ -42,93 +37,70 @@ type TestVault struct {
}
func NewTestVaultFromPath(t testing.T, binary string) *TestVault {
var ports []int
nextPort := func() int {
next := freeport.MustTake(1)
ports = append(ports, next...)
return next[0]
port := ci.PortAllocator.Grab(1)[0]
token := uuid.Generate()
bind := fmt.Sprintf("-dev-listen-address=127.0.0.1:%d", port)
http := fmt.Sprintf("http://127.0.0.1:%d", port)
root := fmt.Sprintf("-dev-root-token-id=%s", token)
cmd := exec.Command(binary, "server", "-dev", bind, root)
cmd.Stdout = testlog.NewWriter(t)
cmd.Stderr = testlog.NewWriter(t)
// Build the config
conf := vapi.DefaultConfig()
conf.Address = http
// Make the client and set the token to the root token
client, err := vapi.NewClient(conf)
if err != nil {
t.Fatalf("failed to build Vault API client: %v", err)
}
client.SetToken(token)
enable := true
tv := &TestVault{
cmd: cmd,
t: t,
Addr: bind,
HTTPAddr: http,
RootToken: token,
Client: client,
Config: &config.VaultConfig{
Enabled: &enable,
Token: token,
Addr: http,
},
}
for i := 10; i >= 0; i-- {
port := nextPort() // collect every port for cleanup after the test
token := uuid.Generate()
bind := fmt.Sprintf("-dev-listen-address=127.0.0.1:%d", port)
http := fmt.Sprintf("http://127.0.0.1:%d", port)
root := fmt.Sprintf("-dev-root-token-id=%s", token)
cmd := exec.Command(binary, "server", "-dev", bind, root)
cmd.Stdout = testlog.NewWriter(t)
cmd.Stderr = testlog.NewWriter(t)
// Build the config
conf := vapi.DefaultConfig()
conf.Address = http
// Make the client and set the token to the root token
client, err := vapi.NewClient(conf)
if err != nil {
t.Fatalf("failed to build Vault API client: %v", err)
}
client.SetToken(token)
enable := true
tv := &TestVault{
cmd: cmd,
t: t,
ports: ports,
Addr: bind,
HTTPAddr: http,
RootToken: token,
Client: client,
Config: &config.VaultConfig{
Enabled: &enable,
Token: token,
Addr: http,
},
}
if err := tv.cmd.Start(); err != nil {
tv.t.Fatalf("failed to start vault: %v", err)
}
// Start the waiter
tv.waitCh = make(chan error, 1)
go func() {
err := tv.cmd.Wait()
tv.waitCh <- err
}()
// Ensure Vault started
var startErr error
select {
case startErr = <-tv.waitCh:
case <-time.After(time.Duration(500*TestMultiplier()) * time.Millisecond):
}
if startErr != nil && i == 0 {
t.Fatalf("failed to start vault: %v", startErr)
} else if startErr != nil {
wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
time.Sleep(wait)
continue
}
waitErr := tv.waitForAPI()
if waitErr != nil && i == 0 {
t.Fatalf("failed to start vault: %v", waitErr)
} else if waitErr != nil {
wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
time.Sleep(wait)
continue
}
return tv
if err = tv.cmd.Start(); err != nil {
tv.t.Fatalf("failed to start vault: %v", err)
}
return nil
// Start the waiter
tv.waitCh = make(chan error, 1)
go func() {
err = tv.cmd.Wait()
tv.waitCh <- err
}()
// Ensure Vault started
var startErr error
select {
case startErr = <-tv.waitCh:
case <-time.After(time.Duration(500*TestMultiplier()) * time.Millisecond):
}
if startErr != nil {
t.Fatalf("failed to start vault: %v", startErr)
}
waitErr := tv.waitForAPI()
if waitErr != nil {
t.Fatalf("failed to start vault: %v", waitErr)
}
return tv
}
// NewTestVault returns a new TestVault instance that is ready for API calls
@ -141,7 +113,7 @@ func NewTestVault(t testing.T) *TestVault {
// Start must be called and it is the caller's responsibility to deal with any
// port conflicts that may occur and retry accordingly.
func NewTestVaultDelayed(t testing.T) *TestVault {
port := freeport.MustTake(1)[0]
port := ci.PortAllocator.Grab(1)[0]
token := uuid.Generate()
bind := fmt.Sprintf("-dev-listen-address=127.0.0.1:%d", port)
http := fmt.Sprintf("http://127.0.0.1:%d", port)
@ -209,8 +181,6 @@ func (tv *TestVault) Start() error {
// Stop stops the test Vault server
func (tv *TestVault) Stop() {
defer freeport.Return(tv.ports)
if tv.cmd.Process == nil {
return
}
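
With the internal retry loop removed from NewTestVaultFromPath, a delayed-start caller that wants resilience to port conflicts owns the retry itself. A sketch under that assumption (NewTestVaultDelayed, Start, and Stop are the real helpers shown above; the loop bounds and backoff are made up):

var tv *TestVault
for i := 3; i >= 0; i-- {
	tv = NewTestVaultDelayed(t) // binds a fresh port on each attempt
	if err := tv.Start(); err == nil {
		break
	} else if i == 0 {
		t.Fatalf("vault failed to start: %v", err)
	}
	time.Sleep(250 * time.Millisecond)
}
defer tv.Stop()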