open-nomad/e2e/v3/cluster3/cluster3.go

e2e: create a v3/ set of packages for creating Nomad e2e tests (#17620)

* e2e: create a v3/ set of packages for creating Nomad e2e tests

This PR creates an experimental set of packages under `e2e/v3/` for crafting
Nomad e2e tests. Unlike previous generations, this is an attempt at providing
a way to create tests in a declarative (ish) pattern, with a focus on being
easy to use, easy to clean up, and easy to debug.

@shoenig is just trying this out to see how it goes. Lots of features need to
be implemented. Many more docs need to be written. Breaking changes are to be
expected. There are known and unknown bugs. No warranty.

Quick run of `example` with verbose logging.

```shell
➜ NOMAD_E2E_VERBOSE=1 go test -v
=== RUN   TestExample
=== RUN   TestExample/testSleep
    util3.go:25: register (service) job: "sleep-809"
    util3.go:25: checking eval: 9f0ae04d-7259-9333-3763-44d0592d03a1, status: pending
    util3.go:25: checking eval: 9f0ae04d-7259-9333-3763-44d0592d03a1, status: complete
    util3.go:25: checking deployment: a85ad2f8-269c-6620-d390-8eac7a9c397d, status: running
    util3.go:25: checking deployment: a85ad2f8-269c-6620-d390-8eac7a9c397d, status: running
    util3.go:25: checking deployment: a85ad2f8-269c-6620-d390-8eac7a9c397d, status: running
    util3.go:25: checking deployment: a85ad2f8-269c-6620-d390-8eac7a9c397d, status: running
    util3.go:25: checking deployment: a85ad2f8-269c-6620-d390-8eac7a9c397d, status: successful
    util3.go:25: deployment a85ad2f8-269c-6620-d390-8eac7a9c397d was a success
    util3.go:25: deregister job "sleep-809"
    util3.go:25: system gc
=== RUN   TestExample/testNamespace
    util3.go:25: apply namespace "example-291"
    util3.go:25: register (service) job: "sleep-967"
    util3.go:25: checking eval: a2a2303a-adf1-2621-042e-a9654292e569, status: pending
    util3.go:25: checking eval: a2a2303a-adf1-2621-042e-a9654292e569, status: complete
    util3.go:25: checking deployment: 3395e9a8-3ffc-8990-d5b8-cc0ce311f302, status: running
    util3.go:25: checking deployment: 3395e9a8-3ffc-8990-d5b8-cc0ce311f302, status: running
    util3.go:25: checking deployment: 3395e9a8-3ffc-8990-d5b8-cc0ce311f302, status: running
    util3.go:25: checking deployment: 3395e9a8-3ffc-8990-d5b8-cc0ce311f302, status: successful
    util3.go:25: deployment 3395e9a8-3ffc-8990-d5b8-cc0ce311f302 was a success
    util3.go:25: deregister job "sleep-967"
    util3.go:25: system gc
    util3.go:25: cleanup namespace "example-291"
=== RUN   TestExample/testEnv
    util3.go:25: register (batch) job: "env-582"
    util3.go:25: checking eval: 600f3bce-ea17-6d13-9d20-9d9eb2a784f7, status: pending
    util3.go:25: checking eval: 600f3bce-ea17-6d13-9d20-9d9eb2a784f7, status: complete
    util3.go:25: deregister job "env-582"
    util3.go:25: system gc
--- PASS: TestExample (10.08s)
    --- PASS: TestExample/testSleep (5.02s)
    --- PASS: TestExample/testNamespace (4.02s)
    --- PASS: TestExample/testEnv (1.03s)
PASS
ok      github.com/hashicorp/nomad/e2e/example  10.079s
```

* cluster3: use filter for kernel.name instead of filtering manually
2023-06-23 14:10:49 +00:00
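
A minimal sketch of how a test might use this package, based on the options
defined in the file below (the test function name and the timeout value are
illustrative):

```go
package example

import (
	"testing"
	"time"

	"github.com/hashicorp/nomad/e2e/v3/cluster3"
)

func TestCluster(t *testing.T) {
	// Block until the cluster has an elected leader, at least one Linux
	// client, and reachable Consul and Vault, failing the test if any of
	// these is not true within 30 seconds.
	cluster3.Establish(t,
		cluster3.Timeout(30*time.Second),
		cluster3.Leader(),
		cluster3.LinuxClients(1),
		cluster3.Consul(),
		cluster3.Vault(),
	)

	// ... exercise the now-ready cluster ...
}
```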
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
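
// Package cluster3 provides a declarative way for e2e tests to wait for a
// Nomad cluster, and optionally Consul and Vault, to be ready before running.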
package cluster3

import (
	"errors"
	"fmt"
	"os"
	"testing"
	"time"

	consulapi "github.com/hashicorp/consul/api"
	nomadapi "github.com/hashicorp/nomad/api"
	vaultapi "github.com/hashicorp/vault/api"
	"github.com/shoenig/test/must"
	"github.com/shoenig/test/wait"
)
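
// Cluster holds the API clients and the readiness requirements collected
// from Options for a single e2e target cluster.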
type Cluster struct {
	t *testing.T

	consulClient *consulapi.Client
	nomadClient  *nomadapi.Client
	vaultClient  *vaultapi.Client

	timeout        time.Duration
	leaderReady    bool
	consulReady    bool
	vaultReady     bool
	linuxClients   int
	windowsClients int
}
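
// wait runs each readiness check concurrently and blocks until all of them
// report success, failing the test if any check exceeds the timeout.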
func (c *Cluster) wait() {
	errCh := make(chan error)

	statusAPI := c.nomadClient.Status()
	nodesAPI := c.nomadClient.Nodes()
	consulStatusAPI := c.consulClient.Status()
	vaultSysAPI := c.vaultClient.Sys()

	// Each readiness check below succeeds immediately when the corresponding
	// requirement was not requested via an Option.

	waitLeader := wait.InitialSuccess(
		wait.Timeout(c.timeout),
		wait.Gap(1*time.Second),
		wait.TestFunc(func() (bool, error) {
			if !c.leaderReady {
				return true, nil
			}
			result, err := statusAPI.Leader()
			return result != "", err
		}),
	)

	waitLinuxClients := wait.InitialSuccess(
		wait.Timeout(c.timeout),
		wait.Gap(1*time.Second),
		wait.ErrorFunc(func() error {
			if c.linuxClients <= 0 {
				return nil
			}
			queryOpts := &nomadapi.QueryOptions{
				Filter: `Attributes["kernel.name"] == "linux"`,
			}
			nodes, _, err := nodesAPI.List(queryOpts)
			if err != nil {
				return err
			}
			eligible := len(nodes)
			if eligible < c.linuxClients {
				return fmt.Errorf("not enough linux clients, want %d, got %d", c.linuxClients, eligible)
			}
			return nil
		}),
	)

	waitWindowsClients := wait.InitialSuccess(
		wait.Timeout(c.timeout),
		wait.Gap(1*time.Second),
		wait.ErrorFunc(func() error {
			if c.windowsClients <= 0 {
				return nil
			}
			return errors.New("todo: windows")
		}),
	)

	waitConsul := wait.InitialSuccess(
		wait.Timeout(c.timeout),
		wait.Gap(1*time.Second),
		wait.TestFunc(func() (bool, error) {
			if !c.consulReady {
				return true, nil
			}
			result, err := consulStatusAPI.Leader()
			return result != "", err
		}),
	)

	waitVault := wait.InitialSuccess(
		wait.Timeout(c.timeout),
		wait.Gap(1*time.Second),
		wait.TestFunc(func() (bool, error) {
			if !c.vaultReady {
				return true, nil
			}
			result, err := vaultSysAPI.Leader()
			if err != nil {
				return false, fmt.Errorf("failed to find vault leader: %w", err)
			}
			if result == nil {
				return false, errors.New("empty response for vault leader")
			}
			return result.ActiveTime.String() != "", nil
		}),
	)

	// todo: generalize

	// Run every check concurrently, then collect one result per check.
	go func() {
		err := waitLeader.Run()
		errCh <- err
	}()

	go func() {
		err := waitLinuxClients.Run()
		errCh <- err
	}()

	go func() {
		err := waitWindowsClients.Run()
		errCh <- err
	}()

	go func() {
		err := waitConsul.Run()
		errCh <- err
	}()

	go func() {
		err := waitVault.Run()
		errCh <- err
	}()

	for i := 0; i < 5; i++ {
		err := <-errCh
		must.NoError(c.t, err)
	}
}
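
// Option expresses a requirement the cluster must satisfy before Establish
// returns.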
type Option func(c *Cluster)
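
// Establish creates the API clients and blocks until the cluster satisfies
// every requirement expressed via opts, failing t if any requirement is not
// met within the configured timeout.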
func Establish(t *testing.T, opts ...Option) {
	c := &Cluster{
		t:       t,
		timeout: 10 * time.Second,
	}
	for _, opt := range opts {
		opt(c)
	}
	c.setClients()
	c.wait()
}
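
// setClients initializes the Nomad, Consul, and Vault API clients from the
// standard environment variables, defaulting the Vault address to
// http://localhost:8200 when VAULT_ADDR is not set.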
func (c *Cluster) setClients() {
	nomadClient, nomadErr := nomadapi.NewClient(nomadapi.DefaultConfig())
	must.NoError(c.t, nomadErr, must.Sprint("failed to create nomad api client"))
	c.nomadClient = nomadClient

	consulClient, consulErr := consulapi.NewClient(consulapi.DefaultConfig())
	must.NoError(c.t, consulErr, must.Sprint("failed to create consul api client"))
	c.consulClient = consulClient

	vConfig := vaultapi.DefaultConfig()
	if os.Getenv("VAULT_ADDR") == "" {
		vConfig.Address = "http://localhost:8200"
	}
	vaultClient, vaultErr := vaultapi.NewClient(vConfig)
	must.NoError(c.t, vaultErr, must.Sprint("failed to create vault api client"))
	c.vaultClient = vaultClient
}
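
// Timeout sets how long each readiness check may take before the test is
// failed. The default is 10 seconds.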
func Timeout(timeout time.Duration) Option {
	return func(c *Cluster) {
		c.timeout = timeout
	}
}
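
// LinuxClients requires that at least count Linux client nodes are
// registered in the cluster.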
func LinuxClients(count int) Option {
	return func(c *Cluster) {
		c.linuxClients = count
	}
}
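
// WindowsClients will require that at least count Windows client nodes are
// registered in the cluster; it is not yet implemented and currently panics.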
func WindowsClients(count int) Option {
	panic("not yet implemented")
	// return func(c *Cluster) {
	// 	c.windowsClients = count
	// }
}
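
// Leader requires that the Nomad servers have elected a leader.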
func Leader() Option {
	return func(c *Cluster) {
		c.leaderReady = true
	}
}
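
// Consul requires that the Consul cluster is reachable and has a leader.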
func Consul() Option {
	return func(c *Cluster) {
		c.consulReady = true
	}
}
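
// Vault requires that the Vault cluster is reachable and has an active node.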
func Vault() Option {
	return func(c *Cluster) {
		c.vaultReady = true
	}
}