open-nomad/nomad/testing.go

package nomad

import (
	"fmt"
	"math/rand"
	"net"
	"sync/atomic"
	"time"

	testing "github.com/mitchellh/go-testing-interface"
	"github.com/pkg/errors"

	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/helper/freeport"
	"github.com/hashicorp/nomad/helper/pluginutils/catalog"
	"github.com/hashicorp/nomad/helper/pluginutils/singleton"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/version"
)

var (
	// nodeNumber is incremented atomically so each test server gets a
	// unique node name.
	nodeNumber uint32 = 0
)
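
// TestACLServer starts a test server with ACLs enabled, bootstraps an ACL
// management token, and returns the server, the token, and a cleanup function.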
func TestACLServer(t testing.T, cb func(*Config)) (*Server, *structs.ACLToken, func()) {
	server, cleanup := TestServer(t, func(c *Config) {
		c.ACLEnabled = true
		if cb != nil {
			cb(c)
		}
	})
	token := mock.ACLManagementToken()
	err := server.State().BootstrapACLTokens(1, 0, token)
	if err != nil {
		t.Fatalf("failed to bootstrap ACL token: %v", err)
	}
	return server, token, cleanup
}
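
// TestServer starts an in-memory test server with tightened Serf, Raft, and
// autopilot timings. The optional callback can mutate the config before the
// server starts. It returns the server and a cleanup function that shuts it
// down and releases its ports.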
func TestServer(t testing.T, cb func(*Config)) (*Server, func()) {
	// Setup the default settings
	config := DefaultConfig()
	config.Logger = testlog.HCLogger(t)
	config.Build = version.Version + "+unittest"
	config.DevMode = true

	// Tests form clusters via BootstrapExpect exclusively; a value of 1
	// bootstraps this server as a single-node cluster.
	config.BootstrapExpect = 1

	nodeNum := atomic.AddUint32(&nodeNumber, 1)
	config.NodeName = fmt.Sprintf("nomad-%03d", nodeNum)

	// Tighten the Serf timing
	config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfConfig.MemberlistConfig.RetransmitMult = 2
	config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond

	// Tighten the Raft timing
	config.RaftConfig.LeaderLeaseTimeout = 50 * time.Millisecond
	config.RaftConfig.HeartbeatTimeout = 50 * time.Millisecond
	config.RaftConfig.ElectionTimeout = 50 * time.Millisecond
	config.RaftTimeout = 500 * time.Millisecond

	// Disable Vault
	f := false
	config.VaultConfig.Enabled = &f

	// Squelch output when -v isn't specified
	config.LogOutput = testlog.NewWriter(t)

	// Tighten the autopilot timing
	config.AutopilotConfig.ServerStabilizationTime = 100 * time.Millisecond
	config.ServerHealthInterval = 50 * time.Millisecond
	config.AutopilotInterval = 100 * time.Millisecond

	// Set the plugin loaders
	config.PluginLoader = catalog.TestPluginLoader(t)
	config.PluginSingletonLoader = singleton.NewSingletonLoader(config.Logger, config.PluginLoader)

	// Disable consul autojoining: tests typically join servers directly
	config.ConsulConfig.ServerAutoJoin = &f

	// Invoke the callback if any
	if cb != nil {
		cb(config)
	}

	catalog := consul.NewMockCatalog(config.Logger)
	acls := consul.NewMockACLsAPI(config.Logger)
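
	// NewServer can fail if the ports chosen below turn out to be in use,
	// so retry a bounded number of times with a random backoff before
	// giving up.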
	for i := 10; i >= 0; i-- {
		// Get random ports, need to cleanup later
		ports := freeport.MustTake(2)

		config.RPCAddr = &net.TCPAddr{
			IP:   []byte{127, 0, 0, 1},
			Port: ports[0],
		}
		config.SerfConfig.MemberlistConfig.BindPort = ports[1]

		// Create server
		server, err := NewServer(config, catalog, acls)
		if err == nil {
			return server, func() {
				ch := make(chan error)
				go func() {
					defer close(ch)

					// Shutdown server
					err := server.Shutdown()
					if err != nil {
						ch <- errors.Wrap(err, "failed to shutdown server")
					}
					freeport.Return(ports)
				}()

				select {
				case e := <-ch:
					if e != nil {
						t.Fatal(e.Error())
					}
				case <-time.After(1 * time.Minute):
					t.Fatal("timed out while shutting down server")
				}
			}
		} else if i == 0 {
			freeport.Return(ports)
			t.Fatalf("err: %v", err)
		} else {
			if server != nil {
				_ = server.Shutdown()
				freeport.Return(ports)
			}
			wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
			time.Sleep(wait)
		}
	}

	return nil, nil
}
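
// TestJoin joins the given servers to s1's Serf address and fails the test if
// any join errors or contacts a number of nodes other than one.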
func TestJoin(t testing.T, s1 *Server, other ...*Server) {
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfConfig.MemberlistConfig.BindPort)
	for _, s2 := range other {
		if num, err := s2.Join([]string{addr}); err != nil {
			t.Fatalf("err: %v", err)
		} else if num != 1 {
			t.Fatalf("bad: %d", num)
		}
	}
}
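
// A minimal usage sketch of how these helpers typically compose in a test.
// The test name and the three-node topology below are illustrative, not taken
// from this file:
//
//	func TestMyFeature(t *testing.T) {
//		s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 })
//		defer cleanupS1()
//		s2, cleanupS2 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 })
//		defer cleanupS2()
//		s3, cleanupS3 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 })
//		defer cleanupS3()
//
//		// Join the servers over Serf so they can elect a leader.
//		TestJoin(t, s1, s2, s3)
//	}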