open-nomad/nomad/serf_test.go

package nomad

import (
"fmt"
"os"
"path"
"strings"
"sync/atomic"
"testing"
"time"

	"github.com/hashicorp/nomad/testutil"
"github.com/hashicorp/serf/serf"
"github.com/stretchr/testify/require"
)
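
// TestNomad_JoinPeer asserts that two servers in different regions can join
// over Serf, that each tracks both regions in its peers map, and that each
// tracks only itself in localPeers.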
func TestNomad_JoinPeer(t *testing.T) {
t.Parallel()
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
s2, cleanupS2 := TestServer(t, func(c *Config) {
c.Region = "region2"
})
defer cleanupS2()
TestJoin(t, s1, s2)
testutil.WaitForResult(func() (bool, error) {
if members := s1.Members(); len(members) != 2 {
return false, fmt.Errorf("bad: %#v", members)
}
if members := s2.Members(); len(members) != 2 {
return false, fmt.Errorf("bad: %#v", members)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
testutil.WaitForResult(func() (bool, error) {
if len(s1.peers) != 2 {
return false, fmt.Errorf("bad: %#v", s1.peers)
}
if len(s2.peers) != 2 {
return false, fmt.Errorf("bad: %#v", s2.peers)
}
if len(s1.localPeers) != 1 {
return false, fmt.Errorf("bad: %#v", s1.localPeers)
}
if len(s2.localPeers) != 1 {
return false, fmt.Errorf("bad: %#v", s2.localPeers)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
}
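
// TestNomad_RemovePeer asserts that a server which leaves and shuts down is
// removed from the remaining server's peer tracking.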
func TestNomad_RemovePeer(t *testing.T) {
t.Parallel()
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
s2, cleanupS2 := TestServer(t, func(c *Config) {
c.Region = "global"
})
defer cleanupS2()
TestJoin(t, s1, s2)
testutil.WaitForResult(func() (bool, error) {
if members := s1.Members(); len(members) != 2 {
return false, fmt.Errorf("bad: %#v", members)
}
if members := s2.Members(); len(members) != 2 {
return false, fmt.Errorf("bad: %#v", members)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
// Leave immediately
s2.Leave()
s2.Shutdown()
testutil.WaitForResult(func() (bool, error) {
if len(s1.peers) != 1 {
return false, fmt.Errorf("bad: %#v", s1.peers)
}
if len(s2.peers) != 1 {
return false, fmt.Errorf("bad: %#v", s2.peers)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
}
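
// TestNomad_ReapPeer asserts that a member reported by Serf as reaped is
// reconciled out of the remaining servers' peer sets and Raft configuration.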
func TestNomad_ReapPeer(t *testing.T) {
t.Parallel()
dir := tmpDir(t)
defer os.RemoveAll(dir)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NodeName = "node1"
c.BootstrapExpect = 3
c.DevMode = false
c.DataDir = path.Join(dir, "node1")
})
defer cleanupS1()
s2, cleanupS2 := TestServer(t, func(c *Config) {
c.NodeName = "node2"
c.BootstrapExpect = 3
c.DevMode = false
c.DataDir = path.Join(dir, "node2")
})
defer cleanupS2()
s3, cleanupS3 := TestServer(t, func(c *Config) {
c.NodeName = "node3"
c.BootstrapExpect = 3
c.DevMode = false
c.DataDir = path.Join(dir, "node3")
})
defer cleanupS3()
TestJoin(t, s1, s2, s3)
testutil.WaitForResult(func() (bool, error) {
// Retry the join to decrease flakiness
TestJoin(t, s1, s2, s3)
if members := s1.Members(); len(members) != 3 {
return false, fmt.Errorf("bad s1: %#v", members)
}
if members := s2.Members(); len(members) != 3 {
return false, fmt.Errorf("bad s2: %#v", members)
}
if members := s3.Members(); len(members) != 3 {
return false, fmt.Errorf("bad s3: %#v", members)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
testutil.WaitForLeader(t, s1.RPC)
// Simulate a reap
mems := s1.Members()
var s2mem serf.Member
for _, m := range mems {
if strings.Contains(m.Name, s2.config.NodeName) {
s2mem = m
s2mem.Status = StatusReap
break
}
}
// Shutdown and then send the reap
s2.Shutdown()
s1.reconcileCh <- s2mem
s2.reconcileCh <- s2mem
s3.reconcileCh <- s2mem
testutil.WaitForResult(func() (bool, error) {
if len(s1.peers["global"]) != 2 {
return false, fmt.Errorf("bad: %#v", s1.peers["global"])
}
peers, err := s1.numPeers()
if err != nil {
return false, fmt.Errorf("numPeers() failed: %v", err)
}
if peers != 2 {
return false, fmt.Errorf("bad: %#v", peers)
}
if len(s3.peers["global"]) != 2 {
			return false, fmt.Errorf("bad: %#v", s3.peers["global"])
}
peers, err = s3.numPeers()
if err != nil {
return false, fmt.Errorf("numPeers() failed: %v", err)
}
if peers != 2 {
return false, fmt.Errorf("bad: %#v", peers)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
}
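
// TestNomad_BootstrapExpect asserts that a three-server cluster bootstraps
// once the expected number of servers have joined, and that a fourth server
// can join afterwards without triggering a new leader election.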
func TestNomad_BootstrapExpect(t *testing.T) {
t.Parallel()
dir := tmpDir(t)
defer os.RemoveAll(dir)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 3
c.DevMode = false
c.DataDir = path.Join(dir, "node1")
})
defer cleanupS1()
s2, cleanupS2 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 3
c.DevMode = false
c.DataDir = path.Join(dir, "node2")
})
defer cleanupS2()
s3, cleanupS3 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 3
c.DevMode = false
c.DataDir = path.Join(dir, "node3")
})
defer cleanupS3()
TestJoin(t, s1, s2, s3)
testutil.WaitForResult(func() (bool, error) {
// Retry the join to decrease flakiness
TestJoin(t, s1, s2, s3)
peers, err := s1.numPeers()
if err != nil {
return false, err
}
if peers != 3 {
return false, fmt.Errorf("bad: %#v", peers)
}
peers, err = s2.numPeers()
if err != nil {
return false, err
}
if peers != 3 {
return false, fmt.Errorf("bad: %#v", peers)
}
peers, err = s3.numPeers()
if err != nil {
return false, err
}
if peers != 3 {
2015-06-04 11:11:35 +00:00
return false, fmt.Errorf("bad: %#v", peers)
}
if len(s1.localPeers) != 3 {
return false, fmt.Errorf("bad: %#v", s1.localPeers)
}
if len(s2.localPeers) != 3 {
return false, fmt.Errorf("bad: %#v", s2.localPeers)
}
if len(s3.localPeers) != 3 {
return false, fmt.Errorf("bad: %#v", s3.localPeers)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
// Join a fourth server after quorum has already been formed and ensure
// there is no election
s4, cleanupS4 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 3
c.DevMode = false
c.DataDir = path.Join(dir, "node4")
})
defer cleanupS4()
// Make sure a leader is elected, grab the current term and then add in
// the fourth server.
testutil.WaitForLeader(t, s1.RPC)
termBefore := s1.raft.Stats()["last_log_term"]
var addresses []string
for _, s := range []*Server{s1, s2, s3} {
addr := fmt.Sprintf("127.0.0.1:%d", s.config.SerfConfig.MemberlistConfig.BindPort)
addresses = append(addresses, addr)
}
if _, err := s4.Join(addresses); err != nil {
t.Fatalf("err: %v", err)
}
// Wait for the new server to see itself added to the cluster.
var p4 int
testutil.WaitForResult(func() (bool, error) {
// Retry join to reduce flakiness
if _, err := s4.Join(addresses); err != nil {
t.Fatalf("err: %v", err)
}
p4, _ = s4.numPeers()
		return p4 == 4, fmt.Errorf("%d", p4)
}, func(err error) {
t.Fatalf("should have 4 peers: %v", err)
})
// Make sure there's still a leader and that the term didn't change,
// so we know an election didn't occur.
testutil.WaitForLeader(t, s1.RPC)
termAfter := s1.raft.Stats()["last_log_term"]
if termAfter != termBefore {
t.Fatalf("looks like an election took place")
}
}
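
// TestNomad_BootstrapExpect_NonVoter asserts that non-voting servers do not
// count toward BootstrapExpect, so the cluster only bootstraps once enough
// voting servers have joined.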
func TestNomad_BootstrapExpect_NonVoter(t *testing.T) {
t.Parallel()
dir := tmpDir(t)
defer os.RemoveAll(dir)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 2
c.DevMode = false
c.DataDir = path.Join(dir, "node1")
c.NonVoter = true
})
defer cleanupS1()
s2, cleanupS2 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 2
c.DevMode = false
c.DataDir = path.Join(dir, "node2")
c.NonVoter = true
})
defer cleanupS2()
s3, cleanupS3 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 2
c.DevMode = false
c.DataDir = path.Join(dir, "node3")
})
defer cleanupS3()
TestJoin(t, s1, s2, s3)
// Assert that we do not bootstrap
testutil.AssertUntil(testutil.Timeout(time.Second), func() (bool, error) {
_, p := s1.getLeader()
if p != nil {
return false, fmt.Errorf("leader %v", p)
}
return true, nil
}, func(err error) {
t.Fatalf("should not have leader: %v", err)
})
// Add the fourth server that is a voter
s4, cleanupS4 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 2
c.DevMode = false
c.DataDir = path.Join(dir, "node4")
})
defer cleanupS4()
TestJoin(t, s1, s2, s3, s4)
testutil.WaitForResult(func() (bool, error) {
// Retry the join to decrease flakiness
TestJoin(t, s1, s2, s3, s4)
peers, err := s1.numPeers()
if err != nil {
return false, err
}
if peers != 4 {
return false, fmt.Errorf("bad: %#v", peers)
}
peers, err = s2.numPeers()
if err != nil {
return false, err
}
if peers != 4 {
return false, fmt.Errorf("bad: %#v", peers)
}
peers, err = s3.numPeers()
if err != nil {
return false, err
}
if peers != 4 {
return false, fmt.Errorf("bad: %#v", peers)
}
peers, err = s4.numPeers()
if err != nil {
return false, err
}
if peers != 4 {
return false, fmt.Errorf("bad: %#v", peers)
}
if len(s1.localPeers) != 4 {
return false, fmt.Errorf("bad: %#v", s1.localPeers)
}
if len(s2.localPeers) != 4 {
return false, fmt.Errorf("bad: %#v", s2.localPeers)
}
if len(s3.localPeers) != 4 {
return false, fmt.Errorf("bad: %#v", s3.localPeers)
}
if len(s4.localPeers) != 4 {
			return false, fmt.Errorf("bad: %#v", s4.localPeers)
}
_, p := s1.getLeader()
if p == nil {
return false, fmt.Errorf("no leader")
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
}
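
// TestNomad_BadExpect asserts that servers with conflicting BootstrapExpect
// values never bootstrap a cluster.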
func TestNomad_BadExpect(t *testing.T) {
t.Parallel()
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 2
})
defer cleanupS1()
s2, cleanupS2 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 3
})
defer cleanupS2()
servers := []*Server{s1, s2}
TestJoin(t, s1, s2)
// Serf members should update
testutil.WaitForResult(func() (bool, error) {
for _, s := range servers {
members := s.Members()
if len(members) != 2 {
return false, fmt.Errorf("%d", len(members))
}
}
return true, nil
}, func(err error) {
t.Fatalf("should have 2 peers: %v", err)
})
	// Should still have no peers: the two servers report conflicting
	// BootstrapExpect values, so neither will bootstrap the cluster
testutil.WaitForResult(func() (bool, error) {
for _, s := range servers {
p, _ := s.numPeers()
if p != 0 {
return false, fmt.Errorf("%d", p)
}
}
return true, nil
}, func(err error) {
t.Fatalf("should have 0 peers: %v", err)
})
}

// TestNomad_NonBootstrapping_ShouldntBootstrap asserts that a server with
// BootstrapExpect set to zero never bootstraps a cluster.
func TestNomad_NonBootstrapping_ShouldntBootstrap(t *testing.T) {
t.Parallel()
dir := tmpDir(t)
defer os.RemoveAll(dir)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 0
c.DevMode = false
c.DataDir = path.Join(dir, "node")
})
defer cleanupS1()
testutil.WaitForResult(func() (bool, error) {
s1.peerLock.Lock()
p := len(s1.localPeers)
s1.peerLock.Unlock()
if p != 1 {
return false, fmt.Errorf("%d", p)
}
return true, nil
}, func(err error) {
t.Fatalf("expected 1 local peer: %v", err)
})
	// Since non-bootstrap mode is the initial state, explicitly attempt a
	// bootstrap and then wait long enough to assert that the server still
	// does not bootstrap.
s1.maybeBootstrap()
time.Sleep(100 * time.Millisecond)
bootstrapped := atomic.LoadInt32(&s1.config.Bootstrapped)
require.Zero(t, bootstrapped, "expecting non-bootstrapped servers")
p, _ := s1.numPeers()
require.Zero(t, p, "number of peers in Raft")
}