package nomad

import (
	"errors"
	"fmt"
	"os"
	"path"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/hashicorp/nomad/testutil"
	"github.com/hashicorp/serf/serf"
	"github.com/stretchr/testify/require"
)

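// TestNomad_JoinPeer asserts that servers from two different regions can join
// over Serf, see each other as members, and track both regions in their peer
// maps while keeping only themselves as local (same-region) peers.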
func TestNomad_JoinPeer(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	s2, cleanupS2 := TestServer(t, func(c *Config) {
		c.Region = "region2"
	})
	defer cleanupS2()
	TestJoin(t, s1, s2)

	testutil.WaitForResult(func() (bool, error) {
		if members := s1.Members(); len(members) != 2 {
			return false, fmt.Errorf("bad: %#v", members)
		}
		if members := s2.Members(); len(members) != 2 {
			return false, fmt.Errorf("bad: %#v", members)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	testutil.WaitForResult(func() (bool, error) {
		if len(s1.peers) != 2 {
			return false, fmt.Errorf("bad: %#v", s1.peers)
		}
		if len(s2.peers) != 2 {
			return false, fmt.Errorf("bad: %#v", s2.peers)
		}
		if len(s1.localPeers) != 1 {
			return false, fmt.Errorf("bad: %#v", s1.localPeers)
		}
		if len(s2.localPeers) != 1 {
			return false, fmt.Errorf("bad: %#v", s2.localPeers)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

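// TestNomad_RemovePeer asserts that peer tracking remains consistent after one
// of two joined servers gracefully leaves and shuts down.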
func TestNomad_RemovePeer(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	s2, cleanupS2 := TestServer(t, func(c *Config) {
		c.Region = "global"
	})
	defer cleanupS2()
	TestJoin(t, s1, s2)

	testutil.WaitForResult(func() (bool, error) {
		if members := s1.Members(); len(members) != 2 {
			return false, fmt.Errorf("bad: %#v", members)
		}
		if members := s2.Members(); len(members) != 2 {
			return false, fmt.Errorf("bad: %#v", members)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Leave immediately
	s2.Leave()
	s2.Shutdown()

	testutil.WaitForResult(func() (bool, error) {
		if len(s1.peers) != 1 {
			return false, fmt.Errorf("bad: %#v", s1.peers)
		}
		if len(s2.peers) != 1 {
			return false, fmt.Errorf("bad: %#v", s2.peers)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

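// TestNomad_ReapPeer asserts that a member forced into StatusReap is reconciled
// out of the surviving servers' Serf peer tracking and Raft configuration.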
func TestNomad_ReapPeer(t *testing.T) {
	t.Parallel()

	dir := tmpDir(t)
	defer os.RemoveAll(dir)

	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.NodeName = "node1"
		c.BootstrapExpect = 3
		c.DevMode = false
		c.DataDir = path.Join(dir, "node1")
	})
	defer cleanupS1()
	s2, cleanupS2 := TestServer(t, func(c *Config) {
		c.NodeName = "node2"
		c.BootstrapExpect = 3
		c.DevMode = false
		c.DataDir = path.Join(dir, "node2")
	})
	defer cleanupS2()
	s3, cleanupS3 := TestServer(t, func(c *Config) {
		c.NodeName = "node3"
		c.BootstrapExpect = 3
		c.DevMode = false
		c.DataDir = path.Join(dir, "node3")
	})
	defer cleanupS3()
	TestJoin(t, s1, s2, s3)

	testutil.WaitForResult(func() (bool, error) {
		// Retry the join to decrease flakiness
		TestJoin(t, s1, s2, s3)
		if members := s1.Members(); len(members) != 3 {
			return false, fmt.Errorf("bad s1: %#v", members)
		}
		if members := s2.Members(); len(members) != 3 {
			return false, fmt.Errorf("bad s2: %#v", members)
		}
		if members := s3.Members(); len(members) != 3 {
			return false, fmt.Errorf("bad s3: %#v", members)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	testutil.WaitForLeader(t, s1.RPC)

	// Simulate a reap
	mems := s1.Members()
	var s2mem serf.Member
	for _, m := range mems {
		if strings.Contains(m.Name, s2.config.NodeName) {
			s2mem = m
			s2mem.Status = StatusReap
			break
		}
	}

	// Shutdown and then send the reap
	s2.Shutdown()
	s1.reconcileCh <- s2mem
	s2.reconcileCh <- s2mem
	s3.reconcileCh <- s2mem

	testutil.WaitForResult(func() (bool, error) {
		if len(s1.peers["global"]) != 2 {
			return false, fmt.Errorf("bad: %#v", s1.peers["global"])
		}
		peers, err := s1.numPeers()
		if err != nil {
			return false, fmt.Errorf("numPeers() failed: %v", err)
		}
		if peers != 2 {
			return false, fmt.Errorf("bad: %#v", peers)
		}

		if len(s3.peers["global"]) != 2 {
			return false, fmt.Errorf("bad: %#v", s3.peers["global"])
		}
		peers, err = s3.numPeers()
		if err != nil {
			return false, fmt.Errorf("numPeers() failed: %v", err)
		}
		if peers != 2 {
			return false, fmt.Errorf("bad: %#v", peers)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

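// TestNomad_BootstrapExpect asserts that three servers with bootstrap_expect=3
// bootstrap a Raft cluster once they all join, and that a fourth server joining
// afterwards is added without triggering a new election.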
func TestNomad_BootstrapExpect(t *testing.T) {
	t.Parallel()

	dir := tmpDir(t)
	defer os.RemoveAll(dir)

	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
		c.DevMode = false
		c.DataDir = path.Join(dir, "node1")
	})
	defer cleanupS1()
	s2, cleanupS2 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
		c.DevMode = false
		c.DataDir = path.Join(dir, "node2")
	})
	defer cleanupS2()
	s3, cleanupS3 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
		c.DevMode = false
		c.DataDir = path.Join(dir, "node3")
	})
	defer cleanupS3()
	TestJoin(t, s1, s2, s3)

	testutil.WaitForResult(func() (bool, error) {
		// Retry the join to decrease flakiness
		TestJoin(t, s1, s2, s3)
		peers, err := s1.numPeers()
		if err != nil {
			return false, err
		}
		if peers != 3 {
			return false, fmt.Errorf("bad: %#v", peers)
		}
		peers, err = s2.numPeers()
		if err != nil {
			return false, err
		}
		if peers != 3 {
			return false, fmt.Errorf("bad: %#v", peers)
		}
		peers, err = s3.numPeers()
		if err != nil {
			return false, err
		}
		if peers != 3 {
			return false, fmt.Errorf("bad: %#v", peers)
		}
		if len(s1.localPeers) != 3 {
			return false, fmt.Errorf("bad: %#v", s1.localPeers)
		}
		if len(s2.localPeers) != 3 {
			return false, fmt.Errorf("bad: %#v", s2.localPeers)
		}
		if len(s3.localPeers) != 3 {
			return false, fmt.Errorf("bad: %#v", s3.localPeers)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Join a fourth server after quorum has already been formed and ensure
	// there is no election
	s4, cleanupS4 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
		c.DevMode = false
		c.DataDir = path.Join(dir, "node4")
	})
	defer cleanupS4()

	// Make sure a leader is elected, grab the current term and then add in
	// the fourth server.
	testutil.WaitForLeader(t, s1.RPC)
	termBefore := s1.raft.Stats()["last_log_term"]

	var addresses []string
	for _, s := range []*Server{s1, s2, s3} {
		addr := fmt.Sprintf("127.0.0.1:%d", s.config.SerfConfig.MemberlistConfig.BindPort)
		addresses = append(addresses, addr)
	}
	if _, err := s4.Join(addresses); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait for the new server to see itself added to the cluster.
	var p4 int
	testutil.WaitForResult(func() (bool, error) {
		// Retry join to reduce flakiness
		if _, err := s4.Join(addresses); err != nil {
			t.Fatalf("err: %v", err)
		}
		p4, _ = s4.numPeers()
		return p4 == 4, errors.New(fmt.Sprintf("%d", p4))
	}, func(err error) {
		t.Fatalf("should have 4 peers: %v", err)
	})

	// Make sure there's still a leader and that the term didn't change,
	// so we know an election didn't occur.
	testutil.WaitForLeader(t, s1.RPC)
	termAfter := s1.raft.Stats()["last_log_term"]
	if termAfter != termBefore {
		t.Fatalf("looks like an election took place")
	}
}

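// TestNomad_BootstrapExpect_NonVoter asserts that non-voting servers do not
// count toward bootstrap_expect: two non-voters plus a single voter must not
// elect a leader, while adding a second voter lets the cluster bootstrap.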
func TestNomad_BootstrapExpect_NonVoter(t *testing.T) {
	t.Parallel()

	dir := tmpDir(t)
	defer os.RemoveAll(dir)

	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 2
		c.DevMode = false
		c.DataDir = path.Join(dir, "node1")
		c.NonVoter = true
	})
	defer cleanupS1()
	s2, cleanupS2 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 2
		c.DevMode = false
		c.DataDir = path.Join(dir, "node2")
		c.NonVoter = true
	})
	defer cleanupS2()
	s3, cleanupS3 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 2
		c.DevMode = false
		c.DataDir = path.Join(dir, "node3")
	})
	defer cleanupS3()
	TestJoin(t, s1, s2, s3)

	// Assert that we do not bootstrap
	testutil.AssertUntil(testutil.Timeout(time.Second), func() (bool, error) {
		_, p := s1.getLeader()
		if p != nil {
			return false, fmt.Errorf("leader %v", p)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("should not have leader: %v", err)
	})

	// Add the fourth server that is a voter
	s4, cleanupS4 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 2
		c.DevMode = false
		c.DataDir = path.Join(dir, "node4")
	})
	defer cleanupS4()
	TestJoin(t, s1, s2, s3, s4)

	testutil.WaitForResult(func() (bool, error) {
		// Retry the join to decrease flakiness
		TestJoin(t, s1, s2, s3, s4)
		peers, err := s1.numPeers()
		if err != nil {
			return false, err
		}
		if peers != 4 {
			return false, fmt.Errorf("bad: %#v", peers)
		}
		peers, err = s2.numPeers()
		if err != nil {
			return false, err
		}
		if peers != 4 {
			return false, fmt.Errorf("bad: %#v", peers)
		}
		peers, err = s3.numPeers()
		if err != nil {
			return false, err
		}
		if peers != 4 {
			return false, fmt.Errorf("bad: %#v", peers)
		}
		peers, err = s4.numPeers()
		if err != nil {
			return false, err
		}
		if peers != 4 {
			return false, fmt.Errorf("bad: %#v", peers)
		}

		if len(s1.localPeers) != 4 {
			return false, fmt.Errorf("bad: %#v", s1.localPeers)
		}
		if len(s2.localPeers) != 4 {
			return false, fmt.Errorf("bad: %#v", s2.localPeers)
		}
		if len(s3.localPeers) != 4 {
			return false, fmt.Errorf("bad: %#v", s3.localPeers)
		}
		if len(s4.localPeers) != 4 {
			return false, fmt.Errorf("bad: %#v", s4.localPeers)
		}

		_, p := s1.getLeader()
		if p == nil {
			return false, fmt.Errorf("no leader")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

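// TestNomad_BadExpect asserts that two servers configured with different
// bootstrap_expect values see each other via Serf but never bootstrap Raft.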
func TestNomad_BadExpect(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 2
	})
	defer cleanupS1()
	s2, cleanupS2 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
	})
	defer cleanupS2()
	servers := []*Server{s1, s2}
	TestJoin(t, s1, s2)

	// Serf members should update
	testutil.WaitForResult(func() (bool, error) {
		for _, s := range servers {
			members := s.Members()
			if len(members) != 2 {
				return false, fmt.Errorf("%d", len(members))
			}
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("should have 2 peers: %v", err)
	})

	// Should still have no Raft peers: s1 expects 2 servers while s2 expects 3,
	// and servers that disagree on bootstrap_expect never bootstrap.
	testutil.WaitForResult(func() (bool, error) {
		for _, s := range servers {
			p, _ := s.numPeers()
			if p != 0 {
				return false, fmt.Errorf("%d", p)
			}
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("should have 0 peers: %v", err)
	})
}

// TestNomad_NonBootstrapping_ShouldntBootstrap asserts that a server started
// with BootstrapExpect set to zero never bootstraps a Raft cluster.
func TestNomad_NonBootstrapping_ShouldntBootstrap(t *testing.T) {
	t.Parallel()

	dir := tmpDir(t)
	defer os.RemoveAll(dir)

	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 0
		c.DevMode = false
		c.DataDir = path.Join(dir, "node")
	})
	defer cleanupS1()

	testutil.WaitForResult(func() (bool, error) {
		s1.peerLock.Lock()
		p := len(s1.localPeers)
		s1.peerLock.Unlock()
		if p != 1 {
			return false, fmt.Errorf("%d", p)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("expected 1 local peer: %v", err)
	})

	// Non-bootstrapping is the initial state, so explicitly attempt to bootstrap
	// and then wait long enough to assert that the server still didn't bootstrap.
	s1.maybeBootstrap()
	time.Sleep(100 * time.Millisecond)

	bootstrapped := atomic.LoadInt32(&s1.config.Bootstrapped)
	require.Zero(t, bootstrapped, "expecting non-bootstrapped servers")

	p, _ := s1.numPeers()
	require.Zero(t, p, "number of peers in Raft")
}