package servers

import (
	"fmt"
	"log"
	"math/rand"
	"net"
	"os"
	"testing"
	"time"
)
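
// init seeds the shared math/rand source so that the randomized behavior in
// these tests is not identical across runs.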
func init() {
	// Seed the random number generator
	rand.Seed(time.Now().UnixNano())
}
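
// fauxAddr is a minimal net.Addr implementation used to give test servers an
// address without opening any real network connections.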
type fauxAddr struct {
	Addr string
}
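
// String and Network satisfy the net.Addr interface; both simply return the
// stored address, which is all these tests require.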
func (fa *fauxAddr) String() string  { return fa.Addr }
func (fa *fauxAddr) Network() string { return fa.Addr }
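
// fauxConnPool is a stand-in for the connection pool handed to New; it fails
// a configurable fraction of Ping calls.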
type fauxConnPool struct {
	// failPct between 0.0 and 1.0 == pct of time a Ping should fail
	failPct float64
}
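
// Ping reports a random failure with probability failPct.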
func (cp *fauxConnPool) Ping(net.Addr) error {
	successProb := rand.Float64()
	if successProb > cp.failPct {
		return nil
	}
	return fmt.Errorf("bad server")
}
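
// testManager returns a Manager backed by a fauxConnPool that never fails
// pings.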
func testManager(t *testing.T) (m *Manager) {
	logger := log.New(os.Stderr, "", 0)
	shutdownCh := make(chan struct{})
	m = New(logger, shutdownCh, &fauxConnPool{})
	return m
}
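
// testManagerFailProb returns a Manager whose Ping calls fail with
// probability failPct.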
func testManagerFailProb(failPct float64) (m *Manager) {
	logger := log.New(os.Stderr, "", 0)
	shutdownCh := make(chan struct{})
	m = New(logger, shutdownCh, &fauxConnPool{failPct: failPct})
	return m
}
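
// TestManagerInternal_cycleServer verifies that cycle rotates the server list
// by one position on each call and leaves its length unchanged.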
func TestManagerInternal_cycleServer(t *testing.T) {
	server0 := &Server{Addr: &fauxAddr{"server1"}}
	server1 := &Server{Addr: &fauxAddr{"server2"}}
	server2 := &Server{Addr: &fauxAddr{"server3"}}
	srvs := Servers([]*Server{server0, server1, server2})

	srvs.cycle()
	if len(srvs) != 3 {
		t.Fatalf("server length incorrect: %d/3", len(srvs))
	}
	if srvs[0] != server1 ||
		srvs[1] != server2 ||
		srvs[2] != server0 {
		t.Fatalf("server ordering after one cycle not correct")
	}

	srvs.cycle()
	if srvs[0] != server2 ||
		srvs[1] != server0 ||
		srvs[2] != server1 {
		t.Fatalf("server ordering after two cycles not correct")
	}

	srvs.cycle()
	if srvs[0] != server0 ||
		srvs[1] != server1 ||
		srvs[2] != server2 {
		t.Fatalf("server ordering after three cycles not correct")
	}
}
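
// TestManagerInternal_New verifies that New returns a Manager with its logger
// and shutdown channel wired up.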
func TestManagerInternal_New(t *testing.T) {
	m := testManager(t)
	if m == nil {
		t.Fatalf("Manager nil")
	}

	if m.logger == nil {
		t.Fatalf("Manager.logger nil")
	}

	if m.shutdownCh == nil {
		t.Fatalf("Manager.shutdownCh nil")
	}
}
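
// TestManagerInternal_refreshServerRebalanceTimer checks that the rebalance
// interval never falls below the expected minimum for each cluster scenario.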
// func (m *Manager) refreshServerRebalanceTimer() time.Duration
func TestManagerInternal_refreshServerRebalanceTimer(t *testing.T) {
	type clusterSizes struct {
		numNodes     int32
		numServers   int
		minRebalance time.Duration
	}
	clusters := []clusterSizes{
		{1, 0, 5 * time.Minute}, // partitioned cluster
		{1, 3, 5 * time.Minute},
		{2, 3, 5 * time.Minute},
		{100, 0, 5 * time.Minute}, // partitioned
		{100, 1, 5 * time.Minute}, // partitioned
		{100, 3, 5 * time.Minute},
		{1024, 1, 5 * time.Minute}, // partitioned
		{1024, 3, 5 * time.Minute}, // partitioned
		{1024, 5, 5 * time.Minute},
		{16384, 1, 4 * time.Minute}, // partitioned
		{16384, 2, 5 * time.Minute}, // partitioned
		{16384, 3, 5 * time.Minute}, // partitioned
		{16384, 5, 5 * time.Minute},
		{32768, 0, 5 * time.Minute}, // partitioned
		{32768, 1, 8 * time.Minute}, // partitioned
		{32768, 2, 3 * time.Minute}, // partitioned
		{32768, 3, 5 * time.Minute}, // partitioned
		{32768, 5, 3 * time.Minute}, // partitioned
		{65535, 7, 5 * time.Minute},
		{65535, 0, 5 * time.Minute}, // partitioned
		{65535, 1, 8 * time.Minute}, // partitioned
		{65535, 2, 3 * time.Minute}, // partitioned
		{65535, 3, 5 * time.Minute}, // partitioned
		{65535, 5, 3 * time.Minute}, // partitioned
		{65535, 7, 5 * time.Minute},
		{1000000, 1, 4 * time.Hour},     // partitioned
		{1000000, 2, 2 * time.Hour},     // partitioned
		{1000000, 3, 80 * time.Minute},  // partitioned
		{1000000, 5, 50 * time.Minute},  // partitioned
		{1000000, 11, 20 * time.Minute}, // partitioned
		{1000000, 19, 10 * time.Minute},
	}

	logger := log.New(os.Stderr, "", log.LstdFlags)
	shutdownCh := make(chan struct{})

	for _, s := range clusters {
		m := New(logger, shutdownCh, &fauxConnPool{})
		m.SetNumNodes(s.numNodes)
		servers := make([]*Server, 0, s.numServers)
		for i := 0; i < s.numServers; i++ {
			nodeName := fmt.Sprintf("s%02d", i)
			servers = append(servers, &Server{Addr: &fauxAddr{nodeName}})
		}
		m.SetServers(servers)

		d := m.refreshServerRebalanceTimer()
		t.Logf("Nodes: %d; Servers: %d; Refresh: %v; Min: %v", s.numNodes, s.numServers, d, s.minRebalance)
		if d < s.minRebalance {
			t.Errorf("duration too short for cluster of size %d and %d servers (%s < %s)", s.numNodes, s.numServers, d, s.minRebalance)
		}
	}
}