Make the chunking test multidimensional (#6212)

This ensures that it's not just a single operation we restore
successfully, but many. It's the same foundation, just with multiple
operations in flight at once.
Jeff Mitchell 2019-07-25 06:40:09 -04:00 committed by Paul Banks
parent 7dbbe7e55a
commit e266b038cc
1 changed file with 85 additions and 63 deletions
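
For orientation before the diff: the property under test is the raft-chunking contract that chunks belonging to several operations, keyed by operation number, can arrive interleaved and out of order, and that an operation's payload is reassembled only once its last missing chunk shows up. Below is a self-contained toy sketch of that contract, not the raft-chunking library itself; the chunk fields mirror the ChunkInfo fields the test populates, and one byte of payload per chunk mirrors the test emitting one raft log per byte of the encoded request.

package main

import "fmt"

// chunk mirrors the ChunkInfo fields the test fills in, plus one byte of
// payload, since the test emits one raft log per byte of the encoded request.
type chunk struct {
	OpNum     uint64
	Seq       int
	NumChunks int
	Data      byte
}

// chunker is a toy stand-in for the behavior the test expects from
// fsm.chunker.Apply: buffer chunks per operation, report completion.
type chunker struct {
	pending map[uint64][][]byte // OpNum -> one slot per sequence number
}

// apply stores one chunk and returns the reassembled payload only when every
// sequence slot for that operation is filled (the ChunkingSuccess case);
// otherwise it returns nil, matching the test's assert.Nil(resp) expectations.
func (c *chunker) apply(ch chunk) []byte {
	slots, ok := c.pending[ch.OpNum]
	if !ok {
		slots = make([][]byte, ch.NumChunks)
		c.pending[ch.OpNum] = slots
	}
	slots[ch.Seq] = []byte{ch.Data}
	var out []byte
	for _, s := range slots {
		if s == nil {
			return nil // still waiting on at least one chunk
		}
		out = append(out, s...)
	}
	delete(c.pending, ch.OpNum)
	return out
}

func main() {
	c := &chunker{pending: map[uint64][][]byte{}}
	ops := map[uint64]string{32: "foo0", 33: "foo1"}
	// Feed both operations' chunks interleaved and in reverse sequence order;
	// each operation completes only when its final missing chunk arrives.
	for i := 3; i >= 0; i-- {
		for op, payload := range ops {
			if out := c.apply(chunk{op, i, len(payload), payload[i]}); out != nil {
				fmt.Printf("op %d reassembled: %q\n", op, out)
			}
		}
	}
}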


@@ -1415,55 +1415,69 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
 	fsm, err := New(nil, os.Stderr)
 	require.NoError(err)
 
-	req := structs.RegisterRequest{
-		Datacenter: "dc1",
-		Node:       "foo",
-		Address:    "127.0.0.1",
-		Service: &structs.NodeService{
-			ID:      "db",
-			Service: "db",
-			Tags:    []string{"master"},
-			Port:    8000,
-		},
-		Check: &structs.HealthCheck{
-			Node:      "foo",
-			CheckID:   "db",
-			Name:      "db connectivity",
-			Status:    api.HealthPassing,
-			ServiceID: "db",
-		},
-	}
-
-	buf, err := structs.Encode(structs.RegisterRequestType, req)
-	require.NoError(err)
-
-	var logs []*raft.Log
-
-	for i, b := range buf {
-		chunkInfo := &raftchunkingtypes.ChunkInfo{
-			OpNum:       uint64(32),
-			SequenceNum: uint32(i),
-			NumChunks:   uint32(len(buf)),
-		}
-		chunkBytes, err := proto.Marshal(chunkInfo)
-		require.NoError(err)
-		logs = append(logs, &raft.Log{
-			Data:       []byte{b},
-			Extensions: chunkBytes,
-		})
+	var logOfLogs [][]*raft.Log
+	var bufs [][]byte
+	for i := 0; i < 10; i++ {
+		req := structs.RegisterRequest{
+			Datacenter: "dc1",
+			Node:       fmt.Sprintf("foo%d", i),
+			Address:    "127.0.0.1",
+			Service: &structs.NodeService{
+				ID:      "db",
+				Service: "db",
+				Tags:    []string{"master"},
+				Port:    8000,
+			},
+			Check: &structs.HealthCheck{
+				Node:      fmt.Sprintf("foo%d", i),
+				CheckID:   "db",
+				Name:      "db connectivity",
+				Status:    api.HealthPassing,
+				ServiceID: "db",
+			},
+		}
+
+		buf, err := structs.Encode(structs.RegisterRequestType, req)
+		require.NoError(err)
+
+		var logs []*raft.Log
+
+		for j, b := range buf {
+			chunkInfo := &raftchunkingtypes.ChunkInfo{
+				OpNum:       uint64(32 + i),
+				SequenceNum: uint32(j),
+				NumChunks:   uint32(len(buf)),
+			}
+			chunkBytes, err := proto.Marshal(chunkInfo)
+			require.NoError(err)
+			logs = append(logs, &raft.Log{
+				Data:       []byte{b},
+				Extensions: chunkBytes,
+			})
+		}
+		bufs = append(bufs, buf)
+		logOfLogs = append(logOfLogs, logs)
 	}
 
 	// The reason for the skipping is to test out-of-order applies which are
-	// theoretically possible
-	for i := 0; i < len(logs); i += 2 {
-		resp := fsm.chunker.Apply(logs[i])
+	// theoretically possible. Apply some logs from each set of chunks, but not
+	// the full set, and out of order.
+	for _, logs := range logOfLogs {
+		resp := fsm.chunker.Apply(logs[8])
+		assert.Nil(resp)
+		resp = fsm.chunker.Apply(logs[0])
+		assert.Nil(resp)
+		resp = fsm.chunker.Apply(logs[3])
 		assert.Nil(resp)
 	}
 
 	// Verify we are not registered
-	_, node, err := fsm.state.GetNode("foo")
-	require.NoError(err)
-	assert.Nil(node)
+	for i := 0; i < 10; i++ {
+		_, node, err := fsm.state.GetNode(fmt.Sprintf("foo%d", i))
+		require.NoError(err)
+		assert.Nil(node)
+	}
 
 	// Snapshot, restore elsewhere, apply the rest of the logs, make sure it
 	// looks right
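
A note on the bookkeeping that ties the two hunks together: chunks 8, 0, and 3 of every operation were applied to the first FSM's chunker above, so the restored chunker in the second hunk already holds them. The loop after the restore therefore skips those indices and expects ChunkingSuccess only from the very last log, which, since each encoded request is well over nine bytes, is necessarily the final chunk outstanding. A hypothetical helper (not part of the test) making that invariant explicit:

package main

import "fmt"

// lastOutstanding returns the highest chunk index not yet applied; after the
// restore, only applying this chunk should complete the operation.
func lastOutstanding(numChunks int, applied map[int]bool) int {
	for i := numChunks - 1; i >= 0; i-- {
		if !applied[i] {
			return i
		}
	}
	return -1 // every chunk was already applied
}

func main() {
	applied := map[int]bool{0: true, 3: true, 8: true}
	// For any chunk count above 9, the last outstanding index is the last
	// index overall, matching the test's i != len(logs)-1 success check.
	fmt.Println(lastOutstanding(40, applied)) // prints 39
}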
@@ -1482,38 +1496,46 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
 	require.NoError(err)
 
 	// Verify we are still not registered
-	_, node, err = fsm2.state.GetNode("foo")
-	require.NoError(err)
-	assert.Nil(node)
+	for i := 0; i < 10; i++ {
+		_, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i))
+		require.NoError(err)
+		assert.Nil(node)
+	}
 
 	// Apply the rest of the logs
-	var resp interface{}
-	for i := 1; i < len(logs); i += 2 {
-		resp = fsm2.chunker.Apply(logs[i])
-		if resp != nil {
-			_, ok := resp.(raftchunking.ChunkingSuccess)
-			assert.True(ok)
+	for _, logs := range logOfLogs {
+		var resp interface{}
+		for i, log := range logs {
+			switch i {
+			case 0, 3, 8:
+			default:
+				resp = fsm2.chunker.Apply(log)
+				if i != len(logs)-1 {
+					assert.Nil(resp)
+				}
+			}
 		}
+		_, ok := resp.(raftchunking.ChunkingSuccess)
+		assert.True(ok)
 	}
-	assert.NotNil(resp)
-	_, ok := resp.(raftchunking.ChunkingSuccess)
-	assert.True(ok)
 
 	// Verify we are registered
-	_, node, err = fsm2.state.GetNode("foo")
-	require.NoError(err)
-	assert.NotNil(node)
+	for i := 0; i < 10; i++ {
+		_, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i))
+		require.NoError(err)
+		assert.NotNil(node)
 
-	// Verify service registered
-	_, services, err := fsm2.state.NodeServices(nil, "foo")
-	require.NoError(err)
-	_, ok = services.Services["db"]
-	assert.True(ok)
+		// Verify service registered
+		_, services, err := fsm2.state.NodeServices(nil, fmt.Sprintf("foo%d", i))
+		require.NoError(err)
+		_, ok := services.Services["db"]
+		assert.True(ok)
 
-	// Verify check
-	_, checks, err := fsm2.state.NodeChecks(nil, "foo")
-	require.NoError(err)
-	require.Equal(string(checks[0].CheckID), "db")
+		// Verify check
+		_, checks, err := fsm2.state.NodeChecks(nil, fmt.Sprintf("foo%d", i))
+		require.NoError(err)
+		require.Equal(string(checks[0].CheckID), "db")
+	}
 }
 
 func TestFSM_Chunking_TermChange(t *testing.T) {