Fix client reconnect test; remove guard test (#12173)
* Update reconnect test to new algorithm and interface; remove guard test
parent 3cbd76ea9d
commit c7a5b17251
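The change the test now exercises: the alloc runner's Reconnect takes the server's updated allocation and returns an error, replacing the old parameterless ar.Reconnect() call. Below is a minimal, self-contained sketch of that call pattern using toy stand-in types; Allocation and allocRunner here are illustrative only, not Nomad's actual implementation:

package main

import (
	"errors"
	"fmt"
	"time"
)

// Allocation is a toy stand-in for the server-side allocation record;
// only the fields this test compares are modeled.
type Allocation struct {
	ClientStatus     string
	AllocModifyIndex uint64
	ModifyIndex      uint64
	ModifyTime       int64
}

// allocRunner is a toy stand-in for the client's alloc runner.
type allocRunner struct {
	alloc *Allocation
}

// Reconnect mirrors the shape of the new interface: it accepts the
// updated allocation from the server, returns an error, and copies the
// update's indexes onto the runner's local allocation.
func (ar *allocRunner) Reconnect(update *Allocation) error {
	if update == nil {
		return errors.New("nil update")
	}
	ar.alloc.AllocModifyIndex = update.AllocModifyIndex
	ar.alloc.ModifyIndex = update.ModifyIndex
	ar.alloc.ModifyTime = update.ModifyTime
	return nil
}

func main() {
	ar := &allocRunner{alloc: &Allocation{
		ClientStatus:     "running",
		AllocModifyIndex: 10,
		ModifyIndex:      10,
		ModifyTime:       time.Now().UnixNano(),
	}}

	// Build an update with bumped indexes, as the test below does.
	update := *ar.alloc
	update.ClientStatus = "unknown"
	update.AllocModifyIndex += 10
	update.ModifyIndex += 10
	update.ModifyTime += 10

	if err := ar.Reconnect(&update); err != nil {
		fmt.Println("reconnect failed:", err)
		return
	}
	fmt.Println("runner indexes now match the update:",
		ar.alloc.AllocModifyIndex, ar.alloc.ModifyIndex, ar.alloc.ModifyTime)
}

In the actual diff, the update's AllocModifyIndex, ModifyIndex, and ModifyTime are bumped by 10 relative to the original alloc, and the test asserts the runner's alloc matches the update after Reconnect returns without error.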
@@ -1612,12 +1612,17 @@ func TestAllocRunner_Reconnect(t *testing.T) {
 		t.Run(tc.clientStatus, func(t *testing.T) {
 			// create a running alloc
 			alloc := mock.BatchAlloc()
+			alloc.AllocModifyIndex = 10
+			alloc.ModifyIndex = 10
+			alloc.ModifyTime = time.Now().UnixNano()
 
 			// Ensure task takes some time
 			task := alloc.Job.TaskGroups[0].Tasks[0]
 			task.Driver = "mock_driver"
 			task.Config["run_for"] = "30s"
 
+			original := alloc.Copy()
+
 			conf, cleanup := testAllocRunnerConfig(t, alloc)
 			defer cleanup()
 
@@ -1631,10 +1636,24 @@ func TestAllocRunner_Reconnect(t *testing.T) {
 				taskRunner.UpdateState(tc.taskState, tc.taskEvent)
 			}
 
-			ar.Reconnect()
+			update := ar.Alloc().Copy()
 
+			update.ClientStatus = structs.AllocClientStatusUnknown
+			update.AllocModifyIndex = original.AllocModifyIndex + 10
+			update.ModifyIndex = original.ModifyIndex + 10
+			update.ModifyTime = original.ModifyTime + 10
+
+			err = ar.Reconnect(update)
+			require.NoError(t, err)
+
 			require.Equal(t, tc.clientStatus, ar.AllocState().ClientStatus)
 
+
+			// Make sure the runner's alloc indexes match the update.
+			require.Equal(t, update.AllocModifyIndex, ar.Alloc().AllocModifyIndex)
+			require.Equal(t, update.ModifyIndex, ar.Alloc().ModifyIndex)
+			require.Equal(t, update.ModifyTime, ar.Alloc().ModifyTime)
+
 			found := false
 
 			updater := conf.StateUpdater.(*MockStateUpdater)
@@ -1664,98 +1683,3 @@ func TestAllocRunner_Reconnect(t *testing.T) {
 		})
 	}
 }
-
-func TestAllocRunner_MaybeHasPendingReconnect(t *testing.T) {
-	t.Parallel()
-
-	type tcase struct {
-		name         string
-		timestamp    int64
-		expectedDiff int
-	}
-	tcases := []tcase{
-		{
-			"should guard now",
-			time.Now().UnixNano(),
-			1,
-		},
-		{
-			"should guard 3 seconds",
-			time.Now().Add(-(3 * time.Second)).UnixNano(),
-			1,
-		},
-		{
-			"should not guard 6 seconds",
-			time.Now().Add(-(6 * time.Second)).UnixNano(),
-			2,
-		},
-	}
-
-	for _, tc := range tcases {
-		t.Run(tc.name, func(t *testing.T) {
-			alloc := mock.BatchAlloc()
-
-			task := alloc.Job.TaskGroups[0].Tasks[0]
-			task.Driver = "mock_driver"
-			task.Config["run_for"] = "30s"
-
-			conf, cleanup := testAllocRunnerConfig(t, alloc)
-			defer cleanup()
-
-			ar, err := NewAllocRunner(conf)
-			require.NoError(t, err)
-			defer destroy(ar)
-
-			go ar.Run()
-
-			reconnectEvent := structs.NewTaskEvent(structs.TaskClientReconnected)
-			reconnectEvent.Time = tc.timestamp
-			for _, tr := range ar.tasks {
-				tr.EmitEvent(reconnectEvent)
-			}
-
-			updater := conf.StateUpdater.(*MockStateUpdater)
-			// get a copy of the first states so that we can compare lengths to
-			// determine how many events were appended.
-			var firstStates map[string]*structs.TaskState
-			testutil.WaitForResult(func() (bool, error) {
-				last := updater.Last()
-				if last == nil {
-					return false, errors.New("last update nil")
-				}
-				states := last.TaskStates
-				for _, s := range states {
-					for _, e := range s.Events {
-						if e.Type == structs.TaskClientReconnected {
-							firstStates = states
-							return true, nil
-						}
-					}
-				}
-
-				return false, errors.New("no reconnect event found")
-			}, func(err error) {
-				require.NoError(t, err)
-			})
-
-			ar.Reconnect()
-
-			testutil.WaitForResult(func() (bool, error) {
-				last := updater.Last()
-				if last == nil {
-					return false, errors.New("last update nil")
-				}
-
-				for k, taskState := range last.TaskStates {
-					if len(taskState.Events) != len(firstStates[k].Events)+tc.expectedDiff {
-						return false, fmt.Errorf("expected %d reconnect events", tc.expectedDiff)
-					}
-				}
-
-				return true, nil
-			}, func(err error) {
-				require.NoError(t, err)
-			})
-		})
-	}
-}