From 81ae581da6fbd600e149d6272d35cd308f4fe18c Mon Sep 17 00:00:00 2001
From: Tim Gross
Date: Wed, 17 Jun 2020 15:41:51 -0400
Subject: [PATCH] test: remove flaky test from volumewatcher (#8189)

The volumewatcher restores itself on notification, but detecting this is
racy because it may reap any claim (or find there are no claims to reap)
and shut down before we can test whether it's running. This appears to
have become flaky with a new version of golang. The remaining cases in
this test sufficiently exercise the start/stop behavior of the
volumewatcher, so remove the flaky section.
---
 nomad/volumewatcher/volumes_watcher_test.go | 24 ---------------------
 1 file changed, 24 deletions(-)

diff --git a/nomad/volumewatcher/volumes_watcher_test.go b/nomad/volumewatcher/volumes_watcher_test.go
index 53f97fe30..26329cf9b 100644
--- a/nomad/volumewatcher/volumes_watcher_test.go
+++ b/nomad/volumewatcher/volumes_watcher_test.go
@@ -223,30 +223,6 @@ func TestVolumeWatch_StartStop(t *testing.T) {
 	require.Eventually(func() bool {
 		return !watcher.watchers[vol.ID+vol.Namespace].isRunning()
 	}, time.Second*5, 10*time.Millisecond)
-
-	// the watcher will have incremented the index so we need to make sure
-	// our inserts will trigger new events
-	index, _ = srv.State().LatestIndex()
-
-	// create a new claim
-	alloc3 := mock.Alloc()
-	alloc3.ClientStatus = structs.AllocClientStatusRunning
-	index++
-	err = srv.State().UpsertAllocs(index, []*structs.Allocation{alloc3})
-	require.NoError(err)
-	claim3 := &structs.CSIVolumeClaim{
-		AllocationID: alloc3.ID,
-		NodeID:       node.ID,
-		Mode:         structs.CSIVolumeClaimRelease,
-	}
-	index++
-	err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim3)
-	require.NoError(err)
-
-	// a stopped watcher should restore itself on notification
-	require.Eventually(func() bool {
-		return watcher.watchers[vol.ID+vol.Namespace].isRunning()
-	}, time.Second*5, 10*time.Millisecond)
 }

 // TestVolumeWatch_RegisterDeregister tests the start and stop of