Add retry to TestPostgresqlBackend (#10032)

Scott Miller 2020-09-24 16:19:11 -05:00 committed by GitHub
parent 34b7b4bde6
commit a8cbda1713
1 changed file with 25 additions and 1 deletion

@@ -177,7 +177,18 @@ func TestConnectionURL(t *testing.T) {
// Similar to testHABackend, but using internal implementation details to
// trigger the lock failure scenario by setting the lock renew period for one
// of the locks to a higher value than the lock TTL.
const maxTries = 3
func testPostgresSQLLockTTL(t *testing.T, ha physical.HABackend) {
for tries := 1; tries <= maxTries; tries++ {
// Try this several times. If the test environment is too slow, the lock can naturally lapse.
if attemptLockTTLTest(t, ha, tries) {
break
}
}
}
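
// attemptLockTTLTest runs one iteration of the lock TTL scenario. It returns
// false when the lock appears to have lapsed only because the environment was
// too slow (signalling the caller to retry), and true once the attempt completes.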
func attemptLockTTLTest(t *testing.T, ha physical.HABackend, tries int) bool {
// Set much smaller lock times to speed up the test.
lockTTL := 3
renewInterval := time.Second * 1
@@ -199,6 +210,7 @@ func testPostgresSQLLockTTL(t *testing.T, ha physical.HABackend) {
lock.ttlSeconds = lockTTL
// Attempt to lock
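// lockTime marks when the lock was acquired; if a later check fails after more
// than lockTTL has elapsed, the lapse is attributed to a slow environment.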
lockTime := time.Now()
leaderCh, err := lock.Lock(nil)
if err != nil {
t.Fatalf("err: %v", err)
@@ -207,12 +219,19 @@ func testPostgresSQLLockTTL(t *testing.T, ha physical.HABackend) {
t.Fatalf("failed to get leader ch")
}
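// The sleep spans the 3-second lock TTL, so the first lock stays held only if
// renewal is working; retries skip the wait so a slow environment has less
// opportunity to let the lock lapse again.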
if tries == 1 {
time.Sleep(3 * time.Second)
}
// Check the value
held, val, err := lock.Value()
if err != nil {
t.Fatalf("err: %v", err)
}
if !held {
if tries < maxTries && time.Since(lockTime) > (time.Second*time.Duration(lockTTL)) {
// Our test environment is slow enough that we failed this; retry
return false
}
t.Fatalf("should be held")
}
if val != "bar" {
@@ -239,6 +258,7 @@ func testPostgresSQLLockTTL(t *testing.T, ha physical.HABackend) {
})
// Attempt to lock should work
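// Track when the second lock was taken so the same slow-environment check can
// be applied below.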
lockTime := time.Now()
leaderCh2, err := lock2.Lock(stopCh)
if err != nil {
t.Fatalf("err: %v", err)
@@ -254,19 +274,23 @@ func testPostgresSQLLockTTL(t *testing.T, ha physical.HABackend) {
t.Fatalf("err: %v", err)
}
if !held {
if tries < maxTries && time.Since(lockTime) > (time.Second*time.Duration(lockTTL)) {
// Our test environment is slow enough that we failed this; retry
return false
}
t.Fatalf("should be held")
}
if val != "baz" {
t.Fatalf("bad value: %v", val)
}
}
// The first lock should have lost the leader channel
select {
case <-time.After(longRenewInterval * 2):
t.Fatalf("original lock did not have its leader channel closed.")
case <-leaderCh:
}
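// Every check passed on this attempt; tell the caller no retry is needed.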
return true
}
// Verify that once Unlock is called, we don't keep trying to renew the original