Add retry to TestPostgresqlBackend (#10032)
parent 34b7b4bde6
commit a8cbda1713
@@ -177,7 +177,18 @@ func TestConnectionURL(t *testing.T) {
 // Similar to testHABackend, but using internal implementation details to
 // trigger the lock failure scenario by setting the lock renew period for one
 // of the locks to a higher value than the lock TTL.
+const maxTries = 3
+
 func testPostgresSQLLockTTL(t *testing.T, ha physical.HABackend) {
+	for tries := 1; tries <= maxTries; tries++ {
+		// Try this several times. If the test environment is too slow the lock can naturally lapse
+		if attemptLockTTLTest(t, ha, tries) {
+			break
+		}
+	}
+}
+
+func attemptLockTTLTest(t *testing.T, ha physical.HABackend, tries int) bool {
 	// Set much smaller lock times to speed up the test.
 	lockTTL := 3
 	renewInterval := time.Second * 1
@@ -199,6 +210,7 @@ func testPostgresSQLLockTTL(t *testing.T, ha physical.HABackend) {
 		lock.ttlSeconds = lockTTL

 		// Attempt to lock
+		lockTime := time.Now()
 		leaderCh, err = lock.Lock(nil)
 		if err != nil {
 			t.Fatalf("err: %v", err)
@@ -207,12 +219,19 @@ func testPostgresSQLLockTTL(t *testing.T, ha physical.HABackend) {
 			t.Fatalf("failed to get leader ch")
 		}

+		if tries == 1 {
+			time.Sleep(3 * time.Second)
+		}
 		// Check the value
 		held, val, err := lock.Value()
 		if err != nil {
 			t.Fatalf("err: %v", err)
 		}
 		if !held {
+			if tries < maxTries && time.Since(lockTime) > (time.Second*time.Duration(lockTTL)) {
+				//Our test environment is slow enough that we failed this, retry
+				return false
+			}
 			t.Fatalf("should be held")
 		}
 		if val != "bar" {
@@ -239,6 +258,7 @@ func testPostgresSQLLockTTL(t *testing.T, ha physical.HABackend) {
 		})

 		// Attempt to lock should work
+		lockTime := time.Now()
 		leaderCh2, err := lock2.Lock(stopCh)
 		if err != nil {
 			t.Fatalf("err: %v", err)
@@ -254,19 +274,23 @@ func testPostgresSQLLockTTL(t *testing.T, ha physical.HABackend) {
 			t.Fatalf("err: %v", err)
 		}
 		if !held {
+			if tries < maxTries && time.Since(lockTime) > (time.Second*time.Duration(lockTTL)) {
+				//Our test environment is slow enough that we failed this, retry
+				return false
+			}
 			t.Fatalf("should be held")
 		}
 		if val != "baz" {
 			t.Fatalf("bad value: %v", val)
 		}
 	}

 	// The first lock should have lost the leader channel
 	select {
 	case <-time.After(longRenewInterval * 2):
 		t.Fatalf("original lock did not have its leader channel closed.")
 	case <-leaderCh:
 	}
+
+	return true
 }

 // Verify that once Unlock is called, we don't keep trying to renew the original
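For readers skimming the diff, the sketch below restates the pattern this commit introduces, in isolation: a bounded retry loop that re-runs a timing-sensitive check whenever an attempt was too slow to be conclusive. This is a minimal illustration under stated assumptions, not Vault code; the names maxAttempts, attemptFlakyCheck, and doTimingSensitiveWork are hypothetical stand-ins for maxTries, attemptLockTTLTest, and the lock/Value checks in the hunks above.

// retrypattern_test.go — minimal sketch of the retry pattern applied in this
// commit. All identifiers here are illustrative, not part of the Vault code.
package retrypattern

import (
	"testing"
	"time"
)

const maxAttempts = 3

func TestWithRetry(t *testing.T) {
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		// An attempt that was too slow to be judged returns false; retry it.
		// On the final attempt the guard inside attemptFlakyCheck no longer
		// applies, so a real failure surfaces through t.Fatalf instead.
		if attemptFlakyCheck(t, attempt) {
			break
		}
	}
}

// attemptFlakyCheck reports whether the attempt ran to a conclusive result.
// It returns false only when the deadline was exceeded before the check could
// be judged; hard assertion failures still abort the test immediately.
func attemptFlakyCheck(t *testing.T, attempt int) bool {
	deadline := 2 * time.Second
	start := time.Now()

	ok := doTimingSensitiveWork() // stand-in for acquiring the lock and checking its value

	if !ok {
		if attempt < maxAttempts && time.Since(start) > deadline {
			// The environment was too slow to judge this attempt; ask the caller to retry.
			return false
		}
		t.Fatalf("work failed on attempt %d", attempt)
	}
	return true
}

// doTimingSensitiveWork stands in for the timing-sensitive body of the real
// test; here it always succeeds so the sketch passes as written.
func doTimingSensitiveWork() bool {
	return true
}

The design choice mirrors the diff: hard assertions stay inside the attempt function, so a genuine failure aborts at once, while only the slow-environment case returns false and earns another attempt, and the attempt < maxAttempts guard ensures the final attempt fails loudly rather than silently giving up.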