rpcclient/health: fix data race in a test
The idleTTL was being written and read concurrently. Move idleTTL to a struct field instead, so that when one test patches the TTL it does not impact others. The background goroutines for the store can outlive a test because context cancellation is asynchronous.
parent d257acee24
commit 95c92343e4
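A minimal sketch of the pattern this commit applies (hypothetical names, not the Consul code itself): the tunable moves from a package-level variable to a field that the constructor defaults, so a test overrides only its own Store instance.

package main

import (
	"fmt"
	"time"
)

type store struct {
	// Per-instance TTL: a test sets it before any background goroutine starts,
	// so the override needs no lock.
	idleTTL time.Duration
}

func newStore() *store {
	return &store{idleTTL: 20 * time.Minute} // default previously held by the package-level var
}

func main() {
	s := newStore()
	s.idleTTL = 10 * time.Millisecond // what a test now does instead of patching a global
	fmt.Println("entries idle out after", s.idleTTL)
}

Setting the field before go store.Run(ctx) is called is enough: starting a goroutine establishes a happens-before edge, so the background goroutine sees the patched value without extra synchronization.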
@@ -17,6 +17,11 @@ type Store struct {
 	lock       sync.RWMutex
 	byKey      map[string]entry
 	expiryHeap *ttlcache.ExpiryHeap
+
+	// idleTTL is the duration of time an entry should remain in the Store after the
+	// last request for that entry has been terminated. It is a field on the struct
+	// so that it can be patched in tests without needing a lock.
+	idleTTL time.Duration
 }
 
 type entry struct {
@@ -33,6 +38,7 @@ func NewStore(logger hclog.Logger) *Store {
 		logger:     logger,
 		byKey:      make(map[string]entry),
 		expiryHeap: ttlcache.NewExpiryHeap(),
+		idleTTL:    20 * time.Minute,
 	}
 }
 
@@ -186,10 +192,6 @@ func (s *Store) getEntry(req Request) (string, entry, error) {
 	return key, e, nil
 }
 
-// idleTTL is the duration of time an entry should remain in the Store after the
-// last request for that entry has been terminated.
-var idleTTL = 20 * time.Minute
-
 // releaseEntry decrements the request count and starts an expiry timer if the
 // count has reached 0. Must be called once for every call to getEntry.
 func (s *Store) releaseEntry(key string) {
@@ -204,12 +206,12 @@ func (s *Store) releaseEntry(key string) {
 	}
 
 	if e.expiry.Index() == ttlcache.NotIndexed {
-		e.expiry = s.expiryHeap.Add(key, idleTTL)
+		e.expiry = s.expiryHeap.Add(key, s.idleTTL)
 		s.byKey[key] = e
 		return
 	}
 
-	s.expiryHeap.Update(e.expiry.Index(), idleTTL)
+	s.expiryHeap.Update(e.expiry.Index(), s.idleTTL)
 }
 
 // makeEntryKey matches agent/cache.makeEntryKey, but may change in the future.
@@ -391,10 +391,9 @@ func TestStore_Run_ExpiresEntries(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	ttl := 10 * time.Millisecond
-	patchIdleTTL(t, ttl)
-
 	store := NewStore(hclog.New(nil))
+	ttl := 10 * time.Millisecond
+	store.idleTTL = ttl
 	go store.Run(ctx)
 
 	req := &fakeRequest{
@@ -430,14 +429,6 @@ func TestStore_Run_ExpiresEntries(t *testing.T) {
 	require.Equal(t, ttlcache.NotIndexed, e.expiry.Index())
 }
 
-func patchIdleTTL(t *testing.T, ttl time.Duration) {
-	orig := idleTTL
-	idleTTL = ttl
-	t.Cleanup(func() {
-		idleTTL = orig
-	})
-}
-
 func runStep(t *testing.T, name string, fn func(t *testing.T)) {
 	t.Helper()
 	if !t.Run(name, fn) {
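For reference, a hypothetical reduction of the removed pattern (the names mirror the deleted patchIdleTTL helper, but this is not the Consul test and would live in a _test.go file): the goroutine started by one test can still be reading the package-level idleTTL after the test body returns, because context cancellation is asynchronous, while t.Cleanup from that test, or a patch from the next test, writes it. Under go test -race, that unsynchronized read/write pair is what gets reported.

package example

import (
	"context"
	"testing"
	"time"
)

var idleTTL = 20 * time.Minute // shared, mutable package state: the root cause

func patchIdleTTL(t *testing.T, ttl time.Duration) {
	orig := idleTTL
	idleTTL = ttl
	t.Cleanup(func() { idleTTL = orig }) // this write can race with the goroutine below
}

func TestExpiry(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	patchIdleTTL(t, 10*time.Millisecond)

	go func() { // stands in for go store.Run(ctx)
		for {
			select {
			case <-ctx.Done():
				return
			case <-time.After(idleTTL): // unsynchronized read; may run after Cleanup's write
			}
		}
	}()
}

Whether the detector fires here is timing dependent, which is why races like this tend to surface as intermittent test failures rather than deterministic ones; scoping the TTL to the Store removes the shared state entirely.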