VAULT-4240 time.After() in a select statement can lead to memory leak (#14814)
* VAULT-4240 time.After() in a select statement can lead to memory leak
* CL
commit aafb5d6427
parent 34f634eb9e
@@ -0,0 +1,3 @@
+```release-note:bug
+core: time.After() used in a select statement can lead to memory leak
+```
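For context, a minimal sketch of the pattern this commit removes and the one it adopts (the function names and channels below are illustrative, not taken from the Vault code): calling `time.After` inside a `for`/`select` loop allocates a new timer on every iteration, and each timer is only released once it fires, so a loop that usually leaves the `select` through another case keeps accumulating pending timers. Reusing a single `time.Ticker` avoids the per-iteration allocation.

```go
package main

import "time"

// leakyWait shows the pattern being removed: every iteration calls
// time.After, which allocates a fresh timer that is not reclaimed until it
// fires, even when another case wins the select first.
func leakyWait(quit, work <-chan struct{}) {
	for {
		select {
		case <-quit:
			return
		case <-work:
			// handle work
		case <-time.After(50 * time.Millisecond):
			// periodic check
		}
	}
}

// tickerWait shows the replacement pattern used throughout this commit:
// one ticker is created up front, stopped on exit, and reset before each
// select, so the loop performs no per-iteration timer allocation.
func tickerWait(quit, work <-chan struct{}) {
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	for {
		ticker.Reset(50 * time.Millisecond)
		select {
		case <-quit:
			return
		case <-work:
			// handle work
		case <-ticker.C:
			// periodic check
		}
	}
}

func main() {
	quit := make(chan struct{})
	work := make(chan struct{})
	go tickerWait(quit, work)
	time.Sleep(200 * time.Millisecond)
	close(quit)
}
```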
@@ -265,6 +265,10 @@ func (j *JobManager) assignWork() {
 	j.wg.Add(1)
 
 	go func() {
+		// ticker is used to prevent memory leak of using time.After in
+		// for - select pattern.
+		ticker := time.NewTicker(50 * time.Millisecond)
+		defer ticker.Stop()
 		for {
 			for {
 				// assign work while there are jobs to distribute
@@ -291,13 +295,14 @@ func (j *JobManager) assignWork() {
 				}
 			}
 
+			ticker.Reset(50 * time.Millisecond)
 			select {
 			case <-j.quit:
 				j.wg.Done()
 				return
 			case <-j.newWork:
 				// listen for wake-up when an empty job manager has been given work
-			case <-time.After(50 * time.Millisecond):
+			case <-ticker.C:
 				// periodically check if new workers can be assigned. with the
 				// fairsharing worker distribution it can be the case that there
 				// is work waiting, but no queues are eligible for another worker
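A hypothetical micro-benchmark (not part of this change) that makes the difference visible with `go test -bench . -benchmem`: the `time.After` variant leaves one pending timer behind per iteration, while the ticker variant reuses a single timer across the whole loop.

```go
package leak_test

import (
	"testing"
	"time"
)

// BenchmarkTimeAfter leaves a pending one-hour timer behind on every
// iteration, because the select always returns through the closed channel.
func BenchmarkTimeAfter(b *testing.B) {
	done := make(chan struct{})
	close(done)
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		select {
		case <-done:
		case <-time.After(time.Hour):
		}
	}
}

// BenchmarkTicker reuses one ticker across all iterations, so the loop body
// does not allocate a new timer each time around.
func BenchmarkTicker(b *testing.B) {
	done := make(chan struct{})
	close(done)
	ticker := time.NewTicker(time.Hour)
	defer ticker.Stop()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		ticker.Reset(time.Hour)
		select {
		case <-done:
		case <-ticker.C:
		}
	}
}
```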
@@ -860,11 +860,16 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error {
 	// StartAsLeader is only set during init, recovery mode, storage migration,
 	// and tests.
 	if opts.StartAsLeader {
+		// ticker is used to prevent memory leak of using time.After in
+		// for - select pattern.
+		ticker := time.NewTicker(10 * time.Millisecond)
+		defer ticker.Stop()
 		for {
 			if raftObj.State() == raft.Leader {
 				break
 			}
 
+			ticker.Reset(10 * time.Millisecond)
 			select {
 			case <-ctx.Done():
 				future := raftObj.Shutdown()
@@ -873,7 +878,7 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error {
 				}
 
 				return errors.New("shutdown while waiting for leadership")
-			case <-time.After(10 * time.Millisecond):
+			case <-ticker.C:
 			}
 		}
 	}
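An alternative worth noting for a one-shot wait like this leadership poll: a reusable `time.Timer` works as well as a ticker. Below is an illustrative sketch under assumed names (`waitForLeadership` and the `isLeader` callback are hypothetical, standing in for `raftObj.State() == raft.Leader`); the extra Stop-and-drain step before `Reset` is what the ticker-based version in the commit avoids having to write.

```go
package main

import (
	"context"
	"errors"
	"time"
)

// waitForLeadership is an illustrative sketch of the same polling loop built
// on a reusable time.Timer rather than the time.Ticker the commit uses; the
// isLeader callback stands in for raftObj.State() == raft.Leader.
func waitForLeadership(ctx context.Context, isLeader func() bool) error {
	timer := time.NewTimer(10 * time.Millisecond)
	defer timer.Stop()
	for {
		if isLeader() {
			return nil
		}
		// Stop and drain before Reset so a stale tick from an earlier
		// iteration cannot satisfy the next select immediately.
		if !timer.Stop() {
			select {
			case <-timer.C:
			default:
			}
		}
		timer.Reset(10 * time.Millisecond)
		select {
		case <-ctx.Done():
			return errors.New("shutdown while waiting for leadership")
		case <-timer.C:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	_ = waitForLeadership(ctx, func() bool { return false })
}
```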
@@ -481,6 +481,10 @@ func (c *Core) raftTLSRotatePhased(ctx context.Context, logger hclog.Logger, raf
 	defer keyCheckInterval.Stop()
 
 	var backoff bool
+	// ticker is used to prevent memory leak of using time.After in
+	// for - select pattern.
+	ticker := time.NewTicker(time.Until(nextRotationTime))
+	defer ticker.Stop()
 	for {
 		// If we encountered and error we should try to create the key
 		// again.
@@ -489,13 +493,14 @@ func (c *Core) raftTLSRotatePhased(ctx context.Context, logger hclog.Logger, raf
 			backoff = false
 		}
 
+		ticker.Reset(time.Until(nextRotationTime))
 		select {
 		case <-keyCheckInterval.C:
			err := checkCommitted()
			if err != nil {
				logger.Error("failed to activate TLS key", "error", err)
			}
-		case <-time.After(time.Until(nextRotationTime)):
+		case <-ticker.C:
			// It's time to rotate the keys
			next, err := rotateKeyring()
			if err != nil {
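The rotation loop differs from the other two call sites in that the wait tracks an absolute deadline rather than a fixed period: resetting the ticker with `time.Until(nextRotationTime)` at the top of each iteration recomputes the remaining time, and a successful rotation pushes the deadline forward. A minimal sketch with hypothetical names (`rotate` stands in for `rotateKeyring`, `quit` for the shutdown signal):

```go
package main

import (
	"fmt"
	"time"
)

// rotateLoop resets the ticker with time.Until(next) every iteration, so the
// single reused timer always fires close to the current rotation deadline.
// rotate performs the rotation and returns the next deadline.
func rotateLoop(quit <-chan struct{}, next time.Time, rotate func() time.Time) {
	ticker := time.NewTicker(time.Until(next))
	defer ticker.Stop()
	for {
		ticker.Reset(time.Until(next))
		select {
		case <-quit:
			return
		case <-ticker.C:
			next = rotate()
		}
	}
}

func main() {
	quit := make(chan struct{})
	go func() {
		time.Sleep(180 * time.Millisecond)
		close(quit)
	}()
	rotateLoop(quit, time.Now().Add(50*time.Millisecond), func() time.Time {
		fmt.Println("rotated")
		return time.Now().Add(50 * time.Millisecond)
	})
}
```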