From aafb5d6427953e469b194e5fe8e9d704b7d997da Mon Sep 17 00:00:00 2001
From: hghaf099 <83242695+hghaf099@users.noreply.github.com>
Date: Fri, 1 Apr 2022 10:17:11 -0400
Subject: [PATCH] VAULT-4240 time.After() in a select statement can lead to memory leak (#14814)

* VAULT-4240 time.After() in a select statement can lead to memory leak

* CL
---
 changelog/14814.txt            | 3 +++
 helper/fairshare/jobmanager.go | 7 ++++++-
 physical/raft/raft.go          | 7 ++++++-
 vault/raft.go                  | 7 ++++++-
 4 files changed, 21 insertions(+), 3 deletions(-)
 create mode 100644 changelog/14814.txt

diff --git a/changelog/14814.txt b/changelog/14814.txt
new file mode 100644
index 000000000..0583fb7c3
--- /dev/null
+++ b/changelog/14814.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+core: time.After() used in a select statement can lead to memory leak
+```

diff --git a/helper/fairshare/jobmanager.go b/helper/fairshare/jobmanager.go
index a878d29fc..75c7662fc 100644
--- a/helper/fairshare/jobmanager.go
+++ b/helper/fairshare/jobmanager.go
@@ -265,6 +265,10 @@ func (j *JobManager) assignWork() {
 	j.wg.Add(1)

 	go func() {
+		// ticker is used to prevent memory leak of using time.After in
+		// for - select pattern.
+		ticker := time.NewTicker(50 * time.Millisecond)
+		defer ticker.Stop()
 		for {
 			for {
 				// assign work while there are jobs to distribute
@@ -291,13 +295,14 @@ func (j *JobManager) assignWork() {
 				}
 			}

+			ticker.Reset(50 * time.Millisecond)
 			select {
 			case <-j.quit:
 				j.wg.Done()
 				return
 			case <-j.newWork:
 				// listen for wake-up when an empty job manager has been given work
-			case <-time.After(50 * time.Millisecond):
+			case <-ticker.C:
 				// periodically check if new workers can be assigned. with the
 				// fairsharing worker distribution it can be the case that there
 				// is work waiting, but no queues are eligible for another worker

diff --git a/physical/raft/raft.go b/physical/raft/raft.go
index 41138dd80..b356998d1 100644
--- a/physical/raft/raft.go
+++ b/physical/raft/raft.go
@@ -860,11 +860,16 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error {
 	// StartAsLeader is only set during init, recovery mode, storage migration,
 	// and tests.
 	if opts.StartAsLeader {
+		// ticker is used to prevent memory leak of using time.After in
+		// for - select pattern.
+		ticker := time.NewTicker(10 * time.Millisecond)
+		defer ticker.Stop()
 		for {
 			if raftObj.State() == raft.Leader {
 				break
 			}

+			ticker.Reset(10 * time.Millisecond)
 			select {
 			case <-ctx.Done():
 				future := raftObj.Shutdown()
@@ -873,7 +878,7 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error {
 				}

 				return errors.New("shutdown while waiting for leadership")
-			case <-time.After(10 * time.Millisecond):
+			case <-ticker.C:
 			}
 		}
 	}

diff --git a/vault/raft.go b/vault/raft.go
index 52b850a67..c3de8f5b2 100644
--- a/vault/raft.go
+++ b/vault/raft.go
@@ -481,6 +481,10 @@ func (c *Core) raftTLSRotatePhased(ctx context.Context, logger hclog.Logger, raf
 	defer keyCheckInterval.Stop()

 	var backoff bool
+	// ticker is used to prevent memory leak of using time.After in
+	// for - select pattern.
+	ticker := time.NewTicker(time.Until(nextRotationTime))
+	defer ticker.Stop()
 	for {
 		// If we encountered and error we should try to create the key
 		// again.
@@ -489,13 +493,14 @@ func (c *Core) raftTLSRotatePhased(ctx context.Context, logger hclog.Logger, raf
 			backoff = false
 		}

+		ticker.Reset(time.Until(nextRotationTime))
 		select {
 		case <-keyCheckInterval.C:
 			err := checkCommitted()
 			if err != nil {
 				logger.Error("failed to activate TLS key", "error", err)
 			}
-		case <-time.After(time.Until(nextRotationTime)):
+		case <-ticker.C:
 			// It's time to rotate the keys
 			next, err := rotateKeyring()
 			if err != nil {
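---

Background for reviewers, not part of the patch: `time.After` allocates a new timer on every loop iteration, and the Go documentation notes that the underlying `Timer` is not garbage collected until it fires, so long durations inside a busy `for`/`select` loop can keep many timers alive at once. The sketch below is a minimal, hypothetical illustration of the pattern this change applies; the function and channel names are invented for the example. It creates one `time.Ticker` for the lifetime of the loop, stops it with `defer`, and calls `Reset` before each `select` instead of calling `time.After` each pass.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitForSignal illustrates the ticker-reuse pattern: a single timer is
// allocated once and reset each iteration, rather than allocating a new
// timer per iteration with time.After.
func waitForSignal(ctx context.Context, work <-chan struct{}) {
	// One timer for the whole loop; Stop releases it when the loop exits.
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()

	for {
		// Reset the shared timer instead of creating a new one each pass.
		ticker.Reset(50 * time.Millisecond)
		select {
		case <-ctx.Done():
			return
		case <-work:
			fmt.Println("got work")
		case <-ticker.C:
			fmt.Println("periodic check")
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	waitForSignal(ctx, make(chan struct{}))
}
```

Resetting at the top of the loop also mirrors the `vault/raft.go` hunk, where the wait duration (`time.Until(nextRotationTime)`) changes between iterations and therefore cannot be fixed once at ticker creation time.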