From 25bff579ea1566243a810c76f7bea63ad03615bf Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Mon, 12 Dec 2022 17:07:53 -0600 Subject: [PATCH] Use a small pool of workers to run postUnsealFuncs in parallel (#18244) * Initial worker pool * Run postUnsealFuncs in parallel * Use the old logic for P=1 * changelog * Use a CPU count relative worker pool * Update vault/core.go Co-authored-by: Nick Cabatoff * Done must be called once per postUnsealFunc * Defer is overkill Co-authored-by: Nick Cabatoff --- changelog/18244.txt | 3 +++ vault/core.go | 39 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 40 insertions(+), 2 deletions(-) create mode 100644 changelog/18244.txt diff --git a/changelog/18244.txt b/changelog/18244.txt new file mode 100644 index 000000000..b81de038d --- /dev/null +++ b/changelog/18244.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: parallelize backend initialization to improve startup time for large numbers of mounts. +``` \ No newline at end of file diff --git a/vault/core.go b/vault/core.go index 0d24ef84e..35aedddbc 100644 --- a/vault/core.go +++ b/vault/core.go @@ -16,6 +16,7 @@ import ( "net/url" "os" "path/filepath" + "runtime" "strconv" "strings" "sync" @@ -2335,8 +2336,42 @@ func (c *Core) postUnseal(ctx context.Context, ctxCancelFunc context.CancelFunc, // This is intentionally the last block in this function. We want to allow // writes just before allowing client requests, to ensure everything has // been set up properly before any writes can have happened. 
- for _, v := range c.postUnsealFuncs { - v() + // + // Use a small temporary worker pool to run postUnsealFuncs in parallel + postUnsealFuncConcurrency := runtime.NumCPU() * 2 + if v := os.Getenv("VAULT_POSTUNSEAL_FUNC_CONCURRENCY"); v != "" { + pv, err := strconv.Atoi(v) + if err != nil || pv < 1 { + c.logger.Warn("invalid value for VAULT_POSTUNSEAL_FUNC_CONCURRENCY, must be a positive integer", "error", err, "value", pv) + } else { + postUnsealFuncConcurrency = pv + } + } + if postUnsealFuncConcurrency <= 1 { + // Out of paranoia, keep the old logic for parallelism=1 + for _, v := range c.postUnsealFuncs { + v() + } + } else { + workerChans := make([]chan func(), postUnsealFuncConcurrency) + var wg sync.WaitGroup + for i := 0; i < postUnsealFuncConcurrency; i++ { + workerChans[i] = make(chan func()) + go func(i int) { + for v := range workerChans[i] { + v() + wg.Done() + } + }(i) + } + for i, v := range c.postUnsealFuncs { + wg.Add(1) + workerChans[i%postUnsealFuncConcurrency] <- v + } + for i := 0; i < postUnsealFuncConcurrency; i++ { + close(workerChans[i]) + } + wg.Wait() } if atomic.LoadUint32(c.sealMigrationDone) == 1 {