// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package vault

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/vault/helper/namespace"
	"github.com/hashicorp/vault/sdk/helper/logging"
	"github.com/hashicorp/vault/sdk/logical"
	"github.com/stretchr/testify/require"
)

// mockRollback returns a mock rollback manager
func mockRollback(t *testing.T) (*RollbackManager, *NoopBackend) {
	backend := new(NoopBackend)
	mounts := new(MountTable)
	router := NewRouter()
	core, _, _ := TestCoreUnsealed(t)

	_, barrier, _ := mockBarrier(t)
	view := NewBarrierView(barrier, "logical/")

	mounts.Entries = []*MountEntry{
		{
			Path:        "foo",
			NamespaceID: namespace.RootNamespaceID,
			namespace:   namespace.RootNamespace,
		},
	}
	meUUID, err := uuid.GenerateUUID()
	if err != nil {
		t.Fatal(err)
	}

	if err := router.Mount(backend, "foo", &MountEntry{UUID: meUUID, Accessor: "noopaccessor", NamespaceID: namespace.RootNamespaceID, namespace: namespace.RootNamespace}, view); err != nil {
		t.Fatalf("err: %s", err)
	}

	mountsFunc := func() []*MountEntry {
		return mounts.Entries
	}

	logger := logging.NewVaultLogger(log.Trace)

	rb := NewRollbackManager(context.Background(), logger, mountsFunc, router, core)
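	// Shorten the rollback period so the periodic rollback loop fires many
	// times within the short sleeps used by the tests below.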
	rb.period = 10 * time.Millisecond
	return rb, backend
}
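
// TestRollbackManager starts the rollback manager against the single "foo"
// mount and checks that periodic rollbacks are issued while it is running
// (the NoopBackend records each request path, which is empty for a rollback
// of the mount root) and that no further rollbacks arrive after Stop.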
func TestRollbackManager(t *testing.T) {
	m, backend := mockRollback(t)
	if len(backend.Paths) > 0 {
		t.Fatalf("bad: %#v", backend)
	}

	m.Start()
	time.Sleep(50 * time.Millisecond)
	m.Stop()

	count := len(backend.Paths)
	if count == 0 {
		t.Fatalf("bad: %#v", backend)
	}
	if backend.Paths[0] != "" {
		t.Fatalf("bad: %#v", backend)
	}

	time.Sleep(50 * time.Millisecond)

	if count != len(backend.Paths) {
		t.Fatalf("should stop requests: %#v", backend)
	}
}

// TestRollbackManager_ManyWorkers adds 10 backends that require a rollback
// operation, with 20 workers. The test verifies that all 10 work items run
// in parallel.
func TestRollbackManager_ManyWorkers(t *testing.T) {
	core := TestCoreWithConfig(t, &CoreConfig{NumRollbackWorkers: 20, RollbackPeriod: time.Millisecond * 10})
	view := NewBarrierView(core.barrier, "logical/")

	ran := make(chan string)
	release := make(chan struct{})
	core, _, _ = testCoreUnsealed(t, core)

	// create 10 backends. When a rollback happens, each backend writes to an
	// unbuffered channel, then waits to be released
	for i := 0; i < 10; i++ {
		b := &NoopBackend{}
		b.RequestHandler = func(ctx context.Context, request *logical.Request) (*logical.Response, error) {
			if request.Operation == logical.RollbackOperation {
				ran <- request.Path
				<-release
			}
			return nil, nil
		}
		b.Root = []string{fmt.Sprintf("foo/%d", i)}
		meUUID, err := uuid.GenerateUUID()
		require.NoError(t, err)
		mountEntry := &MountEntry{
			Table:       mountTableType,
			UUID:        meUUID,
			Accessor:    fmt.Sprintf("accessor-%d", i),
			NamespaceID: namespace.RootNamespaceID,
			namespace:   namespace.RootNamespace,
			Path:        fmt.Sprintf("logical/foo/%d", i),
		}
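		// Mount under the mounts lock; the closure keeps the deferred unlock
		// scoped to this loop iteration.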
		func() {
			core.mountsLock.Lock()
			defer core.mountsLock.Unlock()
			newTable := core.mounts.shallowClone()
			newTable.Entries = append(newTable.Entries, mountEntry)
			core.mounts = newTable
			err = core.router.Mount(b, "logical", mountEntry, view)
			require.NoError(t, core.persistMounts(context.Background(), newTable, &mountEntry.Local))
		}()
	}

	timeout, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	got := make(map[string]bool)
	hasMore := true
	for hasMore {
		// there are more workers (20) than backends (10), so the pool does
		// not limit concurrency here; we expect one write to the channel from
		// each of the 10 backends. Once that happens, close the release
		// channel so that the handlers can exit
		select {
		case <-timeout.Done():
			require.Fail(t, "test timed out")
		case i := <-ran:
			got[i] = true
			if len(got) == 10 {
				close(release)
				hasMore = false
			}
		}
	}
	done := make(chan struct{})

	// start a goroutine to consume the remaining items from the queued work
	go func() {
		for {
			select {
			case <-ran:
			case <-done:
				return
			}
		}
	}()
	// stop the rollback worker, which will wait for all inflight rollbacks to
	// complete
	core.rollback.Stop()
	close(done)
}

// TestRollbackManager_WorkerPool adds 10 backends that require a rollback
// operation, with 5 workers. The test verifies that 5 work items can run
// concurrently, and that the remaining work is queued and run as workers
// become available.
func TestRollbackManager_WorkerPool(t *testing.T) {
	core := TestCoreWithConfig(t, &CoreConfig{NumRollbackWorkers: 5, RollbackPeriod: time.Millisecond * 10})
	view := NewBarrierView(core.barrier, "logical/")

	ran := make(chan string)
	release := make(chan struct{})
	core, _, _ = testCoreUnsealed(t, core)

	// create 10 backends. When a rollback happens, each backend writes to an
	// unbuffered channel, then waits to be released
	for i := 0; i < 10; i++ {
		b := &NoopBackend{}
		b.RequestHandler = func(ctx context.Context, request *logical.Request) (*logical.Response, error) {
			if request.Operation == logical.RollbackOperation {
				ran <- request.Path
				<-release
			}
			return nil, nil
		}
		b.Root = []string{fmt.Sprintf("foo/%d", i)}
		meUUID, err := uuid.GenerateUUID()
		require.NoError(t, err)
		mountEntry := &MountEntry{
			Table:       mountTableType,
			UUID:        meUUID,
			Accessor:    fmt.Sprintf("accessor-%d", i),
			NamespaceID: namespace.RootNamespaceID,
			namespace:   namespace.RootNamespace,
			Path:        fmt.Sprintf("logical/foo/%d", i),
		}
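		// Mount under the mounts lock; the closure keeps the deferred unlock
		// scoped to this loop iteration.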
		func() {
			core.mountsLock.Lock()
			defer core.mountsLock.Unlock()
			newTable := core.mounts.shallowClone()
			newTable.Entries = append(newTable.Entries, mountEntry)
			core.mounts = newTable
			err = core.router.Mount(b, "logical", mountEntry, view)
			require.NoError(t, core.persistMounts(context.Background(), newTable, &mountEntry.Local))
		}()
	}

	timeout, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	got := make(map[string]bool)
	hasMore := true
	for hasMore {
		// we're using 5 workers, so we would expect to see 5 writes to the
		// channel. Once that happens, close the release channel so that the
		// handlers can exit and new rollback operations can run
		select {
		case <-timeout.Done():
			require.Fail(t, "test timed out")
		case i := <-ran:
			got[i] = true
			numGot := len(got)
			if numGot == 5 {
				close(release)
				hasMore = false
			}
		}
	}
	done := make(chan struct{})
	defer close(done)

	// start a goroutine to consume the remaining items from the queued work
	gotAllPaths := make(chan struct{})
	go func() {
		channelClosed := false
		for {
			select {
			case i := <-ran:
				got[i] = true

				// keep this goroutine running even after there are 10 paths.
				// More rollback operations might get queued before Stop() is
				// called, and we don't want them to block on writing to the
				// ran channel
				if len(got) == 10 && !channelClosed {
					close(gotAllPaths)
					channelClosed = true
				}
			case <-timeout.Done():
				require.Fail(t, "test timed out")
			case <-done:
				return
			}
		}
	}()

	// wait until each of the 10 backends has run at least once
	<-gotAllPaths
	// stop the rollback worker, which will wait for any inflight rollbacks to
	// complete
	core.rollback.Stop()
}

// TestRollbackManager_numRollbackWorkers verifies that the number of rollback
// workers is parsed from the configuration, but can be overridden by an
// environment variable. This test cannot be run in parallel because of the
// environment variable.
func TestRollbackManager_numRollbackWorkers(t *testing.T) {
	testCases := []struct {
		name          string
		configWorkers int
		setEnvVar     bool
		envVar        string
		wantWorkers   int
	}{
		{
			name:          "default in config",
			configWorkers: RollbackDefaultNumWorkers,
			wantWorkers:   RollbackDefaultNumWorkers,
		},
		{
			name:          "invalid envvar",
			configWorkers: RollbackDefaultNumWorkers,
			wantWorkers:   RollbackDefaultNumWorkers,
			setEnvVar:     true,
			envVar:        "invalid",
		},
		{
			name:          "envvar overrides config",
			configWorkers: RollbackDefaultNumWorkers,
			wantWorkers:   20,
			setEnvVar:     true,
			envVar:        "20",
		},
		{
			name:          "envvar negative",
			configWorkers: RollbackDefaultNumWorkers,
			wantWorkers:   RollbackDefaultNumWorkers,
			setEnvVar:     true,
			envVar:        "-1",
		},
		{
			name:          "envvar zero",
			configWorkers: RollbackDefaultNumWorkers,
			wantWorkers:   RollbackDefaultNumWorkers,
			setEnvVar:     true,
			envVar:        "0",
		},
	}
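	// Each case runs in its own subtest; t.Setenv scopes the environment
	// variable to that subtest and restores it afterwards.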
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			if tc.setEnvVar {
				t.Setenv(RollbackWorkersEnvVar, tc.envVar)
			}
			core := &Core{numRollbackWorkers: tc.configWorkers}
			r := &RollbackManager{logger: logging.NewVaultLogger(log.Trace).Named("test"), core: core}
			require.Equal(t, tc.wantWorkers, r.numRollbackWorkers())
		})
	}
}
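
// TestRollbackManager_Join exercises concurrent Rollback calls against the
// same "foo" mount. Callers that arrive while a rollback is already in
// flight are expected to join that operation (hence the name), and all three
// calls should return without error.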
func TestRollbackManager_Join(t *testing.T) {
	m, backend := mockRollback(t)
	if len(backend.Paths) > 0 {
		t.Fatalf("bad: %#v", backend)
	}

	m.Start()
	defer m.Stop()

	wg := &sync.WaitGroup{}
	wg.Add(3)

	errCh := make(chan error, 3)
	go func() {
		defer wg.Done()
		err := m.Rollback(namespace.RootContext(nil), "foo")
		if err != nil {
			errCh <- err
		}
	}()

	go func() {
		defer wg.Done()
		err := m.Rollback(namespace.RootContext(nil), "foo")
		if err != nil {
			errCh <- err
		}
	}()

	go func() {
		defer wg.Done()
		err := m.Rollback(namespace.RootContext(nil), "foo")
		if err != nil {
			errCh <- err
		}
	}()
	wg.Wait()
	close(errCh)
	err := <-errCh
	if err != nil {
		t.Fatalf("Error on rollback: %v", err)
	}
}