2023-03-15 16:00:52 +00:00
|
|
|
// Copyright (c) HashiCorp, Inc.
|
|
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
|
2015-03-15 20:52:43 +00:00
|
|
|
package logical
|
|
|
|
|
2015-03-20 16:59:48 +00:00
|
|
|
import (
|
2018-01-19 06:44:44 +00:00
|
|
|
"context"
|
2017-01-07 23:18:22 +00:00
|
|
|
"errors"
|
2016-07-06 16:25:40 +00:00
|
|
|
"fmt"
|
2017-01-06 20:42:18 +00:00
|
|
|
"strings"
|
2016-07-06 16:25:40 +00:00
|
|
|
|
2018-04-05 15:49:21 +00:00
|
|
|
"github.com/hashicorp/errwrap"
|
2019-06-20 20:02:11 +00:00
|
|
|
"github.com/hashicorp/go-hclog"
|
2019-04-12 21:54:35 +00:00
|
|
|
"github.com/hashicorp/vault/sdk/helper/jsonutil"
|
2015-03-20 16:59:48 +00:00
|
|
|
)
|
|
|
|
|
2017-01-07 23:18:22 +00:00
|
|
|
// ErrReadOnly is returned when a backend does not support
// writing. This can be caused by a read-only replica or secondary
// cluster operation.
var ErrReadOnly = errors.New("cannot write to readonly storage")
|
2017-01-07 23:18:22 +00:00
|
|
|
|
2018-02-09 19:04:25 +00:00
|
|
|
// ErrSetupReadOnly is returned when a write operation is attempted on a
// storage while the backend is still being setup.
var ErrSetupReadOnly = errors.New("cannot write to storage during setup")
|
2018-02-09 19:04:25 +00:00
|
|
|
|
Add path based primary write forwarding (PBPWF) - OSS (#18735)
* Add WriteForwardedStorage to sdk's plugin, logical in OSS
This should allow backends to specify paths to forward write
(storage.Put(...) and storage.Delete(...)) operations for.
Notably, these semantics are subject to change and shouldn't yet be
relied on.
Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>
* Collect paths for write forwarding in OSS
This adds a path manager to Core, allowing tracking across all Vault
versions of paths which could use write forwarding if available. In
particular, even on OSS offerings, we'll need to template {{clusterId}}
into the paths, in the event of later upgrading to Enterprise. If we
didn't, we'd end up writing paths which will no longer be accessible
post-migration, due to write forwarding now replacing the sentinel with
the actual cluster identifier.
Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>
* Add forwarded writer implementation to OSS
Here, for paths given to us, we determine if we need to do cluster
translation and perform local writing. This is the OSS variant.
Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>
* Wire up mount-specific request forwarding in OSS
Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>
* Clarify that state lock needs to be held to call HAState in OSS
Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>
* Move cluster sentinel constant to sdk/logical
Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>
* Expose ClusterID to Plugins via SystemView
This will let plugins learn what the Cluster's ID is, without having to
resort to hacks like writing a random string to its cluster-prefixed
namespace and then reading it once it has replicated.
Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>
* Add GRPC ClusterID implementation
For any external plugins which wish to use it.
Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>
Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>
2023-01-20 21:36:18 +00:00
|
|
|
// PBPWFClusterSentinel is the sentinel value plugins place into storage
// paths to have them templated with the cluster identifier.
//
// Plugins using Paths.WriteForwardedStorage will need to use this sentinel
// in their path to write cross-cluster. See the description of that parameter
// for more information.
const PBPWFClusterSentinel = "{{clusterId}}"
|
|
|
|
|
2015-03-15 20:52:43 +00:00
|
|
|
// Storage is the way that logical backends are able read/write data.
type Storage interface {
	// List returns the set of keys found directly under the given prefix.
	List(context.Context, string) ([]string, error)
	// Get reads the entry stored under the given key.
	Get(context.Context, string) (*StorageEntry, error)
	// Put writes the given entry under its Key.
	Put(context.Context, *StorageEntry) error
	// Delete removes the entry stored under the given key.
	Delete(context.Context, string) error
}
|
|
|
|
|
|
|
|
// StorageEntry is the entry for an item in a Storage implementation.
type StorageEntry struct {
	// Key is the path under which Value is stored.
	Key string

	// Value holds the raw stored bytes.
	Value []byte

	// SealWrap marks the entry for seal wrapping; the exact semantics are
	// defined by the storage implementation, not visible here.
	SealWrap bool
}
|
2015-03-20 16:59:48 +00:00
|
|
|
|
2016-07-06 16:25:40 +00:00
|
|
|
// DecodeJSON decodes the 'Value' present in StorageEntry.
// The decoded result is written into out, which should be a pointer
// suitable for JSON unmarshaling.
func (e *StorageEntry) DecodeJSON(out interface{}) error {
	return jsonutil.DecodeJSON(e.Value, out)
}
|
|
|
|
|
|
|
|
// StorageEntryJSON creates a StorageEntry with a JSON-encoded value.
|
|
|
|
func StorageEntryJSON(k string, v interface{}) (*StorageEntry, error) {
|
2016-07-06 16:25:40 +00:00
|
|
|
encodedBytes, err := jsonutil.EncodeJSON(v)
|
|
|
|
if err != nil {
|
2018-04-05 15:49:21 +00:00
|
|
|
return nil, errwrap.Wrapf("failed to encode storage entry: {{err}}", err)
|
2015-03-20 16:59:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return &StorageEntry{
|
|
|
|
Key: k,
|
2016-07-06 16:25:40 +00:00
|
|
|
Value: encodedBytes,
|
2015-03-20 16:59:48 +00:00
|
|
|
}, nil
|
|
|
|
}
|
2017-01-06 20:42:18 +00:00
|
|
|
|
|
|
|
// ClearableView is the subset of Storage needed to enumerate and delete
// keys; it is what the scan/collect/clear helpers below operate on.
type ClearableView interface {
	// List returns the set of keys found directly under the given prefix.
	List(context.Context, string) ([]string, error)
	// Delete removes the entry stored under the given key.
	Delete(context.Context, string) error
}
|
|
|
|
|
|
|
|
// ScanView is used to scan all the keys in a view iteratively
|
2018-01-19 06:44:44 +00:00
|
|
|
func ScanView(ctx context.Context, view ClearableView, cb func(path string)) error {
|
2017-01-06 20:42:18 +00:00
|
|
|
frontier := []string{""}
|
|
|
|
for len(frontier) > 0 {
|
|
|
|
n := len(frontier)
|
|
|
|
current := frontier[n-1]
|
|
|
|
frontier = frontier[:n-1]
|
|
|
|
|
|
|
|
// List the contents
|
2018-01-19 06:44:44 +00:00
|
|
|
contents, err := view.List(ctx, current)
|
2017-01-06 20:42:18 +00:00
|
|
|
if err != nil {
|
2018-04-05 15:49:21 +00:00
|
|
|
return errwrap.Wrapf(fmt.Sprintf("list failed at path %q: {{err}}", current), err)
|
2017-01-06 20:42:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Handle the contents in the directory
|
|
|
|
for _, c := range contents {
|
2019-09-04 13:18:19 +00:00
|
|
|
// Exit if the context has been canceled
|
|
|
|
if ctx.Err() != nil {
|
|
|
|
return ctx.Err()
|
|
|
|
}
|
2017-01-06 20:42:18 +00:00
|
|
|
fullPath := current + c
|
|
|
|
if strings.HasSuffix(c, "/") {
|
|
|
|
frontier = append(frontier, fullPath)
|
|
|
|
} else {
|
|
|
|
cb(fullPath)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// CollectKeys is used to collect all the keys in a view
|
2018-01-19 06:44:44 +00:00
|
|
|
func CollectKeys(ctx context.Context, view ClearableView) ([]string, error) {
|
2019-05-01 21:56:18 +00:00
|
|
|
return CollectKeysWithPrefix(ctx, view, "")
|
|
|
|
}
|
|
|
|
|
2019-05-02 01:48:12 +00:00
|
|
|
// CollectKeysWithPrefix is used to collect all the keys in a view with a given prefix string
|
2019-05-01 21:56:18 +00:00
|
|
|
func CollectKeysWithPrefix(ctx context.Context, view ClearableView, prefix string) ([]string, error) {
|
|
|
|
var keys []string
|
|
|
|
|
2017-01-06 20:42:18 +00:00
|
|
|
cb := func(path string) {
|
2019-05-01 21:56:18 +00:00
|
|
|
if strings.HasPrefix(path, prefix) {
|
|
|
|
keys = append(keys, path)
|
|
|
|
}
|
2017-01-06 20:42:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Scan for all the keys
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := ScanView(ctx, view, cb); err != nil {
|
2017-01-06 20:42:18 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
2019-05-01 21:56:18 +00:00
|
|
|
return keys, nil
|
2017-01-06 20:42:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// ClearView is used to delete all the keys in a view. It delegates to
// ClearViewWithLogging with a nil logger, which substitutes a null logger.
func ClearView(ctx context.Context, view ClearableView) error {
	return ClearViewWithLogging(ctx, view, nil)
}
|
|
|
|
|
|
|
|
func ClearViewWithLogging(ctx context.Context, view ClearableView, logger hclog.Logger) error {
|
2017-10-23 20:42:56 +00:00
|
|
|
if view == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-06-20 20:02:11 +00:00
|
|
|
if logger == nil {
|
|
|
|
logger = hclog.NewNullLogger()
|
|
|
|
}
|
|
|
|
|
2017-01-06 20:42:18 +00:00
|
|
|
// Collect all the keys
|
2018-01-19 06:44:44 +00:00
|
|
|
keys, err := CollectKeys(ctx, view)
|
2017-01-06 20:42:18 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-06-20 20:02:11 +00:00
|
|
|
logger.Debug("clearing view", "total_keys", len(keys))
|
|
|
|
|
2017-01-06 20:42:18 +00:00
|
|
|
// Delete all the keys
|
2019-06-20 20:02:11 +00:00
|
|
|
var pctDone int
|
|
|
|
for idx, key := range keys {
|
|
|
|
// Rather than keep trying to do stuff with a canceled context, bail;
|
|
|
|
// storage will fail anyways
|
|
|
|
if ctx.Err() != nil {
|
|
|
|
return ctx.Err()
|
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := view.Delete(ctx, key); err != nil {
|
2017-01-06 20:42:18 +00:00
|
|
|
return err
|
|
|
|
}
|
2019-06-20 20:02:11 +00:00
|
|
|
|
|
|
|
newPctDone := idx * 100.0 / len(keys)
|
|
|
|
if int(newPctDone) > pctDone {
|
|
|
|
pctDone = int(newPctDone)
|
|
|
|
logger.Trace("view deletion progress", "percent", pctDone, "keys_deleted", idx)
|
|
|
|
}
|
2017-01-06 20:42:18 +00:00
|
|
|
}
|
2019-06-20 20:02:11 +00:00
|
|
|
|
|
|
|
logger.Debug("view cleared")
|
|
|
|
|
2017-01-06 20:42:18 +00:00
|
|
|
return nil
|
|
|
|
}
|