open-vault/sdk/logical/storage.go

167 lines
4.3 KiB
Go
Raw Normal View History

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package logical
2015-03-20 16:59:48 +00:00
import (
"context"
"errors"
"fmt"
2017-01-06 20:42:18 +00:00
"strings"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
2015-03-20 16:59:48 +00:00
)
// ErrReadOnly is returned when a backend does not support
// writing. This can be caused by a read-only replica or secondary
// cluster operation.
var ErrReadOnly = errors.New("cannot write to readonly storage")

// ErrSetupReadOnly is returned when a write operation is attempted on a
// storage while the backend is still being setup.
var ErrSetupReadOnly = errors.New("cannot write to storage during setup")
Add path based primary write forwarding (PBPWF) - OSS (#18735) * Add WriteForwardedStorage to sdk's plugin, logical in OSS This should allow backends to specify paths to forward write (storage.Put(...) and storage.Delete(...)) operations for. Notably, these semantics are subject to change and shouldn't yet be relied on. Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com> * Collect paths for write forwarding in OSS This adds a path manager to Core, allowing tracking across all Vault versions of paths which could use write forwarding if available. In particular, even on OSS offerings, we'll need to template {{clusterId}} into the paths, in the event of later upgrading to Enterprise. If we didn't, we'd end up writing paths which will no longer be accessible post-migration, due to write forwarding now replacing the sentinel with the actual cluster identifier. Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com> * Add forwarded writer implementation to OSS Here, for paths given to us, we determine if we need to do cluster translation and perform local writing. This is the OSS variant. Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com> * Wire up mount-specific request forwarding in OSS Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com> * Clarify that state lock needs to be held to call HAState in OSS Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com> * Move cluster sentinel constant to sdk/logical Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com> * Expose ClusterID to Plugins via SystemView This will let plugins learn what the Cluster's ID is, without having to resort to hacks like writing a random string to its cluster-prefixed namespace and then reading it once it has replicated. Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com> * Add GRPC ClusterID implementation For any external plugins which wish to use it. 
Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com> Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>
2023-01-20 21:36:18 +00:00
// PBPWFClusterSentinel is the placeholder that plugins using
// Paths.WriteForwardedStorage embed in their storage paths to write
// cross-cluster; see the description of that parameter for more
// information.
const PBPWFClusterSentinel = "{{clusterId}}"
// Storage is the way that logical backends are able read/write data.
type Storage interface {
	// List returns the keys found directly under the given prefix.
	List(context.Context, string) ([]string, error)
	// Get retrieves the entry stored at the given key.
	Get(context.Context, string) (*StorageEntry, error)
	// Put writes the given entry to storage.
	Put(context.Context, *StorageEntry) error
	// Delete removes the entry at the given key.
	Delete(context.Context, string) error
}
// StorageEntry is the entry for an item in a Storage implementation.
type StorageEntry struct {
	// Key is the full storage path identifying this entry.
	Key string
	// Value is the raw stored payload.
	Value []byte
	// SealWrap requests additional seal protection for this entry;
	// NOTE(review): the exact wrapping semantics are implemented by the
	// storage backend, not visible in this file.
	SealWrap bool
}
2015-03-20 16:59:48 +00:00
// DecodeJSON decodes the 'Value' present in StorageEntry.
// out must be a pointer suitable for JSON unmarshaling.
func (e *StorageEntry) DecodeJSON(out interface{}) error {
	return jsonutil.DecodeJSON(e.Value, out)
}
// StorageEntryJSON creates a StorageEntry with a JSON-encoded value.
func StorageEntryJSON(k string, v interface{}) (*StorageEntry, error) {
encodedBytes, err := jsonutil.EncodeJSON(v)
if err != nil {
return nil, errwrap.Wrapf("failed to encode storage entry: {{err}}", err)
2015-03-20 16:59:48 +00:00
}
return &StorageEntry{
Key: k,
Value: encodedBytes,
2015-03-20 16:59:48 +00:00
}, nil
}
2017-01-06 20:42:18 +00:00
// ClearableView is the subset of storage operations needed to
// enumerate and delete keys under a view.
type ClearableView interface {
	// List returns the keys found directly under the given prefix.
	List(context.Context, string) ([]string, error)
	// Delete removes the entry at the given key.
	Delete(context.Context, string) error
}
// ScanView is used to scan all the keys in a view iteratively
func ScanView(ctx context.Context, view ClearableView, cb func(path string)) error {
2017-01-06 20:42:18 +00:00
frontier := []string{""}
for len(frontier) > 0 {
n := len(frontier)
current := frontier[n-1]
frontier = frontier[:n-1]
// List the contents
contents, err := view.List(ctx, current)
2017-01-06 20:42:18 +00:00
if err != nil {
return errwrap.Wrapf(fmt.Sprintf("list failed at path %q: {{err}}", current), err)
2017-01-06 20:42:18 +00:00
}
// Handle the contents in the directory
for _, c := range contents {
// Exit if the context has been canceled
if ctx.Err() != nil {
return ctx.Err()
}
2017-01-06 20:42:18 +00:00
fullPath := current + c
if strings.HasSuffix(c, "/") {
frontier = append(frontier, fullPath)
} else {
cb(fullPath)
}
}
}
return nil
}
// CollectKeys is used to collect all the keys in a view.
func CollectKeys(ctx context.Context, view ClearableView) ([]string, error) {
	// An empty prefix matches every key.
	return CollectKeysWithPrefix(ctx, view, "")
}
2019-05-02 01:48:12 +00:00
// CollectKeysWithPrefix is used to collect all the keys in a view with a given prefix string
func CollectKeysWithPrefix(ctx context.Context, view ClearableView, prefix string) ([]string, error) {
var keys []string
2017-01-06 20:42:18 +00:00
cb := func(path string) {
if strings.HasPrefix(path, prefix) {
keys = append(keys, path)
}
2017-01-06 20:42:18 +00:00
}
// Scan for all the keys
if err := ScanView(ctx, view, cb); err != nil {
2017-01-06 20:42:18 +00:00
return nil, err
}
return keys, nil
2017-01-06 20:42:18 +00:00
}
// ClearView is used to delete all the keys in a view.
// Progress is not logged; ClearViewWithLogging substitutes a null
// logger when given nil.
func ClearView(ctx context.Context, view ClearableView) error {
	return ClearViewWithLogging(ctx, view, nil)
}
func ClearViewWithLogging(ctx context.Context, view ClearableView, logger hclog.Logger) error {
2017-10-23 20:42:56 +00:00
if view == nil {
return nil
}
if logger == nil {
logger = hclog.NewNullLogger()
}
2017-01-06 20:42:18 +00:00
// Collect all the keys
keys, err := CollectKeys(ctx, view)
2017-01-06 20:42:18 +00:00
if err != nil {
return err
}
logger.Debug("clearing view", "total_keys", len(keys))
2017-01-06 20:42:18 +00:00
// Delete all the keys
var pctDone int
for idx, key := range keys {
// Rather than keep trying to do stuff with a canceled context, bail;
// storage will fail anyways
if ctx.Err() != nil {
return ctx.Err()
}
if err := view.Delete(ctx, key); err != nil {
2017-01-06 20:42:18 +00:00
return err
}
newPctDone := idx * 100.0 / len(keys)
if int(newPctDone) > pctDone {
pctDone = int(newPctDone)
logger.Trace("view deletion progress", "percent", pctDone, "keys_deleted", idx)
}
2017-01-06 20:42:18 +00:00
}
logger.Debug("view cleared")
2017-01-06 20:42:18 +00:00
return nil
}