oss changes for cross ns remount (#14104)
Parent: c98c62be20
Commit: 0ceccaa51d
@ -4,6 +4,7 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
@ -65,7 +66,31 @@ func (c *Sys) Unmount(path string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// Remount kicks off a remount operation and polls the status endpoint using
// the migration ID until either a success or failure state is observed.
|
||||
func (c *Sys) Remount(from, to string) error {
|
||||
remountResp, err := c.StartRemount(from, to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
remountStatusResp, err := c.RemountStatus(remountResp.MigrationID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if remountStatusResp.MigrationInfo.MigrationStatus == "success" {
|
||||
return nil
|
||||
}
|
||||
if remountStatusResp.MigrationInfo.MigrationStatus == "failure" {
|
||||
return fmt.Errorf("Failure! Error encountered moving mount %s to %s, with migration ID %s", from, to, remountResp.MigrationID)
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// StartRemount kicks off a mount migration and returns a response with the migration ID
|
||||
func (c *Sys) StartRemount(from, to string) (*MountMigrationOutput, error) {
|
||||
body := map[string]interface{}{
|
||||
"from": from,
|
||||
"to": to,
|
||||
|
@ -73,16 +98,59 @@ func (c *Sys) Remount(from, to string) error {
|
|||
|
||||
r := c.c.NewRequest("POST", "/v1/sys/remount")
|
||||
if err := r.SetJSONBody(body); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx, cancelFunc := context.WithCancel(context.Background())
|
||||
defer cancelFunc()
|
||||
resp, err := c.c.RawRequestWithContext(ctx, r)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
defer resp.Body.Close()
|
||||
secret, err := ParseSecret(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if secret == nil || secret.Data == nil {
|
||||
return nil, errors.New("data from server response is empty")
|
||||
}
|
||||
|
||||
var result MountMigrationOutput
|
||||
err = mapstructure.Decode(secret.Data, &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &result, err
|
||||
}
|
||||
|
||||
// RemountStatus checks the status of a mount migration operation with the provided ID
|
||||
func (c *Sys) RemountStatus(migrationID string) (*MountMigrationStatusOutput, error) {
|
||||
r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/remount/status/%s", migrationID))
|
||||
|
||||
ctx, cancelFunc := context.WithCancel(context.Background())
|
||||
defer cancelFunc()
|
||||
resp, err := c.c.RawRequestWithContext(ctx, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
secret, err := ParseSecret(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if secret == nil || secret.Data == nil {
|
||||
return nil, errors.New("data from server response is empty")
|
||||
}
|
||||
|
||||
var result MountMigrationStatusOutput
|
||||
err = mapstructure.Decode(secret.Data, &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &result, err
|
||||
}
|
||||
|
||||
func (c *Sys) TuneMount(path string, config MountConfigInput) error {
|
||||
|
@ -187,3 +255,18 @@ type MountConfigOutput struct {
|
|||
// Deprecated: This field will always be blank for newer server responses.
|
||||
PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
|
||||
}
|
||||
|
||||
type MountMigrationOutput struct {
|
||||
MigrationID string `mapstructure:"migration_id"`
|
||||
}
|
||||
|
||||
type MountMigrationStatusOutput struct {
|
||||
MigrationID string `mapstructure:"migration_id"`
|
||||
MigrationInfo *MountMigrationStatusInfo `mapstructure:"migration_info"`
|
||||
}
|
||||
|
||||
type MountMigrationStatusInfo struct {
|
||||
SourceMount string `mapstructure:"source_mount"`
|
||||
TargetMount string `mapstructure:"target_mount"`
|
||||
MigrationStatus string `mapstructure:"status"`
|
||||
}
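For callers of the Go API client, a minimal usage sketch of the new client methods follows. It assumes an already-configured client (VAULT_ADDR and VAULT_TOKEN set) pointing at a server that has the source mount; the paths ns1/secret/ and ns2/generic/ are placeholders. Remount wraps the same StartRemount/RemountStatus polling loop shown above, so only the explicit form is sketched here.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Kick off the migration and get back a migration ID immediately.
	remountResp, err := client.Sys().StartRemount("ns1/secret/", "ns2/generic/")
	if err != nil {
		log.Fatal(err)
	}

	// Poll the status endpoint until the migration settles.
	for {
		status, err := client.Sys().RemountStatus(remountResp.MigrationID)
		if err != nil {
			log.Fatal(err)
		}
		if s := status.MigrationInfo.MigrationStatus; s == "success" || s == "failure" {
			fmt.Printf("migration %s finished with status %s\n", remountResp.MigrationID, s)
			return
		}
		time.Sleep(time.Second)
	}
}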
|
||||
|
|
|
@ -29,8 +29,8 @@ Usage: vault secrets move [options] SOURCE DESTINATION
|
|||
secrets engine are revoked, but all configuration associated with the engine
|
||||
is preserved.
|
||||
|
||||
This command only works within a namespace; it cannot be used to move engines
|
||||
to different namespaces.
|
||||
This command works within or across namespaces; both the source and destination paths
can be prefixed with a namespace hierarchy relative to the current namespace.
|
||||
|
||||
WARNING! Moving an existing secrets engine will revoke any leases from the
|
||||
old engine.
|
||||
|
@ -39,6 +39,11 @@ Usage: vault secrets move [options] SOURCE DESTINATION
|
|||
|
||||
$ vault secrets move secret/ generic/
|
||||
|
||||
Move the existing secrets engine at ns1/secret/ across namespaces to ns2/generic/,
|
||||
where ns1 and ns2 are child namespaces of the current namespace:
|
||||
|
||||
$ vault secrets move ns1/secret/ ns2/generic/
|
||||
|
||||
` + c.Flags().Help()
|
||||
|
||||
return strings.TrimSpace(helpText)
|
||||
|
@ -84,11 +89,12 @@ func (c *SecretsMoveCommand) Run(args []string) int {
|
|||
return 2
|
||||
}
|
||||
|
||||
if err := client.Sys().Remount(source, destination); err != nil {
|
||||
remountResp, err := client.Sys().StartRemount(source, destination)
|
||||
if err != nil {
|
||||
c.UI.Error(fmt.Sprintf("Error moving secrets engine %s to %s: %s", source, destination, err))
|
||||
return 2
|
||||
}
|
||||
|
||||
c.UI.Output(fmt.Sprintf("Success! Moved secrets engine %s to: %s", source, destination))
|
||||
c.UI.Output(fmt.Sprintf("Success! Started moving secrets engine %s to %s, with migration ID %s", source, destination, remountResp.MigrationID))
|
||||
return 0
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package command
|
|||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/cli"
|
||||
)
|
||||
|
@ -91,12 +92,16 @@ func TestSecretsMoveCommand_Run(t *testing.T) {
|
|||
t.Errorf("expected %d to be %d", code, exp)
|
||||
}
|
||||
|
||||
expected := "Success! Moved secrets engine secret/ to: generic/"
|
||||
expected := "Success! Started moving secrets engine secret/ to generic/"
|
||||
combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
|
||||
if !strings.Contains(combined, expected) {
|
||||
t.Errorf("expected %q to contain %q", combined, expected)
|
||||
}
|
||||
|
||||
// Wait for the move command to complete. Ideally we'd check the remount status
// explicitly, but we don't have the migration ID here.
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
mounts, err := client.Sys().ListMounts()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
|
|
@ -125,3 +125,20 @@ func SplitIDFromString(input string) (string, string) {
|
|||
|
||||
return prefix + input[:idx], input[idx+1:]
|
||||
}
|
||||
|
||||
// MountPathDetails contains the details of a mount's location,
|
||||
// consisting of the namespace of the mount and the path of the
|
||||
// mount within the namespace
|
||||
type MountPathDetails struct {
|
||||
Namespace *Namespace
|
||||
MountPath string
|
||||
}
|
||||
|
||||
func (mpd *MountPathDetails) GetRelativePath(currNs *Namespace) string {
|
||||
subNsPath := strings.TrimPrefix(mpd.Namespace.Path, currNs.Path)
|
||||
return subNsPath + mpd.MountPath
|
||||
}
|
||||
|
||||
func (mpd *MountPathDetails) GetFullPath() string {
|
||||
return mpd.Namespace.Path + mpd.MountPath
|
||||
}
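A small, self-contained sketch of what these two helpers compute, using hypothetical namespace and mount values (only the exported Path field of Namespace is assumed here):

package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/namespace"
)

func main() {
	// A mount "generic/" living in the child namespace ns1/ns2/.
	mpd := namespace.MountPathDetails{
		Namespace: &namespace.Namespace{Path: "ns1/ns2/"},
		MountPath: "generic/",
	}

	// Relative to ns1/, the shared namespace prefix is trimmed off.
	current := &namespace.Namespace{Path: "ns1/"}
	fmt.Println(mpd.GetRelativePath(current)) // "ns2/generic/"

	// The full path is simply namespace path + mount path.
	fmt.Println(mpd.GetFullPath()) // "ns1/ns2/generic/"
}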
|
||||
|
|
|
@ -2,8 +2,10 @@ package http
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-test/deep"
|
||||
|
||||
|
@ -374,8 +376,24 @@ func TestSysRemount(t *testing.T) {
|
|||
"from": "foo",
|
||||
"to": "bar",
|
||||
})
|
||||
testResponseStatus(t, resp, 204)
|
||||
testResponseStatus(t, resp, 200)
|
||||
|
||||
// Poll until the remount succeeds
|
||||
var remountResp map[string]interface{}
|
||||
testResponseBody(t, resp, &remountResp)
|
||||
vault.RetryUntil(t, 5*time.Second, func() error {
|
||||
resp = testHttpGet(t, token, addr+"/v1/sys/remount/status/"+remountResp["migration_id"].(string))
|
||||
testResponseStatus(t, resp, 200)
|
||||
|
||||
var remountStatusResp map[string]interface{}
|
||||
testResponseBody(t, resp, &remountStatusResp)
|
||||
|
||||
status := remountStatusResp["data"].(map[string]interface{})["migration_info"].(map[string]interface{})["status"]
|
||||
if status != "success" {
|
||||
return fmt.Errorf("Expected migration status to be successful, got %q", status)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
resp = testHttpGet(t, token, addr+"/v1/sys/mounts")
|
||||
|
||||
var actual map[string]interface{}
|
||||
|
|
|
@ -424,10 +424,15 @@ func (c *Core) taintCredEntry(ctx context.Context, path string, updateStorage bo
|
|||
c.authLock.Lock()
|
||||
defer c.authLock.Unlock()
|
||||
|
||||
ns, err := namespace.FromContext(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Taint the entry from the auth table
|
||||
// We do this on the original since setting the taint operates
|
||||
// on the entries which a shallow clone shares anyways
|
||||
entry, err := c.auth.setTaint(ctx, strings.TrimPrefix(path, credentialRoutePrefix), true, mountStateUnmounting)
|
||||
entry, err := c.auth.setTaint(ns.ID, strings.TrimPrefix(path, credentialRoutePrefix), true, mountStateUnmounting)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -306,6 +306,10 @@ type Core struct {
|
|||
// change underneath a calling function
|
||||
mountsLock sync.RWMutex
|
||||
|
||||
// mountMigrationTracker tracks past and ongoing remount operations
|
||||
// against their migration ids
|
||||
mountMigrationTracker *sync.Map
|
||||
|
||||
// auth is loaded after unseal since it is a protected
|
||||
// configuration
|
||||
auth *MountTable
|
||||
|
@ -844,6 +848,7 @@ func CreateCore(conf *CoreConfig) (*Core, error) {
|
|||
disableAutopilot: conf.DisableAutopilot,
|
||||
enableResponseHeaderHostname: conf.EnableResponseHeaderHostname,
|
||||
enableResponseHeaderRaftNodeID: conf.EnableResponseHeaderRaftNodeID,
|
||||
mountMigrationTracker: &sync.Map{},
|
||||
}
|
||||
c.standbyStopCh.Store(make(chan struct{}))
|
||||
atomic.StoreUint32(c.sealed, 1)
|
||||
|
|
|
@ -178,7 +178,7 @@ func NewSystemBackend(core *Core, logger log.Logger) *SystemBackend {
|
|||
b.Backend.Paths = append(b.Backend.Paths, b.capabilitiesPaths()...)
|
||||
b.Backend.Paths = append(b.Backend.Paths, b.internalPaths()...)
|
||||
b.Backend.Paths = append(b.Backend.Paths, b.pprofPaths()...)
|
||||
b.Backend.Paths = append(b.Backend.Paths, b.remountPath())
|
||||
b.Backend.Paths = append(b.Backend.Paths, b.remountPaths()...)
|
||||
b.Backend.Paths = append(b.Backend.Paths, b.metricsPath())
|
||||
b.Backend.Paths = append(b.Backend.Paths, b.monitorPath())
|
||||
b.Backend.Paths = append(b.Backend.Paths, b.inFlightRequestPath())
|
||||
|
@ -1199,11 +1199,33 @@ func (b *SystemBackend) handleRemount(ctx context.Context, req *logical.Request,
|
|||
logical.ErrInvalidRequest
|
||||
}
|
||||
|
||||
if err = validateMountPath(toPath); err != nil {
|
||||
return handleError(fmt.Errorf("'to' %v", err))
|
||||
fromPathDetails := b.Core.splitNamespaceAndMountFromPath(ns.Path, fromPath)
|
||||
toPathDetails := b.Core.splitNamespaceAndMountFromPath(ns.Path, toPath)
|
||||
|
||||
if err = validateMountPath(toPathDetails.MountPath); err != nil {
|
||||
return handleError(fmt.Errorf("invalid destination mount: %v", err))
|
||||
}
|
||||
|
||||
entry := b.Core.router.MatchingMountEntry(ctx, fromPath)
|
||||
// Prevent target and source mounts from being in a protected path
|
||||
for _, p := range protectedMounts {
|
||||
if strings.HasPrefix(fromPathDetails.MountPath, p) {
|
||||
return handleError(fmt.Errorf("cannot remount %q", fromPathDetails.MountPath))
|
||||
}
|
||||
|
||||
if strings.HasPrefix(toPathDetails.MountPath, p) {
|
||||
return handleError(fmt.Errorf("cannot remount to destination %+v", toPathDetails.MountPath))
|
||||
}
|
||||
}
|
||||
|
||||
entry := b.Core.router.MatchingMountEntry(ctx, sanitizePath(fromPath))
|
||||
|
||||
if entry == nil {
|
||||
return handleError(fmt.Errorf("no matching mount at %q", sanitizePath(fromPath)))
|
||||
}
|
||||
|
||||
if match := b.Core.router.MatchingMount(ctx, toPath); match != "" {
|
||||
return handleError(fmt.Errorf("existing mount at %q", match))
|
||||
}
|
||||
// If we are a performance secondary cluster we should forward the request
|
||||
// to the primary. We fail early here since the view in use isn't marked as
|
||||
// readonly
|
||||
|
@ -1211,31 +1233,76 @@ func (b *SystemBackend) handleRemount(ctx context.Context, req *logical.Request,
|
|||
return nil, logical.ErrReadOnly
|
||||
}
|
||||
|
||||
migrationID, err := b.Core.createMigrationStatus(fromPathDetails, toPathDetails)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error creating migration status %+v", err)
|
||||
}
|
||||
// Start up a goroutine to handle the remount operations, and return early to the caller
|
||||
go func(migrationID string) {
|
||||
b.Core.stateLock.RLock()
|
||||
defer b.Core.stateLock.RUnlock()
|
||||
|
||||
logger := b.Core.Logger().Named("mounts.migration").With("migration_id", migrationID, "namespace", ns.Path, "to_path", toPath, "from_path", fromPath)
|
||||
|
||||
var err error
|
||||
if !strings.Contains(fromPath, "auth") {
|
||||
err = b.moveSecretsEngine(ns, logger, migrationID, entry.ViewPath(), fromPathDetails, toPathDetails)
|
||||
} else {
|
||||
logger.Error("Remount is unsupported for the source mount", "err", err)
|
||||
}
|
||||
if err != nil {
|
||||
logger.Error("remount failed", "error", err)
|
||||
if err := b.Core.setMigrationStatus(migrationID, MigrationFailureStatus); err != nil {
|
||||
logger.Error("Setting migration status failed", "error", err, "target_status", MigrationFailureStatus)
|
||||
}
|
||||
}
|
||||
}(migrationID)
|
||||
|
||||
resp := &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"migration_id": migrationID,
|
||||
},
|
||||
}
|
||||
resp.AddWarning("Mount move has been queued. Progress will be reported in Vault's server log, tagged with the returned migration_id")
|
||||
return resp, nil
|
||||
}
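The handler above follows a fire-and-poll pattern: it records an in-progress entry, returns the migration ID immediately, and lets a goroutine flip the status once the work settles. A stripped-down, Vault-independent sketch of that pattern (all names here are illustrative, not part of the commit):

package main

import (
	"fmt"
	"sync"
	"time"
)

// tracker maps migration IDs to a status string, mirroring the role of
// Core.mountMigrationTracker in the diff above.
var tracker sync.Map

// startMove registers an in-progress entry, spawns the worker, and returns the ID.
func startMove(id string, work func() error) string {
	tracker.Store(id, "in-progress")
	go func() {
		if err := work(); err != nil {
			tracker.Store(id, "failure")
			return
		}
		tracker.Store(id, "success")
	}()
	return id
}

func main() {
	id := startMove("example-id", func() error {
		time.Sleep(50 * time.Millisecond) // stand-in for the actual remount work
		return nil
	})

	// Poll until the status leaves "in-progress", as the status endpoint allows.
	for {
		status, _ := tracker.Load(id)
		if status != "in-progress" {
			fmt.Println(id, status) // example-id success
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
}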
|
||||
|
||||
// moveSecretsEngine carries out a remount operation on the secrets engine, updating the migration status as required.
// It is expected to be called asynchronously outside of a request context; hence it derives a context from the active one
// and intermittently checks whether that context is still open.
|
||||
func (b *SystemBackend) moveSecretsEngine(ns *namespace.Namespace, logger log.Logger, migrationID, viewPath string, fromPathDetails, toPathDetails namespace.MountPathDetails) error {
|
||||
logger.Info("Starting to update the mount table and revoke leases")
|
||||
revokeCtx := namespace.ContextWithNamespace(b.Core.activeContext, ns)
|
||||
// Attempt remount
|
||||
if err := b.Core.remount(ctx, fromPath, toPath, !b.Core.perfStandby); err != nil {
|
||||
b.Backend.Logger().Error("remount failed", "from_path", fromPath, "to_path", toPath, "error", err)
|
||||
return handleError(err)
|
||||
if err := b.Core.remountSecretsEngine(revokeCtx, fromPathDetails, toPathDetails, !b.Core.perfStandby); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the view path if available
|
||||
var viewPath string
|
||||
if entry != nil {
|
||||
viewPath = entry.ViewPath()
|
||||
if err := revokeCtx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("Removing the source mount from filtered paths on secondaries")
|
||||
// Remove from filtered mounts and restart evaluation process
|
||||
if err := b.Core.removePathFromFilteredPaths(ctx, ns.Path+fromPath, viewPath); err != nil {
|
||||
b.Backend.Logger().Error("filtered path removal failed", fromPath, "error", err)
|
||||
return handleError(err)
|
||||
if err := b.Core.removePathFromFilteredPaths(revokeCtx, fromPathDetails.GetFullPath(), viewPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update quotas with the new path
|
||||
if err := b.Core.quotaManager.HandleRemount(ctx, ns.Path, sanitizePath(fromPath), sanitizePath(toPath)); err != nil {
|
||||
b.Core.logger.Error("failed to update quotas after remount", "ns_path", ns.Path, "from_path", fromPath, "to_path", toPath, "error", err)
|
||||
return handleError(err)
|
||||
if err := revokeCtx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
logger.Info("Updating quotas associated with the source mount")
|
||||
// Update quotas with the new path and namespace
|
||||
if err := b.Core.quotaManager.HandleRemount(revokeCtx, fromPathDetails, toPathDetails); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.Core.setMigrationStatus(migrationID, MigrationSuccessStatus); err != nil {
|
||||
return err
|
||||
}
|
||||
logger.Info("Completed mount move operations")
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleAuthTuneRead is used to get config settings on a auth path
|
||||
|
@ -1249,6 +1316,34 @@ func (b *SystemBackend) handleAuthTuneRead(ctx context.Context, req *logical.Req
|
|||
return b.handleTuneReadCommon(ctx, "auth/"+path)
|
||||
}
|
||||
|
||||
func (b *SystemBackend) handleRemountStatusCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
repState := b.Core.ReplicationState()
|
||||
|
||||
migrationID := data.Get("migration_id").(string)
|
||||
if migrationID == "" {
|
||||
return logical.ErrorResponse(
|
||||
"migrationID must be specified"),
|
||||
logical.ErrInvalidRequest
|
||||
}
|
||||
|
||||
migrationInfo := b.Core.readMigrationStatus(migrationID)
|
||||
if migrationInfo == nil {
|
||||
// If the migration info is not found and this is a perf secondary
|
||||
// forward the request to the primary cluster
|
||||
if repState.HasState(consts.ReplicationPerformanceSecondary) {
|
||||
return nil, logical.ErrReadOnly
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
resp := &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"migration_id": migrationID,
|
||||
"migration_info": migrationInfo,
|
||||
},
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// handleMountTuneRead is used to get config settings on a backend
|
||||
func (b *SystemBackend) handleMountTuneRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
path := data.Get("path").(string)
|
||||
|
@ -4519,7 +4614,7 @@ in the plugin catalog.`,
|
|||
},
|
||||
|
||||
"remount": {
|
||||
"Move the mount point of an already-mounted backend.",
|
||||
"Move the mount point of an already-mounted backend, within or across namespaces",
|
||||
`
|
||||
This path responds to the following HTTP methods.
|
||||
|
||||
|
@ -4528,6 +4623,15 @@ This path responds to the following HTTP methods.
|
|||
`,
|
||||
},
|
||||
|
||||
"remount-status": {
|
||||
"Check the status of a mount move operation",
|
||||
`
|
||||
This path responds to the following HTTP methods.
|
||||
GET /sys/remount/status/:migration_id
|
||||
Check the status of a mount move operation for the given migration_id
|
||||
`,
|
||||
},
|
||||
|
||||
"auth_tune": {
|
||||
"Tune the configuration parameters for an auth path.",
|
||||
`Read and write the 'default-lease-ttl' and 'max-lease-ttl' values of
|
||||
|
|
|
@ -1308,27 +1308,50 @@ func (b *SystemBackend) leasePaths() []*framework.Path {
|
|||
}
|
||||
}
|
||||
|
||||
func (b *SystemBackend) remountPath() *framework.Path {
|
||||
return &framework.Path{
|
||||
Pattern: "remount",
|
||||
func (b *SystemBackend) remountPaths() []*framework.Path {
|
||||
return []*framework.Path{
|
||||
{
|
||||
Pattern: "remount",
|
||||
|
||||
Fields: map[string]*framework.FieldSchema{
|
||||
"from": {
|
||||
Type: framework.TypeString,
|
||||
Description: "The previous mount point.",
|
||||
Fields: map[string]*framework.FieldSchema{
|
||||
"from": {
|
||||
Type: framework.TypeString,
|
||||
Description: "The previous mount point.",
|
||||
},
|
||||
"to": {
|
||||
Type: framework.TypeString,
|
||||
Description: "The new mount point.",
|
||||
},
|
||||
},
|
||||
"to": {
|
||||
Type: framework.TypeString,
|
||||
Description: "The new mount point.",
|
||||
|
||||
Operations: map[logical.Operation]framework.OperationHandler{
|
||||
logical.UpdateOperation: &framework.PathOperation{
|
||||
Callback: b.handleRemount,
|
||||
Summary: "Initiate a mount migration",
|
||||
},
|
||||
},
|
||||
HelpSynopsis: strings.TrimSpace(sysHelp["remount"][0]),
|
||||
HelpDescription: strings.TrimSpace(sysHelp["remount"][1]),
|
||||
},
|
||||
{
|
||||
Pattern: "remount/status/(?P<migration_id>.+?)$",
|
||||
|
||||
Callbacks: map[logical.Operation]framework.OperationFunc{
|
||||
logical.UpdateOperation: b.handleRemount,
|
||||
Fields: map[string]*framework.FieldSchema{
|
||||
"migration_id": {
|
||||
Type: framework.TypeString,
|
||||
Description: "The ID of the migration operation",
|
||||
},
|
||||
},
|
||||
|
||||
Operations: map[logical.Operation]framework.OperationHandler{
|
||||
logical.ReadOperation: &framework.PathOperation{
|
||||
Callback: b.handleRemountStatusCheck,
|
||||
Summary: "Check status of a mount migration",
|
||||
},
|
||||
},
|
||||
HelpSynopsis: strings.TrimSpace(sysHelp["remount-status"][0]),
|
||||
HelpDescription: strings.TrimSpace(sysHelp["remount-status"][1]),
|
||||
},
|
||||
|
||||
HelpSynopsis: strings.TrimSpace(sysHelp["remount"][0]),
|
||||
HelpDescription: strings.TrimSpace(sysHelp["remount"][1]),
|
||||
}
|
||||
}
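Besides the typed Sys() helpers in the API package, the two paths registered here can be exercised through the generic Logical client. A minimal sketch, assuming a configured client and an existing mount at secret/ (illustrative values only):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// POST sys/remount kicks off the migration and returns a migration_id.
	start, err := client.Logical().Write("sys/remount", map[string]interface{}{
		"from": "secret/",
		"to":   "generic/",
	})
	if err != nil {
		log.Fatal(err)
	}
	id := start.Data["migration_id"].(string)

	// GET sys/remount/status/:migration_id reports migration_info with a status field.
	status, err := client.Logical().Read("sys/remount/status/" + id)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(status.Data["migration_info"])
}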
|
||||
|
||||
|
|
|
@ -212,10 +212,10 @@ func (b *SystemBackend) handleRateLimitQuotasUpdate() framework.OperationFunc {
|
|||
case quota == nil:
|
||||
quota = quotas.NewRateLimitQuota(name, ns.Path, mountPath, rate, interval, blockInterval)
|
||||
default:
|
||||
rlq := quota.(*quotas.RateLimitQuota)
|
||||
// Re-inserting the already indexed object in memdb might cause problems.
|
||||
// So, clone the object. See https://github.com/hashicorp/go-memdb/issues/76.
|
||||
rlq = rlq.Clone()
|
||||
clonedQuota := quota.Clone()
|
||||
rlq := clonedQuota.(*quotas.RateLimitQuota)
|
||||
rlq.NamespacePath = ns.Path
|
||||
rlq.MountPath = mountPath
|
||||
rlq.Rate = rate
|
||||
|
|
|
@ -691,12 +691,18 @@ func TestSystemBackend_remount(t *testing.T) {
|
|||
req.Data["to"] = "foo"
|
||||
req.Data["config"] = structs.Map(MountConfig{})
|
||||
resp, err := b.HandleRequest(namespace.RootContext(nil), req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if resp != nil {
|
||||
t.Fatalf("bad: %v", resp)
|
||||
}
|
||||
RetryUntil(t, 5*time.Second, func() error {
|
||||
req = logical.TestRequest(t, logical.ReadOperation, fmt.Sprintf("remount/status/%s", resp.Data["migration_id"]))
|
||||
resp, err = b.HandleRequest(namespace.RootContext(nil), req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
migrationInfo := resp.Data["migration_info"].(*MountMigrationInfo)
|
||||
if migrationInfo.MigrationStatus != MigrationSuccessStatus.String() {
|
||||
return fmt.Errorf("Expected migration status to be successful, got %q", migrationInfo.MigrationStatus)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
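RetryUntil (a test helper in the vault package) is used above to poll until the asynchronous migration settles. For readers unfamiliar with that helper, a self-contained sketch of the same poll-with-deadline idea (hypothetical helper, not Vault's implementation):

package example

import (
	"testing"
	"time"
)

// retryUntil keeps calling f until it returns nil or the timeout elapses,
// failing the test with the last error if it never succeeds.
func retryUntil(t *testing.T, timeout time.Duration, f func() error) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	var err error
	for time.Now().Before(deadline) {
		if err = f(); err == nil {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	t.Fatalf("retry timed out: %v", err)
}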
|
||||
|
||||
func TestSystemBackend_remount_invalid(t *testing.T) {
|
||||
|
@ -710,8 +716,8 @@ func TestSystemBackend_remount_invalid(t *testing.T) {
|
|||
if err != logical.ErrInvalidRequest {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if resp.Data["error"] != `no matching mount at "unknown/"` {
|
||||
t.Fatalf("bad: %v", resp)
|
||||
if !strings.Contains(resp.Data["error"].(string), "no matching mount at \"unknown/\"") {
|
||||
t.Fatalf("Found unexpected error %q", resp.Data["error"].(string))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -725,8 +731,8 @@ func TestSystemBackend_remount_system(t *testing.T) {
|
|||
if err != logical.ErrInvalidRequest {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if resp.Data["error"] != `cannot remount "sys/"` {
|
||||
t.Fatalf("bad: %v", resp)
|
||||
if !strings.Contains(resp.Data["error"].(string), "cannot remount \"sys/\"") {
|
||||
t.Fatalf("Found unexpected error %q", resp.Data["error"].(string))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -741,7 +747,7 @@ func TestSystemBackend_remount_clean(t *testing.T) {
|
|||
if err != logical.ErrInvalidRequest {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if resp.Data["error"] != `'to' path 'foo//bar' does not match cleaned path 'foo/bar'` {
|
||||
if resp.Data["error"] != `invalid destination mount: path 'foo//bar/' does not match cleaned path 'foo/bar/'` {
|
||||
t.Fatalf("bad: %v", resp)
|
||||
}
|
||||
}
|
||||
|
@ -757,7 +763,7 @@ func TestSystemBackend_remount_nonPrintable(t *testing.T) {
|
|||
if err != logical.ErrInvalidRequest {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if resp.Data["error"] != `'to' path cannot contain non-printable characters` {
|
||||
if resp.Data["error"] != `invalid destination mount: path cannot contain non-printable characters` {
|
||||
t.Fatalf("bad: %v", resp)
|
||||
}
|
||||
}
|
||||
|
|
199 vault/mount.go
@ -126,6 +126,32 @@ type MountTable struct {
|
|||
Entries []*MountEntry `json:"entries"`
|
||||
}
|
||||
|
||||
type MountMigrationStatus int
|
||||
|
||||
const (
|
||||
MigrationInProgressStatus MountMigrationStatus = iota
|
||||
MigrationSuccessStatus
|
||||
MigrationFailureStatus
|
||||
)
|
||||
|
||||
func (m MountMigrationStatus) String() string {
|
||||
switch m {
|
||||
case MigrationInProgressStatus:
|
||||
return "in-progress"
|
||||
case MigrationSuccessStatus:
|
||||
return "success"
|
||||
case MigrationFailureStatus:
|
||||
return "failure"
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
type MountMigrationInfo struct {
|
||||
SourceMount string `json:"source_mount"`
|
||||
TargetMount string `json:"target_mount"`
|
||||
MigrationStatus string `json:"status"`
|
||||
}
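Since handleRemountStatusCheck returns this struct under migration_info, its JSON tags determine the wire shape of the status endpoint. A small sketch of that shape, using a copy of the struct and hypothetical values:

package main

import (
	"encoding/json"
	"fmt"
)

// Copy of MountMigrationInfo from the diff, reproduced here only to show the
// JSON emitted under "migration_info" by sys/remount/status/:migration_id.
type MountMigrationInfo struct {
	SourceMount     string `json:"source_mount"`
	TargetMount     string `json:"target_mount"`
	MigrationStatus string `json:"status"`
}

func main() {
	b, _ := json.Marshal(MountMigrationInfo{
		SourceMount:     "ns1/secret/",
		TargetMount:     "ns2/generic/",
		MigrationStatus: "in-progress",
	})
	fmt.Println(string(b))
	// {"source_mount":"ns1/secret/","target_mount":"ns2/generic/","status":"in-progress"}
}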
|
||||
|
||||
// tableMetrics is responsible for setting gauge metrics for
|
||||
// mount table storage sizes (in bytes) and mount table num
|
||||
// entries. It does this via setGaugeWithLabels. It then
|
||||
|
@ -195,14 +221,10 @@ func (t *MountTable) shallowClone() *MountTable {
|
|||
|
||||
// setTaint is used to set the taint on given entry Accepts either the mount
|
||||
// entry's path or namespace + path, i.e. <ns-path>/secret/ or <ns-path>/token/
|
||||
func (t *MountTable) setTaint(ctx context.Context, path string, tainted bool, mountState string) (*MountEntry, error) {
|
||||
func (t *MountTable) setTaint(nsID, path string, tainted bool, mountState string) (*MountEntry, error) {
|
||||
n := len(t.Entries)
|
||||
ns, err := namespace.FromContext(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
if entry := t.Entries[i]; entry.Path == path && entry.Namespace().ID == ns.ID {
|
||||
if entry := t.Entries[i]; entry.Path == path && entry.Namespace().ID == nsID {
|
||||
t.Entries[i].Tainted = tainted
|
||||
t.Entries[i].MountState = mountState
|
||||
return t.Entries[i], nil
|
||||
|
@ -662,7 +684,7 @@ func (c *Core) unmountInternal(ctx context.Context, path string, updateStorage b
|
|||
entry := c.router.MatchingMountEntry(ctx, path)
|
||||
|
||||
// Mark the entry as tainted
|
||||
if err := c.taintMountEntry(ctx, path, updateStorage, true); err != nil {
|
||||
if err := c.taintMountEntry(ctx, ns.ID, path, updateStorage, true); err != nil {
|
||||
c.logger.Error("failed to taint mount entry for path being unmounted", "error", err, "path", path)
|
||||
return err
|
||||
}
|
||||
|
@ -780,7 +802,7 @@ func (c *Core) removeMountEntry(ctx context.Context, path string, updateStorage
|
|||
}
|
||||
|
||||
// taintMountEntry is used to mark an entry in the mount table as tainted
|
||||
func (c *Core) taintMountEntry(ctx context.Context, path string, updateStorage, unmounting bool) error {
|
||||
func (c *Core) taintMountEntry(ctx context.Context, nsID, mountPath string, updateStorage, unmounting bool) error {
|
||||
c.mountsLock.Lock()
|
||||
defer c.mountsLock.Unlock()
|
||||
|
||||
|
@ -791,12 +813,12 @@ func (c *Core) taintMountEntry(ctx context.Context, path string, updateStorage,
|
|||
|
||||
// As modifying the taint of an entry affects shallow clones,
|
||||
// we simply use the original
|
||||
entry, err := c.mounts.setTaint(ctx, path, true, mountState)
|
||||
entry, err := c.mounts.setTaint(nsID, mountPath, true, mountState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if entry == nil {
|
||||
c.logger.Error("nil entry found tainting entry in mounts table", "path", path)
|
||||
c.logger.Error("nil entry found tainting entry in mounts table", "path", mountPath)
|
||||
return logical.CodedError(500, "failed to taint entry in mounts table")
|
||||
}
|
||||
|
||||
|
@ -846,99 +868,90 @@ func (c *Core) remountForceInternal(ctx context.Context, path string, updateStor
|
|||
return nil
|
||||
}
|
||||
|
||||
// Remount is used to remount a path at a new mount point.
|
||||
func (c *Core) remount(ctx context.Context, src, dst string, updateStorage bool) error {
|
||||
func (c *Core) remountSecretsEngineCurrentNamespace(ctx context.Context, src, dst string, updateStorage bool) error {
|
||||
ns, err := namespace.FromContext(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Ensure we end the path in a slash
|
||||
if !strings.HasSuffix(src, "/") {
|
||||
src += "/"
|
||||
}
|
||||
if !strings.HasSuffix(dst, "/") {
|
||||
dst += "/"
|
||||
}
|
||||
srcPathDetails := c.splitNamespaceAndMountFromPath(ns.Path, src)
|
||||
dstPathDetails := c.splitNamespaceAndMountFromPath(ns.Path, dst)
|
||||
return c.remountSecretsEngine(ctx, srcPathDetails, dstPathDetails, updateStorage)
|
||||
}
|
||||
|
||||
// Prevent protected paths from being remounted
|
||||
for _, p := range protectedMounts {
|
||||
if strings.HasPrefix(src, p) {
|
||||
return fmt.Errorf("cannot remount %q", src)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify exact match of the route
|
||||
srcMatch := c.router.MatchingMountEntry(ctx, src)
|
||||
if srcMatch == nil {
|
||||
return fmt.Errorf("no matching mount at %q", src)
|
||||
}
|
||||
if srcMatch.NamespaceID != ns.ID {
|
||||
return fmt.Errorf("source mount in a different namespace than request")
|
||||
}
|
||||
|
||||
if err := verifyNamespace(c, ns, &MountEntry{Path: dst}); err != nil {
|
||||
// remountSecretsEngine is used to remount a path at a new mount point.
|
||||
func (c *Core) remountSecretsEngine(ctx context.Context, src, dst namespace.MountPathDetails, updateStorage bool) error {
|
||||
ns, err := namespace.FromContext(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if match := c.router.MatchingMount(ctx, dst); match != "" {
|
||||
// Prevent protected paths from being remounted, or target mounts being in protected paths
|
||||
for _, p := range protectedMounts {
|
||||
if strings.HasPrefix(src.MountPath, p) {
|
||||
return fmt.Errorf("cannot remount %q", src.MountPath)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(dst.MountPath, p) {
|
||||
return fmt.Errorf("cannot remount to destination %+v", dst)
|
||||
}
|
||||
}
|
||||
|
||||
srcRelativePath := src.GetRelativePath(ns)
|
||||
dstRelativePath := dst.GetRelativePath(ns)
|
||||
|
||||
// Verify exact match of the route
|
||||
srcMatch := c.router.MatchingMountEntry(ctx, srcRelativePath)
|
||||
if srcMatch == nil {
|
||||
return fmt.Errorf("no matching mount at %+v", src)
|
||||
}
|
||||
|
||||
if match := c.router.MatchingMount(ctx, dstRelativePath); match != "" {
|
||||
return fmt.Errorf("existing mount at %q", match)
|
||||
}
|
||||
|
||||
// Mark the entry as tainted
|
||||
if err := c.taintMountEntry(ctx, src, updateStorage, false); err != nil {
|
||||
if err := c.taintMountEntry(ctx, src.Namespace.ID, src.MountPath, updateStorage, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Taint the router path to prevent routing
|
||||
if err := c.router.Taint(ctx, src); err != nil {
|
||||
if err := c.router.Taint(ctx, srcRelativePath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !c.IsDRSecondary() {
|
||||
// Invoke the rollback manager a final time
|
||||
rCtx := namespace.ContextWithNamespace(c.activeContext, ns)
|
||||
if c.rollback != nil {
|
||||
if err := c.rollback.Rollback(rCtx, src); err != nil {
|
||||
if c.rollback != nil && c.router.MatchingBackend(ctx, srcRelativePath) != nil {
|
||||
if err := c.rollback.Rollback(rCtx, srcRelativePath); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if entry := c.router.MatchingMountEntry(ctx, src); entry == nil {
|
||||
return fmt.Errorf("no matching mount at %q", src)
|
||||
}
|
||||
|
||||
revokeCtx := namespace.ContextWithNamespace(ctx, src.Namespace)
|
||||
// Revoke all the dynamic keys
|
||||
if err := c.expiration.RevokePrefix(rCtx, src, true); err != nil {
|
||||
if err := c.expiration.RevokePrefix(revokeCtx, src.MountPath, true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
c.mountsLock.Lock()
|
||||
if match := c.router.MatchingMount(ctx, dst); match != "" {
|
||||
if match := c.router.MatchingMount(ctx, dstRelativePath); match != "" {
|
||||
c.mountsLock.Unlock()
|
||||
return fmt.Errorf("existing mount at %q", match)
|
||||
}
|
||||
var entry *MountEntry
|
||||
for _, mountEntry := range c.mounts.Entries {
|
||||
if mountEntry.Path == src && mountEntry.NamespaceID == ns.ID {
|
||||
entry = mountEntry
|
||||
entry.Path = dst
|
||||
entry.Tainted = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if entry == nil {
|
||||
c.mountsLock.Unlock()
|
||||
c.logger.Error("failed to find entry in mounts table")
|
||||
return logical.CodedError(500, "failed to find entry in mounts table")
|
||||
}
|
||||
srcMatch.Tainted = false
|
||||
srcMatch.NamespaceID = dst.Namespace.ID
|
||||
srcMatch.namespace = dst.Namespace
|
||||
srcPath := srcMatch.Path
|
||||
srcMatch.Path = dst.MountPath
|
||||
|
||||
// Update the mount table
|
||||
if err := c.persistMounts(ctx, c.mounts, &entry.Local); err != nil {
|
||||
entry.Path = src
|
||||
entry.Tainted = true
|
||||
if err := c.persistMounts(ctx, c.mounts, &srcMatch.Local); err != nil {
|
||||
srcMatch.Path = srcPath
|
||||
srcMatch.Tainted = true
|
||||
c.mountsLock.Unlock()
|
||||
if err == logical.ErrReadOnly && c.perfStandby {
|
||||
return err
|
||||
|
@ -949,23 +962,37 @@ func (c *Core) remount(ctx context.Context, src, dst string, updateStorage bool)
|
|||
}
|
||||
|
||||
// Remount the backend
|
||||
if err := c.router.Remount(ctx, src, dst); err != nil {
|
||||
if err := c.router.Remount(ctx, srcRelativePath, dstRelativePath); err != nil {
|
||||
c.mountsLock.Unlock()
|
||||
return err
|
||||
}
|
||||
c.mountsLock.Unlock()
|
||||
|
||||
// Un-taint the path
|
||||
if err := c.router.Untaint(ctx, dst); err != nil {
|
||||
if err := c.router.Untaint(ctx, dstRelativePath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.logger.IsInfo() {
|
||||
c.logger.Info("successful remount", "old_path", src, "new_path", dst)
|
||||
}
|
||||
c.logger.Info("successful remount", "old_path", src, "new_path", dst)
|
||||
return nil
|
||||
}
|
||||
|
||||
// splitNamespaceAndMountFromPath takes an input path consisting of a relative namespace hierarchy followed by a mount point
// and returns the full namespace of the mount point, along with the mount point stripped of its namespace-related prefix.
// For example, in a hierarchy ns1/ns2/ns3/secret-mount, when currNs is ns1 and path is ns2/ns3/secret-mount,
// this returns the namespace object for ns1/ns2/ns3/ and the string "secret-mount".
|
||||
func (c *Core) splitNamespaceAndMountFromPath(currNs, path string) namespace.MountPathDetails {
|
||||
fullPath := currNs + path
|
||||
fullNs := c.namespaceByPath(fullPath)
|
||||
|
||||
mountPath := strings.TrimPrefix(fullPath, fullNs.Path)
|
||||
|
||||
return namespace.MountPathDetails{
|
||||
Namespace: fullNs,
|
||||
MountPath: sanitizePath(mountPath),
|
||||
}
|
||||
}
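The namespace lookup itself goes through Core.namespaceByPath, but the string handling reduces to prefix arithmetic. A standalone sketch of the same split, with the namespace resolution stubbed out for illustration (the known namespace list is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// resolveNamespace stands in for Core.namespaceByPath: it returns the longest
// known namespace path that prefixes the full path.
func resolveNamespace(fullPath string) string {
	known := []string{"ns1/ns2/ns3/", "ns1/ns2/", "ns1/", ""}
	for _, ns := range known {
		if strings.HasPrefix(fullPath, ns) {
			return ns
		}
	}
	return ""
}

func main() {
	currNs, path := "ns1/", "ns2/ns3/secret-mount"
	fullPath := currNs + path
	nsPath := resolveNamespace(fullPath)
	mountPath := strings.TrimPrefix(fullPath, nsPath)
	fmt.Println(nsPath, mountPath) // ns1/ns2/ns3/ secret-mount
}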
|
||||
|
||||
// loadMounts is invoked as part of postUnseal to load the mount table
|
||||
func (c *Core) loadMounts(ctx context.Context) error {
|
||||
// Load the existing mount table
|
||||
|
@ -1580,3 +1607,37 @@ func (c *Core) setCoreBackend(entry *MountEntry, backend logical.Backend, view *
|
|||
c.identityStore = backend.(*IdentityStore)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Core) createMigrationStatus(from, to namespace.MountPathDetails) (string, error) {
|
||||
migrationID, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error generating uuid for mount move invocation: %w", err)
|
||||
}
|
||||
migrationInfo := MountMigrationInfo{
|
||||
SourceMount: from.Namespace.Path + from.MountPath,
|
||||
TargetMount: to.Namespace.Path + to.MountPath,
|
||||
MigrationStatus: MigrationInProgressStatus.String(),
|
||||
}
|
||||
c.mountMigrationTracker.Store(migrationID, migrationInfo)
|
||||
return migrationID, nil
|
||||
}
|
||||
|
||||
func (c *Core) setMigrationStatus(migrationID string, migrationStatus MountMigrationStatus) error {
|
||||
migrationInfoRaw, ok := c.mountMigrationTracker.Load(migrationID)
|
||||
if !ok {
|
||||
return fmt.Errorf("Migration Tracker entry missing for ID %s", migrationID)
|
||||
}
|
||||
migrationInfo := migrationInfoRaw.(MountMigrationInfo)
|
||||
migrationInfo.MigrationStatus = migrationStatus.String()
|
||||
c.mountMigrationTracker.Store(migrationID, migrationInfo)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Core) readMigrationStatus(migrationID string) *MountMigrationInfo {
|
||||
migrationInfoRaw, ok := c.mountMigrationTracker.Load(migrationID)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
migrationInfo := migrationInfoRaw.(MountMigrationInfo)
|
||||
return &migrationInfo
|
||||
}
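Taken together, the three helpers give the tracker a simple create, set, read lifecycle. A hypothetical test sketching that lifecycle (not part of this commit); it assumes the vault package's TestCoreUnsealed helper and namespace.RootNamespace:

package vault

import (
	"testing"

	"github.com/hashicorp/vault/helper/namespace"
)

func TestMountMigrationTrackerLifecycle(t *testing.T) {
	c, _, _ := TestCoreUnsealed(t)

	from := namespace.MountPathDetails{Namespace: namespace.RootNamespace, MountPath: "secret/"}
	to := namespace.MountPathDetails{Namespace: namespace.RootNamespace, MountPath: "generic/"}

	// A freshly created migration starts out in-progress.
	id, err := c.createMigrationStatus(from, to)
	if err != nil {
		t.Fatal(err)
	}
	if got := c.readMigrationStatus(id).MigrationStatus; got != MigrationInProgressStatus.String() {
		t.Fatalf("expected in-progress, got %q", got)
	}

	// The handler's goroutine would do this once the remount settles.
	if err := c.setMigrationStatus(id, MigrationSuccessStatus); err != nil {
		t.Fatal(err)
	}
	if got := c.readMigrationStatus(id).MigrationStatus; got != MigrationSuccessStatus.String() {
		t.Fatalf("expected success, got %q", got)
	}
}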
|
||||
|
|
|
@ -476,7 +476,7 @@ func TestCore_RemountConcurrent(t *testing.T) {
|
|||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
err := c2.remount(namespace.RootContext(nil), "test1", "foo", true)
|
||||
err := c2.remountSecretsEngineCurrentNamespace(namespace.RootContext(nil), "test1", "foo", true)
|
||||
if err != nil {
|
||||
t.Logf("err: %v", err)
|
||||
}
|
||||
|
@ -485,7 +485,7 @@ func TestCore_RemountConcurrent(t *testing.T) {
|
|||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
err := c2.remount(namespace.RootContext(nil), "test2", "foo", true)
|
||||
err := c2.remountSecretsEngineCurrentNamespace(namespace.RootContext(nil), "test2", "foo", true)
|
||||
if err != nil {
|
||||
t.Logf("err: %v", err)
|
||||
}
|
||||
|
@ -504,7 +504,7 @@ func TestCore_RemountConcurrent(t *testing.T) {
|
|||
|
||||
func TestCore_Remount(t *testing.T) {
|
||||
c, keys, _ := TestCoreUnsealed(t)
|
||||
err := c.remount(namespace.RootContext(nil), "secret", "foo", true)
|
||||
err := c.remountSecretsEngineCurrentNamespace(namespace.RootContext(nil), "secret", "foo", true)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
@ -612,7 +612,7 @@ func TestCore_Remount_Cleanup(t *testing.T) {
|
|||
}
|
||||
|
||||
// Remount, this should cleanup
|
||||
if err := c.remount(namespace.RootContext(nil), "test/", "new/", true); err != nil {
|
||||
if err := c.remountSecretsEngineCurrentNamespace(namespace.RootContext(nil), "test/", "new/", true); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
|
@ -641,7 +641,7 @@ func TestCore_Remount_Cleanup(t *testing.T) {
|
|||
|
||||
func TestCore_Remount_Protected(t *testing.T) {
|
||||
c, _, _ := TestCoreUnsealed(t)
|
||||
err := c.remount(namespace.RootContext(nil), "sys", "foo", true)
|
||||
err := c.remountSecretsEngineCurrentNamespace(namespace.RootContext(nil), "sys", "foo", true)
|
||||
if err.Error() != `cannot remount "sys/"` {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
log "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/vault/helper/metricsutil"
|
||||
"github.com/hashicorp/vault/helper/namespace"
|
||||
"github.com/hashicorp/vault/sdk/helper/pathmanager"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
@ -183,8 +184,11 @@ type Quota interface {
|
|||
// rule is deleted.
|
||||
close(context.Context) error
|
||||
|
||||
// handleRemount takes in the new mount path in the quota
|
||||
handleRemount(string)
|
||||
// Clone creates a clone of the calling quota
|
||||
Clone() Quota
|
||||
|
||||
// handleRemount updates the mount and namespace paths of the quota
|
||||
handleRemount(string, string)
|
||||
}
|
||||
|
||||
// Response holds information about the result of the Allow() call. The response
|
||||
|
@ -268,17 +272,41 @@ func (m *Manager) SetQuota(ctx context.Context, qType string, quota Quota, loadi
|
|||
return m.setQuotaLocked(ctx, qType, quota, loading)
|
||||
}
|
||||
|
||||
// setQuotaLocked adds or updates a quota rule, modifying the db as well as
|
||||
// any runtime elements such as goroutines.
|
||||
// It should be called with the write lock held.
|
||||
// setQuotaLocked creates a transaction, passes it into setQuotaLockedWithTxn, and manages its lifecycle,
// along with updating the lease quota counts.
|
||||
func (m *Manager) setQuotaLocked(ctx context.Context, qType string, quota Quota, loading bool) error {
|
||||
txn := m.db.Txn(true)
|
||||
defer txn.Abort()
|
||||
|
||||
err := m.setQuotaLockedWithTxn(ctx, qType, quota, loading, txn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if loading {
|
||||
txn.Commit()
|
||||
return nil
|
||||
}
|
||||
|
||||
// For the lease count type, recompute the counters
|
||||
if !loading && qType == TypeLeaseCount.String() {
|
||||
if err := m.recomputeLeaseCounts(ctx, txn); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
txn.Commit()
|
||||
return nil
|
||||
}
|
||||
|
||||
// setQuotaLockedWithTxn adds or updates a quota rule, modifying the db as well as
|
||||
// any runtime elements such as goroutines, using the transaction passed in.
|
||||
// It should be called with the write lock held.
|
||||
func (m *Manager) setQuotaLockedWithTxn(ctx context.Context, qType string, quota Quota, loading bool, txn *memdb.Txn) error {
|
||||
if qType == TypeLeaseCount.String() {
|
||||
m.setIsPerfStandby(quota)
|
||||
}
|
||||
|
||||
txn := m.db.Txn(true)
|
||||
defer txn.Abort()
|
||||
|
||||
raw, err := txn.First(qType, indexID, quota.quotaID())
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -306,19 +334,6 @@ func (m *Manager) setQuotaLocked(ctx context.Context, qType string, quota Quota,
|
|||
return err
|
||||
}
|
||||
|
||||
if loading {
|
||||
txn.Commit()
|
||||
return nil
|
||||
}
|
||||
|
||||
// For the lease count type, recompute the counters
|
||||
if !loading && qType == TypeLeaseCount.String() {
|
||||
if err := m.recomputeLeaseCounts(ctx, txn); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
txn.Commit()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -937,23 +952,30 @@ func QuotaStoragePath(quotaType, name string) string {
|
|||
|
||||
// HandleRemount updates the quota subsystem about the remount operation that
|
||||
// took place. Quota manager will trigger the quota specific updates including
|
||||
// the mount path update..
|
||||
func (m *Manager) HandleRemount(ctx context.Context, nsPath, fromPath, toPath string) error {
|
||||
// the mount path update and the namespace update.
|
||||
func (m *Manager) HandleRemount(ctx context.Context, from, to namespace.MountPathDetails) error {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
// Grab a write transaction, as we want to save the updated quota in memdb
|
||||
txn := m.db.Txn(true)
|
||||
defer txn.Abort()
|
||||
|
||||
// nsPath would have been made non-empty during insertion. Use non-empty value
|
||||
// quota namespace would have been made non-empty during insertion. Use non-empty value
|
||||
// during query as well.
|
||||
if nsPath == "" {
|
||||
nsPath = "root"
|
||||
fromNs := from.Namespace.Path
|
||||
if fromNs == "" {
|
||||
fromNs = namespace.RootNamespaceID
|
||||
}
|
||||
|
||||
toNs := to.Namespace.Path
|
||||
if toNs == "" {
|
||||
toNs = namespace.RootNamespaceID
|
||||
}
|
||||
|
||||
idx := indexNamespaceMount
|
||||
leaseQuotaUpdated := false
|
||||
args := []interface{}{nsPath, fromPath}
|
||||
args := []interface{}{fromNs, from.MountPath}
|
||||
for _, quotaType := range quotaTypes() {
|
||||
iter, err := txn.Get(quotaType, idx, args...)
|
||||
if err != nil {
|
||||
|
@ -961,7 +983,11 @@ func (m *Manager) HandleRemount(ctx context.Context, nsPath, fromPath, toPath st
|
|||
}
|
||||
for raw := iter.Next(); raw != nil; raw = iter.Next() {
|
||||
quota := raw.(Quota)
|
||||
quota.handleRemount(toPath)
|
||||
|
||||
// Clone the object and update it
|
||||
clonedQuota := quota.Clone()
|
||||
clonedQuota.handleRemount(to.MountPath, toNs)
|
||||
// Update both underlying storage and memdb with the quota change
|
||||
entry, err := logical.StorageEntryJSON(QuotaStoragePath(quotaType, quota.QuotaName()), quota)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -969,6 +995,9 @@ func (m *Manager) HandleRemount(ctx context.Context, nsPath, fromPath, toPath st
|
|||
if err := m.storage.Put(ctx, entry); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.setQuotaLockedWithTxn(ctx, quotaType, clonedQuota, false, txn); err != nil {
|
||||
return err
|
||||
}
|
||||
if quotaType == TypeLeaseCount.String() {
|
||||
leaseQuotaUpdated = true
|
||||
}
|
||||
|
|
|
@ -101,7 +101,7 @@ func NewRateLimitQuota(name, nsPath, mountPath string, rate float64, interval, b
|
|||
}
|
||||
}
|
||||
|
||||
func (q *RateLimitQuota) Clone() *RateLimitQuota {
|
||||
func (q *RateLimitQuota) Clone() Quota {
|
||||
rlq := &RateLimitQuota{
|
||||
ID: q.ID,
|
||||
Name: q.Name,
|
||||
|
@ -337,6 +337,7 @@ func (rlq *RateLimitQuota) close(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (rlq *RateLimitQuota) handleRemount(toPath string) {
|
||||
rlq.MountPath = toPath
|
||||
func (rlq *RateLimitQuota) handleRemount(mountpath, nspath string) {
|
||||
rlq.MountPath = mountpath
|
||||
rlq.NamespacePath = nspath
|
||||
}
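Clone() exists so that Manager.HandleRemount never mutates an object that memdb has already indexed (see the comment in the quotas update handler above); the clone is updated via handleRemount and re-inserted. A minimal, dependency-free sketch of that clone-then-update discipline, using a hypothetical quota type:

package main

import "fmt"

// quota is a hypothetical stand-in for RateLimitQuota: the real type lives in
// an in-memory index, so it must never be modified in place.
type quota struct {
	MountPath     string
	NamespacePath string
}

// Clone returns a copy that is safe to mutate, mirroring RateLimitQuota.Clone.
func (q *quota) Clone() *quota {
	clone := *q
	return &clone
}

// handleRemount updates the clone with the mount's new location, mirroring the
// two-argument handleRemount introduced in this diff.
func (q *quota) handleRemount(mountPath, nsPath string) {
	q.MountPath = mountPath
	q.NamespacePath = nsPath
}

func main() {
	indexed := &quota{MountPath: "secret/", NamespacePath: "ns1/"}

	updated := indexed.Clone()
	updated.handleRemount("generic/", "ns2/")

	// The indexed original is untouched; only the clone carries the new paths.
	fmt.Println(indexed.MountPath, indexed.NamespacePath) // secret/ ns1/
	fmt.Println(updated.MountPath, updated.NamespacePath) // generic/ ns2/
}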
|
||||
|
|
|
@ -18,7 +18,7 @@ func TestQuotas_MountPathOverwrite(t *testing.T) {
|
|||
|
||||
quota := NewRateLimitQuota("tq", "", "kv1/", 10, time.Second, 0)
|
||||
require.NoError(t, qm.SetQuota(context.Background(), TypeRateLimit.String(), quota, false))
|
||||
quota = quota.Clone()
|
||||
quota = quota.Clone().(*RateLimitQuota)
|
||||
quota.MountPath = "kv2/"
|
||||
require.NoError(t, qm.SetQuota(context.Background(), TypeRateLimit.String(), quota, false))
|
||||
|
||||
|
|
|
@ -60,6 +60,10 @@ func (l LeaseCountQuota) close(_ context.Context) error {
|
|||
panic("implement me")
|
||||
}
|
||||
|
||||
func (l LeaseCountQuota) handleRemount(s string) {
|
||||
func (l LeaseCountQuota) Clone() Quota {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (l LeaseCountQuota) handleRemount(mountPath, nsPath string) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
|