Don't run rollback and upgrade functionality if we are a replication secondary (#3900)
* Don't run rollback and upgrade functionality if we are a replication secondary, but do if the mount is local.
commit fc6564e4ee (parent effdc09a71)
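The guard applied across the affected backends is the same expression everywhere: only perform storage-writing work (tidy, upgrades, WAL rollback) if the mount is local or the cluster is not a performance replication secondary. Below is a minimal sketch of that pattern in a periodic function, reusing the helper calls this diff relies on; the `example` package name and the `backend` wrapper type embedding framework.Backend are illustrative placeholders, not taken from this commit.

package example

import (
	"context"

	"github.com/hashicorp/vault/helper/consts"
	"github.com/hashicorp/vault/logical"
	"github.com/hashicorp/vault/logical/framework"
)

// backend is a stand-in for the plugin backends touched by this commit.
type backend struct {
	*framework.Backend
}

// periodicFunc sketches the guard: skip maintenance that writes to storage
// when this cluster is a performance replication secondary, unless the mount
// is local and therefore owns its own storage.
func (b *backend) periodicFunc(ctx context.Context, req *logical.Request) error {
	if b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary) {
		// Safe to perform storage-writing maintenance here.
	}
	return nil
}
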
@@ -4,6 +4,7 @@ import (
 	"context"
 	"sync"
 
+	"github.com/hashicorp/vault/helper/consts"
 	"github.com/hashicorp/vault/helper/locksutil"
 	"github.com/hashicorp/vault/helper/salt"
 	"github.com/hashicorp/vault/logical"

@@ -142,7 +143,9 @@ func (b *backend) invalidate(_ context.Context, key string) {
 // to delay the removal of SecretIDs by a minute.
 func (b *backend) periodicFunc(ctx context.Context, req *logical.Request) error {
 	// Initiate clean-up of expired SecretID entries
-	b.tidySecretID(ctx, req.Storage)
+	if b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary) {
+		b.tidySecretID(ctx, req.Storage)
+	}
 	return nil
 }

@@ -9,6 +9,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws/endpoints"
 	"github.com/aws/aws-sdk-go/service/ec2"
 	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/hashicorp/vault/helper/consts"
 	"github.com/hashicorp/vault/logical"
 	"github.com/hashicorp/vault/logical/framework"
 	"github.com/patrickmn/go-cache"

@@ -143,29 +144,33 @@ func (b *backend) periodicFunc(ctx context.Context, req *logical.Request) error
 	// Run the tidy operations for the first time. Then run it when current
 	// time matches the nextTidyTime.
 	if b.nextTidyTime.IsZero() || !time.Now().Before(b.nextTidyTime) {
-		// safety_buffer defaults to 180 days for roletag blacklist
-		safety_buffer := 15552000
-		tidyBlacklistConfigEntry, err := b.lockedConfigTidyRoleTags(ctx, req.Storage)
-		if err != nil {
-			return err
-		}
-		skipBlacklistTidy := false
-		// check if tidying of role tags was configured
-		if tidyBlacklistConfigEntry != nil {
-			// check if periodic tidying of role tags was disabled
-			if tidyBlacklistConfigEntry.DisablePeriodicTidy {
-				skipBlacklistTidy = true
-			}
-			// overwrite the default safety_buffer with the configured value
-			safety_buffer = tidyBlacklistConfigEntry.SafetyBuffer
-		}
-		// tidy role tags if explicitly not disabled
-		if !skipBlacklistTidy {
-			b.tidyBlacklistRoleTag(ctx, req.Storage, safety_buffer)
-		}
-
-		// reset the safety_buffer to 72h
-		safety_buffer = 259200
+		if b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary) {
+			// safety_buffer defaults to 180 days for roletag blacklist
+			safety_buffer := 15552000
+			tidyBlacklistConfigEntry, err := b.lockedConfigTidyRoleTags(ctx, req.Storage)
+			if err != nil {
+				return err
+			}
+			skipBlacklistTidy := false
+			// check if tidying of role tags was configured
+			if tidyBlacklistConfigEntry != nil {
+				// check if periodic tidying of role tags was disabled
+				if tidyBlacklistConfigEntry.DisablePeriodicTidy {
+					skipBlacklistTidy = true
+				}
+				// overwrite the default safety_buffer with the configured value
+				safety_buffer = tidyBlacklistConfigEntry.SafetyBuffer
+			}
+			// tidy role tags if explicitly not disabled
+			if !skipBlacklistTidy {
+				b.tidyBlacklistRoleTag(ctx, req.Storage, safety_buffer)
+			}
+		}
+
+		// We don't check for replication state for whitelist identities as
+		// these are locally stored
+		safety_buffer := 259200
 		tidyWhitelistConfigEntry, err := b.lockedConfigTidyIdentities(ctx, req.Storage)
 		if err != nil {
 			return err

@@ -44,7 +44,7 @@ func Backend() *backend {
 			secretAccessKeys(&b),
 		},
 
-		WALRollback:       walRollback,
+		WALRollback:       b.walRollback,
 		WALRollbackMinAge: 5 * time.Minute,
 		BackendType:       logical.TypeLogical,
 	}

@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/hashicorp/vault/helper/consts"
 	"github.com/hashicorp/vault/logical"
 	"github.com/hashicorp/vault/logical/framework"
 )

@@ -12,7 +13,11 @@ var walRollbackMap = map[string]framework.WALRollbackFunc{
 	"user": pathUserRollback,
 }
 
-func walRollback(ctx context.Context, req *logical.Request, kind string, data interface{}) error {
+func (b *backend) walRollback(ctx context.Context, req *logical.Request, kind string, data interface{}) error {
+	if !b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary) {
+		return nil
+	}
+
 	f, ok := walRollbackMap[kind]
 	if !ok {
 		return fmt.Errorf("unknown type to rollback")

@@ -7,6 +7,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/hashicorp/vault/helper/consts"
 	"github.com/hashicorp/vault/helper/parseutil"
 	"github.com/hashicorp/vault/logical"
 	"github.com/hashicorp/vault/logical/framework"

@@ -315,7 +316,7 @@ func (b *backend) getRole(ctx context.Context, s logical.Storage, n string) (*ro
 		modified = true
 	}
 
-	if modified {
+	if modified && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) {
 		jsonEntry, err := logical.StorageEntryJSON("role/"+n, &result)
 		if err != nil {
 			return nil, err

@@ -175,6 +175,10 @@ func (m *RollbackManager) attemptRollback(ctx context.Context, path string, rs *
 	if err == logical.ErrUnsupportedOperation {
 		err = nil
 	}
+	// If we failed due to read-only storage, we can't do anything; ignore
+	if err != nil && strings.Contains(err.Error(), logical.ErrReadOnly.Error()) {
+		err = nil
+	}
 	if err != nil {
 		m.logger.Error("rollback: error rolling back", "path", path, "error", err)
 	}