Sync
commit 07dcdc8b79
parent 6d08c94866
@@ -340,6 +340,7 @@ func TestPredict_Plugins(t *testing.T) {
 		"hana-database-plugin",
 		"influxdb-database-plugin",
 		"jwt",
+		"kmip",
 		"kubernetes",
 		"kv",
 		"ldap",
@@ -832,6 +832,27 @@ func (l *RaftLock) monitorLeadership(stopCh <-chan struct{}) <-chan struct{} {
 // Lock blocks until we become leader or are shutdown. It returns a channel that
 // is closed when we detect a loss of leadership.
 func (l *RaftLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+	// Check to see if we are already leader.
+	l.b.l.RLock()
+	if l.b.raft.State() == raft.Leader {
+		err := l.b.applyLog(context.Background(), &LogData{
+			Operations: []*LogOperation{
+				&LogOperation{
+					OpType: putOp,
+					Key:    l.key,
+					Value:  l.value,
+				},
+			},
+		})
+		l.b.l.RUnlock()
+		if err != nil {
+			return nil, err
+		}
+
+		return l.monitorLeadership(stopCh), nil
+	}
+	l.b.l.RUnlock()
+
 	for {
 		select {
 		case isLeader := <-l.b.raftNotifyCh:
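Note on the hunk above: the added fast path lets a node that is already the raft leader acquire the HA lock immediately, re-asserting leadership by writing the lock key through the raft log before returning the same leadership-monitor channel the notification path uses. A minimal sketch of how a caller might consume Lock; everything here apart from Lock itself and the returned channel is an illustrative assumption, not code from this commit:

	stopCh := make(chan struct{})
	leaderLostCh, err := l.Lock(stopCh)
	if err != nil {
		return err // could not assert leadership via the raft log
	}
	// We hold the lock until leaderLostCh is closed.
	select {
	case <-leaderLostCh:
		// Leadership lost: step down and stop active-node duties.
	case <-stopCh:
		// Shutdown requested by the caller.
	}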
@@ -1420,10 +1420,10 @@ func (c *Core) UIHeaders() (http.Header, error) {
 // sealInternal is an internal method used to seal the vault. It does not do
 // any authorization checking.
 func (c *Core) sealInternal() error {
-	return c.sealInternalWithOptions(true, false)
+	return c.sealInternalWithOptions(true, false, true)
 }
 
-func (c *Core) sealInternalWithOptions(grabStateLock, keepHALock bool) error {
+func (c *Core) sealInternalWithOptions(grabStateLock, keepHALock, shutdownRaft bool) error {
 	// Mark sealed, and if already marked return
 	if swapped := atomic.CompareAndSwapUint32(c.sealed, 0, 1); !swapped {
 		return nil
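The guard at the bottom of this hunk, atomic.CompareAndSwapUint32(c.sealed, 0, 1), is what makes sealing idempotent under concurrency: exactly one goroutine wins the 0-to-1 swap and performs the teardown, while any racing or repeated call returns nil immediately. A self-contained sketch of the same pattern, assuming nothing from Vault:

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	func main() {
		var sealed uint32
		var wg sync.WaitGroup
		for i := 0; i < 4; i++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				// Only one goroutine wins the 0 -> 1 swap; the rest return
				// early, mirroring the guard in sealInternalWithOptions.
				if swapped := atomic.CompareAndSwapUint32(&sealed, 0, 1); !swapped {
					return
				}
				fmt.Printf("goroutine %d performs the seal\n", id)
			}(i)
		}
		wg.Wait()
	}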
@@ -1503,15 +1503,17 @@ func (c *Core) sealInternalWithOptions(grabStateLock, keepHALock bool) error {
 	}
 
 	// If the storage backend needs to be sealed
-	if raftStorage, ok := c.underlyingPhysical.(*raft.RaftBackend); ok {
-		if err := raftStorage.TeardownCluster(c.clusterListener); err != nil {
-			c.logger.Error("error stopping storage cluster", "error", err)
-			return err
+	if shutdownRaft {
+		if raftStorage, ok := c.underlyingPhysical.(*raft.RaftBackend); ok {
+			if err := raftStorage.TeardownCluster(c.clusterListener); err != nil {
+				c.logger.Error("error stopping storage cluster", "error", err)
+				return err
+			}
 		}
-	}
 
-	// Stop the cluster listener
-	c.stopClusterListener()
+		// Stop the cluster listener
+		c.stopClusterListener()
+	}
 
 	c.logger.Debug("sealing barrier")
 	if err := c.barrier.Seal(); err != nil {
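With the new parameter, raft teardown and the cluster-listener shutdown are skipped as a unit when shutdownRaft is false. The control flow reads roughly like the following hypothetical helper (an illustration only; the commit keeps this inline in sealInternalWithOptions):

	func stopStorageCluster(c *Core, shutdownRaft bool) error {
		if !shutdownRaft {
			// Leave raft and the cluster listener running, e.g. so the
			// raft log can keep making progress while sealed.
			return nil
		}
		if raftStorage, ok := c.underlyingPhysical.(*raft.RaftBackend); ok {
			if err := raftStorage.TeardownCluster(c.clusterListener); err != nil {
				c.logger.Error("error stopping storage cluster", "error", err)
				return err
			}
		}
		c.stopClusterListener()
		return nil
	}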
@@ -1702,14 +1704,14 @@ func (c *Core) preSeal() error {
 
 	c.stopPeriodicRaftTLSRotate()
 
-	c.stopForwarding()
-
 	c.clusterParamsLock.Lock()
 	if err := stopReplication(c); err != nil {
 		result = multierror.Append(result, errwrap.Wrapf("error stopping replication: {{err}}", err))
 	}
 	c.clusterParamsLock.Unlock()
 
+	c.stopForwarding()
+
 	if err := c.teardownAudits(); err != nil {
 		result = multierror.Append(result, errwrap.Wrapf("error tearing down audits: {{err}}", err))
 	}
@@ -15,6 +15,7 @@ import (
 	multierror "github.com/hashicorp/go-multierror"
 	uuid "github.com/hashicorp/go-uuid"
 	"github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/physical/raft"
 	"github.com/hashicorp/vault/sdk/helper/certutil"
 	"github.com/hashicorp/vault/sdk/helper/consts"
 	"github.com/hashicorp/vault/sdk/helper/jsonutil"
@@ -678,6 +679,7 @@ func (c *Core) periodicLeaderRefresh(newLeaderCh chan func(), stopCh chan struct{}) {
 // periodicCheckKeyUpgrade is used to watch for key rotation events as a standby
 func (c *Core) periodicCheckKeyUpgrades(ctx context.Context, stopCh chan struct{}) {
 	opCount := new(int32)
+	_, isRaft := c.underlyingPhysical.(*raft.RaftBackend)
 	for {
 		select {
 		case <-time.After(keyRotateCheckInterval):
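The added line uses Go's two-value type assertion purely as a capability probe: it never panics, the concrete value is discarded, and only the fact that the physical backend is raft-backed is recorded. A standalone sketch of the idiom with toy types (nothing below is Vault code):

	package main

	import "fmt"

	type Backend interface{ Put(key string) error }

	type RaftBackend struct{}

	func (RaftBackend) Put(string) error { return nil }

	type InmemBackend struct{}

	func (InmemBackend) Put(string) error { return nil }

	func main() {
		var b Backend = &RaftBackend{}
		// Two-value form: reports whether the dynamic type matches,
		// mirroring `_, isRaft := c.underlyingPhysical.(*raft.RaftBackend)`.
		_, isRaft := b.(*RaftBackend)
		fmt.Println("isRaft:", isRaft) // true
	}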
@@ -706,7 +708,10 @@ func (c *Core) periodicCheckKeyUpgrades(ctx context.Context, stopCh chan struct{}) {
 			entry, _ := c.barrier.Get(ctx, poisonPillPath)
 			if entry != nil && len(entry.Value) > 0 {
 				c.logger.Warn("encryption keys have changed out from underneath us (possibly due to replication enabling), must be unsealed again")
-				go c.Shutdown()
+				// If we are using raft storage we do not want to shut down
+				// raft during replication secondary enablement. This will
+				// allow us to keep making progress on the raft log.
+				go c.sealInternalWithOptions(true, false, !isRaft)
 				atomic.AddInt32(opCount, -1)
 				return
 			}
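This is where the earlier plumbing pays off: on a poison-pill hit the core now seals itself rather than calling Shutdown, and the third argument keeps raft alive exactly when raft is the storage backend. The same call as in the diff, annotated with the parameter names from the new signature:

	// grabStateLock = true     take the state lock while sealing
	// keepHALock    = false    release the HA lock
	// shutdownRaft  = !isRaft  tear raft down only when raft is NOT
	//                          the underlying storage backend
	go c.sealInternalWithOptions(true, false, !isRaft)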
@@ -483,8 +483,6 @@ func (c *Core) mountInternal(ctx context.Context, entry *MountEntry, updateStorage bool) error {
 
 	addPathCheckers(c, entry, backend, viewPath)
 
-	addLicenseCallback(c, backend)
-
 	c.setCoreBackend(entry, backend, view)
 
 	// If the mount is filtered or we are on a DR secondary we don't want to
@@ -1218,6 +1216,8 @@ func (c *Core) newLogicalBackend(ctx context.Context, entry *MountEntry, sysView logical.SystemView, view logical.Storage) (logical.Backend, error) {
 	if b == nil {
 		return nil, fmt.Errorf("nil backend of type %q returned from factory", t)
 	}
+
+	addLicenseCallback(c, b)
 
 	return b, nil
 }
@@ -261,7 +261,7 @@ func (c *Core) setupPolicyStore(ctx context.Context) error {
 		return err
 	}
 
-	if c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) {
+	if c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationDRSecondary) {
 		// Policies will sync from the primary
 		return nil
 	}
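OR-ing the two flags into a single HasState call works because the replication state is a bitmask and HasState answers "is any of these bits set". A minimal sketch of those semantics; the two-constant enum and flag values below are illustrative, not the real definitions in vault's sdk/helper/consts package:

	package main

	import "fmt"

	type ReplicationState uint32

	const (
		ReplicationPerformanceSecondary ReplicationState = 1 << iota
		ReplicationDRSecondary
	)

	// HasState reports whether any bit in flag is set, which is why
	// HasState(a | b) reads as "a or b".
	func (r ReplicationState) HasState(flag ReplicationState) bool { return r&flag != 0 }

	func main() {
		state := ReplicationDRSecondary
		fmt.Println(state.HasState(ReplicationPerformanceSecondary | ReplicationDRSecondary)) // true
	}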
@@ -402,7 +402,7 @@ func (c *Core) raftSnapshotRestoreCallback(grabLock bool) func() error {
 
 			// Seal ourselves
 			c.logger.Info("failed to perform key upgrades, sealing", "error", err)
-			c.sealInternalWithOptions(false, false)
+			c.sealInternalWithOptions(false, false, true)
 			return err
 		default:
 			// If we are using an auto-unseal we can try to use the seal to
@@ -412,17 +412,17 @@ func (c *Core) raftSnapshotRestoreCallback(grabLock bool) func() error {
 			keys, err := c.seal.GetStoredKeys(ctx)
 			if err != nil {
 				c.logger.Error("raft snapshot restore failed to get stored keys", "error", err)
-				c.sealInternalWithOptions(false, false)
+				c.sealInternalWithOptions(false, false, true)
 				return err
 			}
 			if err := c.barrier.Seal(); err != nil {
 				c.logger.Error("raft snapshot restore failed to seal barrier", "error", err)
-				c.sealInternalWithOptions(false, false)
+				c.sealInternalWithOptions(false, false, true)
 				return err
 			}
 			if err := c.barrier.Unseal(ctx, keys[0]); err != nil {
 				c.logger.Error("raft snapshot restore failed to unseal barrier", "error", err)
-				c.sealInternalWithOptions(false, false)
+				c.sealInternalWithOptions(false, false, true)
 				return err
 			}
 			c.logger.Info("done reloading master key using auto seal")
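All three failure branches in this last hunk share one recovery shape: log, seal fully (now explicitly passing shutdownRaft=true), and return the error; the happy path is GetStoredKeys, then barrier.Seal, then barrier.Unseal with the first stored key. A hedged condensation into a hypothetical helper, not present in the commit:

	// Hypothetical condensation of the snapshot-restore recovery path;
	// sealAndFail is illustrative shorthand for the repeated error branch.
	func reloadMasterKey(ctx context.Context, c *Core) error {
		sealAndFail := func(msg string, err error) error {
			c.logger.Error(msg, "error", err)
			c.sealInternalWithOptions(false, false, true)
			return err
		}
		keys, err := c.seal.GetStoredKeys(ctx)
		if err != nil {
			return sealAndFail("raft snapshot restore failed to get stored keys", err)
		}
		if err := c.barrier.Seal(); err != nil {
			return sealAndFail("raft snapshot restore failed to seal barrier", err)
		}
		if err := c.barrier.Unseal(ctx, keys[0]); err != nil {
			return sealAndFail("raft snapshot restore failed to unseal barrier", err)
		}
		c.logger.Info("done reloading master key using auto seal")
		return nil
	}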