Vault 8308 Background thread to update locked user entries (#18673)
* background thread changes
* adding changelog
* fix changelog typo
This commit is contained in:
parent 0f0b48eda4
commit 78546af8fc
changelog (new file)
@@ -0,0 +1,3 @@
```release-note:improvement
core: Implemented background thread to update locked user entries every 15 minutes to prevent brute forcing in auth methods.
```
147 vault/core.go
@@ -2276,6 +2276,10 @@ func (s standardUnsealStrategy) unseal(ctx context.Context, logger log.Logger, c
```go
	if err := c.setupHeaderHMACKey(ctx, false); err != nil {
		return err
	}
	if err := c.runLockedUserEntryUpdates(ctx); err != nil {
		return err
	}
	c.updateLockedUserEntries()

	if !c.IsDRSecondary() {
		if err := c.startRollback(); err != nil {
```
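The wiring is deliberate: `runLockedUserEntryUpdates` runs once, synchronously, during unseal so that stale entries are reconciled before logins are served, and `updateLockedUserEntries` (added below) then keeps reconciling in the background. A minimal, self-contained sketch of this run-once-then-tick pattern; `runUpdates` and `startBackgroundUpdates` are hypothetical stand-ins, not Vault's API:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runUpdates stands in for a reconciliation pass such as runLockedUserEntryUpdates.
func runUpdates(ctx context.Context) error {
	fmt.Println("reconciling locked user entries")
	return nil
}

// startBackgroundUpdates runs one synchronous pass, then repeats on every tick
// until the context is cancelled. Hypothetical helper, for illustration only.
func startBackgroundUpdates(ctx context.Context, interval time.Duration) error {
	if err := runUpdates(ctx); err != nil {
		return err // the first, synchronous pass surfaces its error to the caller
	}
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				// Later passes only log; a failed tick should not kill the loop.
				if err := runUpdates(ctx); err != nil {
					fmt.Println("update failed:", err)
				}
			}
		}
	}()
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 35*time.Millisecond)
	defer cancel()
	if err := startBackgroundUpdates(ctx, 10*time.Millisecond); err != nil {
		panic(err)
	}
	<-ctx.Done() // let a few ticks fire before exiting
}
```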
@@ -3371,6 +3375,149 @@ func (c *Core) setupCachedMFAResponseAuth() {
```go
	return
}

// updateLockedUserEntries runs a background thread every 15 minutes to remove
// stale locked user entries from storage. It also updates the userFailedLoginInfo
// map with the correct information for locked users when the cached entry is wrong.
func (c *Core) updateLockedUserEntries() {
	ctx := c.activeContext
	go func() {
		ticker := time.NewTicker(15 * time.Minute)
		for {
			select {
			case <-ctx.Done():
				ticker.Stop()
				return
			case <-ticker.C:
				if err := c.runLockedUserEntryUpdates(ctx); err != nil {
					c.Logger().Error("failed to run locked user entry updates", "error", err)
				}
			}
		}
	}()
	return
}

// runLockedUserEntryUpdates runs updates for locked user storage entries and the
// userFailedLoginInfo map.
func (c *Core) runLockedUserEntryUpdates(ctx context.Context) error {
	// Check the environment variable to see if the user lockout workflow is disabled.
	var disableUserLockout bool
	if disableUserLockoutEnv := os.Getenv(consts.VaultDisableUserLockout); disableUserLockoutEnv != "" {
		var err error
		disableUserLockout, err = strconv.ParseBool(disableUserLockoutEnv)
		if err != nil {
			c.Logger().Error("Error parsing the environment variable VAULT_DISABLE_USER_LOCKOUT", "error", err)
		}
	}
	if disableUserLockout {
		return nil
	}

	// Get the list of namespaces of locked users from the locked users path in storage.
	nsIDs, err := c.barrier.List(ctx, coreLockedUsersPath)
	if err != nil {
		return err
	}

	for _, nsID := range nsIDs {
		// Get the list of mount accessors of locked users for each namespace.
		mountAccessors, err := c.barrier.List(ctx, coreLockedUsersPath+nsID)
		if err != nil {
			return err
		}

		// Update the entries for locked users under each mount accessor:
		// if a storage entry is stale, i.e. the lockout duration has passed,
		// remove it from storage and from the userFailedLoginInfo map;
		// otherwise check that the userFailedLoginInfo map holds the correct
		// failed login information and update it if it does not.
		for _, mountAccessorPath := range mountAccessors {
			mountAccessor := strings.TrimSuffix(mountAccessorPath, "/")
			if err := c.runLockedUserEntryUpdatesForMountAccessor(ctx, mountAccessor, coreLockedUsersPath+nsID+mountAccessorPath); err != nil {
				return err
			}
		}
	}
	return nil
}
```
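The nested List calls above walk a three-level key space rooted at `coreLockedUsersPath`: namespace ID, then mount accessor, then alias name, where each leaf stores the JSON-encoded Unix time of the user's last failed login. A rough sketch of that walk against an in-memory stand-in for the barrier; the sample keys (`ns1/`, `accessor1/`, `alice`) and the `list` helper are invented for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// keys mimics flat barrier keys relative to coreLockedUsersPath:
// <nsID>/<mountAccessor>/<aliasName> -> JSON-encoded last failed login (Unix seconds).
var keys = map[string]string{
	"ns1/accessor1/alice": "1672531200",
	"ns1/accessor1/bob":   "1672617600",
	"ns2/accessor2/carol": "1672704000",
}

// list returns the immediate children of prefix, with "directories" keeping a
// trailing "/", approximating barrier.List semantics (ordering ignored here).
func list(prefix string) []string {
	seen := map[string]bool{}
	var out []string
	for k := range keys {
		if !strings.HasPrefix(k, prefix) {
			continue
		}
		rest := strings.TrimPrefix(k, prefix)
		if i := strings.Index(rest, "/"); i >= 0 {
			rest = rest[:i+1]
		}
		if !seen[rest] {
			seen[rest] = true
			out = append(out, rest)
		}
	}
	return out
}

func main() {
	// Namespace, then mount accessor, then alias: the same shape as the loops above.
	for _, ns := range list("") {
		for _, accessor := range list(ns) {
			for _, alias := range list(ns + accessor) {
				fmt.Println("locked user entry:", ns+accessor+alias)
			}
		}
	}
}
```

The per-alias handling of each leaf is what `runLockedUserEntryUpdatesForMountAccessor`, next, implements.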
```go
// runLockedUserEntryUpdatesForMountAccessor updates the storage entry for each locked user (alias name).
// If an entry is stale, it is removed from storage and from the userFailedLoginInfo map if present.
// If an entry is not stale, the userFailedLoginInfo map is updated with the correct values if needed.
func (c *Core) runLockedUserEntryUpdatesForMountAccessor(ctx context.Context, mountAccessor string, path string) error {
	// Get the mount entry for the mount accessor.
	mountEntry := c.router.MatchingMountByAccessor(mountAccessor)
	if mountEntry == nil {
		mountEntry = &MountEntry{}
	}
	// Get the user lockout configuration for the mount entry.
	userLockoutConfiguration := c.getUserLockoutConfiguration(mountEntry)

	// Get the list of aliases for the mount accessor.
	aliases, err := c.barrier.List(ctx, path)
	if err != nil {
		return err
	}

	// Check the storage entry for each alias and update as needed.
	for _, alias := range aliases {
		loginUserInfoKey := FailedLoginUser{
			aliasName:     alias,
			mountAccessor: mountAccessor,
		}

		existingEntry, err := c.barrier.Get(ctx, path+alias)
		if err != nil {
			return err
		}

		if existingEntry == nil {
			continue
		}

		var lastLoginTime int
		err = jsonutil.DecodeJSON(existingEntry.Value, &lastLoginTime)
		if err != nil {
			return err
		}

		lastFailedLoginTimeFromStorageEntry := time.Unix(int64(lastLoginTime), 0)
		lockoutDurationFromConfiguration := userLockoutConfiguration.LockoutDuration

		// Get the entry for the locked user from the userFailedLoginInfo map.
		failedLoginInfoFromMap := c.GetUserFailedLoginInfo(ctx, loginUserInfoKey)

		// Check whether the storage entry for the locked user is stale.
		if time.Now().After(lastFailedLoginTimeFromStorageEntry.Add(lockoutDurationFromConfiguration)) {
			// Stale entry, remove it from storage.
			if err := c.barrier.Delete(ctx, path+alias); err != nil {
				return err
			}

			// Remove the entry for this user from the userFailedLoginInfo map
			// if present, as the user is no longer locked.
			if failedLoginInfoFromMap != nil {
				if err = c.UpdateUserFailedLoginInfo(ctx, loginUserInfoKey, nil, true); err != nil {
					return err
				}
			}
			continue
		}

		// This is not a stale entry;
		// update the map with the actual failed login information.
		actualFailedLoginInfo := FailedLoginInfo{
			lastFailedLoginTime: lastLoginTime,
			count:               uint(userLockoutConfiguration.LockoutThreshold),
		}

		if failedLoginInfoFromMap != &actualFailedLoginInfo {
			// The cached entry is out of date; update the userFailedLoginInfo
			// map with the correct information.
			if err = c.UpdateUserFailedLoginInfo(ctx, loginUserInfoKey, &actualFailedLoginInfo, false); err != nil {
				return err
			}
		}
	}
	return nil
}
```
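The staleness check is a plain time comparison: an entry has expired once the configured lockout duration has elapsed since the recorded failure time. A tiny standalone illustration using the 15-minute default duration that the tests below rely on; `isStale` is a hypothetical helper:

```go
package main

import (
	"fmt"
	"time"
)

// isStale mirrors the check above:
// time.Now().After(lastFailedLoginTime.Add(lockoutDuration)).
func isStale(lastFailed time.Time, lockoutDuration time.Duration) bool {
	return time.Now().After(lastFailed.Add(lockoutDuration))
}

func main() {
	lockout := 15 * time.Minute // default lockout duration when none is configured

	fmt.Println(isStale(time.Now().Add(-20*time.Minute), lockout)) // true: lockout expired
	fmt.Println(isStale(time.Now().Add(-5*time.Minute), lockout))  // false: still locked
}
```

One subtlety in the non-stale branch: `failedLoginInfoFromMap != &actualFailedLoginInfo` compares a pointer against the address of a freshly allocated local, which can never be equal in Go, so the map entry is rewritten on every pass rather than only when the cached values actually differ.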
```go
// PopMFAResponseAuthByID pops an item from the mfaResponseAuthQueue by ID
// it returns the cached auth response or an error
func (c *Core) PopMFAResponseAuthByID(reqID string) (*MFACachedAuthResponse, error) {
```
@@ -16,7 +16,9 @@ import (
```go
	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/vault/audit"
	"github.com/hashicorp/vault/helper/namespace"
	"github.com/hashicorp/vault/internalshared/configutil"
	"github.com/hashicorp/vault/sdk/helper/consts"
	"github.com/hashicorp/vault/sdk/helper/jsonutil"
	"github.com/hashicorp/vault/sdk/helper/logging"
	"github.com/hashicorp/vault/sdk/logical"
	"github.com/hashicorp/vault/sdk/physical"
```
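Going by the hunk counts (7 import lines before, 9 after), the two imports added to the test file are presumably `configutil` and `jsonutil`, both of which are used by the new tests below.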
@@ -362,6 +364,131 @@ func TestCore_SealUnseal(t *testing.T) {
```go
	}
}

// TestCore_RunLockedUserUpdatesForStaleEntry tests that stale locked user entries
// get deleted upon unseal.
func TestCore_RunLockedUserUpdatesForStaleEntry(t *testing.T) {
	core, keys, root := TestCoreUnsealed(t)
	storageUserLockoutPath := fmt.Sprintf(coreLockedUsersPath + "ns1/mountAccessor1/aliasName1")

	// cleanup
	defer core.barrier.Delete(context.Background(), storageUserLockoutPath)

	// Create a stale entry in storage to test that stale entries get deleted on unseal.
	// The last failed login time for this path is 1970-01-01 00:00:00 +0000 UTC.
	// Since no user lockout configuration is set, the lockout duration will be the
	// default (15m) internally.
	compressedBytes, err := jsonutil.EncodeJSONAndCompress(int(time.Unix(0, 0).Unix()), nil)
	if err != nil {
		t.Fatal(err)
	}

	// Create an entry
	entry := &logical.StorageEntry{
		Key:   storageUserLockoutPath,
		Value: compressedBytes,
	}

	// Write to the physical backend
	err = core.barrier.Put(context.Background(), entry)
	if err != nil {
		t.Fatalf("failed to write stale locked user entry, err: %v", err)
	}

	// Seal and unseal vault
	if err := core.Seal(root); err != nil {
		t.Fatalf("err: %v", err)
	}
	for i, key := range keys {
		unseal, err := TestCoreUnseal(core, key)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if i+1 == len(keys) && !unseal {
			t.Fatalf("err: should be unsealed")
		}
	}

	// The locked user entry must be deleted upon unseal, as it is stale.
	lastFailedLoginRaw, err := core.barrier.Get(context.Background(), storageUserLockoutPath)
	if err != nil {
		t.Fatal(err)
	}
	if lastFailedLoginRaw != nil {
		t.Fatal("err: stale locked user entry exists")
	}
}
```
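Both tests seed storage with `jsonutil.EncodeJSONAndCompress`, and the updater reads the value back with `jsonutil.DecodeJSON` (see `runLockedUserEntryUpdatesForMountAccessor` above), which transparently decompresses compressed input. A minimal round trip using those same calls:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/sdk/helper/jsonutil"
)

func main() {
	// Encode a last-failed-login timestamp the same way the tests seed storage.
	compressed, err := jsonutil.EncodeJSONAndCompress(int(time.Now().Unix()), nil)
	if err != nil {
		panic(err)
	}

	// DecodeJSON decompresses (if the input is compressed) and unmarshals.
	var lastLoginTime int
	if err := jsonutil.DecodeJSON(compressed, &lastLoginTime); err != nil {
		panic(err)
	}
	fmt.Println(time.Unix(int64(lastLoginTime), 0))
}
```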
```go
// TestCore_RunLockedUserUpdatesForValidEntry tests that valid locked user entries
// do not get removed on unseal, and that the userFailedLoginInfo map gets updated
// with the correct information.
func TestCore_RunLockedUserUpdatesForValidEntry(t *testing.T) {
	core, keys, root := TestCoreUnsealed(t)
	storageUserLockoutPath := fmt.Sprintf(coreLockedUsersPath + "ns1/mountAccessor1/aliasName1")

	// cleanup
	defer core.barrier.Delete(context.Background(), storageUserLockoutPath)

	// Create a valid storage entry for a locked user.
	lastFailedLoginTime := int(time.Now().Unix())

	compressedBytes, err := jsonutil.EncodeJSONAndCompress(lastFailedLoginTime, nil)
	if err != nil {
		t.Fatal(err)
	}

	// Create an entry
	entry := &logical.StorageEntry{
		Key:   storageUserLockoutPath,
		Value: compressedBytes,
	}

	// Write to the physical backend
	err = core.barrier.Put(context.Background(), entry)
	if err != nil {
		t.Fatalf("failed to write locked user entry, err: %v", err)
	}

	// Seal and unseal vault
	if err := core.Seal(root); err != nil {
		t.Fatalf("err: %v", err)
	}
	for i, key := range keys {
		unseal, err := TestCoreUnseal(core, key)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if i+1 == len(keys) && !unseal {
			t.Fatalf("err: should be unsealed")
		}
	}

	// The locked user entry must still exist, as it is still valid.
	existingEntry, err := core.barrier.Get(context.Background(), storageUserLockoutPath)
	if err != nil {
		t.Fatal(err)
	}
	if existingEntry == nil {
		t.Fatalf("err: entry must exist for locked user in storage")
	}

	// The userFailedLoginInfo map should have the correct information for the locked user.
	loginUserInfoKey := FailedLoginUser{
		aliasName:     "aliasName1",
		mountAccessor: "mountAccessor1",
	}

	failedLoginInfoFromMap := core.GetUserFailedLoginInfo(context.Background(), loginUserInfoKey)
	if failedLoginInfoFromMap == nil {
		t.Fatalf("err: entry must exist for locked user in userFailedLoginInfo map")
	}
	if failedLoginInfoFromMap.lastFailedLoginTime != lastFailedLoginTime {
		t.Fatalf("err: incorrect failed login time information for locked user updated in userFailedLoginInfo map")
	}
	if int(failedLoginInfoFromMap.count) != configutil.UserLockoutThresholdDefault {
		t.Fatalf("err: incorrect failed login count information for locked user updated in userFailedLoginInfo map")
	}
}
```
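The final two assertions pin down the updater's contract for still-valid entries: `lastFailedLoginTime` in the map must match the stored timestamp, and `count` is seeded to the default lockout threshold (`configutil.UserLockoutThresholdDefault`), since no lockout configuration is set in the test and a locked user has, by definition, reached the threshold.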
```go
// Attempt to shutdown after unseal
func TestCore_Shutdown(t *testing.T) {
	c, _, _ := TestCoreUnsealed(t)
```