9ef0680e7f
* Two things: * Change how we populate and clear leader UUID. This fixes a case where if a standby disconnects from an active node and reconnects, without the active node restarting, the UUID doesn't change so triggers on a new active node don't get run. * Add a bunch of test helpers and minor updates to things.
60 lines
1.3 KiB
Go
60 lines
1.3 KiB
Go
package locksutil
|
|
|
|
import (
|
|
"sync"
|
|
|
|
"github.com/hashicorp/vault/helper/cryptoutil"
|
|
)
|
|
|
|
const (
	// LockCount is the number of shard locks created by CreateLocks. It is
	// 256 so that one byte of a key's hash (see LockIndexForKey) can index
	// directly into the lock slice.
	LockCount = 256
)
|
|
|
|
// LockEntry is a single shard lock. It embeds sync.RWMutex, so callers can
// invoke Lock/Unlock/RLock/RUnlock directly on a *LockEntry.
type LockEntry struct {
	sync.RWMutex
}
|
|
|
|
// CreateLocks returns an array so that the locks can be iterated over in
|
|
// order.
|
|
//
|
|
// This is only threadsafe if a process is using a single lock, or iterating
|
|
// over the entire lock slice in order. Using a consistent order avoids
|
|
// deadlocks because you can never have the following:
|
|
//
|
|
// Lock A, Lock B
|
|
// Lock B, Lock A
|
|
//
|
|
// Where process 1 is now deadlocked trying to lock B, and process 2 deadlocked trying to lock A
|
|
//
|
|
func CreateLocks() []*LockEntry {
|
|
ret := make([]*LockEntry, LockCount)
|
|
for i := range ret {
|
|
ret[i] = new(LockEntry)
|
|
}
|
|
return ret
|
|
}
|
|
|
|
func LockIndexForKey(key string) uint8 {
|
|
return uint8(cryptoutil.Blake2b256Hash(key)[0])
|
|
}
|
|
|
|
func LockForKey(locks []*LockEntry, key string) *LockEntry {
|
|
return locks[LockIndexForKey(key)]
|
|
}
|
|
|
|
func LocksForKeys(locks []*LockEntry, keys []string) []*LockEntry {
|
|
lockIndexes := make(map[uint8]struct{}, len(keys))
|
|
for _, k := range keys {
|
|
lockIndexes[LockIndexForKey(k)] = struct{}{}
|
|
}
|
|
|
|
locksToReturn := make([]*LockEntry, 0, len(keys))
|
|
for i, l := range locks {
|
|
if _, ok := lockIndexes[uint8(i)]; ok {
|
|
locksToReturn = append(locksToReturn, l)
|
|
}
|
|
}
|
|
|
|
return locksToReturn
|
|
}
|