Create global quotas of each type in every NewTestCluster. (#18038)
Create global quotas of each type in every NewTestCluster. Also switch some key locks to use DeadlockMutex to make it easier to discover deadlocks in testing.

NewTestCluster now also starts the cluster, and the Start method becomes a no-op. Unless SkipInit is provided, we also wait for a node to become active, eliminating the need for WaitForActiveNode. This was needed because otherwise we can't safely make the quota API call. We can't do it in Start because Start doesn't return an error, and I didn't want to begin storing the testing object T inside TestCluster just so we could call t.Fatal inside Start.

The last change addresses how to skip setting up quotas when creating a cluster with a nonstandard handler that might not even implement the quotas endpoints. The challenge: because we were taking a func pointer to generate the real handler func, we had no way to compare that func pointer against the standard handler-generating func in package http without creating a circular dependency between the vault and http packages. The solution was to pass a value of a named type carrying a method, instead of an anonymous func, so that we can identify it via reflection.
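To illustrate that last point, a minimal standalone sketch (not code from this commit): a value of a named type retains its defining package and type name at runtime, while an anonymous func value has an unnamed type, leaving nothing stable to compare against.

    package main

    import (
        "fmt"
        "net/http"
        "reflect"
    )

    // A named type with a method, mirroring the HandlerAnchor pattern below.
    type anchor struct{}

    func (anchor) Handler() http.Handler { return nil }

    func main() {
        t := reflect.TypeOf(anchor{})
        fmt.Println(t.PkgPath(), t.Name()) // "main anchor": identifiable

        f := func() http.Handler { return nil }
        ft := reflect.TypeOf(f)
        // Anonymous func types report an empty PkgPath and Name, so two
        // handler-generating funcs cannot be distinguished this way.
        fmt.Println(ft.PkgPath() == "" && ft.Name() == "") // true
    }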
parent 347cdf811c
commit 12e1b609ac
@@ -1302,7 +1302,7 @@ func (h *handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
         }
         h.t.Logf("passing GET request on %s", req.URL.Path)
     }
-    vaulthttp.Handler(h.props).ServeHTTP(resp, req)
+    vaulthttp.Handler.Handler(h.props).ServeHTTP(resp, req)
 }

 // TestAgent_Template_Retry verifies that the template server retries requests
@@ -1325,11 +1325,12 @@ func TestAgent_Template_Retry(t *testing.T) {
         },
         &vault.TestClusterOptions{
             NumCores: 1,
-            HandlerFunc: func(properties *vault.HandlerProperties) http.Handler {
-                h.props = properties
-                h.t = t
-                return &h
-            },
+            HandlerFunc: vaulthttp.HandlerFunc(
+                func(properties *vault.HandlerProperties) http.Handler {
+                    h.props = properties
+                    h.t = t
+                    return &h
+                }),
         })
     cluster.Start()
     defer cluster.Cleanup()
@@ -1612,11 +1613,11 @@ func TestAgent_Cache_Retry(t *testing.T) {
         },
         &vault.TestClusterOptions{
             NumCores: 1,
-            HandlerFunc: func(properties *vault.HandlerProperties) http.Handler {
+            HandlerFunc: vaulthttp.HandlerFunc(func(properties *vault.HandlerProperties) http.Handler {
                 h.props = properties
                 h.t = t
                 return &h
-            },
+            }),
         })
     cluster.Start()
     defer cluster.Cleanup()
@@ -649,7 +649,7 @@ func (c *ServerCommand) runRecoveryMode() int {
     c.UI.Output("")

     for _, ln := range lns {
-        handler := vaulthttp.Handler(&vault.HandlerProperties{
+        handler := vaulthttp.Handler.Handler(&vault.HandlerProperties{
             Core:                  core,
             ListenerConfig:        ln.Config,
             DisablePrintableCheck: config.DisablePrintableCheck,
@@ -1383,7 +1383,7 @@ func (c *ServerCommand) Run(args []string) int {
     // This needs to happen before we first unseal, so before we trigger dev
     // mode if it's set
     core.SetClusterListenerAddrs(clusterAddrs)
-    core.SetClusterHandler(vaulthttp.Handler(&vault.HandlerProperties{
+    core.SetClusterHandler(vaulthttp.Handler.Handler(&vault.HandlerProperties{
         Core: core,
     }))

@@ -1934,7 +1934,7 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m
     c.UI.Output("")

     for _, core := range testCluster.Cores {
-        core.Server.Handler = vaulthttp.Handler(&vault.HandlerProperties{
+        core.Server.Handler = vaulthttp.Handler.Handler(&vault.HandlerProperties{
             Core: core.Core,
         })
         core.SetClusterHandler(core.Server.Handler)
@@ -2791,7 +2791,7 @@ func startHttpServers(c *ServerCommand, core *vault.Core, config *server.Config,
         return err
     }

-    handler := vaulthttp.Handler(&vault.HandlerProperties{
+    handler := vaulthttp.Handler.Handler(&vault.HandlerProperties{
         Core:                  core,
         ListenerConfig:        ln.Config,
         DisablePrintableCheck: config.DisablePrintableCheck,
@@ -1,6 +1,6 @@
 //go:build deadlock

-package vault
+package locking

 import (
     "github.com/sasha-s/go-deadlock"
@@ -1,6 +1,6 @@
 //go:build !deadlock

-package vault
+package locking

 import (
     "sync"
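These two hunks only show the move into the new helper/locking package. For context, the build-tag pair presumably defines the same type against two backings, roughly like this (a sketch assuming the definitions follow the pattern implied by the imports):

    //go:build deadlock

    package locking

    import "github.com/sasha-s/go-deadlock"

    // Built with -tags deadlock: lock operations are checked by go-deadlock,
    // which reports lock-order inversions and long-held locks during tests.
    type DeadlockRWMutex struct {
        deadlock.RWMutex
    }

Under //go:build !deadlock, the same type would embed sync.RWMutex instead, so default builds pay no extra overhead.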
@@ -46,7 +46,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
     }

     cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
-        HandlerFunc: testHandler,
+        HandlerFunc: HandlerFunc(testHandler),
     })
     cluster.Start()
     defer cluster.Cleanup()
@@ -89,7 +89,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
     }

     cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
-        HandlerFunc: testHandler,
+        HandlerFunc: HandlerFunc(testHandler),
     })
     cluster.Start()
     defer cluster.Cleanup()
@@ -125,7 +125,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
     }

     cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
-        HandlerFunc: testHandler,
+        HandlerFunc: HandlerFunc(testHandler),
     })
     cluster.Start()
     defer cluster.Cleanup()
@@ -159,7 +159,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
     }

     cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
-        HandlerFunc: testHandler,
+        HandlerFunc: HandlerFunc(testHandler),
     })
     cluster.Start()
     defer cluster.Cleanup()
@@ -193,7 +193,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
     }

     cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
-        HandlerFunc: testHandler,
+        HandlerFunc: HandlerFunc(testHandler),
     })
     cluster.Start()
     defer cluster.Cleanup()
@@ -230,7 +230,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
     }

     cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
-        HandlerFunc: testHandler,
+        HandlerFunc: HandlerFunc(testHandler),
     })
     cluster.Start()
     defer cluster.Cleanup()
@@ -118,9 +118,25 @@ func init() {
     })
 }

-// Handler returns an http.Handler for the API. This can be used on
+type HandlerAnchor struct{}
+
+func (h HandlerAnchor) Handler(props *vault.HandlerProperties) http.Handler {
+    return handler(props)
+}
+
+var Handler vault.HandlerHandler = HandlerAnchor{}
+
+type HandlerFunc func(props *vault.HandlerProperties) http.Handler
+
+func (h HandlerFunc) Handler(props *vault.HandlerProperties) http.Handler {
+    return h(props)
+}
+
+var _ vault.HandlerHandler = HandlerFunc(func(props *vault.HandlerProperties) http.Handler { return nil })
+
+// handler returns an http.Handler for the API. This can be used on
 // its own to mount the Vault API within another web server.
-func Handler(props *vault.HandlerProperties) http.Handler {
+func handler(props *vault.HandlerProperties) http.Handler {
     core := props.Core

     // Create the muxer to handle the actual endpoints
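With these declarations, TestClusterOptions can carry either the exported anchor or an adapted custom func. A hypothetical fragment (assumes the usual vault and vaulthttp import aliases; myHandler is a stand-in, not a real helper):

    // The standard handler: the anchor value satisfies vault.HandlerHandler,
    // and its named type is recognizable via reflection, so NewTestCluster
    // installs the default quotas.
    opts := &vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler}

    // A custom handler: wrap the func in vaulthttp.HandlerFunc, the same
    // adapter trick http.HandlerFunc uses. Reflection sees a type outside the
    // standard http package, so quota setup is skipped.
    custom := &vault.TestClusterOptions{
        HandlerFunc: vaulthttp.HandlerFunc(func(p *vault.HandlerProperties) http.Handler {
            return myHandler(p) // hypothetical stand-in handler
        }),
    }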
@@ -31,7 +31,7 @@ func TestServerWithListenerAndProperties(tb testing.TB, ln net.Listener, addr st
     // for tests.
     mux := http.NewServeMux()
     mux.Handle("/_test/auth", http.HandlerFunc(testHandleAuth))
-    mux.Handle("/", Handler(props))
+    mux.Handle("/", Handler.Handler(props))

     server := &http.Server{
         Addr: ln.Addr().String(),
@@ -38,6 +38,7 @@ import (
     "github.com/hashicorp/vault/audit"
     "github.com/hashicorp/vault/command/server"
     "github.com/hashicorp/vault/helper/identity/mfa"
+    "github.com/hashicorp/vault/helper/locking"
     "github.com/hashicorp/vault/helper/metricsutil"
     "github.com/hashicorp/vault/helper/namespace"
     "github.com/hashicorp/vault/helper/osutil"
@@ -298,7 +299,7 @@ type Core struct {
     auditBackends map[string]audit.Factory

     // stateLock protects mutable state
-    stateLock DeadlockRWMutex
+    stateLock locking.DeadlockRWMutex
     sealed    *uint32

     standby bool
@@ -21,6 +21,7 @@ import (
     multierror "github.com/hashicorp/go-multierror"
     "github.com/hashicorp/go-secure-stdlib/base62"
     "github.com/hashicorp/vault/helper/fairshare"
+    "github.com/hashicorp/vault/helper/locking"
     "github.com/hashicorp/vault/helper/metricsutil"
     "github.com/hashicorp/vault/helper/namespace"
     "github.com/hashicorp/vault/sdk/framework"
@@ -110,7 +111,7 @@ type ExpirationManager struct {
     pending     sync.Map
     nonexpiring sync.Map
     leaseCount  int
-    pendingLock sync.RWMutex
+    pendingLock locking.DeadlockRWMutex

     // A sync.Lock for every active leaseID
     lockPerLease sync.Map
@@ -138,7 +139,7 @@ type ExpirationManager struct {
     quitCh chan struct{}

     // do not hold coreStateLock in any API handler code - it is already held
-    coreStateLock     *DeadlockRWMutex
+    coreStateLock     *locking.DeadlockRWMutex
     quitContext       context.Context
     leaseCheckCounter *uint32

@@ -129,6 +129,7 @@ func waitForRemovalOrTimeout(c *api.Client, path string, tick, to time.Duration)

 func TestQuotas_RateLimit_DupName(t *testing.T) {
     conf, opts := teststorage.ClusterSetup(coreConfig, nil, nil)
+    opts.NoDefaultQuotas = true
     cluster := vault.NewTestCluster(t, conf, opts)
     cluster.Start()
     defer cluster.Cleanup()
@@ -163,6 +164,7 @@ func TestQuotas_RateLimit_DupName(t *testing.T) {

 func TestQuotas_RateLimit_DupPath(t *testing.T) {
     conf, opts := teststorage.ClusterSetup(coreConfig, nil, nil)
+    opts.NoDefaultQuotas = true
     cluster := vault.NewTestCluster(t, conf, opts)
     cluster.Start()
     defer cluster.Cleanup()
@@ -201,6 +203,7 @@ func TestQuotas_RateLimit_DupPath(t *testing.T) {

 func TestQuotas_RateLimitQuota_ExemptPaths(t *testing.T) {
     conf, opts := teststorage.ClusterSetup(coreConfig, nil, nil)
+    opts.NoDefaultQuotas = true

     cluster := vault.NewTestCluster(t, conf, opts)
     cluster.Start()
@@ -340,6 +343,7 @@ func TestQuotas_RateLimitQuota_Mount(t *testing.T) {

 func TestQuotas_RateLimitQuota_MountPrecedence(t *testing.T) {
     conf, opts := teststorage.ClusterSetup(coreConfig, nil, nil)
+    opts.NoDefaultQuotas = true
     cluster := vault.NewTestCluster(t, conf, opts)
     cluster.Start()
     defer cluster.Cleanup()
@@ -426,6 +430,7 @@ func TestQuotas_RateLimitQuota_MountPrecedence(t *testing.T) {

 func TestQuotas_RateLimitQuota(t *testing.T) {
     conf, opts := teststorage.ClusterSetup(coreConfig, nil, nil)
+    opts.NoDefaultQuotas = true
     cluster := vault.NewTestCluster(t, conf, opts)
     cluster.Start()
     defer cluster.Cleanup()
@@ -6,10 +6,10 @@ import (
     "fmt"
     "path"
     "strings"
-    "sync"

     log "github.com/hashicorp/go-hclog"
     "github.com/hashicorp/go-memdb"
+    "github.com/hashicorp/vault/helper/locking"
     "github.com/hashicorp/vault/helper/metricsutil"
     "github.com/hashicorp/vault/helper/namespace"
     "github.com/hashicorp/vault/sdk/helper/pathmanager"
@@ -167,13 +167,13 @@ type Manager struct {
     metricSink *metricsutil.ClusterMetricSink

     // quotaLock is a lock for manipulating quotas and anything not covered by a more specific lock
-    quotaLock *sync.RWMutex
+    quotaLock *locking.DeadlockRWMutex

     // quotaConfigLock is a lock for accessing config items, such as RateLimitExemptPaths
-    quotaConfigLock *sync.RWMutex
+    quotaConfigLock *locking.DeadlockRWMutex

     // dbAndCacheLock is a lock for db and path caches that need to be reset during Reset()
-    dbAndCacheLock *sync.RWMutex
+    dbAndCacheLock *locking.DeadlockRWMutex
 }

 // QuotaLeaseInformation contains all of the information lease-count quotas require
@@ -281,9 +281,9 @@ func NewManager(logger log.Logger, walkFunc leaseWalkFunc, ms *metricsutil.Clust
         metricSink:           ms,
         rateLimitPathManager: pathmanager.New(),
         config:               new(Config),
-        quotaLock:            new(sync.RWMutex),
-        quotaConfigLock:      new(sync.RWMutex),
-        dbAndCacheLock:       new(sync.RWMutex),
+        quotaLock:            new(locking.DeadlockRWMutex),
+        quotaConfigLock:      new(locking.DeadlockRWMutex),
+        dbAndCacheLock:       new(locking.DeadlockRWMutex),
     }

     manager.init(walkFunc)
@@ -22,6 +22,7 @@ import (
     "net/http"
     "os"
     "path/filepath"
+    "reflect"
     "sync"
     "sync/atomic"
     "time"
@@ -35,6 +36,7 @@ import (
     "github.com/hashicorp/vault/audit"
     "github.com/hashicorp/vault/builtin/credential/approle"
     "github.com/hashicorp/vault/command/server"
+    "github.com/hashicorp/vault/helper/constants"
     "github.com/hashicorp/vault/helper/metricsutil"
     "github.com/hashicorp/vault/helper/namespace"
     "github.com/hashicorp/vault/internalshared/configutil"
@@ -900,9 +902,14 @@ type TestCluster struct {
     base              *CoreConfig
     LicensePublicKey  ed25519.PublicKey
     LicensePrivateKey ed25519.PrivateKey
+    opts              *TestClusterOptions
 }

 func (c *TestCluster) Start() {
+}
+
+func (c *TestCluster) start(t testing.T) {
+    t.Helper()
     for i, core := range c.Cores {
         if core.Server != nil {
             for _, ln := range core.Listeners {
@@ -914,6 +921,54 @@ func (c *TestCluster) Start() {
     if c.SetupFunc != nil {
         c.SetupFunc()
     }
+
+    if c.opts != nil && c.opts.SkipInit {
+        // SkipInit implies that vault may not be ready to service requests, or that
+        // we're restarting a cluster from an existing storage.
+        return
+    }
+
+    activeCore := -1
+WAITACTIVE:
+    for i := 0; i < 60; i++ {
+        for i, core := range c.Cores {
+            if standby, _ := core.Core.Standby(); !standby {
+                activeCore = i
+                break WAITACTIVE
+            }
+        }
+
+        time.Sleep(time.Second)
+    }
+    if activeCore == -1 {
+        t.Fatalf("no core became active")
+    }
+
+    switch {
+    case c.opts == nil:
+    case c.opts.NoDefaultQuotas:
+    case c.opts.HandlerFunc == nil:
+        // If no HandlerFunc is provided that means that we can't actually do
+        // regular vault requests.
+    case reflect.TypeOf(c.opts.HandlerFunc).PkgPath() != "github.com/hashicorp/vault/http":
+    case reflect.TypeOf(c.opts.HandlerFunc).Name() != "Handler":
+    default:
+        cli := c.Cores[activeCore].Client
+        _, err := cli.Logical().Write("sys/quotas/rate-limit/rl-NewTestCluster", map[string]interface{}{
+            "rate": 1000000,
+        })
+        if err != nil {
+            t.Fatalf("error setting up global rate limit quota: %v", err)
+        }
+        if constants.IsEnterprise {
+            _, err = cli.Logical().Write("sys/quotas/lease-count/lc-NewTestCluster", map[string]interface{}{
+                "max_leases": 1000000,
+            })
+            if err != nil {
+                t.Fatalf("error setting up global lease count quota: %v", err)
+            }
+        }
+    }
+}

 // UnsealCores uses the cluster barrier keys to unseal the test cluster cores
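The net effect on test callers, sketched from the commit message (the "before" pattern is reconstructed, not taken from this diff):

    // Before: callers started the cluster and waited for an active node.
    cluster := vault.NewTestCluster(t, conf, opts)
    cluster.Start()
    testhelpers.WaitForActiveNode(t, cluster)

    // After: NewTestCluster starts the cluster and, unless SkipInit is set,
    // waits for an active node itself; Start remains as a no-op so existing
    // tests keep compiling.
    cluster := vault.NewTestCluster(t, conf, opts)
    defer cluster.Cleanup()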
@@ -1145,10 +1200,14 @@ type PhysicalBackendBundle struct {
     Cleanup func()
 }

+type HandlerHandler interface {
+    Handler(*HandlerProperties) http.Handler
+}
+
 type TestClusterOptions struct {
     KeepStandbysSealed bool
     SkipInit           bool
-    HandlerFunc        func(*HandlerProperties) http.Handler
+    HandlerFunc        HandlerHandler
     DefaultHandlerProperties HandlerProperties

     // BaseListenAddress is used to explicitly assign ports in sequence to the
@@ -1221,6 +1280,8 @@ type TestClusterOptions struct {
     RedundancyZoneMap      map[int]string
     KVVersion              string
     EffectiveSDKVersionMap map[int]string
+
+    NoDefaultQuotas bool
 }

 var DefaultNumCores = 3
@@ -1801,6 +1862,8 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
         }
     }

+    testCluster.opts = opts
+    testCluster.start(t)
     return &testCluster
 }

@@ -1997,7 +2060,7 @@ func (testCluster *TestCluster) newCore(t testing.T, idx int, coreConfig *CoreCo
         if props.ListenerConfig != nil && props.ListenerConfig.MaxRequestDuration == 0 {
             props.ListenerConfig.MaxRequestDuration = DefaultMaxRequestDuration
         }
-        handler = opts.HandlerFunc(&props)
+        handler = opts.HandlerFunc.Handler(&props)
     }

     // Set this in case the Seal was manually set before the core was