package vault

import (
	"bytes"
	"context"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/base64"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math/big"
	mathrand "math/rand"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"
	"time"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/go-cleanhttp"
	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-secure-stdlib/reloadutil"
	raftlib "github.com/hashicorp/raft"
	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/audit"
	"github.com/hashicorp/vault/builtin/credential/approle"
	"github.com/hashicorp/vault/command/server"
	"github.com/hashicorp/vault/helper/metricsutil"
	"github.com/hashicorp/vault/helper/namespace"
	"github.com/hashicorp/vault/internalshared/configutil"
	dbMysql "github.com/hashicorp/vault/plugins/database/mysql"
	v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/helper/consts"
	"github.com/hashicorp/vault/sdk/helper/logging"
	"github.com/hashicorp/vault/sdk/helper/pluginutil"
	"github.com/hashicorp/vault/sdk/helper/salt"
	"github.com/hashicorp/vault/sdk/logical"
	"github.com/hashicorp/vault/sdk/physical"
	physInmem "github.com/hashicorp/vault/sdk/physical/inmem"
	backendplugin "github.com/hashicorp/vault/sdk/plugin"
	"github.com/hashicorp/vault/vault/cluster"
	"github.com/hashicorp/vault/vault/seal"
	"github.com/mitchellh/copystructure"
	"github.com/mitchellh/go-testing-interface"
	"golang.org/x/crypto/ed25519"
	"golang.org/x/net/http2"
)

// This file contains a number of methods that are useful for unit
// tests within other packages.

const (
	testSharedPublicKey = `
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9i+hFxZHGo6KblVme4zrAcJstR6I0PTJozW286X4WyvPnkMYDQ5mnhEYC7UWCvjoTWbPEXPX7NjhRtwQTGD67bV+lrxgfyzK1JZbUXK4PwgKJvQD+XyyWYMzDgGSQY61KUSqCxymSm/9NZkPU3ElaQ9xQuTzPpztM4ROfb8f2Yv6/ZESZsTo0MTAkp8Pcy+WkioI/uJ1H7zqs0EA4OMY4aDJRu0UtP4rTVeYNEAuRXdX+eH4aW3KMvhzpFTjMbaJHJXlEeUm2SaX5TNQyTOvghCeQILfYIL/Ca2ij8iwCmulwdV6eQGfd4VDu40PvSnmfoaE38o6HaPnX0kUcnKiT
`
	testSharedPrivateKey = `
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAvYvoRcWRxqOim5VZnuM6wHCbLUeiND0yaM1tvOl+Fsrz55DG
A0OZp4RGAu1Fgr46E1mzxFz1+zY4UbcEExg+u21fpa8YH8sytSWW1FyuD8ICib0A
/l8slmDMw4BkkGOtSlEqgscpkpv/TWZD1NxJWkPcULk8z6c7TOETn2/H9mL+v2RE
mbE6NDEwJKfD3MvlpIqCP7idR+86rNBAODjGOGgyUbtFLT+K01XmDRALkV3V/nh+
GltyjL4c6RU4zG2iRyV5RHlJtkml+UzUMkzr4IQnkCC32CC/wmtoo/IsAprpcHVe
nkBn3eFQ7uND70p5n6GhN/KOh2j519JFHJyokwIDAQABAoIBAHX7VOvBC3kCN9/x
+aPdup84OE7Z7MvpX6w+WlUhXVugnmsAAVDczhKoUc/WktLLx2huCGhsmKvyVuH+
MioUiE+vx75gm3qGx5xbtmOfALVMRLopjCnJYf6EaFA0ZeQ+NwowNW7Lu0PHmAU8
Z3JiX8IwxTz14DU82buDyewO7v+cEr97AnERe3PUcSTDoUXNaoNxjNpEJkKREY6h
4hAY676RT/GsRcQ8tqe/rnCqPHNd7JGqL+207FK4tJw7daoBjQyijWuB7K5chSal
oPInylM6b13ASXuOAOT/2uSUBWmFVCZPDCmnZxy2SdnJGbsJAMl7Ma3MUlaGvVI+
Tfh1aQkCgYEA4JlNOabTb3z42wz6mz+Nz3JRwbawD+PJXOk5JsSnV7DtPtfgkK9y
6FTQdhnozGWShAvJvc+C4QAihs9AlHXoaBY5bEU7R/8UK/pSqwzam+MmxmhVDV7G
IMQPV0FteoXTaJSikhZ88mETTegI2mik+zleBpVxvfdhE5TR+lq8Br0CgYEA2AwJ
CUD5CYUSj09PluR0HHqamWOrJkKPFPwa+5eiTTCzfBBxImYZh7nXnWuoviXC0sg2
AuvCW+uZ48ygv/D8gcz3j1JfbErKZJuV+TotK9rRtNIF5Ub7qysP7UjyI7zCssVM
kuDd9LfRXaB/qGAHNkcDA8NxmHW3gpln4CFdSY8CgYANs4xwfercHEWaJ1qKagAe
rZyrMpffAEhicJ/Z65lB0jtG4CiE6w8ZeUMWUVJQVcnwYD+4YpZbX4S7sJ0B8Ydy
AhkSr86D/92dKTIt2STk6aCN7gNyQ1vW198PtaAWH1/cO2UHgHOy3ZUt5X/Uwxl9
cex4flln+1Viumts2GgsCQKBgCJH7psgSyPekK5auFdKEr5+Gc/jB8I/Z3K9+g4X
5nH3G1PBTCJYLw7hRzw8W/8oALzvddqKzEFHphiGXK94Lqjt/A4q1OdbCrhiE68D
My21P/dAKB1UYRSs9Y8CNyHCjuZM9jSMJ8vv6vG/SOJPsnVDWVAckAbQDvlTHC9t
O98zAoGAcbW6uFDkrv0XMCpB9Su3KaNXOR0wzag+WIFQRXCcoTvxVi9iYfUReQPi
oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F
+B6f4RoPdSXj24JHPg/ioRxjaj094UXJxua2yfkcecGNEuBQHSs=
-----END RSA PRIVATE KEY-----
`
)

// TestCore returns a pure in-memory, uninitialized core for testing.
func TestCore(t testing.T) *Core {
	return TestCoreWithSeal(t, nil, false)
}

// TestCoreRaw returns a pure in-memory, uninitialized core for testing. The raw
// storage endpoints are enabled with this core.
func TestCoreRaw(t testing.T) *Core {
	return TestCoreWithSeal(t, nil, true)
}

// TestCoreNewSeal returns a pure in-memory, uninitialized core with
// the new seal configuration.
func TestCoreNewSeal(t testing.T) *Core {
	seal := NewTestSeal(t, nil)
	return TestCoreWithSeal(t, seal, false)
}

// TestCoreWithConfig returns a pure in-memory, uninitialized core with the
// specified core configurations overridden for testing.
func TestCoreWithConfig(t testing.T, conf *CoreConfig) *Core {
	return TestCoreWithSealAndUI(t, conf)
}
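
// Illustrative sketch (not exercised by this file): a test that needs a
// non-default logger or raw endpoints can hand a partial CoreConfig to
// TestCoreWithConfig; only the fields copied in TestCoreWithSealAndUINoCleanup
// below are honored. The test name is an assumption.
//
//	func TestMyFeature(t *testing.T) {
//		conf := &CoreConfig{
//			EnableRaw: true,
//			Logger:    logging.NewVaultLogger(log.Debug),
//		}
//		core := TestCoreWithConfig(t, conf)
//		_ = core // the core is still uninitialized and sealed at this point
//	}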

// TestCoreWithSeal returns a pure in-memory, uninitialized core with the
// specified seal for testing.
func TestCoreWithSeal(t testing.T, testSeal Seal, enableRaw bool) *Core {
	conf := &CoreConfig{
		Seal:            testSeal,
		EnableUI:        false,
		EnableRaw:       enableRaw,
		BuiltinRegistry: NewMockBuiltinRegistry(),
	}
	return TestCoreWithSealAndUI(t, conf)
}

func TestCoreWithCustomResponseHeaderAndUI(t testing.T, CustomResponseHeaders map[string]map[string]string, enableUI bool) (*Core, [][]byte, string) {
	confRaw := &server.Config{
		SharedConfig: &configutil.SharedConfig{
			Listeners: []*configutil.Listener{
				{
					Type:                  "tcp",
					Address:               "127.0.0.1",
					CustomResponseHeaders: CustomResponseHeaders,
				},
			},
			DisableMlock: true,
		},
	}
	conf := &CoreConfig{
		RawConfig:       confRaw,
		EnableUI:        enableUI,
		EnableRaw:       true,
		BuiltinRegistry: NewMockBuiltinRegistry(),
	}
	core := TestCoreWithSealAndUI(t, conf)
	return testCoreUnsealed(t, core)
}
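
// Illustrative sketch: exercising custom response headers through the helper
// above. The header map keys and values are assumptions for the example only.
//
//	headers := map[string]map[string]string{
//		"default": {
//			"X-Custom-Header": "example",
//		},
//	}
//	core, keys, root := TestCoreWithCustomResponseHeaderAndUI(t, headers, false)
//	_, _, _ = core, keys, root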

func TestCoreUI(t testing.T, enableUI bool) *Core {
	conf := &CoreConfig{
		EnableUI:        enableUI,
		EnableRaw:       true,
		BuiltinRegistry: NewMockBuiltinRegistry(),
	}
	return TestCoreWithSealAndUI(t, conf)
}

func TestCoreWithSealAndUI(t testing.T, opts *CoreConfig) *Core {
	c := TestCoreWithSealAndUINoCleanup(t, opts)

	t.Cleanup(func() {
		defer func() {
			if r := recover(); r != nil {
				t.Log("panic closing core during cleanup", "panic", r)
			}
		}()
		err := c.ShutdownWait()
		if err != nil {
			t.Logf("shutdown returned error: %v", err)
		}
	})
	return c
}

func TestCoreWithSealAndUINoCleanup(t testing.T, opts *CoreConfig) *Core {
	logger := logging.NewVaultLogger(log.Trace)
	physicalBackend, err := physInmem.NewInmem(nil, logger)
	if err != nil {
		t.Fatal(err)
	}

	errInjector := physical.NewErrorInjector(physicalBackend, 0, logger)

	// Start off with the base test core config
	conf := testCoreConfig(t, errInjector, logger)

	// Override config values with the ones that get passed in
	conf.EnableUI = opts.EnableUI
	conf.EnableRaw = opts.EnableRaw
	conf.Seal = opts.Seal
	conf.LicensingConfig = opts.LicensingConfig
	conf.DisableKeyEncodingChecks = opts.DisableKeyEncodingChecks
	conf.MetricsHelper = opts.MetricsHelper
	conf.MetricSink = opts.MetricSink
	conf.NumExpirationWorkers = numExpirationWorkersTest
	conf.RawConfig = opts.RawConfig
	conf.EnableResponseHeaderHostname = opts.EnableResponseHeaderHostname
	conf.DisableSSCTokens = opts.DisableSSCTokens
	conf.PluginDirectory = opts.PluginDirectory

	if opts.Logger != nil {
		conf.Logger = opts.Logger
	}

	if opts.RedirectAddr != "" {
		conf.RedirectAddr = opts.RedirectAddr
	}

	for k, v := range opts.LogicalBackends {
		conf.LogicalBackends[k] = v
	}
	for k, v := range opts.CredentialBackends {
		conf.CredentialBackends[k] = v
	}

	for k, v := range opts.AuditBackends {
		conf.AuditBackends[k] = v
	}

	c, err := NewCore(conf)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	return c
}

func testCoreConfig(t testing.T, physicalBackend physical.Backend, logger log.Logger) *CoreConfig {
	t.Helper()
	noopAudits := map[string]audit.Factory{
		"noop": func(_ context.Context, config *audit.BackendConfig) (audit.Backend, error) {
			view := &logical.InmemStorage{}
			view.Put(context.Background(), &logical.StorageEntry{
				Key:   "salt",
				Value: []byte("foo"),
			})
			config.SaltConfig = &salt.Config{
				HMAC:     sha256.New,
				HMACType: "hmac-sha256",
			}
			config.SaltView = view

			n := &noopAudit{
				Config: config,
			}
			n.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
				SaltFunc: n.Salt,
			}
			return n, nil
		},
	}

	noopBackends := make(map[string]logical.Factory)
	noopBackends["noop"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
		b := new(framework.Backend)
		b.Setup(ctx, config)
		b.BackendType = logical.TypeCredential
		return b, nil
	}
	noopBackends["http"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
		return new(rawHTTP), nil
	}

	credentialBackends := make(map[string]logical.Factory)
	for backendName, backendFactory := range noopBackends {
		credentialBackends[backendName] = backendFactory
	}
	for backendName, backendFactory := range testCredentialBackends {
		credentialBackends[backendName] = backendFactory
	}

	logicalBackends := make(map[string]logical.Factory)
	for backendName, backendFactory := range noopBackends {
		logicalBackends[backendName] = backendFactory
	}

	logicalBackends["kv"] = LeasedPassthroughBackendFactory
	for backendName, backendFactory := range testLogicalBackends {
		logicalBackends[backendName] = backendFactory
	}

	conf := &CoreConfig{
		Physical:           physicalBackend,
		AuditBackends:      noopAudits,
		LogicalBackends:    logicalBackends,
		CredentialBackends: credentialBackends,
		DisableMlock:       true,
		Logger:             logger,
		BuiltinRegistry:    NewMockBuiltinRegistry(),
	}

	return conf
}

// TestCoreInit initializes the core and returns the barrier keys that must be
// used to unseal it along with the root token.
func TestCoreInit(t testing.T, core *Core) ([][]byte, string) {
	t.Helper()
	secretShares, _, root := TestCoreInitClusterWrapperSetup(t, core, nil)
	return secretShares, root
}

func TestCoreInitClusterWrapperSetup(t testing.T, core *Core, handler http.Handler) ([][]byte, [][]byte, string) {
	t.Helper()
	core.SetClusterHandler(handler)

	barrierConfig := &SealConfig{
		SecretShares:    3,
		SecretThreshold: 3,
	}

	switch core.seal.StoredKeysSupported() {
	case seal.StoredKeysNotSupported:
		barrierConfig.StoredShares = 0
	default:
		barrierConfig.StoredShares = 1
	}

	recoveryConfig := &SealConfig{
		SecretShares:    3,
		SecretThreshold: 3,
	}

	initParams := &InitParams{
		BarrierConfig:  barrierConfig,
		RecoveryConfig: recoveryConfig,
	}
	if core.seal.StoredKeysSupported() == seal.StoredKeysNotSupported {
		initParams.LegacyShamirSeal = true
	}
	result, err := core.Initialize(context.Background(), initParams)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	innerToken, err := core.DecodeSSCToken(result.RootToken)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	return result.SecretShares, result.RecoveryShares, innerToken
}
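
// Illustrative sketch of initializing and unsealing a core by hand inside a
// test, using the helpers in this file, instead of calling TestCoreUnsealed:
//
//	core := TestCore(t)
//	keys, root := TestCoreInit(t, core)
//	for _, key := range keys {
//		if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
//			t.Fatalf("unseal err: %s", err)
//		}
//	}
//	_ = root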

func TestCoreUnseal(core *Core, key []byte) (bool, error) {
	return core.Unseal(key)
}

func TestCoreSeal(core *Core) error {
	return core.sealInternal()
}

// TestCoreUnsealed returns a pure in-memory core that is already
// initialized and unsealed.
func TestCoreUnsealed(t testing.T) (*Core, [][]byte, string) {
	t.Helper()
	core := TestCore(t)
	return testCoreUnsealed(t, core)
}
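
// Illustrative sketch: most package tests only need the one-liner below; the
// returned root token can then be used on requests against the core. The
// request path is an example.
//
//	core, _, root := TestCoreUnsealed(t)
//	req := &logical.Request{
//		Operation:   logical.ReadOperation,
//		ClientToken: root,
//		Path:        "sys/mounts",
//	}
//	resp, err := core.HandleRequest(namespace.RootContext(nil), req)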

func TestCoreUnsealedWithMetrics(t testing.T) (*Core, [][]byte, string, *metrics.InmemSink) {
	t.Helper()
	inmemSink := metrics.NewInmemSink(1000000*time.Hour, 2000000*time.Hour)
	conf := &CoreConfig{
		BuiltinRegistry: NewMockBuiltinRegistry(),
		MetricSink:      metricsutil.NewClusterMetricSink("test-cluster", inmemSink),
		MetricsHelper:   metricsutil.NewMetricsHelper(inmemSink, false),
	}
	core, keys, root := testCoreUnsealed(t, TestCoreWithSealAndUI(t, conf))
	return core, keys, root, inmemSink
}

// TestCoreUnsealedRaw returns a pure in-memory core that is already
// initialized, unsealed, and with raw endpoints enabled.
func TestCoreUnsealedRaw(t testing.T) (*Core, [][]byte, string) {
	t.Helper()
	core := TestCoreRaw(t)
	return testCoreUnsealed(t, core)
}

// TestCoreUnsealedWithConfig returns a pure in-memory core that is already
// initialized and unsealed, with any provided core config values overridden.
func TestCoreUnsealedWithConfig(t testing.T, conf *CoreConfig) (*Core, [][]byte, string) {
	t.Helper()
	core := TestCoreWithConfig(t, conf)
	return testCoreUnsealed(t, core)
}

func testCoreUnsealed(t testing.T, core *Core) (*Core, [][]byte, string) {
	t.Helper()
	token, keys := TestInitUnsealCore(t, core)

	testCoreAddSecretMount(t, core, token)
	return core, keys, token
}

func TestInitUnsealCore(t testing.T, core *Core) (string, [][]byte) {
	keys, token := TestCoreInit(t, core)
	for _, key := range keys {
		if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
			t.Fatalf("unseal err: %s", err)
		}
	}
	if core.Sealed() {
		t.Fatal("should not be sealed")
	}

	return token, keys
}

func testCoreAddSecretMount(t testing.T, core *Core, token string) {
	kvReq := &logical.Request{
		Operation:   logical.UpdateOperation,
		ClientToken: token,
		Path:        "sys/mounts/secret",
		Data: map[string]interface{}{
			"type":        "kv",
			"path":        "secret/",
			"description": "key/value secret storage",
			"options": map[string]string{
				"version": "1",
			},
		},
	}
	resp, err := core.HandleRequest(namespace.RootContext(nil), kvReq)
	if err != nil {
		t.Fatal(err)
	}
	if resp.IsError() {
		t.Fatal(resp.Error())
	}
}
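
// Illustrative sketch: because testCoreUnsealed mounts a v1 kv backend at
// "secret/", unsealed test cores can immediately serve writes like the one
// below; the path and data are example values.
//
//	writeReq := &logical.Request{
//		Operation:   logical.UpdateOperation,
//		ClientToken: root,
//		Path:        "secret/foo",
//		Data:        map[string]interface{}{"bar": "baz"},
//	}
//	_, err := core.HandleRequest(namespace.RootContext(nil), writeReq)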

func TestCoreUnsealedBackend(t testing.T, backend physical.Backend) (*Core, [][]byte, string) {
	t.Helper()
	logger := logging.NewVaultLogger(log.Trace)
	conf := testCoreConfig(t, backend, logger)
	conf.Seal = NewTestSeal(t, nil)
	conf.NumExpirationWorkers = numExpirationWorkersTest

	core, err := NewCore(conf)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	keys, token := TestCoreInit(t, core)
	for _, key := range keys {
		if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
			t.Fatalf("unseal err: %s", err)
		}
	}

	if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
		t.Fatal(err)
	}

	if core.Sealed() {
		t.Fatal("should not be sealed")
	}

	t.Cleanup(func() {
		defer func() {
			if r := recover(); r != nil {
				t.Log("panic closing core during cleanup", "panic", r)
			}
		}()
		err := core.ShutdownWait()
		if err != nil {
			t.Logf("shutdown returned error: %v", err)
		}
	})

	return core, keys, token
}

// TestKeyCopy is a silly little function to just copy the key so that
// it can be used with Unseal easily.
func TestKeyCopy(key []byte) []byte {
	result := make([]byte, len(key))
	copy(result, key)
	return result
}

func TestDynamicSystemView(c *Core, ns *namespace.Namespace) *dynamicSystemView {
	me := &MountEntry{
		Config: MountConfig{
			DefaultLeaseTTL: 24 * time.Hour,
			MaxLeaseTTL:     2 * 24 * time.Hour,
		},
		NamespaceID: namespace.RootNamespace.ID,
		namespace:   namespace.RootNamespace,
	}

	if ns != nil {
		me.NamespaceID = ns.ID
		me.namespace = ns
	}

	return &dynamicSystemView{c, me}
}

// TestAddTestPlugin registers the testFunc as part of the plugin command to the
// plugin catalog. If provided, tempDir is used as the plugin directory.
func TestAddTestPlugin(t testing.T, c *Core, name string, pluginType consts.PluginType, testFunc string, env []string, tempDir string) {
	file, err := os.Open(os.Args[0])
	if err != nil {
		t.Fatal(err)
	}
	defer file.Close()

	dirPath := filepath.Dir(os.Args[0])
	fileName := filepath.Base(os.Args[0])

	if tempDir != "" {
		fi, err := file.Stat()
		if err != nil {
			t.Fatal(err)
		}

		// Copy over the file to the temp dir
		dst := filepath.Join(tempDir, fileName)
		out, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
		if err != nil {
			t.Fatal(err)
		}
		defer out.Close()

		if _, err = io.Copy(out, file); err != nil {
			t.Fatal(err)
		}
		err = out.Sync()
		if err != nil {
			t.Fatal(err)
		}

		dirPath = tempDir
	}

	// Determine the full plugin directory path, evaluating potential symlinks
	fullPath, err := filepath.EvalSymlinks(dirPath)
	if err != nil {
		t.Fatal(err)
	}

	reader, err := os.Open(filepath.Join(fullPath, fileName))
	if err != nil {
		t.Fatal(err)
	}
	defer reader.Close()

	// Find out the sha256
	hash := sha256.New()

	_, err = io.Copy(hash, reader)
	if err != nil {
		t.Fatal(err)
	}

	sum := hash.Sum(nil)

	// Set the core's plugin directory and plugin catalog directory
	c.pluginDirectory = fullPath
	c.pluginCatalog.directory = fullPath

	args := []string{fmt.Sprintf("--test.run=%s", testFunc)}
	version := ""
	err = c.pluginCatalog.Set(context.Background(), name, pluginType, version, fileName, args, env, sum)
	if err != nil {
		t.Fatal(err)
	}
}
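
// Illustrative sketch: registering the test binary itself as a plugin and then
// launching it. "TestPlugin_Main" is a hypothetical test function name that
// would re-exec the binary as the plugin process.
//
//	TestAddTestPlugin(t, core, "my-plugin", consts.PluginTypeSecrets, "TestPlugin_Main", []string{}, "")
//	client := TestRunTestPlugin(t, core, consts.PluginTypeSecrets, "my-plugin")
//	_ = client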

// TestRunTestPlugin runs the testFunc which has already been registered to the
// plugin catalog and returns a pluginClient. This can be called after calling
// TestAddTestPlugin.
func TestRunTestPlugin(t testing.T, c *Core, pluginType consts.PluginType, pluginName string) *pluginClient {
	t.Helper()
	config := TestPluginClientConfig(c, pluginType, pluginName)
	client, err := c.pluginCatalog.NewPluginClient(context.Background(), config)
	if err != nil {
		t.Fatal(err)
	}

	return client
}

func TestPluginClientConfig(c *Core, pluginType consts.PluginType, pluginName string) pluginutil.PluginClientConfig {
	switch pluginType {
	case consts.PluginTypeCredential, consts.PluginTypeSecrets:
		dsv := TestDynamicSystemView(c, nil)
		return pluginutil.PluginClientConfig{
			Name:            pluginName,
			PluginType:      pluginType,
			PluginSets:      backendplugin.PluginSet,
			HandshakeConfig: backendplugin.HandshakeConfig,
			Logger:          log.NewNullLogger(),
			AutoMTLS:        true,
			IsMetadataMode:  false,
			Wrapper:         dsv,
		}
	case consts.PluginTypeDatabase:
		return pluginutil.PluginClientConfig{
			Name:            pluginName,
			PluginType:      pluginType,
			PluginSets:      v5.PluginSets,
			HandshakeConfig: v5.HandshakeConfig,
			Logger:          log.NewNullLogger(),
			AutoMTLS:        true,
			IsMetadataMode:  false,
		}
	}
	return pluginutil.PluginClientConfig{}
}

var (
	testLogicalBackends    = map[string]logical.Factory{}
	testCredentialBackends = map[string]logical.Factory{}
)

// This adds a credential backend for the test core. This needs to be
// invoked before the test core is created.
func AddTestCredentialBackend(name string, factory logical.Factory) error {
	if name == "" {
		return fmt.Errorf("missing backend name")
	}
	if factory == nil {
		return fmt.Errorf("missing backend factory function")
	}
	testCredentialBackends[name] = factory
	return nil
}
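
// Illustrative sketch: registration must happen before the core is built, for
// example from an init function or TestMain in the consuming test file; the
// backend name and factory below are assumptions.
//
//	func init() {
//		if err := AddTestCredentialBackend("fake", func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
//			return new(framework.Backend), nil
//		}); err != nil {
//			panic(err)
//		}
//	}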

// This adds a logical backend for the test core. This needs to be
// invoked before the test core is created.
func AddTestLogicalBackend(name string, factory logical.Factory) error {
	if name == "" {
		return fmt.Errorf("missing backend name")
	}
	if factory == nil {
		return fmt.Errorf("missing backend factory function")
	}
	testLogicalBackends[name] = factory
	return nil
}

type noopAudit struct {
	Config    *audit.BackendConfig
	salt      *salt.Salt
	saltMutex sync.RWMutex
	formatter audit.AuditFormatter
	records   [][]byte
	l         sync.RWMutex
}

func (n *noopAudit) GetHash(ctx context.Context, data string) (string, error) {
	salt, err := n.Salt(ctx)
	if err != nil {
		return "", err
	}
	return salt.GetIdentifiedHMAC(data), nil
}

func (n *noopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error {
	n.l.Lock()
	defer n.l.Unlock()
	var w bytes.Buffer
	err := n.formatter.FormatRequest(ctx, &w, audit.FormatterConfig{}, in)
	if err != nil {
		return err
	}
	n.records = append(n.records, w.Bytes())
	return nil
}

func (n *noopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error {
	n.l.Lock()
	defer n.l.Unlock()
	var w bytes.Buffer
	err := n.formatter.FormatResponse(ctx, &w, audit.FormatterConfig{}, in)
	if err != nil {
		return err
	}
	n.records = append(n.records, w.Bytes())
	return nil
}

func (n *noopAudit) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error {
	n.l.Lock()
	defer n.l.Unlock()
	var w bytes.Buffer
	tempFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"])
	err := tempFormatter.FormatResponse(ctx, &w, audit.FormatterConfig{}, in)
	if err != nil {
		return err
	}
	n.records = append(n.records, w.Bytes())
	return nil
}

func (n *noopAudit) Reload(_ context.Context) error {
	return nil
}

func (n *noopAudit) Invalidate(_ context.Context) {
	n.saltMutex.Lock()
	defer n.saltMutex.Unlock()
	n.salt = nil
}

func (n *noopAudit) Salt(ctx context.Context) (*salt.Salt, error) {
	n.saltMutex.RLock()
	if n.salt != nil {
		defer n.saltMutex.RUnlock()
		return n.salt, nil
	}
	n.saltMutex.RUnlock()
	n.saltMutex.Lock()
	defer n.saltMutex.Unlock()
	if n.salt != nil {
		return n.salt, nil
	}
	salt, err := salt.NewSalt(ctx, n.Config.SaltView, n.Config.SaltConfig)
	if err != nil {
		return nil, err
	}
	n.salt = salt
	return salt, nil
}

func AddNoopAudit(conf *CoreConfig, records **[][]byte) {
	conf.AuditBackends = map[string]audit.Factory{
		"noop": func(_ context.Context, config *audit.BackendConfig) (audit.Backend, error) {
			view := &logical.InmemStorage{}
			view.Put(context.Background(), &logical.StorageEntry{
				Key:   "salt",
				Value: []byte("foo"),
			})
			n := &noopAudit{
				Config: config,
			}
			n.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
				SaltFunc: n.Salt,
			}
			if records != nil {
				*records = &n.records
			}
			return n, nil
		},
	}
}

type rawHTTP struct{}

func (n *rawHTTP) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) {
	return &logical.Response{
		Data: map[string]interface{}{
			logical.HTTPStatusCode:  200,
			logical.HTTPContentType: "text/plain",
			logical.HTTPRawBody:     []byte("hello world"),
		},
	}, nil
}

func (n *rawHTTP) HandleExistenceCheck(ctx context.Context, req *logical.Request) (bool, bool, error) {
	return false, false, nil
}

func (n *rawHTTP) SpecialPaths() *logical.Paths {
	return &logical.Paths{Unauthenticated: []string{"*"}}
}

func (n *rawHTTP) System() logical.SystemView {
	return logical.StaticSystemView{
		DefaultLeaseTTLVal: time.Hour * 24,
		MaxLeaseTTLVal:     time.Hour * 24 * 32,
	}
}

func (n *rawHTTP) Logger() log.Logger {
	return logging.NewVaultLogger(log.Trace)
}

func (n *rawHTTP) Cleanup(ctx context.Context) {
	// noop
}

func (n *rawHTTP) Initialize(ctx context.Context, req *logical.InitializationRequest) error {
	return nil
}

func (n *rawHTTP) InvalidateKey(context.Context, string) {
	// noop
}

func (n *rawHTTP) Setup(ctx context.Context, config *logical.BackendConfig) error {
	// noop
	return nil
}

func (n *rawHTTP) Type() logical.BackendType {
	return logical.TypeLogical
}

func GenerateRandBytes(length int) ([]byte, error) {
	if length < 0 {
		return nil, fmt.Errorf("length must be >= 0")
	}

	buf := make([]byte, length)
	if length == 0 {
		return buf, nil
	}

	n, err := rand.Read(buf)
	if err != nil {
		return nil, err
	}
	if n != length {
		return nil, fmt.Errorf("unable to read %d bytes; only read %d", length, n)
	}

	return buf, nil
}
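
// Illustrative sketch: GenerateRandBytes is handy for building throwaway keys
// or nonces in tests.
//
//	key, err := GenerateRandBytes(32)
//	if err != nil {
//		t.Fatal(err)
//	}
//	_ = key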

func TestWaitActive(t testing.T, core *Core) {
	t.Helper()
	if err := TestWaitActiveWithError(core); err != nil {
		t.Fatal(err)
	}
}
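
// Illustrative sketch: after unsealing, HA-style tests typically block until
// the core has taken over as the active node before issuing requests.
//
//	core, _, _ := TestCoreUnsealed(t)
//	TestWaitActive(t, core)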

func TestWaitActiveForwardingReady(t testing.T, core *Core) {
	TestWaitActive(t, core)

	deadline := time.Now().Add(2 * time.Second)
	for time.Now().Before(deadline) {
		if _, ok := core.getClusterListener().Handler(consts.RequestForwardingALPN); ok {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	t.Fatal("timed out waiting for request forwarding handler to be registered")
}

func TestWaitActiveWithError(core *Core) error {
	start := time.Now()
	var standby bool
	var err error
	for time.Now().Sub(start) < 30*time.Second {
		standby, err = core.Standby()
		if err != nil {
			return err
		}
		if !standby {
			break
		}
	}
	if standby {
		return errors.New("should not be in standby mode")
	}
	return nil
}

type TestCluster struct {
	BarrierKeys        [][]byte
	RecoveryKeys       [][]byte
	CACert             *x509.Certificate
	CACertBytes        []byte
	CACertPEM          []byte
	CACertPEMFile      string
	CAKey              *ecdsa.PrivateKey
	CAKeyPEM           []byte
	Cores              []*TestClusterCore
	ID                 string
	RootToken          string
	RootCAs            *x509.CertPool
	TempDir            string
	ClientAuthRequired bool
	Logger             log.Logger
	CleanupFunc        func()
	SetupFunc          func()

	cleanupFuncs      []func()
	base              *CoreConfig
	LicensePublicKey  ed25519.PublicKey
	LicensePrivateKey ed25519.PrivateKey
}

func (c *TestCluster) Start() {
	for i, core := range c.Cores {
		if core.Server != nil {
			for _, ln := range core.Listeners {
				c.Logger.Info("starting listener for test core", "core", i, "port", ln.Address.Port)
				go core.Server.Serve(ln)
			}
		}
	}
	if c.SetupFunc != nil {
		c.SetupFunc()
	}
}

// UnsealCores uses the cluster barrier keys to unseal the test cluster cores
func (c *TestCluster) UnsealCores(t testing.T) {
	t.Helper()
	if err := c.UnsealCoresWithError(false); err != nil {
		t.Fatal(err)
	}
}
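
// Illustrative sketch: a cluster-level test typically starts the cluster,
// defers cleanup, and unseals every core before running assertions. The
// NewTestCluster constructor lives elsewhere in this package and is assumed
// here.
//
//	cluster := NewTestCluster(t, nil, nil)
//	cluster.Start()
//	defer cluster.Cleanup()
//	cluster.UnsealCores(t)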

func (c *TestCluster) UnsealCoresWithError(useStoredKeys bool) error {
	unseal := func(core *Core) error {
		for _, key := range c.BarrierKeys {
			if _, err := core.Unseal(TestKeyCopy(key)); err != nil {
				return err
			}
		}
		return nil
	}
	if useStoredKeys {
		unseal = func(core *Core) error {
			return core.UnsealWithStoredKeys(context.Background())
		}
	}

	// Unseal first core
	if err := unseal(c.Cores[0].Core); err != nil {
		return fmt.Errorf("unseal core %d err: %s", 0, err)
	}

	// Verify unsealed
	if c.Cores[0].Sealed() {
		return fmt.Errorf("should not be sealed")
	}

	if err := TestWaitActiveWithError(c.Cores[0].Core); err != nil {
		return err
	}

	// Unseal other cores
	for i := 1; i < len(c.Cores); i++ {
		if err := unseal(c.Cores[i].Core); err != nil {
			return fmt.Errorf("unseal core %d err: %s", i, err)
		}
	}

	// Let them come fully up to standby
	time.Sleep(2 * time.Second)

	// Ensure cluster connection info is populated.
	// Other cores should not come up as leaders.
	for i := 1; i < len(c.Cores); i++ {
		isLeader, _, _, err := c.Cores[i].Leader()
		if err != nil {
			return err
		}
		if isLeader {
			return fmt.Errorf("core[%d] should not be leader", i)
		}
	}

	return nil
}
|
|
|
|
|
2021-02-18 20:40:18 +00:00
|
|
|
func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) {
|
2021-05-19 20:07:58 +00:00
|
|
|
err := c.AttemptUnsealCore(core)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *TestCluster) AttemptUnsealCore(core *TestClusterCore) error {
|
2020-02-13 21:27:31 +00:00
|
|
|
var keys [][]byte
|
|
|
|
if core.seal.RecoveryKeySupported() {
|
|
|
|
keys = c.RecoveryKeys
|
|
|
|
} else {
|
|
|
|
keys = c.BarrierKeys
|
|
|
|
}
|
|
|
|
for _, key := range keys {
|
2019-06-20 19:14:58 +00:00
|
|
|
if _, err := core.Core.Unseal(TestKeyCopy(key)); err != nil {
|
2021-05-19 20:07:58 +00:00
|
|
|
return fmt.Errorf("unseal err: %w", err)
|
2019-06-20 19:14:58 +00:00
|
|
|
}
|
|
|
|
}
|
2021-05-19 20:07:58 +00:00
|
|
|
return nil
|
2019-06-20 19:14:58 +00:00
|
|
|
}
|
|
|
|
|
2021-02-18 20:40:18 +00:00
|
|
|
func (c *TestCluster) UnsealCoreWithStoredKeys(t testing.T, core *TestClusterCore) {
|
2020-10-23 18:16:04 +00:00
|
|
|
t.Helper()
|
2020-06-11 19:07:59 +00:00
|
|
|
if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-01 05:02:03 +00:00
|
|
|
func (c *TestCluster) EnsureCoresSealed(t testing.T) {
	t.Helper()
	if err := c.ensureCoresSealed(); err != nil {
		t.Fatal(err)
	}
}

func (c *TestClusterCore) Seal(t testing.T) {
	t.Helper()
	if err := c.Core.sealInternal(); err != nil {
		t.Fatal(err)
	}
}

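// Usage sketch (hypothetical; cluster is a *TestCluster built elsewhere):
// Seal and UnsealCore are commonly paired to force a leadership change and
// then bring the old active node back as a standby.
//
//	cluster.Cores[0].Seal(t)                // step the active node down
//	cluster.UnsealCore(t, cluster.Cores[0]) // rejoin it using the cluster keys
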
func (c *TestClusterCore) stop() error {
	c.Logger().Info("stopping vault test core")

	if c.Listeners != nil {
		for _, ln := range c.Listeners {
			ln.Close()
		}
		c.Logger().Info("listeners successfully shut down")
	}
	if c.licensingStopCh != nil {
		close(c.licensingStopCh)
		c.licensingStopCh = nil
	}

	if err := c.Shutdown(); err != nil {
		return err
	}
	timeout := time.Now().Add(60 * time.Second)
	for {
		if time.Now().After(timeout) {
			return errors.New("timeout waiting for core to seal")
		}
		if c.Sealed() {
			break
		}
		time.Sleep(250 * time.Millisecond)
	}

	c.Logger().Info("vault test core stopped")
	return nil
}

func (c *TestCluster) Cleanup() {
	c.Logger.Info("cleaning up vault cluster")
	if tl, ok := c.Logger.(*TestLogger); ok {
		tl.StopLogging()
	}

	wg := &sync.WaitGroup{}
	for _, core := range c.Cores {
		wg.Add(1)
		lc := core

		go func() {
			defer wg.Done()
			if err := lc.stop(); err != nil {
				// Note that this log won't be seen if using TestLogger, due to
				// the above call to StopLogging.
				lc.Logger().Error("error during cleanup", "error", err)
			}
		}()
	}

	wg.Wait()

	// Remove any temp dir that exists
	if c.TempDir != "" {
		os.RemoveAll(c.TempDir)
	}

	// Give time to actually shut down/clean up before the next test
	time.Sleep(time.Second)
	if c.CleanupFunc != nil {
		c.CleanupFunc()
	}
}

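// Typical lifecycle sketch (hypothetical; handlerFunc stands in for whatever
// http.Handler constructor the test wires up via TestClusterOptions): Cleanup
// is normally deferred immediately after the cluster is built.
//
//	cluster := NewTestCluster(t, coreConfig, &TestClusterOptions{
//		HandlerFunc: handlerFunc,
//	})
//	defer cluster.Cleanup() // stops all cores, removes TempDir, runs CleanupFunc
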
func (c *TestCluster) ensureCoresSealed() error {
	for _, core := range c.Cores {
		if err := core.Shutdown(); err != nil {
			return err
		}
		timeout := time.Now().Add(60 * time.Second)
		for {
			if time.Now().After(timeout) {
				return fmt.Errorf("timeout waiting for core to seal")
			}
			if core.Sealed() {
				break
			}
			time.Sleep(250 * time.Millisecond)
		}
	}
	return nil
}

func SetReplicationFailureMode(core *TestClusterCore, mode uint32) {
	atomic.StoreUint32(core.Core.replicationFailure, mode)
}

type TestListener struct {
	net.Listener
	Address *net.TCPAddr
}

type TestClusterCore struct {
	*Core
	CoreConfig           *CoreConfig
	Client               *api.Client
	Handler              http.Handler
	Address              *net.TCPAddr
	Listeners            []*TestListener
	ReloadFuncs          *map[string][]reloadutil.ReloadFunc
	ReloadFuncsLock      *sync.RWMutex
	Server               *http.Server
	ServerCert           *x509.Certificate
	ServerCertBytes      []byte
	ServerCertPEM        []byte
	ServerKey            *ecdsa.PrivateKey
	ServerKeyPEM         []byte
	TLSConfig            *tls.Config
	UnderlyingStorage    physical.Backend
	UnderlyingRawStorage physical.Backend
	UnderlyingHAStorage  physical.HABackend
	Barrier              SecurityBarrier
	NodeID               string
}

type PhysicalBackendBundle struct {
	Backend   physical.Backend
	HABackend physical.HABackend
	Cleanup   func()
}

type TestClusterOptions struct {
	KeepStandbysSealed       bool
	SkipInit                 bool
	HandlerFunc              func(*HandlerProperties) http.Handler
	DefaultHandlerProperties HandlerProperties

	// BaseListenAddress is used to explicitly assign ports in sequence to the
	// listener of each core. It should be a string of the form
	// "127.0.0.1:20000"
	//
	// WARNING: Using an explicitly assigned port above 30000 may clash with
	// ephemeral ports that have been assigned by the OS in other tests. The
	// use of explicitly assigned ports below 30000 is strongly recommended.
	// In addition, you should be careful to use explicitly assigned ports that
	// do not clash with any other explicitly assigned ports in other tests.
	BaseListenAddress string

	// BaseClusterListenPort is used to explicitly assign ports in sequence to
	// the cluster listener of each core. If BaseClusterListenPort is
	// specified, then BaseListenAddress must also be specified. Each cluster
	// listener will use the same host as the one specified in
	// BaseListenAddress.
	//
	// WARNING: Using an explicitly assigned port above 30000 may clash with
	// ephemeral ports that have been assigned by the OS in other tests. The
	// use of explicitly assigned ports below 30000 is strongly recommended.
	// In addition, you should be careful to use explicitly assigned ports that
	// do not clash with any other explicitly assigned ports in other tests.
	BaseClusterListenPort int

	NumCores       int
	SealFunc       func() Seal
	UnwrapSealFunc func() Seal
	Logger         log.Logger
	TempDir        string
	CACert         []byte
	CAKey          *ecdsa.PrivateKey

	// PhysicalFactory is used to create backends.
	// The int argument is the index of the core within the cluster, i.e. first
	// core in cluster will have 0, second 1, etc.
	// If the backend is shared across the cluster (i.e. is not Raft) then it
	// should return nil when coreIdx != 0.
	PhysicalFactory func(t testing.T, coreIdx int, logger log.Logger, conf map[string]interface{}) *PhysicalBackendBundle

	// FirstCoreNumber is used to assign a unique number to each core within
	// a multi-cluster setup.
	FirstCoreNumber   int
	RequireClientAuth bool

	// SetupFunc is called after the cluster is started.
	SetupFunc      func(t testing.T, c *TestCluster)
	PR1103Disabled bool

	// ClusterLayers are used to override the default cluster connection layer
	ClusterLayers cluster.NetworkLayerSet
	// InmemClusterLayers is a shorthand way of asking for ClusterLayers to be
	// built using the inmem implementation.
	InmemClusterLayers bool

	// RaftAddressProvider is used to set the raft ServerAddressProvider on
	// each core.
	//
	// If SkipInit is true, then RaftAddressProvider has no effect.
	// RaftAddressProvider should only be specified if the underlying physical
	// storage is Raft.
	RaftAddressProvider raftlib.ServerAddressProvider

	CoreMetricSinkProvider func(clusterName string) (*metricsutil.ClusterMetricSink, *metricsutil.MetricsHelper)

	PhysicalFactoryConfig map[string]interface{}
	LicensePublicKey      ed25519.PublicKey
	LicensePrivateKey     ed25519.PrivateKey

	// this stores the vault version that should be used for each core config
	VersionMap        map[int]string
	RedundancyZoneMap map[int]string
}

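// Options sketch (hypothetical values, chosen to respect the port warnings
// above by staying below 30000):
//
//	opts := &TestClusterOptions{
//		NumCores:              3,
//		BaseListenAddress:     "127.0.0.1:20000", // API listeners 20000-20002
//		BaseClusterListenPort: 20100,             // cluster listeners 20100-20102
//		KeepStandbysSealed:    true,
//	}
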
var DefaultNumCores = 3

type certInfo struct {
	cert      *x509.Certificate
	certPEM   []byte
	certBytes []byte
	key       *ecdsa.PrivateKey
	keyPEM    []byte
}

type TestLogger struct {
	log.Logger
	Path string
	File *os.File
	sink log.SinkAdapter
}

func NewTestLogger(t testing.T) *TestLogger {
	var logFile *os.File
	var logPath string
	output := os.Stderr

	logDir := os.Getenv("VAULT_TEST_LOG_DIR")
	if logDir != "" {
		logPath = filepath.Join(logDir, t.Name()+".log")
		// t.Name may include slashes.
		dir, _ := filepath.Split(logPath)
		err := os.MkdirAll(dir, 0o755)
		if err != nil {
			t.Fatal(err)
		}
		logFile, err = os.Create(logPath)
		if err != nil {
			t.Fatal(err)
		}
		output = logFile
	}

	// We send nothing on the regular logger, that way we can later deregister
	// the sink to stop logging during cluster cleanup.
	logger := log.NewInterceptLogger(&log.LoggerOptions{
		Output:            ioutil.Discard,
		IndependentLevels: true,
	})
	sink := log.NewSinkAdapter(&log.LoggerOptions{
		Output:            output,
		Level:             log.Trace,
		IndependentLevels: true,
	})
	logger.RegisterSink(sink)
	return &TestLogger{
		Path:   logPath,
		File:   logFile,
		Logger: logger,
		sink:   sink,
	}
}

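// Behavior sketch: with VAULT_TEST_LOG_DIR unset, the sink writes to stderr;
// when it is set, each test gets its own trace-level log file named after
// t.Name(). Hypothetical invocation:
//
//	VAULT_TEST_LOG_DIR=/tmp/vault-test-logs go test ./vault/ -run TestFoo
//	// produces /tmp/vault-test-logs/TestFoo.log
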
func (tl *TestLogger) StopLogging() {
	tl.Logger.(log.InterceptLogger).DeregisterSink(tl.sink)
}

// NewTestCluster creates a new test cluster based on the provided core config
// and test cluster options.
//
// N.B. Even though a single base CoreConfig is provided, NewTestCluster will instantiate a
// core config for each core it creates. If a separate seal per core is desired, opts.SealFunc
// can be provided to generate a seal for each one. Otherwise, the provided base.Seal will be
// shared among cores. NewCore's default behavior is to generate a new DefaultSeal if the
// provided Seal in coreConfig (i.e. base.Seal) is nil.
//
// If opts.Logger is provided, it takes precedence and will be used as the cluster
// logger and will be the basis for each core's logger. If no opts.Logger is
// given, one will be generated based on t.Name() for the cluster logger, and if
// no base.Logger is given, it will also be used as the basis for each core's logger.
func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *TestCluster {
|
2018-02-22 05:23:37 +00:00
|
|
|
var err error
|
|
|
|
|
2017-09-01 05:02:03 +00:00
|
|
|
var numCores int
|
|
|
|
if opts == nil || opts.NumCores == 0 {
|
|
|
|
numCores = DefaultNumCores
|
|
|
|
} else {
|
|
|
|
numCores = opts.NumCores
|
|
|
|
}
|
|
|
|
|
2017-07-31 15:28:06 +00:00
|
|
|
certIPs := []net.IP{
|
|
|
|
net.IPv6loopback,
|
|
|
|
net.ParseIP("127.0.0.1"),
|
|
|
|
}
|
|
|
|
var baseAddr *net.TCPAddr
|
2017-07-31 16:13:29 +00:00
|
|
|
if opts != nil && opts.BaseListenAddress != "" {
|
2017-07-31 15:28:06 +00:00
|
|
|
baseAddr, err = net.ResolveTCPAddr("tcp", opts.BaseListenAddress)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal("could not parse given base IP")
|
|
|
|
}
|
|
|
|
certIPs = append(certIPs, baseAddr.IP)
|
2020-06-16 18:12:22 +00:00
|
|
|
} else {
|
|
|
|
baseAddr = &net.TCPAddr{
|
|
|
|
IP: net.ParseIP("127.0.0.1"),
|
|
|
|
Port: 0,
|
2020-05-14 12:31:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-31 15:28:06 +00:00
|
|
|
var testCluster TestCluster
|
2020-06-16 18:12:22 +00:00
|
|
|
testCluster.base = base
|
2019-07-23 19:17:37 +00:00
|
|
|
|
2020-05-01 16:26:26 +00:00
|
|
|
switch {
|
|
|
|
case opts != nil && opts.Logger != nil:
|
2019-07-23 19:17:37 +00:00
|
|
|
testCluster.Logger = opts.Logger
|
2020-05-01 16:26:26 +00:00
|
|
|
default:
|
2020-05-05 16:11:36 +00:00
|
|
|
testCluster.Logger = NewTestLogger(t)
|
2019-07-23 19:17:37 +00:00
|
|
|
}
|
|
|
|
|
2017-12-11 23:02:35 +00:00
|
|
|
if opts != nil && opts.TempDir != "" {
|
|
|
|
if _, err := os.Stat(opts.TempDir); os.IsNotExist(err) {
|
2021-04-08 16:43:39 +00:00
|
|
|
if err := os.MkdirAll(opts.TempDir, 0o700); err != nil {
|
2017-12-11 23:02:35 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
testCluster.TempDir = opts.TempDir
|
|
|
|
} else {
|
|
|
|
tempDir, err := ioutil.TempDir("", "vault-test-cluster-")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
testCluster.TempDir = tempDir
|
2017-07-31 15:28:06 +00:00
|
|
|
}
|
|
|
|
|
2018-02-22 05:23:37 +00:00
|
|
|
var caKey *ecdsa.PrivateKey
|
|
|
|
if opts != nil && opts.CAKey != nil {
|
|
|
|
caKey = opts.CAKey
|
|
|
|
} else {
|
|
|
|
caKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2017-07-31 15:28:06 +00:00
|
|
|
}
|
|
|
|
testCluster.CAKey = caKey
|
2018-02-22 05:23:37 +00:00
|
|
|
var caBytes []byte
|
|
|
|
if opts != nil && len(opts.CACert) > 0 {
|
|
|
|
caBytes = opts.CACert
|
|
|
|
} else {
|
|
|
|
caCertTemplate := &x509.Certificate{
|
|
|
|
Subject: pkix.Name{
|
|
|
|
CommonName: "localhost",
|
|
|
|
},
|
|
|
|
DNSNames: []string{"localhost"},
|
|
|
|
IPAddresses: certIPs,
|
|
|
|
KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign),
|
|
|
|
SerialNumber: big.NewInt(mathrand.Int63()),
|
|
|
|
NotBefore: time.Now().Add(-30 * time.Second),
|
|
|
|
NotAfter: time.Now().Add(262980 * time.Hour),
|
|
|
|
BasicConstraintsValid: true,
|
2018-09-04 16:29:18 +00:00
|
|
|
IsCA: true,
|
2018-02-22 05:23:37 +00:00
|
|
|
}
|
|
|
|
caBytes, err = x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, caKey.Public(), caKey)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
}
|
|
|
|
caCert, err := x509.ParseCertificate(caBytes)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2017-07-31 15:28:06 +00:00
|
|
|
testCluster.CACert = caCert
|
|
|
|
testCluster.CACertBytes = caBytes
|
|
|
|
testCluster.RootCAs = x509.NewCertPool()
|
|
|
|
testCluster.RootCAs.AddCert(caCert)
|
|
|
|
caCertPEMBlock := &pem.Block{
|
|
|
|
Type: "CERTIFICATE",
|
|
|
|
Bytes: caBytes,
|
|
|
|
}
|
|
|
|
testCluster.CACertPEM = pem.EncodeToMemory(caCertPEMBlock)
|
2017-07-31 19:31:44 +00:00
|
|
|
testCluster.CACertPEMFile = filepath.Join(testCluster.TempDir, "ca_cert.pem")
|
2021-04-08 16:43:39 +00:00
|
|
|
err = ioutil.WriteFile(testCluster.CACertPEMFile, testCluster.CACertPEM, 0o755)
|
2017-07-31 15:28:06 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
marshaledCAKey, err := x509.MarshalECPrivateKey(caKey)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
caKeyPEMBlock := &pem.Block{
|
|
|
|
Type: "EC PRIVATE KEY",
|
|
|
|
Bytes: marshaledCAKey,
|
|
|
|
}
|
|
|
|
testCluster.CAKeyPEM = pem.EncodeToMemory(caKeyPEMBlock)
|
2021-04-08 16:43:39 +00:00
|
|
|
err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "ca_key.pem"), testCluster.CAKeyPEM, 0o755)
|
2017-07-31 15:28:06 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
|
2017-09-01 05:02:03 +00:00
|
|
|
var certInfoSlice []*certInfo
|
2016-08-15 13:42:42 +00:00
|
|
|
|
2017-09-01 05:02:03 +00:00
|
|
|
//
|
|
|
|
// Certs generation
|
|
|
|
//
|
|
|
|
for i := 0; i < numCores; i++ {
|
|
|
|
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
certTemplate := &x509.Certificate{
|
|
|
|
Subject: pkix.Name{
|
|
|
|
CommonName: "localhost",
|
|
|
|
},
|
2020-02-05 18:45:16 +00:00
|
|
|
// Include host.docker.internal for the sake of benchmark-vault running on MacOS/Windows.
|
|
|
|
// This allows Prometheus running in docker to scrape the cluster for metrics.
|
|
|
|
DNSNames: []string{"localhost", "host.docker.internal"},
|
2017-09-01 05:02:03 +00:00
|
|
|
IPAddresses: certIPs,
|
|
|
|
ExtKeyUsage: []x509.ExtKeyUsage{
|
|
|
|
x509.ExtKeyUsageServerAuth,
|
|
|
|
x509.ExtKeyUsageClientAuth,
|
|
|
|
},
|
|
|
|
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement,
|
|
|
|
SerialNumber: big.NewInt(mathrand.Int63()),
|
|
|
|
NotBefore: time.Now().Add(-30 * time.Second),
|
|
|
|
NotAfter: time.Now().Add(262980 * time.Hour),
|
|
|
|
}
|
|
|
|
certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, caCert, key.Public(), caKey)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
cert, err := x509.ParseCertificate(certBytes)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
certPEMBlock := &pem.Block{
|
|
|
|
Type: "CERTIFICATE",
|
|
|
|
Bytes: certBytes,
|
|
|
|
}
|
|
|
|
certPEM := pem.EncodeToMemory(certPEMBlock)
|
|
|
|
marshaledKey, err := x509.MarshalECPrivateKey(key)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
keyPEMBlock := &pem.Block{
|
|
|
|
Type: "EC PRIVATE KEY",
|
|
|
|
Bytes: marshaledKey,
|
|
|
|
}
|
|
|
|
keyPEM := pem.EncodeToMemory(keyPEMBlock)
|
2016-08-15 13:42:42 +00:00
|
|
|
|
2017-09-01 05:02:03 +00:00
|
|
|
certInfoSlice = append(certInfoSlice, &certInfo{
|
|
|
|
cert: cert,
|
|
|
|
certPEM: certPEM,
|
|
|
|
certBytes: certBytes,
|
|
|
|
key: key,
|
|
|
|
keyPEM: keyPEM,
|
|
|
|
})
|
2016-08-15 13:42:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Listener setup
|
|
|
|
//
|
2020-06-16 18:12:22 +00:00
|
|
|
addresses := []*net.TCPAddr{}
|
2017-09-01 05:02:03 +00:00
|
|
|
listeners := [][]*TestListener{}
|
|
|
|
servers := []*http.Server{}
|
|
|
|
handlers := []http.Handler{}
|
|
|
|
tlsConfigs := []*tls.Config{}
|
2020-02-15 19:58:05 +00:00
|
|
|
certGetters := []*reloadutil.CertificateGetter{}
|
2017-09-01 05:02:03 +00:00
|
|
|
for i := 0; i < numCores; i++ {
|
2020-06-16 18:12:22 +00:00
|
|
|
addr := &net.TCPAddr{
|
|
|
|
IP: baseAddr.IP,
|
|
|
|
Port: 0,
|
|
|
|
}
|
|
|
|
if baseAddr.Port != 0 {
|
|
|
|
addr.Port = baseAddr.Port + i
|
|
|
|
}
|
|
|
|
|
|
|
|
ln, err := net.ListenTCP("tcp", addr)
|
2017-09-01 05:02:03 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
addresses = append(addresses, addr)
|
|
|
|
|
2017-09-01 05:02:03 +00:00
|
|
|
certFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_cert.pem", i+1, ln.Addr().(*net.TCPAddr).Port))
|
|
|
|
keyFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_key.pem", i+1, ln.Addr().(*net.TCPAddr).Port))
|
2021-04-08 16:43:39 +00:00
|
|
|
err = ioutil.WriteFile(certFile, certInfoSlice[i].certPEM, 0o755)
|
2017-09-01 05:02:03 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2021-04-08 16:43:39 +00:00
|
|
|
err = ioutil.WriteFile(keyFile, certInfoSlice[i].keyPEM, 0o755)
|
2017-09-01 05:02:03 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
tlsCert, err := tls.X509KeyPair(certInfoSlice[i].certPEM, certInfoSlice[i].keyPEM)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2020-02-15 19:58:05 +00:00
|
|
|
certGetter := reloadutil.NewCertificateGetter(certFile, keyFile, "")
|
2017-09-01 05:02:03 +00:00
|
|
|
certGetters = append(certGetters, certGetter)
|
2020-05-14 13:19:27 +00:00
|
|
|
certGetter.Reload()
|
2017-09-01 05:02:03 +00:00
|
|
|
tlsConfig := &tls.Config{
|
|
|
|
Certificates: []tls.Certificate{tlsCert},
|
|
|
|
RootCAs: testCluster.RootCAs,
|
|
|
|
ClientCAs: testCluster.RootCAs,
|
2017-11-09 20:55:23 +00:00
|
|
|
ClientAuth: tls.RequestClientCert,
|
2017-09-01 05:02:03 +00:00
|
|
|
NextProtos: []string{"h2", "http/1.1"},
|
|
|
|
GetCertificate: certGetter.GetCertificate,
|
|
|
|
}
|
2019-06-24 01:50:27 +00:00
|
|
|
if opts != nil && opts.RequireClientAuth {
|
|
|
|
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
|
|
|
|
testCluster.ClientAuthRequired = true
|
|
|
|
}
|
2017-09-01 05:02:03 +00:00
|
|
|
tlsConfigs = append(tlsConfigs, tlsConfig)
|
2021-04-08 16:43:39 +00:00
|
|
|
lns := []*TestListener{
|
|
|
|
{
|
|
|
|
Listener: tls.NewListener(ln, tlsConfig),
|
|
|
|
Address: ln.Addr().(*net.TCPAddr),
|
|
|
|
},
|
2017-09-01 05:02:03 +00:00
|
|
|
}
|
|
|
|
listeners = append(listeners, lns)
|
|
|
|
var handler http.Handler = http.NewServeMux()
|
|
|
|
handlers = append(handlers, handler)
|
|
|
|
server := &http.Server{
|
2018-08-21 15:23:18 +00:00
|
|
|
Handler: handler,
|
2019-07-23 19:17:37 +00:00
|
|
|
ErrorLog: testCluster.Logger.StandardLogger(nil),
|
2017-09-01 05:02:03 +00:00
|
|
|
}
|
|
|
|
servers = append(servers, server)
|
2017-02-27 17:49:35 +00:00
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
|
2017-07-31 15:28:06 +00:00
|
|
|
// Create three cores with the same physical and different redirect/cluster
|
|
|
|
// addrs.
|
2016-11-08 15:31:35 +00:00
|
|
|
// N.B.: On OSX, instead of random ports, it assigns new ports to new
|
|
|
|
// listeners sequentially. Aside from being a bad idea in a security sense,
|
|
|
|
// it also broke tests that assumed it was OK to just use the port above
|
2017-07-31 15:28:06 +00:00
|
|
|
// the redirect addr. This has now been changed to 105 ports above, but if
|
2016-11-08 15:31:35 +00:00
|
|
|
// we ever do more than three nodes in a cluster it may need to be bumped.
|
2017-07-31 15:28:06 +00:00
|
|
|
// Note: it's 105 so that we don't conflict with a running Consul by
|
|
|
|
// default.
|
2016-08-15 13:42:42 +00:00
|
|
|
coreConfig := &CoreConfig{
|
|
|
|
LogicalBackends: make(map[string]logical.Factory),
|
|
|
|
CredentialBackends: make(map[string]logical.Factory),
|
|
|
|
AuditBackends: make(map[string]audit.Factory),
|
2017-09-01 05:02:03 +00:00
|
|
|
RedirectAddr: fmt.Sprintf("https://127.0.0.1:%d", listeners[0][0].Address.Port),
|
2019-04-04 17:02:44 +00:00
|
|
|
ClusterAddr: "https://127.0.0.1:0",
|
2016-08-15 13:42:42 +00:00
|
|
|
DisableMlock: true,
|
2017-07-31 15:28:06 +00:00
|
|
|
EnableUI: true,
|
2018-06-03 22:14:51 +00:00
|
|
|
EnableRaw: true,
|
2018-11-07 01:21:24 +00:00
|
|
|
BuiltinRegistry: NewMockBuiltinRegistry(),
|
2016-08-15 13:42:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if base != nil {
|
2019-10-08 17:57:15 +00:00
|
|
|
coreConfig.RawConfig = base.RawConfig
|
2017-07-31 15:28:06 +00:00
|
|
|
coreConfig.DisableCache = base.DisableCache
|
|
|
|
coreConfig.EnableUI = base.EnableUI
|
|
|
|
coreConfig.DefaultLeaseTTL = base.DefaultLeaseTTL
|
|
|
|
coreConfig.MaxLeaseTTL = base.MaxLeaseTTL
|
|
|
|
coreConfig.CacheSize = base.CacheSize
|
|
|
|
coreConfig.PluginDirectory = base.PluginDirectory
|
|
|
|
coreConfig.Seal = base.Seal
|
2020-06-11 19:07:59 +00:00
|
|
|
coreConfig.UnwrapSeal = base.UnwrapSeal
|
2017-07-31 15:28:06 +00:00
|
|
|
coreConfig.DevToken = base.DevToken
|
2018-06-03 22:14:51 +00:00
|
|
|
coreConfig.EnableRaw = base.EnableRaw
|
2018-09-18 03:03:00 +00:00
|
|
|
coreConfig.DisableSealWrap = base.DisableSealWrap
|
|
|
|
coreConfig.DisableCache = base.DisableCache
|
2019-04-11 15:12:37 +00:00
|
|
|
coreConfig.LicensingConfig = base.LicensingConfig
|
2021-06-03 17:30:30 +00:00
|
|
|
coreConfig.License = base.License
|
|
|
|
coreConfig.LicensePath = base.LicensePath
|
2019-06-21 17:38:21 +00:00
|
|
|
coreConfig.DisablePerformanceStandby = base.DisablePerformanceStandby
|
2019-07-01 16:08:55 +00:00
|
|
|
coreConfig.MetricsHelper = base.MetricsHelper
|
2020-10-15 21:15:58 +00:00
|
|
|
coreConfig.MetricSink = base.MetricSink
|
2019-10-17 17:33:00 +00:00
|
|
|
coreConfig.SecureRandomReader = base.SecureRandomReader
|
2020-08-10 10:23:44 +00:00
|
|
|
coreConfig.DisableSentinelTrace = base.DisableSentinelTrace
|
2020-10-29 17:55:26 +00:00
|
|
|
coreConfig.ClusterName = base.ClusterName
|
2021-03-03 18:59:50 +00:00
|
|
|
coreConfig.DisableAutopilot = base.DisableAutopilot
|
2020-07-09 21:16:31 +00:00
|
|
|
|
2018-11-07 01:21:24 +00:00
|
|
|
if base.BuiltinRegistry != nil {
|
|
|
|
coreConfig.BuiltinRegistry = base.BuiltinRegistry
|
|
|
|
}
|
2017-07-31 15:28:06 +00:00
|
|
|
|
|
|
|
if !coreConfig.DisableMlock {
|
|
|
|
base.DisableMlock = false
|
|
|
|
}
|
|
|
|
|
2017-02-28 23:17:19 +00:00
|
|
|
if base.Physical != nil {
|
|
|
|
coreConfig.Physical = base.Physical
|
|
|
|
}
|
|
|
|
|
|
|
|
if base.HAPhysical != nil {
|
|
|
|
coreConfig.HAPhysical = base.HAPhysical
|
|
|
|
}
|
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
// Used to set something non-working to test fallback
|
|
|
|
switch base.ClusterAddr {
|
|
|
|
case "empty":
|
|
|
|
coreConfig.ClusterAddr = ""
|
|
|
|
case "":
|
|
|
|
default:
|
|
|
|
coreConfig.ClusterAddr = base.ClusterAddr
|
|
|
|
}
|
|
|
|
|
|
|
|
if base.LogicalBackends != nil {
|
|
|
|
for k, v := range base.LogicalBackends {
|
|
|
|
coreConfig.LogicalBackends[k] = v
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if base.CredentialBackends != nil {
|
|
|
|
for k, v := range base.CredentialBackends {
|
|
|
|
coreConfig.CredentialBackends[k] = v
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if base.AuditBackends != nil {
|
|
|
|
for k, v := range base.AuditBackends {
|
|
|
|
coreConfig.AuditBackends[k] = v
|
|
|
|
}
|
|
|
|
}
|
2016-12-02 11:31:06 +00:00
|
|
|
if base.Logger != nil {
|
|
|
|
coreConfig.Logger = base.Logger
|
|
|
|
}
|
2017-07-31 19:31:44 +00:00
|
|
|
|
2017-08-30 20:28:23 +00:00
|
|
|
coreConfig.ClusterCipherSuites = base.ClusterCipherSuites
|
|
|
|
|
2017-07-31 19:31:44 +00:00
|
|
|
coreConfig.DisableCache = base.DisableCache
|
|
|
|
|
|
|
|
coreConfig.DevToken = base.DevToken
|
2019-10-15 04:55:31 +00:00
|
|
|
coreConfig.RecoveryMode = base.RecoveryMode
|
2020-09-22 21:47:13 +00:00
|
|
|
|
2020-10-29 23:47:34 +00:00
|
|
|
coreConfig.ActivityLogConfig = base.ActivityLogConfig
|
2021-04-20 22:25:04 +00:00
|
|
|
coreConfig.EnableResponseHeaderHostname = base.EnableResponseHeaderHostname
|
|
|
|
coreConfig.EnableResponseHeaderRaftNodeID = base.EnableResponseHeaderRaftNodeID
|
2020-10-29 23:47:34 +00:00
|
|
|
|
2020-09-22 21:47:13 +00:00
|
|
|
testApplyEntBaseConfig(coreConfig, base)
|
2019-07-02 22:18:40 +00:00
|
|
|
}
|
2020-10-29 17:55:26 +00:00
|
|
|
if coreConfig.ClusterName == "" {
|
|
|
|
coreConfig.ClusterName = t.Name()
|
|
|
|
}
|
2020-07-27 20:10:26 +00:00
|
|
|
if coreConfig.ClusterHeartbeatInterval == 0 {
|
|
|
|
// Set this lower so that state populates quickly to standby nodes
|
|
|
|
coreConfig.ClusterHeartbeatInterval = 2 * time.Second
|
|
|
|
}
|
|
|
|
|
2019-10-08 17:57:15 +00:00
|
|
|
if coreConfig.RawConfig == nil {
|
2020-05-21 20:07:50 +00:00
|
|
|
c := new(server.Config)
|
|
|
|
c.SharedConfig = &configutil.SharedConfig{LogFormat: logging.UnspecifiedFormat.String()}
|
|
|
|
coreConfig.RawConfig = c
|
2019-10-08 17:57:15 +00:00
|
|
|
}
|
|
|
|
|
2019-07-02 22:18:40 +00:00
|
|
|
addAuditBackend := len(coreConfig.AuditBackends) == 0
|
|
|
|
if addAuditBackend {
|
2020-02-06 16:56:37 +00:00
|
|
|
AddNoopAudit(coreConfig, nil)
|
2016-12-01 20:25:05 +00:00
|
|
|
}
|
|
|
|
|
2019-06-20 19:14:58 +00:00
|
|
|
if coreConfig.Physical == nil && (opts == nil || opts.PhysicalFactory == nil) {
|
2019-07-23 19:17:37 +00:00
|
|
|
coreConfig.Physical, err = physInmem.NewInmem(nil, testCluster.Logger)
|
2017-08-03 17:24:27 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2017-02-28 23:17:19 +00:00
|
|
|
}
|
2019-06-20 19:14:58 +00:00
|
|
|
if coreConfig.HAPhysical == nil && (opts == nil || opts.PhysicalFactory == nil) {
|
2019-07-23 19:17:37 +00:00
|
|
|
haPhys, err := physInmem.NewInmemHA(nil, testCluster.Logger)
|
2017-08-03 17:24:27 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
coreConfig.HAPhysical = haPhys.(physical.HABackend)
|
2017-02-28 23:17:19 +00:00
|
|
|
}
|
|
|
|
|
2021-05-17 18:10:26 +00:00
|
|
|
if testCluster.LicensePublicKey == nil {
|
2021-06-03 17:30:30 +00:00
|
|
|
pubKey, priKey, err := GenerateTestLicenseKeys()
|
2021-05-17 18:10:26 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
testCluster.LicensePublicKey = pubKey
|
|
|
|
testCluster.LicensePrivateKey = priKey
|
2018-09-18 03:03:00 +00:00
|
|
|
}
|
|
|
|
|
2021-03-17 21:23:13 +00:00
|
|
|
if opts != nil && opts.InmemClusterLayers {
|
|
|
|
if opts.ClusterLayers != nil {
|
|
|
|
t.Fatalf("cannot specify ClusterLayers when InmemClusterLayers is true")
|
|
|
|
}
|
|
|
|
inmemCluster, err := cluster.NewInmemLayerCluster("inmem-cluster", numCores, testCluster.Logger.Named("inmem-cluster"))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
opts.ClusterLayers = inmemCluster
|
|
|
|
}
|
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
// Create cores
|
|
|
|
testCluster.cleanupFuncs = []func(){}
|
2017-09-01 05:02:03 +00:00
|
|
|
cores := []*Core{}
|
2018-10-23 06:34:02 +00:00
|
|
|
coreConfigs := []*CoreConfig{}
|
2020-06-16 18:12:22 +00:00
|
|
|
|
2017-09-01 05:02:03 +00:00
|
|
|
for i := 0; i < numCores; i++ {
|
2021-05-17 18:10:26 +00:00
|
|
|
cleanup, c, localConfig, handler := testCluster.newCore(t, i, coreConfig, opts, listeners[i], testCluster.LicensePublicKey)
|
2020-06-16 18:12:22 +00:00
|
|
|
|
|
|
|
testCluster.cleanupFuncs = append(testCluster.cleanupFuncs, cleanup)
|
|
|
|
cores = append(cores, c)
|
|
|
|
coreConfigs = append(coreConfigs, &localConfig)
|
|
|
|
|
|
|
|
if handler != nil {
|
|
|
|
handlers[i] = handler
|
|
|
|
servers[i].Handler = handlers[i]
|
2019-06-20 19:14:58 +00:00
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Clustering setup
|
|
|
|
for i := 0; i < numCores; i++ {
|
|
|
|
testCluster.setupClusterListener(t, i, cores[i], coreConfigs[i], opts, listeners[i], handlers[i])
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create TestClusterCores
|
|
|
|
var ret []*TestClusterCore
|
|
|
|
for i := 0; i < numCores; i++ {
|
|
|
|
tcc := &TestClusterCore{
|
|
|
|
Core: cores[i],
|
|
|
|
CoreConfig: coreConfigs[i],
|
|
|
|
ServerKey: certInfoSlice[i].key,
|
|
|
|
ServerKeyPEM: certInfoSlice[i].keyPEM,
|
|
|
|
ServerCert: certInfoSlice[i].cert,
|
|
|
|
ServerCertBytes: certInfoSlice[i].certBytes,
|
|
|
|
ServerCertPEM: certInfoSlice[i].certPEM,
|
|
|
|
Address: addresses[i],
|
|
|
|
Listeners: listeners[i],
|
|
|
|
Handler: handlers[i],
|
|
|
|
Server: servers[i],
|
|
|
|
TLSConfig: tlsConfigs[i],
|
|
|
|
Barrier: cores[i].barrier,
|
|
|
|
NodeID: fmt.Sprintf("core-%d", i),
|
|
|
|
UnderlyingRawStorage: coreConfigs[i].Physical,
|
2020-06-23 19:04:13 +00:00
|
|
|
UnderlyingHAStorage: coreConfigs[i].HAPhysical,
|
2020-01-17 07:03:02 +00:00
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
tcc.ReloadFuncs = &cores[i].reloadFuncs
|
|
|
|
tcc.ReloadFuncsLock = &cores[i].reloadFuncsLock
|
|
|
|
tcc.ReloadFuncsLock.Lock()
|
|
|
|
(*tcc.ReloadFuncs)["listener|tcp"] = []reloadutil.ReloadFunc{certGetters[i].Reload}
|
|
|
|
tcc.ReloadFuncsLock.Unlock()
|
2020-01-17 07:03:02 +00:00
|
|
|
|
2020-07-10 17:11:18 +00:00
|
|
|
testAdjustUnderlyingStorage(tcc)
|
2020-06-16 18:12:22 +00:00
|
|
|
|
|
|
|
ret = append(ret, tcc)
|
|
|
|
}
|
|
|
|
testCluster.Cores = ret
|
|
|
|
|
|
|
|
// Initialize cores
|
|
|
|
if opts == nil || !opts.SkipInit {
|
|
|
|
testCluster.initCores(t, opts, addAuditBackend)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Assign clients
|
|
|
|
for i := 0; i < numCores; i++ {
|
2022-01-27 18:06:34 +00:00
|
|
|
testCluster.Cores[i].Client = testCluster.getAPIClient(t, opts, listeners[i][0].Address.Port, tlsConfigs[i])
|
2020-06-16 18:12:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Extra Setup
|
|
|
|
for _, tcc := range testCluster.Cores {
|
2021-05-17 18:10:26 +00:00
|
|
|
testExtraTestCoreSetup(t, testCluster.LicensePrivateKey, tcc)
|
2020-06-16 18:12:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Cleanup
|
|
|
|
testCluster.CleanupFunc = func() {
|
|
|
|
for _, c := range testCluster.cleanupFuncs {
|
|
|
|
c()
|
|
|
|
}
|
|
|
|
if l, ok := testCluster.Logger.(*TestLogger); ok {
|
|
|
|
if t.Failed() {
|
|
|
|
_ = l.File.Close()
|
|
|
|
} else {
|
|
|
|
_ = os.Remove(l.Path)
|
2019-04-12 15:37:34 +00:00
|
|
|
}
|
2019-04-11 15:12:37 +00:00
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
}
|
2018-09-18 03:03:00 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
// Setup
|
|
|
|
if opts != nil {
|
|
|
|
if opts.SetupFunc != nil {
|
|
|
|
testCluster.SetupFunc = func() {
|
|
|
|
opts.SetupFunc(t, &testCluster)
|
|
|
|
}
|
2019-09-17 00:50:51 +00:00
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
}
|
2019-09-17 00:50:51 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
return &testCluster
|
|
|
|
}
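
// Construction sketch (hypothetical; newTestSeal stands in for whatever Seal
// constructor a test uses): opts.SealFunc gives every core its own seal,
// matching the behavior described in the NewTestCluster doc comment.
//
//	cluster := NewTestCluster(t, &CoreConfig{}, &TestClusterOptions{
//		NumCores: 3,
//		SealFunc: func() Seal { return newTestSeal(t) },
//	})
//	defer cluster.Cleanup()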
|
|
|
|
|
|
|
|
// StopCore performs an orderly shutdown of a core.
|
2021-02-18 20:40:18 +00:00
|
|
|
func (cluster *TestCluster) StopCore(t testing.T, idx int) {
|
2020-06-16 18:12:22 +00:00
|
|
|
t.Helper()
|
|
|
|
|
|
|
|
if idx < 0 || idx >= len(cluster.Cores) {
|
|
|
|
t.Fatalf("invalid core index %d", idx)
|
|
|
|
}
|
|
|
|
tcc := cluster.Cores[idx]
|
|
|
|
tcc.Logger().Info("stopping core", "core", idx)
|
|
|
|
|
2021-04-29 18:32:41 +00:00
|
|
|
// Stop listeners and call Finalize()
|
2020-06-16 18:12:22 +00:00
|
|
|
if err := tcc.stop(); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Run cleanup
|
|
|
|
cluster.cleanupFuncs[idx]()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Restart a TestClusterCore that was stopped, by replacing the
|
|
|
|
// underlying Core.
|
2021-02-18 20:40:18 +00:00
|
|
|
func (cluster *TestCluster) StartCore(t testing.T, idx int, opts *TestClusterOptions) {
|
2020-06-16 18:12:22 +00:00
|
|
|
t.Helper()
|
|
|
|
|
|
|
|
if idx < 0 || idx >= len(cluster.Cores) {
|
|
|
|
t.Fatalf("invalid core index %d", idx)
|
|
|
|
}
|
|
|
|
tcc := cluster.Cores[idx]
|
|
|
|
tcc.Logger().Info("restarting core", "core", idx)
|
|
|
|
|
|
|
|
// Set up listeners
|
|
|
|
ln, err := net.ListenTCP("tcp", tcc.Address)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2021-04-08 16:43:39 +00:00
|
|
|
tcc.Listeners = []*TestListener{
|
|
|
|
{
|
|
|
|
Listener: tls.NewListener(ln, tcc.TLSConfig),
|
|
|
|
Address: ln.Addr().(*net.TCPAddr),
|
|
|
|
},
|
2020-06-16 18:12:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
tcc.Handler = http.NewServeMux()
|
|
|
|
tcc.Server = &http.Server{
|
|
|
|
Handler: tcc.Handler,
|
|
|
|
ErrorLog: cluster.Logger.StandardLogger(nil),
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a new Core
|
2021-05-17 18:10:26 +00:00
|
|
|
cleanup, newCore, localConfig, coreHandler := cluster.newCore(t, idx, tcc.CoreConfig, opts, tcc.Listeners, cluster.LicensePublicKey)
|
2020-06-16 18:12:22 +00:00
|
|
|
if coreHandler != nil {
|
|
|
|
tcc.Handler = coreHandler
|
|
|
|
tcc.Server.Handler = coreHandler
|
|
|
|
}
|
|
|
|
|
|
|
|
cluster.cleanupFuncs[idx] = cleanup
|
|
|
|
tcc.Core = newCore
|
|
|
|
tcc.CoreConfig = &localConfig
|
|
|
|
tcc.UnderlyingRawStorage = localConfig.Physical
|
|
|
|
|
|
|
|
cluster.setupClusterListener(
|
|
|
|
t, idx, newCore, tcc.CoreConfig,
|
|
|
|
opts, tcc.Listeners, tcc.Handler)
|
|
|
|
|
|
|
|
tcc.Client = cluster.getAPIClient(t, opts, tcc.Listeners[0].Address.Port, tcc.TLSConfig)
|
|
|
|
|
2020-07-10 17:11:18 +00:00
|
|
|
testAdjustUnderlyingStorage(tcc)
|
2021-05-17 18:10:26 +00:00
|
|
|
testExtraTestCoreSetup(t, cluster.LicensePrivateKey, tcc)
|
2020-06-16 18:12:22 +00:00
|
|
|
|
|
|
|
// Start listeners
|
|
|
|
for _, ln := range tcc.Listeners {
|
|
|
|
tcc.Logger().Info("starting listener for core", "port", ln.Address.Port)
|
|
|
|
go tcc.Server.Serve(ln)
|
|
|
|
}
|
|
|
|
|
|
|
|
tcc.Logger().Info("restarted test core", "core", idx)
|
|
|
|
}
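
// Restart sketch (hypothetical; opts is the same TestClusterOptions the
// cluster was built with): StopCore tears node 1 down and StartCore replaces
// its Core in place, reusing the node's address, certs, and storage, after
// which the node still has to be unsealed.
//
//	cluster.StopCore(t, 1)
//	// ... exercise the cluster with node 1 down ...
//	cluster.StartCore(t, 1, opts)
//	cluster.UnsealCore(t, cluster.Cores[1])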
|
|
|
|
|
2021-05-17 18:10:26 +00:00
|
|
|
func (testCluster *TestCluster) newCore(t testing.T, idx int, coreConfig *CoreConfig, opts *TestClusterOptions, listeners []*TestListener, pubKey ed25519.PublicKey) (func(), *Core, CoreConfig, http.Handler) {
|
2020-06-16 18:12:22 +00:00
|
|
|
localConfig := *coreConfig
|
|
|
|
cleanupFunc := func() {}
|
|
|
|
var handler http.Handler
|
|
|
|
|
|
|
|
var disablePR1103 bool
|
|
|
|
if opts != nil && opts.PR1103Disabled {
|
|
|
|
disablePR1103 = true
|
|
|
|
}
|
|
|
|
|
|
|
|
var firstCoreNumber int
|
|
|
|
if opts != nil {
|
|
|
|
firstCoreNumber = opts.FirstCoreNumber
|
|
|
|
}
|
|
|
|
|
|
|
|
localConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[0].Address.Port)
|
|
|
|
|
|
|
|
// if opts.SealFunc is provided, use that to generate a seal for the config instead
|
|
|
|
if opts != nil && opts.SealFunc != nil {
|
|
|
|
localConfig.Seal = opts.SealFunc()
|
|
|
|
}
|
2020-10-23 18:16:04 +00:00
|
|
|
if opts != nil && opts.UnwrapSealFunc != nil {
|
|
|
|
localConfig.UnwrapSeal = opts.UnwrapSealFunc()
|
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
|
|
|
|
if coreConfig.Logger == nil || (opts != nil && opts.Logger != nil) {
|
|
|
|
localConfig.Logger = testCluster.Logger.Named(fmt.Sprintf("core%d", idx))
|
|
|
|
}
|
|
|
|
if opts != nil && opts.PhysicalFactory != nil {
|
2022-05-20 20:49:11 +00:00
|
|
|
pfc := opts.PhysicalFactoryConfig
|
|
|
|
if pfc == nil {
|
|
|
|
pfc = make(map[string]interface{})
|
|
|
|
}
|
|
|
|
if len(opts.VersionMap) > 0 {
|
|
|
|
pfc["autopilot_upgrade_version"] = opts.VersionMap[idx]
|
|
|
|
}
|
|
|
|
if len(opts.RedundancyZoneMap) > 0 {
|
|
|
|
pfc["autopilot_redundancy_zone"] = opts.RedundancyZoneMap[idx]
|
|
|
|
}
|
|
|
|
physBundle := opts.PhysicalFactory(t, idx, localConfig.Logger, pfc)
|
2020-06-16 18:12:22 +00:00
|
|
|
switch {
|
|
|
|
case physBundle == nil && coreConfig.Physical != nil:
|
|
|
|
case physBundle == nil && coreConfig.Physical == nil:
|
|
|
|
t.Fatal("PhysicalFactory produced no physical and none in CoreConfig")
|
|
|
|
case physBundle != nil:
|
2020-06-23 19:04:13 +00:00
|
|
|
// Storage backend setup
|
|
|
|
if physBundle.Backend != nil {
|
|
|
|
testCluster.Logger.Info("created physical backend", "instance", idx)
|
|
|
|
coreConfig.Physical = physBundle.Backend
|
|
|
|
localConfig.Physical = physBundle.Backend
|
|
|
|
}
|
|
|
|
|
|
|
|
// HA Backend setup
|
2020-06-16 18:12:22 +00:00
|
|
|
haBackend := physBundle.HABackend
|
|
|
|
if haBackend == nil {
|
|
|
|
if ha, ok := physBundle.Backend.(physical.HABackend); ok {
|
|
|
|
haBackend = ha
|
|
|
|
}
|
|
|
|
}
|
|
|
|
coreConfig.HAPhysical = haBackend
|
|
|
|
localConfig.HAPhysical = haBackend
|
2020-06-23 19:04:13 +00:00
|
|
|
|
|
|
|
// Cleanup setup
|
2020-06-16 18:12:22 +00:00
|
|
|
if physBundle.Cleanup != nil {
|
|
|
|
cleanupFunc = physBundle.Cleanup
|
2019-10-15 04:55:31 +00:00
|
|
|
}
|
2017-09-01 05:02:03 +00:00
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
}
|
2018-10-23 06:34:02 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
if opts != nil && opts.ClusterLayers != nil {
|
|
|
|
localConfig.ClusterNetworkLayer = opts.ClusterLayers.Layers()[idx]
|
2021-03-03 18:59:50 +00:00
|
|
|
localConfig.ClusterAddr = "https://" + localConfig.ClusterNetworkLayer.Listeners()[0].Addr().String()
|
2020-06-16 18:12:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case localConfig.LicensingConfig != nil:
|
|
|
|
if pubKey != nil {
|
2021-05-17 18:10:26 +00:00
|
|
|
localConfig.LicensingConfig.AdditionalPublicKeys = append(localConfig.LicensingConfig.AdditionalPublicKeys, pubKey)
|
2018-10-23 06:34:02 +00:00
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
default:
|
|
|
|
localConfig.LicensingConfig = testGetLicensingConfig(pubKey)
|
|
|
|
}
|
|
|
|
|
|
|
|
if localConfig.MetricsHelper == nil {
|
|
|
|
inm := metrics.NewInmemSink(10*time.Second, time.Minute)
|
|
|
|
metrics.DefaultInmemSignal(inm)
|
|
|
|
localConfig.MetricsHelper = metricsutil.NewMetricsHelper(inm, false)
|
|
|
|
}
|
2020-10-29 17:55:26 +00:00
|
|
|
if opts != nil && opts.CoreMetricSinkProvider != nil {
|
2020-10-29 23:47:34 +00:00
|
|
|
localConfig.MetricSink, localConfig.MetricsHelper = opts.CoreMetricSinkProvider(localConfig.ClusterName)
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
|
|
|
|
localConfig.NumExpirationWorkers = numExpirationWorkersTest
|
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
c, err := NewCore(&localConfig)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
c.coreNumber = firstCoreNumber + idx
|
|
|
|
c.PR1103disabled = disablePR1103
|
|
|
|
if opts != nil && opts.HandlerFunc != nil {
|
|
|
|
props := opts.DefaultHandlerProperties
|
|
|
|
props.Core = c
|
|
|
|
if props.ListenerConfig != nil && props.ListenerConfig.MaxRequestDuration == 0 {
|
|
|
|
props.ListenerConfig.MaxRequestDuration = DefaultMaxRequestDuration
|
|
|
|
}
|
|
|
|
handler = opts.HandlerFunc(&props)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set this in case the Seal was manually set before the core was
|
|
|
|
// created
|
|
|
|
if localConfig.Seal != nil {
|
|
|
|
localConfig.Seal.SetCore(c)
|
|
|
|
}
|
|
|
|
|
|
|
|
return cleanupFunc, c, localConfig, handler
|
|
|
|
}
|
|
|
|
|
|
|
|
func (testCluster *TestCluster) setupClusterListener(
|
2021-02-18 20:40:18 +00:00
|
|
|
t testing.T, idx int, core *Core, coreConfig *CoreConfig,
|
2022-04-14 20:54:23 +00:00
|
|
|
opts *TestClusterOptions, listeners []*TestListener, handler http.Handler,
|
|
|
|
) {
|
2020-06-16 18:12:22 +00:00
|
|
|
if coreConfig.ClusterAddr == "" {
|
|
|
|
return
|
2017-07-31 15:28:06 +00:00
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
|
2020-05-14 12:31:02 +00:00
|
|
|
clusterAddrGen := func(lns []*TestListener, port int) []*net.TCPAddr {
|
2016-08-19 15:03:53 +00:00
|
|
|
ret := make([]*net.TCPAddr, len(lns))
|
2016-08-15 13:42:42 +00:00
|
|
|
for i, ln := range lns {
|
2016-08-19 15:03:53 +00:00
|
|
|
ret[i] = &net.TCPAddr{
|
|
|
|
IP: ln.Address.IP,
|
2020-05-14 12:31:02 +00:00
|
|
|
Port: port,
|
2016-08-15 13:42:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
baseClusterListenPort := 0
|
|
|
|
if opts != nil && opts.BaseClusterListenPort != 0 {
|
|
|
|
if opts.BaseListenAddress == "" {
|
|
|
|
t.Fatal("BaseListenAddress is not specified")
|
2017-09-01 05:02:03 +00:00
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
baseClusterListenPort = opts.BaseClusterListenPort
|
2017-09-01 05:02:03 +00:00
|
|
|
}
|
2017-07-31 15:28:06 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
port := 0
|
|
|
|
if baseClusterListenPort != 0 {
|
|
|
|
port = baseClusterListenPort + idx
|
|
|
|
}
|
|
|
|
core.Logger().Info("assigning cluster listener for test core", "core", idx, "port", port)
|
|
|
|
core.SetClusterListenerAddrs(clusterAddrGen(listeners, port))
|
|
|
|
core.SetClusterHandler(handler)
|
|
|
|
}
|
|
|
|
|
2021-02-18 20:40:18 +00:00
|
|
|
func (tc *TestCluster) initCores(t testing.T, opts *TestClusterOptions, addAuditBackend bool) {
|
2020-06-16 18:12:22 +00:00
|
|
|
leader := tc.Cores[0]
|
|
|
|
|
|
|
|
bKeys, rKeys, root := TestCoreInitClusterWrapperSetup(t, leader.Core, leader.Handler)
|
|
|
|
barrierKeys, _ := copystructure.Copy(bKeys)
|
|
|
|
tc.BarrierKeys = barrierKeys.([][]byte)
|
|
|
|
recoveryKeys, _ := copystructure.Copy(rKeys)
|
|
|
|
tc.RecoveryKeys = recoveryKeys.([][]byte)
|
|
|
|
tc.RootToken = root
|
|
|
|
|
|
|
|
// Write root token and barrier keys
|
2021-04-08 16:43:39 +00:00
|
|
|
err := ioutil.WriteFile(filepath.Join(tc.TempDir, "root_token"), []byte(root), 0o755)
|
2020-06-16 18:12:22 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
var buf bytes.Buffer
|
|
|
|
for i, key := range tc.BarrierKeys {
|
|
|
|
buf.Write([]byte(base64.StdEncoding.EncodeToString(key)))
|
|
|
|
if i < len(tc.BarrierKeys)-1 {
|
|
|
|
buf.WriteRune('\n')
|
2017-10-23 18:59:37 +00:00
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
}
|
2021-04-08 16:43:39 +00:00
|
|
|
err = ioutil.WriteFile(filepath.Join(tc.TempDir, "barrier_keys"), buf.Bytes(), 0o755)
|
2020-06-16 18:12:22 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
for i, key := range tc.RecoveryKeys {
|
|
|
|
buf.Write([]byte(base64.StdEncoding.EncodeToString(key)))
|
|
|
|
if i < len(tc.RecoveryKeys)-1 {
|
|
|
|
buf.WriteRune('\n')
|
2017-10-23 18:59:37 +00:00
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
}
|
2021-04-08 16:43:39 +00:00
|
|
|
err = ioutil.WriteFile(filepath.Join(tc.TempDir, "recovery_keys"), buf.Bytes(), 0o755)
|
2020-06-16 18:12:22 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2017-07-31 15:28:06 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
// Unseal first core
|
|
|
|
for _, key := range bKeys {
|
|
|
|
if _, err := leader.Core.Unseal(TestKeyCopy(key)); err != nil {
|
|
|
|
t.Fatalf("unseal err: %s", err)
|
2017-01-17 20:43:10 +00:00
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
ctx := context.Background()
|
2018-01-19 06:44:44 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
// If stored keys are supported, the above will be a no-op, so trigger auto-unseal
|
|
|
|
// using stored keys to try to unseal
|
|
|
|
if err := leader.Core.UnsealWithStoredKeys(ctx); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2017-10-23 18:59:37 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
// Verify unsealed
|
|
|
|
if leader.Core.Sealed() {
|
|
|
|
t.Fatal("should not be sealed")
|
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
TestWaitActive(t, leader.Core)
|
2016-08-15 13:42:42 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
// Existing tests rely on this; we can make a toggle to disable it
|
|
|
|
// later if we want
|
|
|
|
kvReq := &logical.Request{
|
|
|
|
Operation: logical.UpdateOperation,
|
|
|
|
ClientToken: tc.RootToken,
|
|
|
|
Path: "sys/mounts/secret",
|
|
|
|
Data: map[string]interface{}{
|
|
|
|
"type": "kv",
|
|
|
|
"path": "secret/",
|
|
|
|
"description": "key/value secret storage",
|
|
|
|
"options": map[string]string{
|
|
|
|
"version": "1",
|
2019-02-14 19:55:32 +00:00
|
|
|
},
|
2020-06-16 18:12:22 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
resp, err := leader.Core.HandleRequest(namespace.RootContext(ctx), kvReq)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if resp.IsError() {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2019-10-18 18:46:00 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
cfg, err := leader.Core.seal.BarrierConfig(ctx)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2017-10-23 18:59:37 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
// Unseal other cores unless otherwise specified
|
|
|
|
numCores := len(tc.Cores)
|
|
|
|
if (opts == nil || !opts.KeepStandbysSealed) && numCores > 1 {
|
|
|
|
for i := 1; i < numCores; i++ {
|
|
|
|
tc.Cores[i].Core.seal.SetCachedBarrierConfig(cfg)
|
|
|
|
for _, key := range bKeys {
|
|
|
|
if _, err := tc.Cores[i].Core.Unseal(TestKeyCopy(key)); err != nil {
|
|
|
|
t.Fatalf("unseal err: %s", err)
|
2017-10-23 18:59:37 +00:00
|
|
|
}
|
2017-01-17 20:43:10 +00:00
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
// If stored keys are supported, the above will be a no-op, so trigger auto-unseal
|
|
|
|
// using stored keys
|
|
|
|
if err := tc.Cores[i].Core.UnsealWithStoredKeys(ctx); err != nil {
|
|
|
|
t.Fatal(err)
|
2017-09-01 05:02:03 +00:00
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
}
|
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
// Let them come fully up to standby
|
|
|
|
time.Sleep(2 * time.Second)
|
|
|
|
|
|
|
|
// Ensure cluster connection info is populated.
|
|
|
|
// Other cores should not come up as leaders.
|
|
|
|
for i := 1; i < numCores; i++ {
|
|
|
|
isLeader, _, _, err := tc.Cores[i].Core.Leader()
|
2019-07-02 22:18:40 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
if isLeader {
|
|
|
|
t.Fatalf("core[%d] should not be leader", i)
|
2019-07-02 22:18:40 +00:00
|
|
|
}
|
|
|
|
}
|
2017-02-28 23:17:19 +00:00
|
|
|
}
|
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
//
|
|
|
|
// Set test cluster core(s) and test cluster
|
|
|
|
//
|
|
|
|
cluster, err := leader.Core.Cluster(context.Background())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
tc.ID = cluster.ID
|
|
|
|
|
|
|
|
if addAuditBackend {
|
|
|
|
// Enable auditing.
|
|
|
|
auditReq := &logical.Request{
|
|
|
|
Operation: logical.UpdateOperation,
|
|
|
|
ClientToken: tc.RootToken,
|
|
|
|
Path: "sys/audit/noop",
|
|
|
|
Data: map[string]interface{}{
|
|
|
|
"type": "noop",
|
2017-02-24 15:45:29 +00:00
|
|
|
},
|
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
resp, err = leader.Core.HandleRequest(namespace.RootContext(ctx), auditReq)
|
2017-02-24 15:45:29 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
if resp.IsError() {
|
|
|
|
t.Fatal(err)
|
2017-09-01 05:02:03 +00:00
|
|
|
}
|
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
}
|
2018-09-18 03:03:00 +00:00
|
|
|
|
2020-06-16 18:12:22 +00:00
|
|
|
func (testCluster *TestCluster) getAPIClient(
|
2021-02-18 20:40:18 +00:00
|
|
|
t testing.T, opts *TestClusterOptions,
|
2022-04-14 20:54:23 +00:00
|
|
|
port int, tlsConfig *tls.Config,
|
|
|
|
) *api.Client {
|
2020-06-16 18:12:22 +00:00
|
|
|
transport := cleanhttp.DefaultPooledTransport()
|
|
|
|
transport.TLSClientConfig = tlsConfig.Clone()
|
|
|
|
if err := http2.ConfigureTransport(transport); err != nil {
|
|
|
|
t.Fatal(err)
|
2019-08-23 19:51:25 +00:00
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
client := &http.Client{
|
|
|
|
Transport: transport,
|
|
|
|
CheckRedirect: func(*http.Request, []*http.Request) error {
|
|
|
|
// This can of course be overridden per-test by using its own client
|
|
|
|
return fmt.Errorf("redirects not allowed in these tests")
|
|
|
|
},
|
2019-08-23 19:51:25 +00:00
|
|
|
}
|
2020-06-16 18:12:22 +00:00
|
|
|
config := api.DefaultConfig()
|
|
|
|
if config.Error != nil {
|
|
|
|
t.Fatal(config.Error)
|
|
|
|
}
|
|
|
|
config.Address = fmt.Sprintf("https://127.0.0.1:%d", port)
|
|
|
|
config.HttpClient = client
|
|
|
|
config.MaxRetries = 0
|
|
|
|
apiClient, err := api.NewClient(config)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if opts == nil || !opts.SkipInit {
|
|
|
|
apiClient.SetToken(testCluster.RootToken)
|
|
|
|
}
|
|
|
|
return apiClient
|
2016-08-15 13:42:42 +00:00
|
|
|
}
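
// Client sketch (hypothetical request): each TestClusterCore carries one of
// these API clients, pointed at its own listener and, unless SkipInit was
// set, pre-authenticated with the cluster root token. The "secret/" KV v1
// mount written to below is created by initCores.
//
//	client := cluster.Cores[0].Client
//	if _, err := client.Logical().Write("secret/foo", map[string]interface{}{
//		"value": "bar",
//	}); err != nil {
//		t.Fatal(err)
//	}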
|
2018-11-07 01:21:24 +00:00
|
|
|
|
func toFunc(f logical.Factory) func() (interface{}, error) {
	return func() (interface{}, error) {
		return f, nil
	}
}

func NewMockBuiltinRegistry() *mockBuiltinRegistry {
	return &mockBuiltinRegistry{
		forTesting: map[string]consts.PluginType{
			"mysql-database-plugin":      consts.PluginTypeDatabase,
			"postgresql-database-plugin": consts.PluginTypeDatabase,
			"approle":                    consts.PluginTypeCredential,
			"aws":                        consts.PluginTypeCredential,
		},
	}
}

type mockBuiltinRegistry struct {
	forTesting map[string]consts.PluginType
}

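// Wiring sketch (hypothetical): tests that need plugin-backed mounts can drop
// this mock in through CoreConfig, which is exactly what the default
// coreConfig in NewTestCluster does.
//
//	conf := &CoreConfig{
//		BuiltinRegistry: NewMockBuiltinRegistry(),
//	}
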
2022-04-14 20:54:23 +00:00
|
|
|
// Get only supports getting database plugins, approle, and aws
|
2018-11-07 01:21:24 +00:00
|
|
|
func (m *mockBuiltinRegistry) Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool) {
|
|
|
|
testPluginType, ok := m.forTesting[name]
|
|
|
|
if !ok {
|
|
|
|
return nil, false
|
|
|
|
}
|
|
|
|
if pluginType != testPluginType {
|
|
|
|
return nil, false
|
|
|
|
}
|
2022-04-14 20:54:23 +00:00
|
|
|
|
|
|
|
if name == "approle" {
|
|
|
|
return toFunc(approle.Factory), true
|
|
|
|
}
|
|
|
|
|
2022-08-11 01:02:05 +00:00
|
|
|
if name == "aws" {
|
|
|
|
return toFunc(func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
|
|
|
|
b := new(framework.Backend)
|
|
|
|
b.Setup(ctx, config)
|
|
|
|
b.BackendType = logical.TypeCredential
|
|
|
|
return b, nil
|
|
|
|
}), true
|
|
|
|
}
|
|
|
|
|
2018-11-07 01:21:24 +00:00
|
|
|
if name == "postgresql-database-plugin" {
|
2022-08-10 22:01:24 +00:00
|
|
|
return toFunc(func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
|
|
|
|
b := new(framework.Backend)
|
|
|
|
b.Setup(ctx, config)
|
|
|
|
b.BackendType = logical.TypeLogical
|
|
|
|
return b, nil
|
|
|
|
}), true
|
2018-11-07 01:21:24 +00:00
|
|
|
}
|
2021-02-11 21:08:32 +00:00
|
|
|
return dbMysql.New(dbMysql.DefaultUserNameTemplate), true
|
2018-11-07 01:21:24 +00:00
|
|
|
}

// Keys only supports getting a realistic list of the keys for database plugins,
// and approle
func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string {
	switch pluginType {
	case consts.PluginTypeDatabase:
		// This is a hard-coded reproduction of the db plugin keys in
		// helper/builtinplugins/registry.go. The registry isn't directly used
		// because it causes import cycles.
		return []string{
			"mysql-database-plugin",
			"mysql-aurora-database-plugin",
			"mysql-rds-database-plugin",
			"mysql-legacy-database-plugin",

			"cassandra-database-plugin",
			"couchbase-database-plugin",
			"elasticsearch-database-plugin",
			"hana-database-plugin",
			"influxdb-database-plugin",
			"mongodb-database-plugin",
			"mongodbatlas-database-plugin",
			"mssql-database-plugin",
			"postgresql-database-plugin",
			"redshift-database-plugin",
			"snowflake-database-plugin",
		}
	case consts.PluginTypeCredential:
		return []string{
			"approle",
		}
	}
	return []string{}
}

func (m *mockBuiltinRegistry) Contains(name string, pluginType consts.PluginType) bool {
	for _, key := range m.Keys(pluginType) {
		if key == name {
			return true
		}
	}
	return false
}

func (m *mockBuiltinRegistry) DeprecationStatus(name string, pluginType consts.PluginType) (consts.DeprecationStatus, bool) {
	if m.Contains(name, pluginType) {
		return consts.Supported, true
	}

	return consts.Unknown, false
}
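
// Illustrative sketch (assumes CoreConfig exposes the BuiltinRegistry field the
// test helpers rely on elsewhere): tests that exercise the plugin catalog can
// run against the mock registry instead of the full builtin plugin set.
func exampleCoreConfigWithMockRegistry() *CoreConfig {
	return &CoreConfig{
		BuiltinRegistry: NewMockBuiltinRegistry(),
	}
}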

type NoopAudit struct {
	Config         *audit.BackendConfig
	ReqErr         error
	ReqAuth        []*logical.Auth
	Req            []*logical.Request
	ReqHeaders     []map[string][]string
	ReqNonHMACKeys []string
	ReqErrs        []error

	RespErr            error
	RespAuth           []*logical.Auth
	RespReq            []*logical.Request
	Resp               []*logical.Response
	RespNonHMACKeys    []string
	RespReqNonHMACKeys []string
	RespErrs           []error

	salt      *salt.Salt
	saltMutex sync.RWMutex
}

func (n *NoopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error {
	n.ReqAuth = append(n.ReqAuth, in.Auth)
	n.Req = append(n.Req, in.Request)
	n.ReqHeaders = append(n.ReqHeaders, in.Request.Headers)
	n.ReqNonHMACKeys = in.NonHMACReqDataKeys
	n.ReqErrs = append(n.ReqErrs, in.OuterErr)
	return n.ReqErr
}

func (n *NoopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error {
	n.RespAuth = append(n.RespAuth, in.Auth)
	n.RespReq = append(n.RespReq, in.Request)
	n.Resp = append(n.Resp, in.Response)
	n.RespErrs = append(n.RespErrs, in.OuterErr)

	if in.Response != nil {
		n.RespNonHMACKeys = in.NonHMACRespDataKeys
		n.RespReqNonHMACKeys = in.NonHMACReqDataKeys
	}

	return n.RespErr
}

func (n *NoopAudit) LogTestMessage(ctx context.Context, in *logical.LogInput, options map[string]string) error {
	return nil
}

func (n *NoopAudit) Salt(ctx context.Context) (*salt.Salt, error) {
	n.saltMutex.RLock()
	if n.salt != nil {
		defer n.saltMutex.RUnlock()
		return n.salt, nil
	}
	n.saltMutex.RUnlock()
	n.saltMutex.Lock()
	defer n.saltMutex.Unlock()
	if n.salt != nil {
		return n.salt, nil
	}
	salt, err := salt.NewSalt(ctx, n.Config.SaltView, n.Config.SaltConfig)
	if err != nil {
		return nil, err
	}
	n.salt = salt
	return salt, nil
}

func (n *NoopAudit) GetHash(ctx context.Context, data string) (string, error) {
	salt, err := n.Salt(ctx)
	if err != nil {
		return "", err
	}
	return salt.GetIdentifiedHMAC(data), nil
}

func (n *NoopAudit) Reload(ctx context.Context) error {
	return nil
}

func (n *NoopAudit) Invalidate(ctx context.Context) {
	n.saltMutex.Lock()
	defer n.saltMutex.Unlock()
	n.salt = nil
}
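
// Illustrative sketch (assumptions: audit.Factory has the signature
// func(context.Context, *audit.BackendConfig) (audit.Backend, error) and
// NoopAudit satisfies audit.Backend): a test core can record audit activity in
// memory, without writing it anywhere, by registering NoopAudit as a backend.
func exampleCoreConfigWithNoopAudit() *CoreConfig {
	return &CoreConfig{
		AuditBackends: map[string]audit.Factory{
			"noop": func(_ context.Context, config *audit.BackendConfig) (audit.Backend, error) {
				return &NoopAudit{Config: config}, nil
			},
		},
	}
}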

// RetryUntil runs f until it returns a nil result or the timeout is reached.
// If a nil result hasn't been obtained by timeout, calls t.Fatal.
func RetryUntil(t testing.T, timeout time.Duration, f func() error) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	var err error
	for time.Now().Before(deadline) {
		if err = f(); err == nil {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	t.Fatalf("did not complete before deadline, err: %v", err)
}
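
// Illustrative sketch: RetryUntil is intended for asynchronous test conditions,
// for example waiting for a core to finish unsealing before making assertions.
// The condition closure below is only an example.
func exampleWaitForUnseal(t testing.T, core *Core) {
	RetryUntil(t, 10*time.Second, func() error {
		if core.Sealed() {
			return errors.New("core is still sealed")
		}
		return nil
	})
}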

// CreateTestClusterWithRollbackPeriod lets us modify the periodic func
// invocation time period to some other value.
//
// Because multiple tests in the PKI mount use this helper, we've added
// a lock around it and created the cluster immediately in this helper.
// This ensures the tests don't race against each other.
var rollbackPeriodLock sync.Mutex

func CreateTestClusterWithRollbackPeriod(t testing.T, newPeriod time.Duration, base *CoreConfig, opts *TestClusterOptions) *TestCluster {
	rollbackPeriodLock.Lock()
	defer rollbackPeriodLock.Unlock()

	// Set the period
	oldPeriod := rollbackPeriod

	// Create and start a new cluster.
	rollbackPeriod = newPeriod
	cluster := NewTestCluster(t, base, opts)
	cluster.Start()

	// Reset the period
	rollbackPeriod = oldPeriod

	// Return the cluster.
	return cluster
}
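
// Illustrative sketch: a PKI test that needs the periodic func to fire quickly
// could build its cluster with a one-second rollback period. The nil base and
// opts here are placeholders; real tests pass their usual CoreConfig and
// TestClusterOptions (including an HTTP handler).
func exampleFastRollbackCluster(t testing.T) *TestCluster {
	cluster := CreateTestClusterWithRollbackPeriod(t, 1*time.Second, nil, nil)
	// Callers remain responsible for tearing the cluster down, typically with
	// defer cluster.Cleanup() in the test body.
	return cluster
}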

// MakeTestPluginDir creates a temporary directory suitable for holding plugins.
// This helper also resolves symlinks to make tests happy on OS X.
func MakeTestPluginDir(t testing.T) (string, func(t testing.T)) {
	if t != nil {
		t.Helper()
	}

	dir, err := os.MkdirTemp("", "")
	if err != nil {
		if t == nil {
			panic(err)
		}
		t.Fatal(err)
	}

	// OS X tempdirs live under /var, which is actually a symlink to /private/var.
	dir, err = filepath.EvalSymlinks(dir)
	if err != nil {
		if t == nil {
			panic(err)
		}
		t.Fatal(err)
	}

	return dir, func(t testing.T) {
		if err := os.RemoveAll(dir); err != nil {
			if t == nil {
				panic(err)
			}
			t.Fatal(err)
		}
	}
}
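
// Illustrative sketch: typical callers create the plugin directory up front,
// defer the returned cleanup func, and point the test core at the directory
// (shown here via CoreConfig's PluginDirectory field, an assumption of this
// example rather than something defined in this file).
func examplePluginDirUsage(t testing.T) {
	pluginDir, cleanup := MakeTestPluginDir(t)
	defer cleanup(t)

	conf := &CoreConfig{
		PluginDirectory: pluginDir,
	}
	_ = conf
}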
|