open-vault/command/server/config_test_helpers.go

package server

import (
"fmt"
"reflect"
"strings"
"testing"
"time"

"github.com/go-test/deep"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/token"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/stretchr/testify/require"
)
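// DefaultCustomHeaders mirrors the custom response headers that config
// loading applies to every listener by default; the expected configs
// throughout this file reference it.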
var DefaultCustomHeaders = map[string]map[string]string{
"default": {
"Strict-Transport-Security": configutil.StrictTransportSecurity,
},
}
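// boolPointer returns a pointer to the given bool, a convenience for
// building expected configs.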
func boolPointer(x bool) *bool {
return &x
}
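// testConfigRaftRetryJoin checks that the retry_join stanzas in
// ./test-fixtures/raft_retry_join.hcl are flattened into a single
// JSON-encoded "retry_join" value on the raft storage config. The
// fixture presumably contains one retry_join block per leader, e.g.:
//
//	storage "raft" {
//	  path    = "/storage/path/raft"
//	  node_id = "raft1"
//	  retry_join {
//	    leader_api_addr = "http://127.0.0.1:8200"
//	  }
//	  retry_join {
//	    leader_api_addr = "http://127.0.0.2:8200"
//	  }
//	  retry_join {
//	    leader_api_addr = "http://127.0.0.3:8200"
//	  }
//	}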
func testConfigRaftRetryJoin(t *testing.T) {
config, err := LoadConfigFile("./test-fixtures/raft_retry_join.hcl")
if err != nil {
t.Fatal(err)
}
retryJoinConfig := `[{"leader_api_addr":"http://127.0.0.1:8200"},{"leader_api_addr":"http://127.0.0.2:8200"},{"leader_api_addr":"http://127.0.0.3:8200"}]`
expected := &Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:8200",
CustomResponseHeaders: DefaultCustomHeaders,
},
},
DisableMlock: true,
},
Storage: &Storage{
Type: "raft",
Config: map[string]string{
"path": "/storage/path/raft",
"node_id": "raft1",
"retry_join": retryJoinConfig,
},
},
}
config.Prune()
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
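// testLoadConfigFile_topLevel loads config2.hcl, where api_addr and
// cluster_addr are set at the top level, and verifies that storage and
// ha_storage inherit those addresses as their redirect and cluster
// addresses.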
func testLoadConfigFile_topLevel(t *testing.T, entropy *configutil.Entropy) {
config, err := LoadConfigFile("./test-fixtures/config2.hcl")
if err != nil {
t.Fatalf("err: %s", err)
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
CustomResponseHeaders: DefaultCustomHeaders,
},
},
Telemetry: &configutil.Telemetry{
StatsdAddr: "bar",
StatsiteAddr: "foo",
DisableHostname: false,
DogStatsDAddr: "127.0.0.1:7254",
DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"},
PrometheusRetentionTime: 30 * time.Second,
UsageGaugePeriod: 5 * time.Minute,
MaximumGaugeCardinality: 125,
LeaseMetricsEpsilon: time.Hour,
NumLeaseMetricsTimeBuckets: 168,
LeaseMetricsNameSpaceLabels: false,
},
DisableMlock: true,
PidFile: "./pidfile",
ClusterName: "testcluster",
Seals: []*configutil.KMS{
{
Type: "nopurpose",
},
{
Type: "stringpurpose",
Purpose: []string{"foo"},
},
{
Type: "commastringpurpose",
Purpose: []string{"foo", "bar"},
},
{
Type: "slicepurpose",
Purpose: []string{"zip", "zap"},
},
},
},
Storage: &Storage{
Type: "consul",
RedirectAddr: "top_level_api_addr",
ClusterAddr: "top_level_cluster_addr",
Config: map[string]string{
"foo": "bar",
},
},
HAStorage: &Storage{
Type: "consul",
RedirectAddr: "top_level_api_addr",
ClusterAddr: "top_level_cluster_addr",
Config: map[string]string{
"bar": "baz",
},
DisableClustering: true,
},
ServiceRegistration: &ServiceRegistration{
Type: "consul",
Config: map[string]string{
"foo": "bar",
},
},
DisableCache: true,
DisableCacheRaw: true,
EnableUI: true,
EnableUIRaw: true,
EnableRawEndpoint: true,
EnableRawEndpointRaw: true,
DisableSealWrap: true,
DisableSealWrapRaw: true,
MaxLeaseTTL: 10 * time.Hour,
MaxLeaseTTLRaw: "10h",
DefaultLeaseTTL: 10 * time.Hour,
DefaultLeaseTTLRaw: "10h",
APIAddr: "top_level_api_addr",
ClusterAddr: "top_level_cluster_addr",
}
addExpectedEntConfig(expected, []string{})
if entropy != nil {
expected.Entropy = entropy
}
config.Prune()
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
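// testLoadConfigFile_json2 exercises the JSON config path with two TCP
// listeners and a fully populated Circonus telemetry block.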
func testLoadConfigFile_json2(t *testing.T, entropy *configutil.Entropy) {
config, err := LoadConfigFile("./test-fixtures/config2.hcl.json")
if err != nil {
t.Fatalf("err: %s", err)
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
CustomResponseHeaders: DefaultCustomHeaders,
},
{
Type: "tcp",
Address: "127.0.0.1:444",
CustomResponseHeaders: DefaultCustomHeaders,
},
},
Telemetry: &configutil.Telemetry{
StatsiteAddr: "foo",
StatsdAddr: "bar",
DisableHostname: true,
UsageGaugePeriod: 5 * time.Minute,
MaximumGaugeCardinality: 125,
CirconusAPIToken: "0",
CirconusAPIApp: "vault",
CirconusAPIURL: "http://api.circonus.com/v2",
CirconusSubmissionInterval: "10s",
CirconusCheckSubmissionURL: "https://someplace.com/metrics",
CirconusCheckID: "0",
CirconusCheckForceMetricActivation: "true",
CirconusCheckInstanceID: "node1:vault",
CirconusCheckSearchTag: "service:vault",
CirconusCheckDisplayName: "node1:vault",
CirconusCheckTags: "cat1:tag1,cat2:tag2",
CirconusBrokerID: "0",
CirconusBrokerSelectTag: "dc:sfo",
PrometheusRetentionTime: 30 * time.Second,
LeaseMetricsEpsilon: time.Hour,
NumLeaseMetricsTimeBuckets: 168,
LeaseMetricsNameSpaceLabels: false,
},
},
Storage: &Storage{
Type: "consul",
Config: map[string]string{
"foo": "bar",
},
},
HAStorage: &Storage{
Type: "consul",
Config: map[string]string{
"bar": "baz",
},
DisableClustering: true,
},
ServiceRegistration: &ServiceRegistration{
Type: "consul",
Config: map[string]string{
"foo": "bar",
},
},
CacheSize: 45678,
EnableUI: true,
EnableUIRaw: true,
EnableRawEndpoint: true,
EnableRawEndpointRaw: true,
DisableSealWrap: true,
DisableSealWrapRaw: true,
}
addExpectedEntConfig(expected, []string{"http"})
if entropy != nil {
expected.Entropy = entropy
}
config.Prune()
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
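// testParseEntropy drives entropy stanzas through the HCL parser directly,
// covering the valid "seal"/"augmentation" case plus the three rejection
// cases (unsupported mode, unsupported type, duplicate block). On OSS
// builds the parsed Entropy must remain nil regardless of input.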
func testParseEntropy(t *testing.T, oss bool) {
tests := []struct {
inConfig string
outErr error
outEntropy configutil.Entropy
}{
{
inConfig: `entropy "seal" {
mode = "augmentation"
}`,
outErr: nil,
outEntropy: configutil.Entropy{Mode: configutil.EntropyAugmentation},
},
{
inConfig: `entropy "seal" {
mode = "a_mode_that_is_not_supported"
}`,
outErr: fmt.Errorf("the specified entropy mode %q is not supported", "a_mode_that_is_not_supported"),
},
{
inConfig: `entropy "device_that_is_not_supported" {
mode = "augmentation"
}`,
outErr: fmt.Errorf("only the %q type of external entropy is supported", "seal"),
},
{
inConfig: `entropy "seal" {
mode = "augmentation"
}
entropy "seal" {
mode = "augmentation"
}`,
outErr: fmt.Errorf("only one %q block is permitted", "entropy"),
},
}
config := Config{
SharedConfig: &configutil.SharedConfig{},
}
for _, test := range tests {
obj, _ := hcl.Parse(strings.TrimSpace(test.inConfig))
list, _ := obj.Node.(*ast.ObjectList)
objList := list.Filter("entropy")
err := configutil.ParseEntropy(config.SharedConfig, objList, "entropy")
// validate the error, both should be nil or have the same Error()
switch {
case oss:
if config.Entropy != nil {
t.Fatalf("parsing Entropy should not be possible in oss but got a non-nil config.Entropy: %#v", config.Entropy)
}
case err != nil && test.outErr != nil:
if err.Error() != test.outErr.Error() {
t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr)
}
case err != test.outErr:
t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr)
case err == nil && config.Entropy != nil && *config.Entropy != test.outEntropy:
t.Logf("config.Entropy: %#v", config.Entropy)
t.Fatalf("entropy config mismatch: expected %#v got %#v", test.outEntropy, *config.Entropy)
}
}
}
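// The two entry points below load the same fixture in HCL and JSON form
// through a shared helper, to verify that integer and boolean storage
// options are normalized to strings.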
func testLoadConfigFileIntegerAndBooleanValues(t *testing.T) {
testLoadConfigFileIntegerAndBooleanValuesCommon(t, "./test-fixtures/config4.hcl")
}
func testLoadConfigFileIntegerAndBooleanValuesJson(t *testing.T) {
testLoadConfigFileIntegerAndBooleanValuesCommon(t, "./test-fixtures/config4.hcl.json")
}
func testLoadConfigFileIntegerAndBooleanValuesCommon(t *testing.T, path string) {
config, err := LoadConfigFile(path)
if err != nil {
t.Fatalf("err: %s", err)
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:8200",
CustomResponseHeaders: DefaultCustomHeaders,
},
},
DisableMlock: true,
},
Storage: &Storage{
Type: "raft",
Config: map[string]string{
"path": "/storage/path/raft",
"node_id": "raft1",
"performance_multiplier": "1",
"foo": "bar",
"baz": "true",
},
ClusterAddr: "127.0.0.1:8201",
},
ClusterAddr: "127.0.0.1:8201",
DisableCache: true,
DisableCacheRaw: true,
EnableUI: true,
EnableUIRaw: true,
}
config.Prune()
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
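// testLoadConfigFile covers the baseline HCL fixture: telemetry, HA
// storage, response-header toggles, printable-check settings, and
// license_path.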
func testLoadConfigFile(t *testing.T) {
config, err := LoadConfigFile("./test-fixtures/config.hcl")
if err != nil {
t.Fatalf("err: %s", err)
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
CustomResponseHeaders: DefaultCustomHeaders,
},
},
Telemetry: &configutil.Telemetry{
StatsdAddr: "bar",
StatsiteAddr: "foo",
DisableHostname: false,
UsageGaugePeriod: 5 * time.Minute,
MaximumGaugeCardinality: 100,
DogStatsDAddr: "127.0.0.1:7254",
DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"},
PrometheusRetentionTime: configutil.PrometheusDefaultRetentionTime,
MetricsPrefix: "myprefix",
LeaseMetricsEpsilon: time.Hour,
NumLeaseMetricsTimeBuckets: 168,
LeaseMetricsNameSpaceLabels: false,
},
DisableMlock: true,
Entropy: nil,
PidFile: "./pidfile",
ClusterName: "testcluster",
},
Storage: &Storage{
Type: "consul",
RedirectAddr: "foo",
Config: map[string]string{
"foo": "bar",
},
},
HAStorage: &Storage{
Type: "consul",
RedirectAddr: "snafu",
Config: map[string]string{
"bar": "baz",
},
DisableClustering: true,
},
ServiceRegistration: &ServiceRegistration{
Type: "consul",
Config: map[string]string{
"foo": "bar",
},
},
DisableCache: true,
DisableCacheRaw: true,
DisablePrintableCheckRaw: true,
DisablePrintableCheck: true,
EnableUI: true,
EnableUIRaw: true,
EnableRawEndpoint: true,
EnableRawEndpointRaw: true,
DisableSealWrap: true,
DisableSealWrapRaw: true,
MaxLeaseTTL: 10 * time.Hour,
MaxLeaseTTLRaw: "10h",
DefaultLeaseTTL: 10 * time.Hour,
DefaultLeaseTTLRaw: "10h",
EnableResponseHeaderHostname: true,
EnableResponseHeaderHostnameRaw: true,
EnableResponseHeaderRaftNodeID: true,
EnableResponseHeaderRaftNodeIDRaw: true,
LicensePath: "/path/to/license",
}
addExpectedEntConfig(expected, []string{})
config.Prune()
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
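// testUnknownFieldValidation asserts that Validate reports the unknown
// bad_value field at its exact token position, while tolerating the
// sentinel-related errors that OSS builds emit.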
func testUnknownFieldValidation(t *testing.T) {
config, err := LoadConfigFile("./test-fixtures/config.hcl")
if err != nil {
t.Fatalf("err: %s", err)
}
expected := []configutil.ConfigError{
{
Problem: "unknown or unsupported field bad_value found in configuration",
Position: token.Pos{
Filename: "./test-fixtures/config.hcl",
Offset: 583,
Line: 34,
Column: 5,
},
},
}
errors := config.Validate("./test-fixtures/config.hcl")
for _, er1 := range errors {
found := false
if strings.Contains(er1.String(), "sentinel") {
// This happens on OSS, and is fine
continue
}
for _, ex := range expected {
// TODO: Only test the string, pos may change
if ex.Problem == er1.Problem && reflect.DeepEqual(ex.Position, er1.Position) {
found = true
break
}
}
if !found {
t.Fatalf("found unexpected error: %v", er1.String())
}
}
for _, ex := range expected {
found := false
for _, er1 := range errors {
if ex.Problem == er1.Problem && reflect.DeepEqual(ex.Position, er1.Position) {
found = true
}
}
if !found {
t.Fatalf("could not find expected error: %v", ex.String())
}
}
}
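// testLoadConfigFile_json is the JSON counterpart of testLoadConfigFile;
// unset telemetry strings surface as empty strings rather than being
// omitted from the expected struct.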
func testLoadConfigFile_json(t *testing.T) {
config, err := LoadConfigFile("./test-fixtures/config.hcl.json")
if err != nil {
t.Fatalf("err: %s", err)
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
CustomResponseHeaders: DefaultCustomHeaders,
},
},
Telemetry: &configutil.Telemetry{
StatsiteAddr: "baz",
StatsdAddr: "",
DisableHostname: false,
UsageGaugePeriod: 5 * time.Minute,
MaximumGaugeCardinality: 100,
CirconusAPIToken: "",
CirconusAPIApp: "",
CirconusAPIURL: "",
CirconusSubmissionInterval: "",
CirconusCheckSubmissionURL: "",
CirconusCheckID: "",
CirconusCheckForceMetricActivation: "",
CirconusCheckInstanceID: "",
CirconusCheckSearchTag: "",
CirconusCheckDisplayName: "",
CirconusCheckTags: "",
CirconusBrokerID: "",
CirconusBrokerSelectTag: "",
PrometheusRetentionTime: configutil.PrometheusDefaultRetentionTime,
LeaseMetricsEpsilon: time.Hour,
NumLeaseMetricsTimeBuckets: 168,
LeaseMetricsNameSpaceLabels: false,
},
PidFile: "./pidfile",
Entropy: nil,
ClusterName: "testcluster",
},
Storage: &Storage{
Type: "consul",
Config: map[string]string{
"foo": "bar",
},
DisableClustering: true,
},
ServiceRegistration: &ServiceRegistration{
Type: "consul",
Config: map[string]string{
"foo": "bar",
},
},
ClusterCipherSuites: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
MaxLeaseTTL: 10 * time.Hour,
MaxLeaseTTLRaw: "10h",
DefaultLeaseTTL: 10 * time.Hour,
DefaultLeaseTTLRaw: "10h",
DisableCacheRaw: interface{}(nil),
EnableUI: true,
EnableUIRaw: true,
EnableRawEndpoint: true,
EnableRawEndpointRaw: true,
DisableSealWrap: true,
DisableSealWrapRaw: true,
}
addExpectedEntConfig(expected, []string{})
config.Prune()
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
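// testLoadConfigDir verifies that LoadConfigDir merges every file under
// ./test-fixtures/config-dir into a single Config.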
func testLoadConfigDir(t *testing.T) {
config, err := LoadConfigDir("./test-fixtures/config-dir")
if err != nil {
t.Fatalf("err: %s", err)
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
DisableMlock: true,
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
CustomResponseHeaders: DefaultCustomHeaders,
},
},
Telemetry: &configutil.Telemetry{
StatsiteAddr: "qux",
StatsdAddr: "baz",
DisableHostname: true,
UsageGaugePeriod: 5 * time.Minute,
MaximumGaugeCardinality: 100,
PrometheusRetentionTime: configutil.PrometheusDefaultRetentionTime,
LeaseMetricsEpsilon: time.Hour,
NumLeaseMetricsTimeBuckets: 168,
LeaseMetricsNameSpaceLabels: false,
},
ClusterName: "testcluster",
},
DisableCache: true,
DisableClustering: false,
DisableClusteringRaw: false,
APIAddr: "https://vault.local",
ClusterAddr: "https://127.0.0.1:444",
Storage: &Storage{
Type: "consul",
Config: map[string]string{
"foo": "bar",
},
RedirectAddr: "https://vault.local",
ClusterAddr: "https://127.0.0.1:444",
DisableClustering: false,
},
EnableUI: true,
EnableRawEndpoint: true,
MaxLeaseTTL: 10 * time.Hour,
DefaultLeaseTTL: 10 * time.Hour,
}
addExpectedEntConfig(expected, []string{"http"})
config.Prune()
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
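// testConfig_Sanitized checks that Sanitized flattens the config into a
// map while dropping sensitive backend details: storage and ha_storage
// lose their config maps, and seals are reduced to type and disabled.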
func testConfig_Sanitized(t *testing.T) {
config, err := LoadConfigFile("./test-fixtures/config3.hcl")
if err != nil {
t.Fatalf("err: %s", err)
}
sanitizedConfig := config.Sanitized()
expected := map[string]interface{}{
"api_addr": "top_level_api_addr",
"cache_size": 0,
"cluster_addr": "top_level_cluster_addr",
"cluster_cipher_suites": "",
"cluster_name": "testcluster",
"default_lease_ttl": 10 * time.Hour,
"default_max_request_duration": 0 * time.Second,
"disable_cache": true,
"disable_clustering": false,
"disable_indexing": false,
"disable_mlock": true,
"disable_performance_standby": false,
"disable_printable_check": false,
"disable_sealwrap": true,
"raw_storage_endpoint": true,
"disable_sentinel_trace": true,
"enable_ui": true,
"enable_response_header_hostname": false,
"enable_response_header_raft_node_id": false,
"log_requests_level": "basic",
"ha_storage": map[string]interface{}{
"cluster_addr": "top_level_cluster_addr",
"disable_clustering": true,
"redirect_addr": "top_level_api_addr",
"type": "consul",
},
"listeners": []interface{}{
map[string]interface{}{
"config": map[string]interface{}{
"address": "127.0.0.1:443",
},
"type": "tcp",
},
},
"log_format": "",
"log_level": "",
"max_lease_ttl": 10 * time.Hour,
"pid_file": "./pidfile",
"plugin_directory": "",
"seals": []interface{}{
map[string]interface{}{
"disabled": false,
"type": "awskms",
},
},
"storage": map[string]interface{}{
"cluster_addr": "top_level_cluster_addr",
"disable_clustering": false,
"redirect_addr": "top_level_api_addr",
"type": "consul",
},
"service_registration": map[string]interface{}{
"type": "consul",
},
"telemetry": map[string]interface{}{
"usage_gauge_period": 5 * time.Minute,
"maximum_gauge_cardinality": 100,
"circonus_api_app": "",
"circonus_api_token": "",
"circonus_api_url": "",
"circonus_broker_id": "",
"circonus_broker_select_tag": "",
"circonus_check_display_name": "",
"circonus_check_force_metric_activation": "",
"circonus_check_id": "",
"circonus_check_instance_id": "",
"circonus_check_search_tag": "",
"circonus_submission_url": "",
"circonus_check_tags": "",
"circonus_submission_interval": "",
"disable_hostname": false,
"metrics_prefix": "pfx",
"dogstatsd_addr": "",
"dogstatsd_tags": []string(nil),
"prometheus_retention_time": 24 * time.Hour,
"stackdriver_location": "",
"stackdriver_namespace": "",
"stackdriver_project_id": "",
"stackdriver_debug_logs": false,
"statsd_address": "bar",
"statsite_address": "",
"lease_metrics_epsilon": time.Hour,
"num_lease_metrics_buckets": 168,
"add_lease_metrics_namespace_labels": false,
},
}
addExpectedEntSanitizedConfig(expected, []string{"http"})
config.Prune()
if diff := deep.Equal(sanitizedConfig, expected); len(diff) > 0 {
t.Fatalf("bad, diff: %#v", diff)
}
}
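// testParseListeners parses a single TCP listener stanza inline (no
// fixture file) and verifies the TLS options plus the telemetry and
// profiling sub-blocks land on the Listener struct.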
func testParseListeners(t *testing.T) {
obj, _ := hcl.Parse(strings.TrimSpace(`
listener "tcp" {
address = "127.0.0.1:443"
cluster_address = "127.0.0.1:8201"
tls_disable = false
tls_cert_file = "./certs/server.crt"
tls_key_file = "./certs/server.key"
tls_client_ca_file = "./certs/rootca.crt"
tls_min_version = "tls12"
tls_max_version = "tls13"
tls_require_and_verify_client_cert = true
tls_disable_client_certs = true
telemetry {
unauthenticated_metrics_access = true
}
profiling {
unauthenticated_pprof_access = true
}
}`))
config := Config{
SharedConfig: &configutil.SharedConfig{},
}
list, _ := obj.Node.(*ast.ObjectList)
objList := list.Filter("listener")
configutil.ParseListeners(config.SharedConfig, objList)
listeners := config.Listeners
if len(listeners) == 0 {
t.Fatalf("expected at least one listener in the config")
}
listener := listeners[0]
if listener.Type != "tcp" {
t.Fatalf("expected tcp listener in the config")
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
ClusterAddress: "127.0.0.1:8201",
TLSCertFile: "./certs/server.crt",
TLSKeyFile: "./certs/server.key",
TLSClientCAFile: "./certs/rootca.crt",
TLSMinVersion: "tls12",
TLSMaxVersion: "tls13",
TLSRequireAndVerifyClientCert: true,
TLSDisableClientCerts: true,
Telemetry: configutil.ListenerTelemetry{
UnauthenticatedMetricsAccess: true,
},
Profiling: configutil.ListenerProfiling{
UnauthenticatedPProfAccess: true,
},
CustomResponseHeaders: DefaultCustomHeaders,
},
},
},
}
config.Prune()
if diff := deep.Equal(config, *expected); diff != nil {
t.Fatal(diff)
}
}
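// testParseSockaddrTemplate verifies that go-sockaddr templates in
// api_addr and listener addresses are rendered at parse time; the
// GetAllInterfaces pipelines below resolve to the loopback address
// 127.0.0.1.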
func testParseSockaddrTemplate(t *testing.T) {
config, err := ParseConfig(`
api_addr = <<EOF
{{- GetAllInterfaces | include "flags" "loopback" | include "type" "ipv4" | attr "address" -}}
EOF
listener "tcp" {
address = <<EOF
{{- GetAllInterfaces | include "flags" "loopback" | include "type" "ipv4" | attr "address" -}}:443
EOF
cluster_address = <<EOF
{{- GetAllInterfaces | include "flags" "loopback" | include "type" "ipv4" | attr "address" -}}:8201
EOF
tls_disable = true
}`, "")
if err != nil {
t.Fatal(err)
}
expected := &Config{
APIAddr: "127.0.0.1",
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
ClusterAddress: "127.0.0.1:8201",
TLSDisable: true,
CustomResponseHeaders: DefaultCustomHeaders,
},
},
},
}
config.Prune()
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
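// testParseSeals loads two pkcs11 seal stanzas, one active with multiple
// purposes and one disabled, and checks that both hex ("0x1082") and
// decimal ("4226") mechanism values survive as strings.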
func testParseSeals(t *testing.T) {
config, err := LoadConfigFile("./test-fixtures/config_seals.hcl")
if err != nil {
t.Fatalf("err: %s", err)
}
config.Listeners[0].RawConfig = nil
expected := &Config{
Storage: &Storage{
Type: "consul",
Config: map[string]string{},
},
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
CustomResponseHeaders: DefaultCustomHeaders,
},
},
Seals: []*configutil.KMS{
{
Type: "pkcs11",
Purpose: []string{"many", "purposes"},
Config: map[string]string{
"lib": "/usr/lib/libcklog2.so",
"slot": "0.0",
"pin": "XXXXXXXX",
"key_label": "HASHICORP",
"mechanism": "0x1082",
"hmac_mechanism": "0x0251",
"hmac_key_label": "vault-hsm-hmac-key",
"default_hmac_key_label": "vault-hsm-hmac-key",
"generate_key": "true",
},
},
{
Type: "pkcs11",
Purpose: []string{"single"},
Disabled: true,
Config: map[string]string{
"lib": "/usr/lib/libcklog2.so",
"slot": "0.0",
"pin": "XXXXXXXX",
"key_label": "HASHICORP",
"mechanism": "4226",
"hmac_mechanism": "593",
"hmac_key_label": "vault-hsm-hmac-key",
"default_hmac_key_label": "vault-hsm-hmac-key",
"generate_key": "true",
},
},
},
},
}
config.Prune()
require.Equal(t, expected, config)
}
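// testLoadConfigFileLeaseMetrics loads config5.hcl, which overrides the
// lease-metrics telemetry knobs: two time buckets and namespace labels
// enabled.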
func testLoadConfigFileLeaseMetrics(t *testing.T) {
config, err := LoadConfigFile("./test-fixtures/config5.hcl")
if err != nil {
t.Fatalf("err: %s", err)
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
CustomResponseHeaders: DefaultCustomHeaders,
},
},
Telemetry: &configutil.Telemetry{
StatsdAddr: "bar",
StatsiteAddr: "foo",
DisableHostname: false,
UsageGaugePeriod: 5 * time.Minute,
MaximumGaugeCardinality: 100,
DogStatsDAddr: "127.0.0.1:7254",
DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"},
PrometheusRetentionTime: configutil.PrometheusDefaultRetentionTime,
MetricsPrefix: "myprefix",
LeaseMetricsEpsilon: time.Hour,
NumLeaseMetricsTimeBuckets: 2,
LeaseMetricsNameSpaceLabels: true,
},
DisableMlock: true,
Entropy: nil,
PidFile: "./pidfile",
ClusterName: "testcluster",
},
Storage: &Storage{
Type: "consul",
RedirectAddr: "foo",
Config: map[string]string{
"foo": "bar",
},
},
HAStorage: &Storage{
Type: "consul",
RedirectAddr: "snafu",
Config: map[string]string{
"bar": "baz",
},
DisableClustering: true,
},
ServiceRegistration: &ServiceRegistration{
Type: "consul",
Config: map[string]string{
"foo": "bar",
},
},
DisableCache: true,
DisableCacheRaw: true,
DisablePrintableCheckRaw: true,
DisablePrintableCheck: true,
EnableUI: true,
EnableUIRaw: true,
EnableRawEndpoint: true,
EnableRawEndpointRaw: true,
DisableSealWrap: true,
DisableSealWrapRaw: true,
MaxLeaseTTL: 10 * time.Hour,
MaxLeaseTTLRaw: "10h",
DefaultLeaseTTL: 10 * time.Hour,
DefaultLeaseTTLRaw: "10h",
}
addExpectedEntConfig(expected, []string{})
config.Prune()
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
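// testConfigRaftAutopilot checks that the autopilot stanza in
// ./test-fixtures/raft_autopilot.hcl is flattened into a single
// JSON-encoded "autopilot" value; the fixture presumably sets the five
// fields seen in autopilotConfig below.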
func testConfigRaftAutopilot(t *testing.T) {
config, err := LoadConfigFile("./test-fixtures/raft_autopilot.hcl")
if err != nil {
t.Fatal(err)
}
autopilotConfig := `[{"cleanup_dead_servers":true,"last_contact_threshold":"500ms","max_trailing_logs":250,"min_quorum":3,"server_stabilization_time":"10s"}]`
expected := &Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:8200",
},
},
DisableMlock: true,
},
Storage: &Storage{
Type: "raft",
Config: map[string]string{
"path": "/storage/path/raft",
"node_id": "raft1",
"autopilot": autopilotConfig,
},
},
}
config.Prune()
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}