package kubernetes

import (
	"os"
	"sync"
	"testing"
	"time"

	"github.com/hashicorp/go-hclog"
	sr "github.com/hashicorp/vault/serviceregistration"
	"github.com/hashicorp/vault/serviceregistration/kubernetes/client"
	kubetest "github.com/hashicorp/vault/serviceregistration/kubernetes/testing"
)
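
// TestRetryHandlerSimple runs the retry handler against the test API server:
// setting the initial state should produce at least one patch, and a
// subsequent Notify should produce exactly one more patch after a retry
// interval has elapsed.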
func TestRetryHandlerSimple(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping because this test takes 10-15 seconds")
	}

	testState, testConf, closeFunc := kubetest.Server(t)
	defer closeFunc()

	client.Scheme = testConf.ClientScheme
	client.TokenFile = testConf.PathToTokenFile
	client.RootCAFile = testConf.PathToRootCAFile
	if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil {
		t.Fatal(err)
	}
	if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil {
		t.Fatal(err)
	}

	logger := hclog.NewNullLogger()
	shutdownCh := make(chan struct{})
	wait := &sync.WaitGroup{}

	c, err := client.New(logger)
	if err != nil {
		t.Fatal(err)
	}

	r := &retryHandler{
		logger:         logger,
		namespace:      kubetest.ExpectedNamespace,
		podName:        kubetest.ExpectedPodName,
		patchesToRetry: make(map[string]*client.Patch),
		client:         c,
		initialState:   sr.State{},
	}
	r.Run(shutdownCh, wait)

	// Initial number of patches upon Run from setting the initial state
	initStatePatches := testState.NumPatches()
	if initStatePatches == 0 {
		t.Fatalf("expected number of patches from setting the initial state to be non-zero")
	}

	// Send a new patch
	testPatch := &client.Patch{
		Operation: client.Add,
		Path:      "patch-path",
		Value:     "true",
	}
	r.Notify(testPatch)

	// Wait long enough for the next retry to have occurred.
	<-time.NewTimer(retryFreq * 2).C

	if testState.NumPatches() != initStatePatches+1 {
		t.Fatalf("expected %d patches, got: %d", initStatePatches+1, testState.NumPatches())
	}
}
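
// TestRetryHandlerAdd exercises Notify's bookkeeping of patchesToRetry: each
// new path adds an entry, while duplicates and reversions of an existing path
// leave the count unchanged.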
func TestRetryHandlerAdd(t *testing.T) {
	_, testConf, closeFunc := kubetest.Server(t)
	defer closeFunc()

	client.Scheme = testConf.ClientScheme
	client.TokenFile = testConf.PathToTokenFile
	client.RootCAFile = testConf.PathToRootCAFile
	if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil {
		t.Fatal(err)
	}
	if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil {
		t.Fatal(err)
	}

	logger := hclog.NewNullLogger()
	c, err := client.New(logger)
	if err != nil {
		t.Fatal(err)
	}

	r := &retryHandler{
		logger:         hclog.NewNullLogger(),
		namespace:      "some-namespace",
		podName:        "some-pod-name",
		patchesToRetry: make(map[string]*client.Patch),
		client:         c,
	}

	testPatch1 := &client.Patch{
		Operation: client.Add,
		Path:      "one",
		Value:     "true",
	}
	testPatch2 := &client.Patch{
		Operation: client.Add,
		Path:      "two",
		Value:     "true",
	}
	testPatch3 := &client.Patch{
		Operation: client.Add,
		Path:      "three",
		Value:     "true",
	}
	testPatch4 := &client.Patch{
		Operation: client.Add,
		Path:      "four",
		Value:     "true",
	}

	// Should be able to add all 4 patches.
	r.Notify(testPatch1)
	if len(r.patchesToRetry) != 1 {
		t.Fatal("expected 1 patch")
	}

	r.Notify(testPatch2)
	if len(r.patchesToRetry) != 2 {
		t.Fatal("expected 2 patches")
	}

	r.Notify(testPatch3)
	if len(r.patchesToRetry) != 3 {
		t.Fatal("expected 3 patches")
	}

	r.Notify(testPatch4)
	if len(r.patchesToRetry) != 4 {
		t.Fatal("expected 4 patches")
	}

	// Adding a dupe should result in no change.
	r.Notify(testPatch4)
	if len(r.patchesToRetry) != 4 {
		t.Fatal("expected 4 patches")
	}

	// Adding a reversion for an existing path should overwrite its twin
	// rather than add a new entry, leaving the count unchanged.
	r.Notify(&client.Patch{
		Operation: client.Add,
		Path:      "four",
		Value:     "false",
	})
	if len(r.patchesToRetry) != 4 {
		t.Fatal("expected 4 patches")
	}

	r.Notify(&client.Patch{
		Operation: client.Add,
		Path:      "three",
		Value:     "false",
	})
	if len(r.patchesToRetry) != 4 {
		t.Fatal("expected 4 patches")
	}

	r.Notify(&client.Patch{
		Operation: client.Add,
		Path:      "two",
		Value:     "false",
	})
	if len(r.patchesToRetry) != 4 {
		t.Fatal("expected 4 patches")
	}

	r.Notify(&client.Patch{
		Operation: client.Add,
		Path:      "one",
		Value:     "false",
	})
	if len(r.patchesToRetry) != 4 {
		t.Fatal("expected 4 patches")
	}
}
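
// TestRetryHandlerRacesAndDeadlocks hammers a shared retryHandler with
// concurrent Notify and Run calls.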
// This is meant to be run with the -race flag on.
func TestRetryHandlerRacesAndDeadlocks(t *testing.T) {
	_, testConf, closeFunc := kubetest.Server(t)
	defer closeFunc()

	client.Scheme = testConf.ClientScheme
	client.TokenFile = testConf.PathToTokenFile
	client.RootCAFile = testConf.PathToRootCAFile
	if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil {
		t.Fatal(err)
	}
	if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil {
		t.Fatal(err)
	}

	logger := hclog.NewNullLogger()
	shutdownCh := make(chan struct{})
	wait := &sync.WaitGroup{}
	testPatch := &client.Patch{
		Operation: client.Add,
		Path:      "patch-path",
		Value:     "true",
	}

	c, err := client.New(logger)
	if err != nil {
		t.Fatal(err)
	}

	r := &retryHandler{
		logger:         logger,
		namespace:      kubetest.ExpectedNamespace,
		podName:        kubetest.ExpectedPodName,
		patchesToRetry: make(map[string]*client.Patch),
		initialState:   sr.State{},
		client:         c,
	}

	// Now hit it as quickly as possible to see if we can produce
	// races or deadlocks.
	start := make(chan struct{})
	done := make(chan bool)
	numRoutines := 100
	for i := 0; i < numRoutines; i++ {
		go func() {
			<-start
			r.Notify(testPatch)
			done <- true
		}()
		go func() {
			<-start
			r.Run(shutdownCh, wait)
			done <- true
		}()
	}
	close(start)

	// Allow up to 5 seconds for everything to finish.
	timer := time.NewTimer(5 * time.Second)
	for i := 0; i < numRoutines*2; i++ {
		select {
		case <-timer.C:
			t.Fatal("test took too long to complete, check for deadlock")
		case <-done:
		}
	}
}
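
// TestRetryHandlerAPIConnectivityProblemsInitialState verifies that the
// initial state passed to NewServiceRegistration is eventually written once
// the API becomes reachable.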
// In this test, the API server returns bad responses at first,
// then returns good responses, and we make sure we get the expected behavior.
func TestRetryHandlerAPIConnectivityProblemsInitialState(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}

	testState, testConf, closeFunc := kubetest.Server(t)
	defer closeFunc()
	kubetest.ReturnGatewayTimeouts.Store(true)

	client.Scheme = testConf.ClientScheme
	client.TokenFile = testConf.PathToTokenFile
	client.RootCAFile = testConf.PathToRootCAFile
	client.RetryMax = 0
	if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil {
		t.Fatal(err)
	}
	if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil {
		t.Fatal(err)
	}

	shutdownCh := make(chan struct{})
	wait := &sync.WaitGroup{}
	reg, err := NewServiceRegistration(map[string]string{
		"namespace": kubetest.ExpectedNamespace,
		"pod_name":  kubetest.ExpectedPodName,
	}, hclog.NewNullLogger(), sr.State{
		VaultVersion:         "vault-version",
		IsInitialized:        true,
		IsSealed:             true,
		IsActive:             true,
		IsPerformanceStandby: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	if err := reg.Run(shutdownCh, wait, ""); err != nil {
		t.Fatal(err)
	}

	// At this point, since the initial state can't be set remotely,
	// none of these labels should have values yet.
	patch := testState.Get(pathToLabels + labelVaultVersion)
	if patch != nil {
		t.Fatal("expected no value")
	}
	patch = testState.Get(pathToLabels + labelActive)
	if patch != nil {
		t.Fatal("expected no value")
	}
	patch = testState.Get(pathToLabels + labelSealed)
	if patch != nil {
		t.Fatal("expected no value")
	}
	patch = testState.Get(pathToLabels + labelPerfStandby)
	if patch != nil {
		t.Fatal("expected no value")
	}
	patch = testState.Get(pathToLabels + labelInitialized)
	if patch != nil {
		t.Fatal("expected no value")
	}

	kubetest.ReturnGatewayTimeouts.Store(false)

	// Now we need to wait to give the retry handler
	// a chance to update these values.
	time.Sleep(retryFreq + time.Second)
	val := testState.Get(pathToLabels + labelVaultVersion)["value"]
	if val != "vault-version" {
		t.Fatal("expected vault-version")
	}
	val = testState.Get(pathToLabels + labelActive)["value"]
	if val != "true" {
		t.Fatal("expected true")
	}
	val = testState.Get(pathToLabels + labelSealed)["value"]
	if val != "true" {
		t.Fatal("expected true")
	}
	val = testState.Get(pathToLabels + labelPerfStandby)["value"]
	if val != "true" {
		t.Fatal("expected true")
	}
	val = testState.Get(pathToLabels + labelInitialized)["value"]
	if val != "true" {
		t.Fatal("expected true")
	}
}
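
// TestRetryHandlerAPIConnectivityProblemsNotifications verifies that state
// changes sent via the Notify* methods before Run are eventually written once
// the API becomes reachable, taking precedence over the initial state.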
// In this test, the API server returns bad responses at first,
// then returns good responses, and we make sure we get the expected behavior.
func TestRetryHandlerAPIConnectivityProblemsNotifications(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}

	testState, testConf, closeFunc := kubetest.Server(t)
	defer closeFunc()
	kubetest.ReturnGatewayTimeouts.Store(true)

	client.Scheme = testConf.ClientScheme
	client.TokenFile = testConf.PathToTokenFile
	client.RootCAFile = testConf.PathToRootCAFile
	client.RetryMax = 0
	if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil {
		t.Fatal(err)
	}
	if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil {
		t.Fatal(err)
	}

	shutdownCh := make(chan struct{})
	wait := &sync.WaitGroup{}
	reg, err := NewServiceRegistration(map[string]string{
		"namespace": kubetest.ExpectedNamespace,
		"pod_name":  kubetest.ExpectedPodName,
	}, hclog.NewNullLogger(), sr.State{
		VaultVersion:         "vault-version",
		IsInitialized:        false,
		IsSealed:             false,
		IsActive:             false,
		IsPerformanceStandby: false,
	})
	if err != nil {
		t.Fatal(err)
	}

	if err := reg.NotifyActiveStateChange(true); err != nil {
		t.Fatal(err)
	}
	if err := reg.NotifyInitializedStateChange(true); err != nil {
		t.Fatal(err)
	}
	if err := reg.NotifyPerformanceStandbyStateChange(true); err != nil {
		t.Fatal(err)
	}
	if err := reg.NotifySealedStateChange(true); err != nil {
		t.Fatal(err)
	}

	if err := reg.Run(shutdownCh, wait, ""); err != nil {
		t.Fatal(err)
	}

	// At this point, since the initial state can't be set remotely,
	// none of these labels should have values yet.
	patch := testState.Get(pathToLabels + labelVaultVersion)
	if patch != nil {
		t.Fatal("expected no value")
	}
	patch = testState.Get(pathToLabels + labelActive)
	if patch != nil {
		t.Fatal("expected no value")
	}
	patch = testState.Get(pathToLabels + labelSealed)
	if patch != nil {
		t.Fatal("expected no value")
	}
	patch = testState.Get(pathToLabels + labelPerfStandby)
	if patch != nil {
		t.Fatal("expected no value")
	}
	patch = testState.Get(pathToLabels + labelInitialized)
	if patch != nil {
		t.Fatal("expected no value")
	}

	kubetest.ReturnGatewayTimeouts.Store(false)

	// Now we need to wait to give the retry handler
	// a chance to update these values.
	time.Sleep(retryFreq + time.Second)

	// They should be "true" because the notifications were sent after the
	// initial state and take precedence over it.
	val := testState.Get(pathToLabels + labelVaultVersion)["value"]
	if val != "vault-version" {
		t.Fatal("expected vault-version")
	}
	val = testState.Get(pathToLabels + labelActive)["value"]
	if val != "true" {
		t.Fatal("expected true")
	}
	val = testState.Get(pathToLabels + labelSealed)["value"]
	if val != "true" {
		t.Fatal("expected true")
	}
	val = testState.Get(pathToLabels + labelPerfStandby)["value"]
	if val != "true" {
		t.Fatal("expected true")
	}
	val = testState.Get(pathToLabels + labelInitialized)["value"]
	if val != "true" {
		t.Fatal("expected true")
	}
}