package vault

import (
	"bytes"
	"context"
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net"
	"net/http"
	"testing"
	"time"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/helper/consts"
	"github.com/hashicorp/vault/helper/logging"
	"github.com/hashicorp/vault/logical"
	"github.com/hashicorp/vault/physical"
	"github.com/hashicorp/vault/physical/inmem"
)

var (
	clusterTestPausePeriod = 2 * time.Second
)
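
// TestClusterFetching verifies that cluster information (name and ID) is
// generated when cluster setup runs on an unsealed core and is retrievable
// afterwards via Cluster().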
func TestClusterFetching(t *testing.T) {
	c, _, _ := TestCoreUnsealed(t)

	err := c.setupCluster(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	cluster, err := c.Cluster(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	// Test whether expected values are found
	if cluster == nil || cluster.Name == "" || cluster.ID == "" {
		t.Fatalf("cluster information missing: cluster: %#v", cluster)
	}
}
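
// TestClusterHAFetching runs the same check against an HA-enabled core backed
// by in-memory physical and HA backends, unsealing it and waiting for it to
// become active before fetching cluster information.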
func TestClusterHAFetching(t *testing.T) {
	logger := logging.NewVaultLogger(log.Trace)

	redirect := "http://127.0.0.1:8200"

	inm, err := inmem.NewInmemHA(nil, logger)
	if err != nil {
		t.Fatal(err)
	}
	inmha, err := inmem.NewInmemHA(nil, logger)
	if err != nil {
		t.Fatal(err)
	}

	c, err := NewCore(&CoreConfig{
		Physical:     inm,
		HAPhysical:   inmha.(physical.HABackend),
		RedirectAddr: redirect,
		DisableMlock: true,
	})
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	keys, _ := TestCoreInit(t, c)
	for _, key := range keys {
		if _, err := TestCoreUnseal(c, TestKeyCopy(key)); err != nil {
			t.Fatalf("unseal err: %s", err)
		}
	}

	// Verify unsealed
	if c.Sealed() {
		t.Fatal("should not be sealed")
	}

	// Wait for core to become active
	TestWaitActive(t, c)

	cluster, err := c.Cluster(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	// Test whether expected values are found
	if cluster == nil || cluster.Name == "" || cluster.ID == "" {
		t.Fatalf("cluster information missing: cluster: %#v", cluster)
	}
}
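
// TestCluster_ListenForRequests checks that the cluster (request forwarding)
// listeners are reachable only while a node is the active, unsealed core: up
// when active, down after a step-down or seal, and up again on recovery.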
func TestCluster_ListenForRequests(t *testing.T) {
	// Make this nicer for tests
	manualStepDownSleepPeriod = 5 * time.Second

	cluster := NewTestCluster(t, nil, &TestClusterOptions{
		KeepStandbysSealed: true,
	})
	cluster.Start()
	defer cluster.Cleanup()
	cores := cluster.Cores

	// Wait for core to become active
	TestWaitActive(t, cores[0].Core)
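
	// checkListenersFunc dials each listener's cluster port and attempts the
	// ALPN/TLS handshake used for request forwarding; expectFail flips the
	// assertion for the windows when the node is stepped down or sealed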
	// Use this to have a valid config after sealing since ClusterTLSConfig returns nil
	checkListenersFunc := func(expectFail bool) {
		cores[0].clusterListener.AddClient(requestForwardingALPN, &requestForwardingClusterClient{cores[0].Core})

		parsedCert := cores[0].localClusterParsedCert.Load().(*x509.Certificate)
		dialer := cores[0].getGRPCDialer(context.Background(), requestForwardingALPN, parsedCert.Subject.CommonName, parsedCert)
		for _, ln := range cores[0].Listeners {
			tcpAddr, ok := ln.Addr().(*net.TCPAddr)
			if !ok {
				t.Fatalf("%s not a TCP port", ln.Addr().String())
			}
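
			// In this test setup each core's cluster listener sits at the
			// API listener's port offset by 105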
			netConn, err := dialer(fmt.Sprintf("%s:%d", tcpAddr.IP.String(), tcpAddr.Port+105), 0)
			if err != nil {
				if expectFail {
					t.Logf("testing %s:%d unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+105)
					continue
				}
				t.Fatalf("error: %v\nlisteners are\n%#v\n", err, cores[0].Listeners)
			}
			// Only assert the connection type after the error check; a failed
			// dial returns a nil net.Conn and the assertion would panic
			conn := netConn.(*tls.Conn)
			if expectFail {
				t.Fatalf("testing %s:%d not unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+105)
			}
			err = conn.Handshake()
			if err != nil {
				t.Fatal(err)
			}
			connState := conn.ConnectionState()
			switch {
			case connState.Version != tls.VersionTLS12:
				t.Fatal("version mismatch")
			case connState.NegotiatedProtocol != requestForwardingALPN || !connState.NegotiatedProtocolIsMutual:
				t.Fatal("bad protocol negotiation")
			}
			t.Logf("testing %s:%d successful", tcpAddr.IP.String(), tcpAddr.Port+105)
		}
	}

	time.Sleep(clusterTestPausePeriod)
	checkListenersFunc(false)

	err := cores[0].StepDown(context.Background(), &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/step-down",
		ClientToken: cluster.RootToken,
	})
	if err != nil {
		t.Fatal(err)
	}

	// StepDown doesn't wait during actual preSeal so give time for listeners
	// to close
	time.Sleep(clusterTestPausePeriod)
	checkListenersFunc(true)

	// After this period it should be active again
	time.Sleep(manualStepDownSleepPeriod)
	checkListenersFunc(false)

	err = cores[0].Core.Seal(cluster.RootToken)
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(clusterTestPausePeriod)
	// After sealing it should be inactive again
	checkListenersFunc(true)
}
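
// TestCluster_ForwardRequests drives the request forwarding round-robin
// scenario against a fresh three-node test cluster.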
func TestCluster_ForwardRequests(t *testing.T) {
	// Make this nicer for tests
	manualStepDownSleepPeriod = 5 * time.Second

	testCluster_ForwardRequestsCommon(t)
}
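
// testCluster_ForwardRequestsCommon steps the active role around the cluster
// (cores[0] -> cores[1] -> cores[2] -> cores[0] -> ...) and, at each step,
// confirms that both standbys forward requests to the current active node.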
func testCluster_ForwardRequestsCommon(t *testing.T) {
	cluster := NewTestCluster(t, nil, nil)
	cores := cluster.Cores
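
	// Give each core a distinctive handler, status code, and body so a
	// forwarded request's response identifies which core actually served it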
	cores[0].Handler.(*http.ServeMux).HandleFunc("/core1", func(w http.ResponseWriter, req *http.Request) {
		w.Header().Add("Content-Type", "application/json")
		w.WriteHeader(201)
		w.Write([]byte("core1"))
	})
	cores[1].Handler.(*http.ServeMux).HandleFunc("/core2", func(w http.ResponseWriter, req *http.Request) {
		w.Header().Add("Content-Type", "application/json")
		w.WriteHeader(202)
		w.Write([]byte("core2"))
	})
	cores[2].Handler.(*http.ServeMux).HandleFunc("/core3", func(w http.ResponseWriter, req *http.Request) {
		w.Header().Add("Content-Type", "application/json")
		w.WriteHeader(203)
		w.Write([]byte("core3"))
	})
	cluster.Start()
	defer cluster.Cleanup()

	root := cluster.RootToken

	// Wait for core to become active
	TestWaitActive(t, cores[0].Core)

	// Test forwarding a request. Since we're going directly from core to core
	// with no fallback we know that if it worked, request handling is working
	testCluster_ForwardRequests(t, cores[1], root, "core1")
	testCluster_ForwardRequests(t, cores[2], root, "core1")

	//
	// Now we do a bunch of round-robining. The point is to make sure that as
	// nodes come and go, we can always successfully forward to the active
	// node.
	//

	// Ensure active core is cores[1] and test
	err := cores[0].StepDown(context.Background(), &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/step-down",
		ClientToken: root,
	})
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(clusterTestPausePeriod)
	_ = cores[2].StepDown(context.Background(), &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/step-down",
		ClientToken: root,
	})
	time.Sleep(clusterTestPausePeriod)
	TestWaitActive(t, cores[1].Core)
	testCluster_ForwardRequests(t, cores[0], root, "core2")
	testCluster_ForwardRequests(t, cores[2], root, "core2")

	// Ensure active core is cores[2] and test
	err = cores[1].StepDown(context.Background(), &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/step-down",
		ClientToken: root,
	})
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(clusterTestPausePeriod)
	_ = cores[0].StepDown(context.Background(), &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/step-down",
		ClientToken: root,
	})
	time.Sleep(clusterTestPausePeriod)
	TestWaitActive(t, cores[2].Core)
	testCluster_ForwardRequests(t, cores[0], root, "core3")
	testCluster_ForwardRequests(t, cores[1], root, "core3")

	// Ensure active core is cores[0] and test
	err = cores[2].StepDown(context.Background(), &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/step-down",
		ClientToken: root,
	})
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(clusterTestPausePeriod)
	_ = cores[1].StepDown(context.Background(), &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/step-down",
		ClientToken: root,
	})
	time.Sleep(clusterTestPausePeriod)
	TestWaitActive(t, cores[0].Core)
	testCluster_ForwardRequests(t, cores[1], root, "core1")
	testCluster_ForwardRequests(t, cores[2], root, "core1")

	// Ensure active core is cores[1] and test
	err = cores[0].StepDown(context.Background(), &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/step-down",
		ClientToken: root,
	})
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(clusterTestPausePeriod)
	_ = cores[2].StepDown(context.Background(), &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/step-down",
		ClientToken: root,
	})
	time.Sleep(clusterTestPausePeriod)
	TestWaitActive(t, cores[1].Core)
	testCluster_ForwardRequests(t, cores[0], root, "core2")
	testCluster_ForwardRequests(t, cores[2], root, "core2")

	// Ensure active core is cores[2] and test
	err = cores[1].StepDown(context.Background(), &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/step-down",
		ClientToken: root,
	})
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(clusterTestPausePeriod)
	_ = cores[0].StepDown(context.Background(), &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/step-down",
		ClientToken: root,
	})
	time.Sleep(clusterTestPausePeriod)
	TestWaitActive(t, cores[2].Core)
	testCluster_ForwardRequests(t, cores[0], root, "core3")
	testCluster_ForwardRequests(t, cores[1], root, "core3")
}
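
// testCluster_ForwardRequests issues a request against a standby core and
// asserts that it is forwarded to the active node identified by remoteCoreID.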
func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, rootToken, remoteCoreID string) {
	standby, err := c.Standby()
	if err != nil {
		t.Fatal(err)
	}
	if !standby {
		t.Fatal("expected core to be standby")
	}

	// We need to call Leader as that refreshes the connection info
	isLeader, _, _, err := c.Leader()
	if err != nil {
		t.Fatal(err)
	}
	if isLeader {
		t.Fatal("core should not be leader")
	}
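
	// The scheme, host, and port here are arbitrary: ForwardRequest sends the
	// request over the standby's cluster connection to the active node, which
	// routes it by path to the handler registered above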
	bodBuf := bytes.NewReader([]byte(`{ "foo": "bar", "zip": "zap" }`))
	req, err := http.NewRequest("PUT", "https://pushit.real.good:9281/"+remoteCoreID, bodBuf)
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Add(consts.AuthHeaderName, rootToken)
	req = req.WithContext(context.WithValue(req.Context(), "original_request_path", req.URL.Path))

	statusCode, header, respBytes, err := c.ForwardRequest(req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if header == nil {
		t.Fatal("err: expected at least a content-type header")
	}
	if header.Get("Content-Type") != "application/json" {
		t.Fatalf("bad content-type: %s", header.Get("Content-Type"))
	}

	body := string(respBytes)

	if body != remoteCoreID {
		t.Fatalf("expected %s, got %s", remoteCoreID, body)
	}
	switch body {
	case "core1":
		if statusCode != 201 {
			t.Fatal("bad response")
		}
	case "core2":
		if statusCode != 202 {
			t.Fatal("bad response")
		}
	case "core3":
		if statusCode != 203 {
			t.Fatal("bad response")
		}
	}
}
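
// TestCluster_CustomCipherSuites ensures that a ClusterCipherSuites setting
// in the core config is honored when the cluster TLS handshake negotiates a
// cipher suite.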
func TestCluster_CustomCipherSuites(t *testing.T) {
	cluster := NewTestCluster(t, &CoreConfig{
		ClusterCipherSuites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
	}, nil)
	cluster.Start()
	defer cluster.Cleanup()
	core := cluster.Cores[0]

	// Wait for core to become active
	TestWaitActive(t, core.Core)

	core.clusterListener.AddClient(requestForwardingALPN, &requestForwardingClusterClient{core.Core})

	parsedCert := core.localClusterParsedCert.Load().(*x509.Certificate)
	dialer := core.getGRPCDialer(context.Background(), requestForwardingALPN, parsedCert.Subject.CommonName, parsedCert)

	netConn, err := dialer(fmt.Sprintf("%s:%d", core.Listeners[0].Address.IP.String(), core.Listeners[0].Address.Port+105), 0)
	if err != nil {
		t.Fatal(err)
	}
	// Only assert the connection type after the error check; a failed dial
	// returns a nil net.Conn and the assertion would panic
	conn := netConn.(*tls.Conn)
	defer conn.Close()
	err = conn.Handshake()
	if err != nil {
		t.Fatal(err)
	}
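
	// Negotiation should land on the first configured suite; the test
	// cluster's TLS certificate is ECDSA-capable, which is what makes the
	// ECDSA suites selectable here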
	if conn.ConnectionState().CipherSuite != tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 {
		var availCiphers string
		for _, cipher := range core.clusterCipherSuites {
			availCiphers += fmt.Sprintf("%x ", cipher)
		}
		t.Fatalf("got bad negotiated cipher %x, core-set suites are %s", conn.ConnectionState().CipherSuite, availCiphers)
	}
}