package vault

import (
	"bytes"
	"context"
	"crypto/tls"
	"fmt"
	"net/http"
	"sync"
	"testing"
	"time"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/helper/testhelpers/corehelpers"
	"github.com/hashicorp/vault/sdk/helper/consts"
	"github.com/hashicorp/vault/sdk/helper/logging"
	"github.com/hashicorp/vault/sdk/logical"
	"github.com/hashicorp/vault/sdk/physical"
	"github.com/hashicorp/vault/sdk/physical/inmem"
	"github.com/hashicorp/vault/vault/cluster"
)

var clusterTestPausePeriod = 2 * time.Second
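
// TestClusterFetching verifies that a fresh unsealed core generates and
// exposes its cluster name and ID.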
func TestClusterFetching(t *testing.T) {
	c, _, _ := TestCoreUnsealed(t)

	err := c.setupCluster(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	cluster, err := c.Cluster(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	// Test whether expected values are found
	if cluster == nil || cluster.Name == "" || cluster.ID == "" {
		t.Fatalf("cluster information missing: cluster: %#v", cluster)
	}
}
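
// TestClusterHAFetching verifies that cluster information is also available
// from a core backed by an HA physical backend once it becomes active.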
func TestClusterHAFetching(t *testing.T) {
	logger := logging.NewVaultLogger(log.Trace)

	redirect := "http://127.0.0.1:8200"

	inm, err := inmem.NewInmemHA(nil, logger)
	if err != nil {
		t.Fatal(err)
	}
	inmha, err := inmem.NewInmemHA(nil, logger)
	if err != nil {
		t.Fatal(err)
	}
	c, err := NewCore(&CoreConfig{
		Physical:     inm,
		HAPhysical:   inmha.(physical.HABackend),
		RedirectAddr: redirect,
		DisableMlock: true,
	})
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer c.Shutdown()
	keys, _ := TestCoreInit(t, c)
	for _, key := range keys {
		if _, err := TestCoreUnseal(c, TestKeyCopy(key)); err != nil {
			t.Fatalf("unseal err: %s", err)
		}
	}

	// Verify unsealed
	if c.Sealed() {
		t.Fatal("should not be sealed")
	}

	// Wait for core to become active
	TestWaitActive(t, c)

	cluster, err := c.Cluster(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	// Test whether expected values are found
	if cluster == nil || cluster.Name == "" || cluster.ID == "" {
		t.Fatalf("cluster information missing: cluster: %#v", cluster)
	}
}
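
// TestCluster_ListenForRequests verifies that cluster listeners accept
// request-forwarding TLS connections only while the node is active:
// connections succeed against the active node and fail after it steps
// down or seals.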
func TestCluster_ListenForRequests(t *testing.T) {
	// Shorten the manual step-down period to keep the test fast
	manualStepDownSleepPeriod = 5 * time.Second

	cluster := NewTestCluster(t, nil, &TestClusterOptions{
		KeepStandbysSealed: true,
	})
	cluster.Start()
	defer cluster.Cleanup()
	cores := cluster.Cores

	// Wait for core to become active
	TestWaitActive(t, cores[0].Core)

	clusterListener := cores[0].getClusterListener()
	clusterListener.AddClient(consts.RequestForwardingALPN, &requestForwardingClusterClient{cores[0].Core})
	addrs := cores[0].getClusterListener().Addrs()
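
	// checkListenersFunc dials every cluster listener address using the
	// request-forwarding ALPN. When expectFail is false it requires the TLS
	// handshake to succeed with the expected protocol; when true it requires
	// the dial to fail.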
	// Use this to have a valid config after sealing since ClusterTLSConfig returns nil
	checkListenersFunc := func(expectFail bool) {
		dialer := clusterListener.GetDialerFunc(context.Background(), consts.RequestForwardingALPN)
		for i := range cores[0].Listeners {
			clnAddr := addrs[i]
			netConn, err := dialer(clnAddr.String(), 0)
			if err != nil {
				if expectFail {
					t.Logf("testing %s unsuccessful as expected", clnAddr)
					continue
				}
				t.Fatalf("error: %v\ncluster listener is %s", err, clnAddr)
			}
			if expectFail {
				t.Fatalf("testing %s succeeded when failure was expected", clnAddr)
			}
			conn := netConn.(*tls.Conn)
			err = conn.Handshake()
			if err != nil {
				t.Fatal(err)
			}
			connState := conn.ConnectionState()
			switch {
			case connState.Version != tls.VersionTLS12 && connState.Version != tls.VersionTLS13:
				t.Fatal("version mismatch")
			case connState.NegotiatedProtocol != consts.RequestForwardingALPN || !connState.NegotiatedProtocolIsMutual:
				t.Fatal("bad protocol negotiation")
			}
			t.Logf("testing %s successful", clnAddr)
		}
	}

	time.Sleep(clusterTestPausePeriod)
	checkListenersFunc(false)

	err := cores[0].StepDown(context.Background(), &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/step-down",
		ClientToken: cluster.RootToken,
	})
	if err != nil {
		t.Fatal(err)
	}

	// StepDown doesn't wait during actual preSeal so give time for listeners
	// to close
	time.Sleep(clusterTestPausePeriod)
	checkListenersFunc(true)

	// After this period it should be active again
	TestWaitActive(t, cores[0].Core)
	cores[0].getClusterListener().AddClient(consts.RequestForwardingALPN, &requestForwardingClusterClient{cores[0].Core})
	checkListenersFunc(false)

	err = cores[0].Core.Seal(cluster.RootToken)
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(clusterTestPausePeriod)
	// After sealing it should be inactive again
	checkListenersFunc(true)
}
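
// TestCluster_ForwardRequests exercises request forwarding over both a real
// TCP cluster layer and an in-memory network layer.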
func TestCluster_ForwardRequests(t *testing.T) {
	// Shorten the manual step-down period to keep the test fast
	manualStepDownSleepPeriod = 5 * time.Second

	t.Run("tcpLayer", func(t *testing.T) {
		testCluster_ForwardRequestsCommon(t, nil)
	})

	t.Run("inmemLayer", func(t *testing.T) {
		// Run again with in-memory network
		inmemCluster, err := cluster.NewInmemLayerCluster("inmem-cluster", 3, log.New(&log.LoggerOptions{
			Mutex: &sync.Mutex{},
			Level: log.Trace,
			Name:  "inmem-cluster",
		}))
		if err != nil {
			t.Fatal(err)
		}

		testCluster_ForwardRequestsCommon(t, &TestClusterOptions{
			ClusterLayers: inmemCluster,
		})
	})
}
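
// testCluster_ForwardRequestsCommon registers a distinct handler on each
// core, then repeatedly changes the active node and checks that the standbys
// always forward requests to whichever node is currently active.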
func testCluster_ForwardRequestsCommon(t *testing.T, clusterOpts *TestClusterOptions) {
	cluster := NewTestCluster(t, nil, clusterOpts)
	cores := cluster.Cores
	cores[0].Handler.(*http.ServeMux).HandleFunc("/core1", func(w http.ResponseWriter, req *http.Request) {
		w.Header().Add("Content-Type", "application/json")
		w.WriteHeader(201)
		w.Write([]byte("core1"))
	})
	cores[1].Handler.(*http.ServeMux).HandleFunc("/core2", func(w http.ResponseWriter, req *http.Request) {
		w.Header().Add("Content-Type", "application/json")
		w.WriteHeader(202)
		w.Write([]byte("core2"))
	})
	cores[2].Handler.(*http.ServeMux).HandleFunc("/core3", func(w http.ResponseWriter, req *http.Request) {
		w.Header().Add("Content-Type", "application/json")
		w.WriteHeader(203)
		w.Write([]byte("core3"))
	})
	cluster.Start()
	defer cluster.Cleanup()

	root := cluster.RootToken

	// Wait for core to become active
	TestWaitActiveForwardingReady(t, cores[0].Core)

	// Test forwarding a request. Since we're going directly from core to core
	// with no fallback we know that if it worked, request handling is working
	testCluster_ForwardRequests(t, cores[1], root, "core1")
	testCluster_ForwardRequests(t, cores[2], root, "core1")

	//
	// Now we do a bunch of round-robining. The point is to make sure that as
	// nodes come and go, we can always successfully forward to the active
	// node.
	//

	// Ensure active core is cores[1] and test
	testCluster_Forwarding(t, cluster, 0, 1, root, "core2")

	// Ensure active core is cores[2] and test
	testCluster_Forwarding(t, cluster, 1, 2, root, "core3")

	// Ensure active core is cores[0] and test
	testCluster_Forwarding(t, cluster, 2, 0, root, "core1")

	// Ensure active core is cores[1] and test
	testCluster_Forwarding(t, cluster, 0, 1, root, "core2")

	// Ensure active core is cores[2] and test
	testCluster_Forwarding(t, cluster, 1, 2, root, "core3")
}
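
// testCluster_Forwarding steps the cluster over to a new active node and
// then verifies that both standbys forward requests to it.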
func testCluster_Forwarding(t *testing.T, cluster *TestCluster, oldLeaderCoreIdx, newLeaderCoreIdx int, rootToken, remoteCoreID string) {
	t.Logf("new leader idx will be %d, stepping down other cores to make it so", newLeaderCoreIdx)
	err := cluster.Cores[oldLeaderCoreIdx].StepDown(context.Background(), &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/step-down",
		ClientToken: rootToken,
	})
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(clusterTestPausePeriod)

	for i := 0; i < 3; i++ {
		if i != oldLeaderCoreIdx && i != newLeaderCoreIdx {
			_ = cluster.Cores[i].StepDown(context.Background(), &logical.Request{
				Operation:   logical.UpdateOperation,
				Path:        "sys/step-down",
				ClientToken: rootToken,
			})
			time.Sleep(clusterTestPausePeriod)
		}
	}

	TestWaitActiveForwardingReady(t, cluster.Cores[newLeaderCoreIdx].Core)
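
	// Wait for both standbys to learn the new active node's cluster address
	// before trying to forward through them.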
	deadline := time.Now().Add(5 * time.Second)
	var ready int
	for time.Now().Before(deadline) {
		for i := 0; i < 3; i++ {
			if i != newLeaderCoreIdx {
				leaderParams := cluster.Cores[i].clusterLeaderParams.Load().(*ClusterLeaderParams)
				if leaderParams != nil && leaderParams.LeaderClusterAddr == cluster.Cores[newLeaderCoreIdx].ClusterAddr() {
					ready++
				}
			}
		}
		if ready == 2 {
			break
		}
		ready = 0

		time.Sleep(100 * time.Millisecond)
	}
	if ready != 2 {
		t.Fatal("standbys have not discovered the new active node in time")
	}

	for i := 0; i < 3; i++ {
		if i != newLeaderCoreIdx {
			testCluster_ForwardRequests(t, cluster.Cores[i], rootToken, remoteCoreID)
		}
	}
}
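
// testCluster_ForwardRequests confirms the given core is a standby, forwards
// a request through it to the active node, and checks the status code,
// content type, and body against the handler registered there.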
func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, rootToken, remoteCoreID string) {
	t.Helper()

	standby, err := c.Standby()
	if err != nil {
		t.Fatal(err)
	}
	if !standby {
		t.Fatal("expected core to be standby")
	}

	// We need to call Leader as that refreshes the connection info
	isLeader, _, _, err := c.Leader()
	if err != nil {
		t.Fatal(err)
	}
	if isLeader {
		t.Fatal("core should not be leader")
	}
	corehelpers.RetryUntil(t, 5*time.Second, func() error {
		state := c.ActiveNodeReplicationState()
		if state == 0 {
			return fmt.Errorf("heartbeats have not yet returned a valid active node replication state: %d", state)
		}
		return nil
	})
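
	// The host in this URL is arbitrary; the request is forwarded over the
	// cluster connection to the active node, which routes it by path.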
	bodBuf := bytes.NewReader([]byte(`{ "foo": "bar", "zip": "zap" }`))
	req, err := http.NewRequest("PUT", "https://pushit.real.good:9281/"+remoteCoreID, bodBuf)
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Add(consts.AuthHeaderName, rootToken)
	req = req.WithContext(context.WithValue(req.Context(), "original_request_path", req.URL.Path))

	statusCode, header, respBytes, err := c.ForwardRequest(req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if header == nil {
		t.Fatal("err: expected at least a content-type header")
	}
	if header.Get("Content-Type") != "application/json" {
		t.Fatalf("bad content-type: %s", header.Get("Content-Type"))
	}

	body := string(respBytes)

	if body != remoteCoreID {
		t.Fatalf("expected %s, got %s", remoteCoreID, body)
	}
	switch body {
	case "core1":
		if statusCode != 201 {
			t.Fatal("bad response")
		}
	case "core2":
		if statusCode != 202 {
			t.Fatal("bad response")
		}
	case "core3":
		if statusCode != 203 {
			t.Fatal("bad response")
		}
	}
}