open-consul/agent/consul/snapshot_endpoint_test.go
R.B. Boyer a7fb26f50f
wan federation via mesh gateways (#6884)
This is like a Möbius strip of code: low-level components (serf/memberlist) are connected to high-level components (the catalog and mesh gateways) in a twisty maze of references, which makes it hard to dive into. With that in mind, here's a high-level summary of what you'll find in the patch:

There are several distinct chunks of code that are affected:

* new flags and config options for the server

* retry join WAN is slightly different

* retry join code is shared to discover primary mesh gateways from secondary datacenters

* because retry join logic runs in the *agent* and the results of that
  operation for primary mesh gateways are needed in the *server*, there are
  some methods like `RefreshPrimaryGatewayFallbackAddresses` that have to
  exist at multiple layers of abstraction just to pass the data down to the
  right layer.

* new cache type `FederationStateListMeshGatewaysName` for use in `proxycfg/xds` layers

* the function signature for RPC dialing picked up a new required field (the
  node name of the destination)

* several new RPCs for manipulating a FederationState object:
  `FederationState:{Apply,Get,List,ListMeshGateways}`

* 3 read-only internal APIs (for debugging use) to invoke those RPCs from curl

* raft and fsm changes to persist these FederationStates

* replication for FederationStates as they are canonically stored in the
  Primary and replicated to the Secondaries.

* a special derivative of anti-entropy that runs in secondaries to snapshot
  their local mesh gateway `CheckServiceNodes` and sync them into their upstream
  FederationState in the primary (this works in conjunction with the
  replication to distribute addresses for all mesh gateways in all DCs to all
  other DCs)

* a "gateway locator" convenience object to make use of this data to choose
  the addresses of gateways to use for any given RPC or gossip operation to a
  remote DC. This gets data from the "retry join" logic in the agent and also
  directly calls into the FSM.

* RPC (`:8300`) on the server sniffs the first byte of a new connection to
  determine if it's actually doing native TLS. If so, it checks the ALPN value
  negotiated in the handshake to determine the protocol (just as the existing
  system uses the type-byte marker). A rough, hypothetical sketch of this
  sniff-then-dispatch idea appears after this list.

* 2 new kinds of protocols are exclusively decoded via this native TLS
  mechanism: one for ferrying "packet" operations (udp-like) from the gossip
  layer and one for "stream" operations (tcp-like). The packet operations
  reuse sockets (using length-prefixing) to cut down on TLS renegotiation
  overhead.

* the server instances specially wrap the `memberlist.NetTransport` when running
  with gateway federation enabled (in a `wanfed.Transport`). The general gist is
  that if it tries to dial a node in the SAME datacenter (deduced by looking
  at the suffix of the node name) there is no change. If dialing a DIFFERENT
  datacenter it is wrapped up in a TLS+ALPN blob and sent through some mesh
  gateways to eventually end up in a server's :8300 port.

* a new flag when launching a mesh gateway via `consul connect envoy` to
  indicate that the servers are to be exposed. This sets a special service
  meta when registering the gateway into the catalog.

* the `proxycfg/xds` layers notice this metadata blob and activate additional
  watches for the FederationState objects as well as the location of all of
  the consul servers in that datacenter.

* `xds`: if the extra metadata is in place, additional clusters are defined in a
  DC to bulk-sink all traffic to another DC's gateways. For the current
  datacenter we listen on a wildcard name (`server.<dc>.consul`) that load
  balances across all servers, as well as one mini-cluster per node
  (`<node>.server.<dc>.consul`).

* the `consul tls cert create` command got a new flag (`-node`) to help create
  an additional SAN in certs that can be used with this flavor of federation.
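
To make the sniff-then-dispatch mechanism above a little more concrete, here is a minimal, hypothetical sketch of the general technique in Go. It is not the code from this patch: the package name, the ALPN strings, and the `handlePackets`/`handleStream`/`handleLegacy`/`deliverPacket` helpers are invented purely for illustration.

```go
// Package wanfedsketch is a self-contained illustration only. The names in
// this file are hypothetical and do not correspond to Consul's internals.
package wanfedsketch

import (
	"bufio"
	"crypto/tls"
	"encoding/binary"
	"io"
	"net"
)

// Hypothetical ALPN protocol names; tlsConf.NextProtos would advertise both.
const (
	alpnPacket = "sketch-packet" // udp-like gossip packets, length-prefixed
	alpnStream = "sketch-stream" // tcp-like gossip streams
)

// handleConn sniffs the first byte of a new connection. 0x16 is the TLS
// handshake record type, so such a connection is treated as native TLS and
// dispatched on the ALPN protocol negotiated during the handshake; anything
// else falls back to the existing single type-byte protocol marker.
func handleConn(conn net.Conn, tlsConf *tls.Config) {
	defer conn.Close()

	br := bufio.NewReader(conn)
	first, err := br.Peek(1)
	if err != nil {
		return
	}

	// Re-wrap the connection so the byte we peeked is not lost to later reads.
	wrapped := &bufferedConn{Conn: conn, r: br}

	if first[0] == 0x16 { // TLS handshake record
		tconn := tls.Server(wrapped, tlsConf)
		if err := tconn.Handshake(); err != nil {
			return
		}
		switch tconn.ConnectionState().NegotiatedProtocol {
		case alpnPacket:
			handlePackets(tconn)
		case alpnStream:
			handleStream(tconn)
		}
		return
	}

	handleLegacy(first[0], wrapped) // existing type-byte dispatch (stubbed)
}

// handlePackets shows how "packet" operations could reuse one TLS session:
// each packet is a uint32 length prefix followed by its payload, so gossip
// packets don't pay for a fresh TLS handshake every time.
func handlePackets(c net.Conn) {
	for {
		var n uint32
		if err := binary.Read(c, binary.BigEndian, &n); err != nil {
			return
		}
		pkt := make([]byte, n)
		if _, err := io.ReadFull(c, pkt); err != nil {
			return
		}
		deliverPacket(pkt)
	}
}

// bufferedConn replays bytes held in the bufio.Reader used for peeking.
type bufferedConn struct {
	net.Conn
	r *bufio.Reader
}

func (c *bufferedConn) Read(p []byte) (int, error) { return c.r.Read(p) }

// Stubs so the sketch compiles; a real system would hand these off to the
// gossip layer and the legacy RPC router respectively.
func handleStream(net.Conn)       {}
func handleLegacy(byte, net.Conn) {}
func deliverPacket([]byte)        {}
```

The real code paths differ in many details; the sketch only conveys the shape of the first-byte sniff, the ALPN-based dispatch, and the length-prefixed packet reuse described above.
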
2020-03-09 15:59:02 -05:00


package consul

import (
	"bytes"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/hashicorp/consul/testrpc"
	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
)
// verifySnapshot is a helper that does a snapshot and restore.
func verifySnapshot(t *testing.T, s *Server, dc, token string) {
	codec := rpcClient(t, s)
	defer codec.Close()

	// Set a key to a before value.
	{
		args := structs.KVSRequest{
			Datacenter: dc,
			Op:         api.KVSet,
			DirEnt: structs.DirEntry{
				Key:   "test",
				Value: []byte("hello"),
			},
			WriteRequest: structs.WriteRequest{
				Token: token,
			},
		}
		var out bool
		if err := msgpackrpc.CallWithCodec(codec, "KVS.Apply", &args, &out); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Take a snapshot.
	args := structs.SnapshotRequest{
		Datacenter: dc,
		Token:      token,
		Op:         structs.SnapshotSave,
	}
	var reply structs.SnapshotResponse
	snap, err := SnapshotRPC(s.connPool, s.config.Datacenter, s.config.NodeName, s.config.RPCAddr, false,
		&args, bytes.NewReader([]byte("")), &reply)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer snap.Close()

	// Read back the before value.
	{
		getR := structs.KeyRequest{
			Datacenter: dc,
			Key:        "test",
			QueryOptions: structs.QueryOptions{
				Token: token,
			},
		}
		var dirent structs.IndexedDirEntries
		if err := msgpackrpc.CallWithCodec(codec, "KVS.Get", &getR, &dirent); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(dirent.Entries) != 1 {
			t.Fatalf("Bad: %v", dirent)
		}
		d := dirent.Entries[0]
		if string(d.Value) != "hello" {
			t.Fatalf("bad: %v", d)
		}
	}

	// Set a key to an after value.
	{
		args := structs.KVSRequest{
			Datacenter: dc,
			Op:         api.KVSet,
			DirEnt: structs.DirEntry{
				Key:   "test",
				Value: []byte("goodbye"),
			},
			WriteRequest: structs.WriteRequest{
				Token: token,
			},
		}
		var out bool
		if err := msgpackrpc.CallWithCodec(codec, "KVS.Apply", &args, &out); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Read back the after value. We do this with a retry and stale mode so
	// we can query the server we are working with, which might not be the
	// leader.
	retry.Run(t, func(r *retry.R) {
		getR := structs.KeyRequest{
			Datacenter: dc,
			Key:        "test",
			QueryOptions: structs.QueryOptions{
				Token:      token,
				AllowStale: true,
			},
		}
		var dirent structs.IndexedDirEntries
		if err := msgpackrpc.CallWithCodec(codec, "KVS.Get", &getR, &dirent); err != nil {
			r.Fatalf("err: %v", err)
		}
		if len(dirent.Entries) != 1 {
			r.Fatalf("Bad: %v", dirent)
		}
		d := dirent.Entries[0]
		if string(d.Value) != "goodbye" {
			r.Fatalf("bad: %v", d)
		}
	})

	// Restore the snapshot.
	args.Op = structs.SnapshotRestore
	restore, err := SnapshotRPC(s.connPool, s.config.Datacenter, s.config.NodeName, s.config.RPCAddr, false,
		&args, snap, &reply)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer restore.Close()

	// Read back the before value post-snapshot. Similar rationale here; use
	// stale to query the server we are working with.
	retry.Run(t, func(r *retry.R) {
		getR := structs.KeyRequest{
			Datacenter: dc,
			Key:        "test",
			QueryOptions: structs.QueryOptions{
				Token:      token,
				AllowStale: true,
			},
		}
		var dirent structs.IndexedDirEntries
		if err := msgpackrpc.CallWithCodec(codec, "KVS.Get", &getR, &dirent); err != nil {
			r.Fatalf("err: %v", err)
		}
		if len(dirent.Entries) != 1 {
			r.Fatalf("Bad: %v", dirent)
		}
		d := dirent.Entries[0]
		if string(d.Value) != "hello" {
			r.Fatalf("bad: %v", d)
		}
	})
}
func TestSnapshot(t *testing.T) {
	t.Parallel()

	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	verifySnapshot(t, s1, "dc1", "")
}
func TestSnapshot_LeaderState(t *testing.T) {
	t.Parallel()

	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	testrpc.WaitForTestAgent(t, s1.RPC, "dc1")

	codec := rpcClient(t, s1)
	defer codec.Close()

	// Make a before session.
	var before string
	{
		args := structs.SessionRequest{
			Datacenter: s1.config.Datacenter,
			Op:         structs.SessionCreate,
			Session: structs.Session{
				Node: s1.config.NodeName,
				TTL:  "60s",
			},
		}
		if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &args, &before); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Take a snapshot.
	args := structs.SnapshotRequest{
		Datacenter: s1.config.Datacenter,
		Op:         structs.SnapshotSave,
	}
	var reply structs.SnapshotResponse
	snap, err := SnapshotRPC(s1.connPool, s1.config.Datacenter, s1.config.NodeName, s1.config.RPCAddr, false,
		&args, bytes.NewReader([]byte("")), &reply)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer snap.Close()

	// Make an after session.
	var after string
	{
		args := structs.SessionRequest{
			Datacenter: s1.config.Datacenter,
			Op:         structs.SessionCreate,
			Session: structs.Session{
				Node: s1.config.NodeName,
				TTL:  "60s",
			},
		}
		if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &args, &after); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Make sure the leader has timers set up.
	if s1.sessionTimers.Get(before) == nil {
		t.Fatalf("missing session timer")
	}
	if s1.sessionTimers.Get(after) == nil {
		t.Fatalf("missing session timer")
	}

	// Restore the snapshot.
	args.Op = structs.SnapshotRestore
	restore, err := SnapshotRPC(s1.connPool, s1.config.Datacenter, s1.config.NodeName, s1.config.RPCAddr, false,
		&args, snap, &reply)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer restore.Close()

	// Make sure the before timer is still there, and that the after timer
	// got reverted. This proves we fully cycled the leader state.
	if s1.sessionTimers.Get(before) == nil {
		t.Fatalf("missing session timer")
	}
	if s1.sessionTimers.Get(after) != nil {
		t.Fatalf("unexpected session timer")
	}
}
func TestSnapshot_ACLDeny(t *testing.T) {
	t.Parallel()

	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLsEnabled = true
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Take a snapshot.
	func() {
		args := structs.SnapshotRequest{
			Datacenter: "dc1",
			Op:         structs.SnapshotSave,
		}
		var reply structs.SnapshotResponse
		_, err := SnapshotRPC(s1.connPool, s1.config.Datacenter, s1.config.NodeName, s1.config.RPCAddr, false,
			&args, bytes.NewReader([]byte("")), &reply)
		if !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	}()

	// Restore a snapshot.
	func() {
		args := structs.SnapshotRequest{
			Datacenter: "dc1",
			Op:         structs.SnapshotRestore,
		}
		var reply structs.SnapshotResponse
		_, err := SnapshotRPC(s1.connPool, s1.config.Datacenter, s1.config.NodeName, s1.config.RPCAddr, false,
			&args, bytes.NewReader([]byte("")), &reply)
		if !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	}()

	// With the token in place everything should go through.
	verifySnapshot(t, s1, "dc1", "root")
}
func TestSnapshot_Forward_Leader(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = true
		c.SerfWANConfig = nil

		// Effectively disable autopilot. Changes in server config lead to
		// flakiness because snapshotting fails if there are outstanding
		// config changes.
		c.AutopilotInterval = 50 * time.Second

		// Since we are doing multiple restores to the same leader,
		// the default short time for a reconcile can cause the
		// reconcile to get aborted by our snapshot restore. By
		// setting it much longer than the test, we avoid this case.
		c.ReconcileInterval = 60 * time.Second
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForTestAgent(t, s1.RPC, "dc1")

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.SerfWANConfig = nil
		c.AutopilotInterval = 50 * time.Second
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join.
	joinLAN(t, s2, s1)
	testrpc.WaitForLeader(t, s2.RPC, "dc1")

	// Run against the leader and the follower to ensure we forward. When
	// we changed to Raft protocol version 3, since we only have two servers,
	// the second one isn't a voter, so the snapshot API doesn't wait for
	// that to replicate before returning success. We added some logic to
	// verifySnapshot() to poll the server we are working with in stale mode
	// in order to verify that the snapshot contents are there. Previously,
	// with Raft protocol version 2, the snapshot API would wait until the
	// follower got the information as well since it was required to meet
	// the quorum (2/2 servers), so things were synchronized properly with
	// no special logic.
	verifySnapshot(t, s1, "dc1", "")
	verifySnapshot(t, s2, "dc1", "")
}
func TestSnapshot_Forward_Datacenter(t *testing.T) {
	t.Parallel()

	dir1, s1 := testServerDC(t, "dc1")
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerDC(t, "dc2")
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
	testrpc.WaitForTestAgent(t, s2.RPC, "dc2")

	// Try to WAN join.
	joinWAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.WANMembers()), 2; got < want {
			r.Fatalf("got %d WAN members want at least %d", got, want)
		}
	})

	// Run a snapshot from each server locally and remotely to ensure we
	// forward.
	for _, s := range []*Server{s1, s2} {
		verifySnapshot(t, s, "dc1", "")
		verifySnapshot(t, s, "dc2", "")
	}
}
func TestSnapshot_AllowStale(t *testing.T) {
	t.Parallel()

	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Run against servers that haven't been set up to establish a leader
	// and make sure we get a no leader error.
	for _, s := range []*Server{s1, s2} {
		// Take a snapshot.
		args := structs.SnapshotRequest{
			Datacenter: s.config.Datacenter,
			Op:         structs.SnapshotSave,
		}
		var reply structs.SnapshotResponse
		_, err := SnapshotRPC(s.connPool, s.config.Datacenter, s.config.NodeName, s.config.RPCAddr, false,
			&args, bytes.NewReader([]byte("")), &reply)
		if err == nil || !strings.Contains(err.Error(), structs.ErrNoLeader.Error()) {
			t.Fatalf("err: %v", err)
		}
	}

	// Run in stale mode and make sure we get an error from Raft (snapshot
	// was attempted), and not a no leader error.
	for _, s := range []*Server{s1, s2} {
		// Take a snapshot.
		args := structs.SnapshotRequest{
			Datacenter: s.config.Datacenter,
			AllowStale: true,
			Op:         structs.SnapshotSave,
		}
		var reply structs.SnapshotResponse
		_, err := SnapshotRPC(s.connPool, s.config.Datacenter, s.config.NodeName, s.config.RPCAddr, false,
			&args, bytes.NewReader([]byte("")), &reply)
		if err == nil || !strings.Contains(err.Error(), "Raft error when taking snapshot") {
			t.Fatalf("err: %v", err)
		}
	}
}