open-consul/agent/consul/snapshot_endpoint.go
Matt Keeler 99e0a124cb
New ACLs (#4791)
This PR is almost a complete rewrite of the ACL system within Consul. It brings the features more in line with other HashiCorp products. Obviously there is quite a bit left to do here, but most of it relates to docs, testing, and finishing the last few commands in the CLI. I will update the PR description and check off the todos as I finish them over the next few days/weeks.
Description

At a high level, this PR mainly splits ACL tokens from policies and separates the concept of authorization from identities. A lot of this PR just supports CRUD operations on ACLTokens and ACLPolicies, which in and of themselves are not particularly interesting. The bigger conceptual changes are in how tokens get resolved, how backwards compatibility is handled, and the separation of policy from identity, which could lead the way to alternative identity providers.

On the surface, and with a new cluster, the ACL system will look very similar to Nomad's. Both have tokens and policies. Both have local tokens. The ACL management APIs for both are very similar. I even ripped off Nomad's ACL bootstrap resetting procedure. There are a few key differences though.

    Nomad requires both token and policy replication, whereas Consul only requires policy replication, with token replication being opt-in. In Consul, though, local tokens only work when token replication is enabled.
    All policies in Nomad are globally applicable. In Consul, all policies are stored and replicated globally but can be scoped to a subset of the datacenters, which allows for more granular access management (a rough sketch of the new CRUD surface follows this description).
    Unlike Nomad, Consul has legacy baggage in the form of the original ACL system. The ramifications of this are:
        A server running the new system must still support other clients using the legacy system.
        A client running the new system must be able to use the legacy RPCs when the servers in its datacenter are running the legacy system.
        The primary ACL datacenter's servers running in legacy mode must act as a gate that keeps everything else in the entire multi-DC cluster in legacy mode.

So this PR not only implements the new ACL system but also builds in a legacy mode for when the cluster isn't ready for new ACLs. Detecting that new ACLs can be used is automatic and requires no configuration on the part of administrators. This process is detailed more in the "Transitioning from Legacy to New ACL Mode" section below.
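As a rough sketch of what that CRUD surface looks like from the Go API client, the snippet below creates a datacenter-scoped policy and a local token linked to it. The method and field names (PolicyCreate, TokenCreate, Datacenters, Local) reflect my understanding of the client surface added alongside this change; treat the rule string, names, and the assumption that snapshot access maps to acl = "write" as illustrative rather than authoritative.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Assumes a bootstrapped cluster and a management token in CONSUL_HTTP_TOKEN.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Policies are stored and replicated globally, but Datacenters scopes where
	// this one is valid.
	policy, _, err := client.ACL().PolicyCreate(&api.ACLPolicy{
		Name:        "snapshot-dc1",
		Description: "Allow snapshots, but only in dc1",
		Rules:       `acl = "write"`,
		Datacenters: []string{"dc1"},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Tokens are identities that link to one or more policies; Local keeps this
	// one from being replicated to other datacenters.
	token, _, err := client.ACL().TokenCreate(&api.ACLToken{
		Description: "dc1-local snapshot token",
		Policies:    []*api.ACLTokenPolicyLink{{ID: policy.ID}},
		Local:       true,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created token", token.AccessorID)
}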
2018-10-19 12:04:07 -04:00


// The snapshot endpoint is a special non-RPC endpoint that supports streaming
// for taking and restoring snapshots for disaster recovery. This gets wired
// directly into Consul's stream handler, and a new TCP connection is made for
// each request.
//
// This also includes a SnapshotRPC() function, which acts as a lightweight
// client that knows the details of the stream protocol.
package consul

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"time"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/pool"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/snapshot"
	"github.com/hashicorp/go-msgpack/codec"
)
// dispatchSnapshotRequest takes an incoming request structure with possibly some
// streaming data (for a restore) and returns possibly some streaming data (for
// a snapshot save). We can't use the normal RPC mechanism in a streaming manner
// like this, so we have to dispatch these by hand.
func (s *Server) dispatchSnapshotRequest(args *structs.SnapshotRequest, in io.Reader,
	reply *structs.SnapshotResponse) (io.ReadCloser, error) {
	// Perform DC forwarding.
	if dc := args.Datacenter; dc != s.config.Datacenter {
		manager, server, ok := s.router.FindRoute(dc)
		if !ok {
			return nil, structs.ErrNoDCPath
		}

		snap, err := SnapshotRPC(s.connPool, dc, server.Addr, server.UseTLS, args, in, reply)
		if err != nil {
			manager.NotifyFailedServer(server)
			return nil, err
		}

		return snap, nil
	}

	// Perform leader forwarding if required.
	if !args.AllowStale {
		if isLeader, server := s.getLeader(); !isLeader {
			if server == nil {
				return nil, structs.ErrNoLeader
			}
			return SnapshotRPC(s.connPool, args.Datacenter, server.Addr, server.UseTLS, args, in, reply)
		}
	}

	// Verify token is allowed to operate on snapshots. There's only a
	// single ACL sense here (not read and write) since reading gets you
	// all the ACLs and you could escalate from there.
	if rule, err := s.ResolveToken(args.Token); err != nil {
		return nil, err
	} else if rule != nil && !rule.Snapshot() {
		return nil, acl.ErrPermissionDenied
	}

	// Dispatch the operation.
	switch args.Op {
	case structs.SnapshotSave:
		if !args.AllowStale {
			if err := s.consistentRead(); err != nil {
				return nil, err
			}
		}

		// Set the metadata here before we do anything; this should always be
		// pessimistic if we get more data while the snapshot is being taken.
		s.setQueryMeta(&reply.QueryMeta)

		// Take the snapshot and capture the index.
		snap, err := snapshot.New(s.logger, s.raft)
		reply.Index = snap.Index()
		return snap, err

	case structs.SnapshotRestore:
		if args.AllowStale {
			return nil, fmt.Errorf("stale not allowed for restore")
		}

		// Restore the snapshot.
		if err := snapshot.Restore(s.logger, in, s.raft); err != nil {
			return nil, err
		}

		// Run a barrier so we are sure that our FSM is caught up with
		// any snapshot restore details (it's also part of Raft's restore
		// process but we don't want to depend on that detail for this to
		// be correct). Once that works, we can redo the leader actions
		// so our leader-maintained state will be up to date.
		barrier := s.raft.Barrier(0)
		if err := barrier.Error(); err != nil {
			return nil, err
		}

		// This'll be used for feedback from the leader loop.
		errCh := make(chan error, 1)
		timeoutCh := time.After(time.Minute)

		select {
		// Tell the leader loop to reassert leader actions since we just
		// replaced the state store contents.
		case s.reassertLeaderCh <- errCh:

		// We might have lost leadership while waiting to kick the loop.
		case <-timeoutCh:
			return nil, fmt.Errorf("timed out waiting to re-run leader actions")

		// Make sure we don't get stuck during shutdown
		case <-s.shutdownCh:
		}

		select {
		// Wait for the leader loop to finish up.
		case err := <-errCh:
			if err != nil {
				return nil, err
			}

		// We might have lost leadership while the loop was doing its
		// thing.
		case <-timeoutCh:
			return nil, fmt.Errorf("timed out waiting for re-run of leader actions")

		// Make sure we don't get stuck during shutdown
		case <-s.shutdownCh:
		}

		// Give the caller back an empty reader since there's nothing to
		// stream back.
		return ioutil.NopCloser(bytes.NewReader([]byte(""))), nil

	default:
		return nil, fmt.Errorf("unrecognized snapshot op %q", args.Op)
	}
}
// handleSnapshotRequest reads the request from the conn and dispatches it. This
// will be called from a goroutine after an incoming stream is determined to be
// a snapshot request.
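// By the time this runs, the stream handler (outside this file) has already
// consumed the pool.RPCSnapshot type byte, so what remains on the conn is a
// msgpack-encoded SnapshotRequest followed by the raw input stream (consumed
// during a restore). The response written back is a msgpack-encoded
// SnapshotResponse followed by the snapshot stream (for a save).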
func (s *Server) handleSnapshotRequest(conn net.Conn) error {
	var args structs.SnapshotRequest
	dec := codec.NewDecoder(conn, &codec.MsgpackHandle{})
	if err := dec.Decode(&args); err != nil {
		return fmt.Errorf("failed to decode request: %v", err)
	}

	var reply structs.SnapshotResponse
	snap, err := s.dispatchSnapshotRequest(&args, conn, &reply)
	if err != nil {
		reply.Error = err.Error()
		goto RESPOND
	}
	defer func() {
		if err := snap.Close(); err != nil {
			s.logger.Printf("[ERR] consul: Failed to close snapshot: %v", err)
		}
	}()
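
	// Send the response header back to the client; when dispatch fails, snap is
	// nil, so only the header (carrying reply.Error) is written and no snapshot
	// data is streamed.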
RESPOND:
	enc := codec.NewEncoder(conn, &codec.MsgpackHandle{})
	if err := enc.Encode(&reply); err != nil {
		return fmt.Errorf("failed to encode response: %v", err)
	}
	if snap != nil {
		if _, err := io.Copy(conn, snap); err != nil {
			return fmt.Errorf("failed to stream snapshot: %v", err)
		}
	}
	return nil
}
// SnapshotRPC is a streaming client function for performing a snapshot RPC
// request to a remote server. It will create a fresh connection for each
// request, send the request header, and then stream in any data from the
// reader (for a restore). It will then parse the received response header, and
// if there's no error will return an io.ReadCloser (that you must close) with
// the streaming output (for a snapshot). If the reply contains an error, this
// will always return an error as well, so you don't need to check the error
// inside the filled-in reply.
func SnapshotRPC(connPool *pool.ConnPool, dc string, addr net.Addr, useTLS bool,
	args *structs.SnapshotRequest, in io.Reader, reply *structs.SnapshotResponse) (io.ReadCloser, error) {
	conn, hc, err := connPool.DialTimeout(dc, addr, 10*time.Second, useTLS)
	if err != nil {
		return nil, err
	}

	// keep will disarm the defer on success if we are returning the caller
	// our connection to stream the output.
	var keep bool
	defer func() {
		if !keep {
			conn.Close()
		}
	}()

	// Write the snapshot RPC byte to set the mode, then perform the
	// request.
	if _, err := conn.Write([]byte{byte(pool.RPCSnapshot)}); err != nil {
		return nil, fmt.Errorf("failed to write stream type: %v", err)
	}

	// Push the header encoded as msgpack, then stream the input.
	enc := codec.NewEncoder(conn, &codec.MsgpackHandle{})
	if err := enc.Encode(&args); err != nil {
		return nil, fmt.Errorf("failed to encode request: %v", err)
	}
	if _, err := io.Copy(conn, in); err != nil {
		return nil, fmt.Errorf("failed to copy snapshot in: %v", err)
	}

	// Our RPC protocol requires support for a half-close in order to signal
	// the other side that they are done reading the stream, since we don't
	// know the size in advance. This saves us from having to buffer just to
	// calculate the size.
	if hc != nil {
		if err := hc.CloseWrite(); err != nil {
			return nil, fmt.Errorf("failed to half close snapshot connection: %v", err)
		}
	} else {
		return nil, fmt.Errorf("snapshot connection requires half-close support")
	}

	// Pull the header decoded as msgpack. The caller can continue to read
	// the conn to stream the remaining data.
	dec := codec.NewDecoder(conn, &codec.MsgpackHandle{})
	if err := dec.Decode(reply); err != nil {
		return nil, fmt.Errorf("failed to decode response: %v", err)
	}
	if reply.Error != "" {
		return nil, errors.New(reply.Error)
	}

	keep = true
	return conn, nil
}
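
// The following is an illustrative sketch, not part of the original file: it
// shows roughly how a caller might use SnapshotRPC to save a snapshot and copy
// the resulting stream to an io.Writer supplied by the caller. The function
// name and parameters here are hypothetical; real callers live in the agent's
// snapshot HTTP endpoint and client-side RPC plumbing.
func snapshotSaveExample(connPool *pool.ConnPool, dc string, addr net.Addr, useTLS bool,
	token string, out io.Writer) error {
	// Build a save request. For a restore you would instead set Op to
	// structs.SnapshotRestore and pass the snapshot data as the in reader.
	args := &structs.SnapshotRequest{
		Datacenter: dc,
		Token:      token,
		Op:         structs.SnapshotSave,
	}

	// No input stream is needed for a save, so pass an empty reader.
	var reply structs.SnapshotResponse
	snap, err := SnapshotRPC(connPool, dc, addr, useTLS, args, bytes.NewReader(nil), &reply)
	if err != nil {
		return err
	}
	// The returned ReadCloser must be closed by the caller.
	defer snap.Close()

	// Stream the snapshot data to the caller's writer.
	if _, err := io.Copy(out, snap); err != nil {
		return fmt.Errorf("failed to copy snapshot out: %v", err)
	}
	return nil
}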