open-consul/agent/consul/internal_endpoint.go

package consul

import (
"fmt"
"github.com/hashicorp/consul/acl"
pkg refactor command/agent/* -> agent/* command/consul/* -> agent/consul/* command/agent/command{,_test}.go -> command/agent{,_test}.go command/base/command.go -> command/base.go command/base/* -> command/* commands.go -> command/commands.go The script which did the refactor is: ( cd $GOPATH/src/github.com/hashicorp/consul git mv command/agent/command.go command/agent.go git mv command/agent/command_test.go command/agent_test.go git mv command/agent/flag_slice_value{,_test}.go command/ git mv command/agent . git mv command/base/command.go command/base.go git mv command/base/config_util{,_test}.go command/ git mv commands.go command/ git mv consul agent rmdir command/base/ gsed -i -e 's|package agent|package command|' command/agent{,_test}.go gsed -i -e 's|package agent|package command|' command/flag_slice_value{,_test}.go gsed -i -e 's|package base|package command|' command/base.go command/config_util{,_test}.go gsed -i -e 's|package main|package command|' command/commands.go gsed -i -e 's|base.Command|BaseCommand|' command/commands.go gsed -i -e 's|agent.Command|AgentCommand|' command/commands.go gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/commands.go gsed -i -e 's|base\.||' command/commands.go gsed -i -e 's|command\.||' command/commands.go gsed -i -e 's|command|c|' main.go gsed -i -e 's|range Commands|range command.Commands|' main.go gsed -i -e 's|Commands: Commands|Commands: command.Commands|' main.go gsed -i -e 's|base\.BoolValue|BoolValue|' command/operator_autopilot_set.go gsed -i -e 's|base\.DurationValue|DurationValue|' command/operator_autopilot_set.go gsed -i -e 's|base\.StringValue|StringValue|' command/operator_autopilot_set.go gsed -i -e 's|base\.UintValue|UintValue|' command/operator_autopilot_set.go gsed -i -e 's|\bCommand\b|BaseCommand|' command/base.go gsed -i -e 's|BaseCommand Options|Command Options|' command/base.go gsed -i -e 's|base.Command|BaseCommand|' command/*.go gsed -i -e 's|c\.Command|c.BaseCommand|g' command/*.go gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/*_test.go gsed -i -e 's|base\.||' command/*_test.go gsed -i -e 's|\bCommand\b|AgentCommand|' command/agent{,_test}.go gsed -i -e 's|cmd.AgentCommand|cmd.BaseCommand|' command/agent.go gsed -i -e 's|cli.AgentCommand = new(Command)|cli.Command = new(AgentCommand)|' command/agent_test.go gsed -i -e 's|exec.AgentCommand|exec.Command|' command/agent_test.go gsed -i -e 's|exec.BaseCommand|exec.Command|' command/agent_test.go gsed -i -e 's|NewTestAgent|agent.NewTestAgent|' command/agent_test.go gsed -i -e 's|= TestConfig|= agent.TestConfig|' command/agent_test.go gsed -i -e 's|: RetryJoin|: agent.RetryJoin|' command/agent_test.go gsed -i -e 's|\.\./\.\./|../|' command/config_util_test.go gsed -i -e 's|\bverifyUniqueListeners|VerifyUniqueListeners|' agent/config{,_test}.go command/agent.go gsed -i -e 's|\bserfLANKeyring\b|SerfLANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go gsed -i -e 's|\bserfWANKeyring\b|SerfWANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go gsed -i -e 's|\bNewAgent\b|agent.New|g' command/agent{,_test}.go gsed -i -e 's|\bNewAgent|New|' agent/{acl_test,agent,testagent}.go gsed -i -e 's|\bAgent\b|agent.&|g' command/agent{,_test}.go gsed -i -e 's|\bBool\b|agent.&|g' command/agent{,_test}.go gsed -i -e 's|\bConfig\b|agent.&|g' command/agent{,_test}.go gsed -i -e 's|\bDefaultConfig\b|agent.&|g' command/agent{,_test}.go gsed -i -e 's|\bDevConfig\b|agent.&|g' command/agent{,_test}.go gsed -i -e 's|\bMergeConfig\b|agent.&|g' command/agent{,_test}.go gsed -i -e 
's|\bReadConfigPaths\b|agent.&|g' command/agent{,_test}.go gsed -i -e 's|\bParseMetaPair\b|agent.&|g' command/agent{,_test}.go gsed -i -e 's|\bSerfLANKeyring\b|agent.&|g' command/agent{,_test}.go gsed -i -e 's|\bSerfWANKeyring\b|agent.&|g' command/agent{,_test}.go gsed -i -e 's|circonus\.agent|circonus|g' command/agent{,_test}.go gsed -i -e 's|logger\.agent|logger|g' command/agent{,_test}.go gsed -i -e 's|metrics\.agent|metrics|g' command/agent{,_test}.go gsed -i -e 's|// agent.Agent|// agent|' command/agent{,_test}.go gsed -i -e 's|a\.agent\.Config|a.Config|' command/agent{,_test}.go gsed -i -e 's|agent\.AppendSliceValue|AppendSliceValue|' command/{configtest,validate}.go gsed -i -e 's|consul/consul|agent/consul|' GNUmakefile gsed -i -e 's|\.\./test|../../test|' agent/consul/server_test.go # fix imports f=$(grep -rl 'github.com/hashicorp/consul/command/agent' * | grep '\.go') gsed -i -e 's|github.com/hashicorp/consul/command/agent|github.com/hashicorp/consul/agent|' $f goimports -w $f f=$(grep -rl 'github.com/hashicorp/consul/consul' * | grep '\.go') gsed -i -e 's|github.com/hashicorp/consul/consul|github.com/hashicorp/consul/agent/consul|' $f goimports -w $f goimports -w command/*.go main.go )
2017-06-09 22:28:28 +00:00
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
bexpr "github.com/hashicorp/go-bexpr"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/serf/serf"
2014-04-27 19:56:06 +00:00
)

// Internal endpoint is used to query the miscellaneous info that
// does not necessarily fit into the other systems. It is also
// used to hold undocumented APIs that users should not rely on.
type Internal struct {
	srv    *Server
	logger hclog.Logger
}
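
// Endpoints on Internal are registered with the server's RPC layer and are
// invoked through Consul's internal RPC, for example via an agent's
// RPC("Internal.NodeInfo", &args, &reply) call.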

// NodeInfo is used to retrieve information about a specific node.
func (m *Internal) NodeInfo(args *structs.NodeSpecificRequest,
	reply *structs.IndexedNodeDump) error {
	if done, err := m.srv.forward("Internal.NodeInfo", args, args, reply); done {
		return err
	}
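
	// m.srv.blockingQuery implements long polling: the function below is
	// re-run when the watched state changes until the query can return.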
	return m.srv.blockingQuery(
		&args.QueryOptions,
		&reply.QueryMeta,
		func(ws memdb.WatchSet, state *state.Store) error {
			index, dump, err := state.NodeInfo(ws, args.Node, &args.EnterpriseMeta)
			if err != nil {
				return err
			}

			reply.Index, reply.Dump = index, dump
			return m.srv.filterACL(args.Token, reply)
		})
}

// NodeDump is used to generate information about all of the nodes.
func (m *Internal) NodeDump(args *structs.DCSpecificRequest,
	reply *structs.IndexedNodeDump) error {
	if done, err := m.srv.forward("Internal.NodeDump", args, args, reply); done {
		return err
	}

	filter, err := bexpr.CreateFilter(args.Filter, nil, reply.Dump)
	if err != nil {
		return err
	}
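
	// The bexpr filter is applied after ACL filtering inside the query
	// function, so filter expressions only see results the token can read.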
	return m.srv.blockingQuery(
		&args.QueryOptions,
		&reply.QueryMeta,
		func(ws memdb.WatchSet, state *state.Store) error {
			index, dump, err := state.NodeDump(ws, &args.EnterpriseMeta)
			if err != nil {
				return err
			}

			reply.Index, reply.Dump = index, dump
			if err := m.srv.filterACL(args.Token, reply); err != nil {
				return err
			}

			raw, err := filter.Execute(reply.Dump)
			if err != nil {
				return err
			}

			reply.Dump = raw.(structs.NodeDump)
			return nil
		})
}
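
// ServiceDump is used to generate information about all services, optionally
// restricted to services of the kind given in the request.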
func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.IndexedCheckServiceNodes) error {
	if done, err := m.srv.forward("Internal.ServiceDump", args, args, reply); done {
		return err
	}

	filter, err := bexpr.CreateFilter(args.Filter, nil, reply.Nodes)
	if err != nil {
		return err
	}

	return m.srv.blockingQuery(
		&args.QueryOptions,
		&reply.QueryMeta,
		func(ws memdb.WatchSet, state *state.Store) error {
			index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta)
			if err != nil {
				return err
			}

			reply.Index, reply.Nodes = index, nodes
			if err := m.srv.filterACL(args.Token, reply); err != nil {
				return err
			}

			raw, err := filter.Execute(reply.Nodes)
			if err != nil {
				return err
			}

			reply.Nodes = raw.(structs.CheckServiceNodes)
			return nil
		})
}

// EventFire is a bit of an odd endpoint, but it allows for a cross-DC RPC
// call to fire an event. The primary use case is to enable user events being
// triggered in a remote DC.
func (m *Internal) EventFire(args *structs.EventFireRequest,
	reply *structs.EventFireResponse) error {
	if done, err := m.srv.forward("Internal.EventFire", args, args, reply); done {
		return err
	}

	// Check ACLs
	rule, err := m.srv.ResolveToken(args.Token)
	if err != nil {
		return err
	}
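
	// A nil rule means ACLs are disabled, in which case the event is allowed.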
if rule != nil && rule.EventWrite(args.Name, nil) != acl.Allow {
accessorID := m.aclAccessorID(args.Token)
m.logger.Warn("user event blocked by ACLs", "event", args.Name, "accessorID", accessorID)
return acl.ErrPermissionDenied
2015-06-18 01:57:17 +00:00
}
2014-08-28 22:00:49 +00:00
// Set the query meta data
m.srv.setQueryMeta(&reply.QueryMeta)
// Add the consul prefix to the event name
eventName := userEventName(args.Name)
// Fire the event on all LAN segments
segments := m.srv.LANSegments()
var errs error
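	// Errors from individual segments are collected so a failure on one
	// segment does not stop the broadcast to the rest.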
	for name, segment := range segments {
		err := segment.UserEvent(eventName, args.Payload, false)
		if err != nil {
			err = fmt.Errorf("error broadcasting event to segment %q: %v", name, err)
			errs = multierror.Append(errs, err)
		}
	}
	return errs
}

// KeyringOperation will query the WAN and LAN gossip keyrings of all nodes.
func (m *Internal) KeyringOperation(
	args *structs.KeyringRequest,
	reply *structs.KeyringResponses) error {

	// Check ACLs
	rule, err := m.srv.ResolveToken(args.Token)
	if err != nil {
		return err
	}
	if rule != nil {
		switch args.Operation {
		case structs.KeyringList:
			if rule.KeyringRead(nil) != acl.Allow {
				return fmt.Errorf("Reading keyring denied by ACLs")
			}
		case structs.KeyringInstall:
			fallthrough
		case structs.KeyringUse:
			fallthrough
		case structs.KeyringRemove:
			if rule.KeyringWrite(nil) != acl.Allow {
				return fmt.Errorf("Modifying keyring denied due to ACLs")
			}
		default:
			panic("Invalid keyring operation")
		}
	}

	// Validate use of local-only
	if args.LocalOnly && args.Operation != structs.KeyringList {
		// Error aggressively to be clear about LocalOnly behavior
		return fmt.Errorf("argument error: LocalOnly can only be used for List operations")
	}

	// args.LocalOnly should always be false for non-GET requests
	if !args.LocalOnly {
		// Only perform WAN keyring querying and RPC forwarding once
		if !args.Forwarded && m.srv.serfWAN != nil {
			args.Forwarded = true
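			// Marking the request as forwarded before the global fan-out
			// keeps remote datacenters from forwarding it again, which
			// would otherwise loop.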
			m.executeKeyringOp(args, reply, true)
			return m.srv.globalRPC("Internal.KeyringOperation", args, reply)
		}
	}

	// Query the LAN keyring of this node's DC
	m.executeKeyringOp(args, reply, false)
	return nil
}

// executeKeyringOp executes the keyring-related operation in the request
// on either the WAN or LAN pools.
func (m *Internal) executeKeyringOp(
	args *structs.KeyringRequest,
	reply *structs.KeyringResponses,
	wan bool) {

	if wan {
		mgr := m.srv.KeyManagerWAN()
		m.executeKeyringOpMgr(mgr, args, reply, wan, "")
	} else {
		segments := m.srv.LANSegments()
		for name, segment := range segments {
			mgr := segment.KeyManager()
			m.executeKeyringOpMgr(mgr, args, reply, wan, name)
		}
	}
}

// executeKeyringOpMgr executes the appropriate keyring-related function based on
// the type of keyring operation in the request. It takes the KeyManager as an
// argument, so it can handle any operation for either LAN or WAN pools.
func (m *Internal) executeKeyringOpMgr(
	mgr *serf.KeyManager,
	args *structs.KeyringRequest,
	reply *structs.KeyringResponses,
	wan bool,
	segment string) {

	var serfResp *serf.KeyResponse
	var err error

	opts := &serf.KeyRequestOptions{RelayFactor: args.RelayFactor}
	switch args.Operation {
	case structs.KeyringList:
		serfResp, err = mgr.ListKeysWithOptions(opts)
	case structs.KeyringInstall:
		serfResp, err = mgr.InstallKeyWithOptions(args.Key, opts)
	case structs.KeyringUse:
		serfResp, err = mgr.UseKeyWithOptions(args.Key, opts)
	case structs.KeyringRemove:
		serfResp, err = mgr.RemoveKeyWithOptions(args.Key, opts)
	}
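
	// serf's KeyManager is assumed to return a non-nil response struct even
	// when it also returns an error, which the dereferences below rely on.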
errStr := ""
if err != nil {
errStr = err.Error()
}
reply.Responses = append(reply.Responses, &structs.KeyringResponse{
WAN: wan,
2014-10-06 22:14:30 +00:00
Datacenter: m.srv.config.Datacenter,
Segment: segment,
Messages: serfResp.Messages,
Keys: serfResp.Keys,
NumNodes: serfResp.NumNodes,
Error: errStr,
})
}

// aclAccessorID is used to convert an ACLToken's secretID to its accessorID
// for non-critical purposes, such as logging. Therefore we interpret all
// errors as empty-string so we can safely log it without handling non-critical
// errors at the usage site.
func (m *Internal) aclAccessorID(secretID string) string {
	_, ident, err := m.srv.ResolveIdentityFromToken(secretID)
	if acl.IsErrNotFound(err) {
		return ""
	}
	if err != nil {
		m.logger.Debug("non-critical error resolving acl token accessor for logging", "error", err)
		return ""
	}
	if ident == nil {
		return ""
	}
	return ident.ID()
}