package agent

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/mitchellh/mapstructure"

	"github.com/hashicorp/go-memdb"
	"github.com/mitchellh/hashstructure"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/cache-types"
	"github.com/hashicorp/consul/agent/checks"
	"github.com/hashicorp/consul/agent/config"
	"github.com/hashicorp/consul/agent/connect"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/ipaddr"
	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/consul/logger"
	"github.com/hashicorp/consul/types"
	"github.com/hashicorp/logutils"
	"github.com/hashicorp/serf/coordinate"
	"github.com/hashicorp/serf/serf"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)
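
// Self is the payload returned for the agent's "self" endpoint: the agent's
// configuration summary, network coordinate, Serf member details, runtime
// stats, and node metadata.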
type Self struct {
	Config      interface{}
	DebugConfig map[string]interface{}
	Coord       *coordinate.Coordinate
	Member      serf.Member
	Stats       map[string]map[string]string
	Meta        map[string]string
}
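
// AgentSelf returns the Self payload for the local agent. When ACLs are
// enabled it requires agent:read on the local node name.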
func (s *HTTPServer) AgentSelf(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Fetch the ACL token, if any, and enforce agent policy.
	var token string
	s.parseToken(req, &token)
	rule, err := s.agent.resolveToken(token)
	if err != nil {
		return nil, err
	}
	if rule != nil && !rule.AgentRead(s.agent.config.NodeName) {
		return nil, acl.ErrPermissionDenied
	}

	var cs lib.CoordinateSet
	if !s.agent.config.DisableCoordinates {
		var err error
		if cs, err = s.agent.GetLANCoordinate(); err != nil {
			return nil, err
		}
	}

	config := struct {
		Datacenter string
		NodeName   string
		NodeID     string
		Revision   string
		Server     bool
		Version    string
	}{
		Datacenter: s.agent.config.Datacenter,
		NodeName:   s.agent.config.NodeName,
		NodeID:     string(s.agent.config.NodeID),
		Revision:   s.agent.config.Revision,
		Server:     s.agent.config.ServerMode,
		Version:    s.agent.config.Version,
	}
	return Self{
		Config:      config,
		DebugConfig: s.agent.config.Sanitized(),
		Coord:       cs[s.agent.config.SegmentName],
		Member:      s.agent.LocalMember(),
		Stats:       s.agent.Stats(),
		Meta:        s.agent.State.Metadata(),
	}, nil
}

// enablePrometheusOutput returns true if the format query parameter asks for
// Prometheus-formatted metrics, following the same convention as Nomad.
func enablePrometheusOutput(req *http.Request) bool {
	if format := req.URL.Query().Get("format"); format == "prometheus" {
		return true
	}
	return false
}
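
// AgentMetrics serves agent telemetry, either from the in-memory metrics sink
// or in Prometheus exposition format when format=prometheus is requested.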
func (s *HTTPServer) AgentMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Fetch the ACL token, if any, and enforce agent policy.
	var token string
	s.parseToken(req, &token)
	rule, err := s.agent.resolveToken(token)
	if err != nil {
		return nil, err
	}
	if rule != nil && !rule.AgentRead(s.agent.config.NodeName) {
		return nil, acl.ErrPermissionDenied
	}
	if enablePrometheusOutput(req) {
		if s.agent.config.Telemetry.PrometheusRetentionTime < 1 {
			resp.WriteHeader(http.StatusUnsupportedMediaType)
			fmt.Fprint(resp, "Prometheus is not enabled since its retention time is not positive")
			return nil, nil
		}
		handlerOptions := promhttp.HandlerOpts{
			ErrorLog:      s.agent.logger,
			ErrorHandling: promhttp.ContinueOnError,
		}

		handler := promhttp.HandlerFor(prometheus.DefaultGatherer, handlerOptions)
		handler.ServeHTTP(resp, req)
		return nil, nil
	}
	return s.agent.MemSink.DisplayMetrics(resp, req)
}
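
// AgentReload triggers a configuration reload and waits for it to complete
// or for the agent to shut down.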
func (s *HTTPServer) AgentReload(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Fetch the ACL token, if any, and enforce agent policy.
	var token string
	s.parseToken(req, &token)
	rule, err := s.agent.resolveToken(token)
	if err != nil {
		return nil, err
	}
	if rule != nil && !rule.AgentWrite(s.agent.config.NodeName) {
		return nil, acl.ErrPermissionDenied
	}

	// Trigger the reload
	errCh := make(chan error, 0)
	select {
	case <-s.agent.shutdownCh:
		return nil, fmt.Errorf("Agent was shutdown before reload could be completed")
	case s.agent.reloadCh <- errCh:
	}

	// Wait for the result of the reload, or for the agent to shutdown
	select {
	case <-s.agent.shutdownCh:
		return nil, fmt.Errorf("Agent was shutdown before reload could be completed")
	case err := <-errCh:
		return nil, err
	}
}
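
// AgentServices returns the services registered with the local agent,
// filtered by the request ACL token.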
func (s *HTTPServer) AgentServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Fetch the ACL token, if any.
	var token string
	s.parseToken(req, &token)

	services := s.agent.State.Services()
	if err := s.agent.filterServices(token, &services); err != nil {
		return nil, err
	}

	proxies := s.agent.State.Proxies()

	// Convert to api.AgentService since that includes the Connect config that
	// NodeService doesn't need internally. The structs are otherwise identical,
	// and api.AgentService is what clients use to read this output anyway.
	agentSvcs := make(map[string]*api.AgentService)

	// Use empty list instead of nil
	for id, s := range services {
		as := &api.AgentService{
			Kind:              api.ServiceKind(s.Kind),
			ID:                s.ID,
			Service:           s.Service,
			Tags:              s.Tags,
			Meta:              s.Meta,
			Port:              s.Port,
			Address:           s.Address,
			EnableTagOverride: s.EnableTagOverride,
			CreateIndex:       s.CreateIndex,
			ModifyIndex:       s.ModifyIndex,
			ProxyDestination:  s.ProxyDestination,
		}
		if as.Tags == nil {
			as.Tags = []string{}
		}
		if as.Meta == nil {
			as.Meta = map[string]string{}
		}
		// Attach Connect configs if they exist
		if proxy, ok := proxies[id+"-proxy"]; ok {
			as.Connect = &api.AgentServiceConnect{
				Proxy: &api.AgentServiceConnectProxy{
					ExecMode: api.ProxyExecMode(proxy.Proxy.ExecMode.String()),
					Command:  proxy.Proxy.Command,
					Config:   proxy.Proxy.Config,
				},
			}
		}
		agentSvcs[id] = as
	}

	return agentSvcs, nil
}
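
// AgentChecks returns the checks registered with the local agent, filtered
// by the request ACL token.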
func (s *HTTPServer) AgentChecks(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Fetch the ACL token, if any.
	var token string
	s.parseToken(req, &token)

	checks := s.agent.State.Checks()
	if err := s.agent.filterChecks(token, &checks); err != nil {
		return nil, err
	}

	// Use empty list instead of nil
	for id, c := range checks {
		if c.ServiceTags == nil {
			clone := *c
			clone.ServiceTags = make([]string, 0)
			checks[id] = &clone
		}
	}

	return checks, nil
}
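
// AgentMembers returns the cluster members known to this agent, from the LAN
// pool (optionally a single segment) or the WAN pool when wan is set.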
func (s *HTTPServer) AgentMembers(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Fetch the ACL token, if any.
	var token string
	s.parseToken(req, &token)

	// Check if the WAN is being queried
	wan := false
	if other := req.URL.Query().Get("wan"); other != "" {
		wan = true
	}

	segment := req.URL.Query().Get("segment")
	if wan {
		switch segment {
		case "", api.AllSegments:
			// The zero value and the special "give me all members"
			// key are ok, otherwise the argument doesn't apply to
			// the WAN.
		default:
			resp.WriteHeader(http.StatusBadRequest)
			fmt.Fprint(resp, "Cannot provide a segment with wan=true")
			return nil, nil
		}
	}

	var members []serf.Member
	if wan {
		members = s.agent.WANMembers()
	} else {
		var err error
		if segment == api.AllSegments {
			members, err = s.agent.delegate.LANMembersAllSegments()
		} else {
			members, err = s.agent.delegate.LANSegmentMembers(segment)
		}
		if err != nil {
			return nil, err
		}
	}
	if err := s.agent.filterMembers(token, &members); err != nil {
		return nil, err
	}
	return members, nil
}
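
// AgentJoin joins this agent to the cluster member given in the URL, over
// the WAN when wan is set and over the LAN otherwise.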
func (s *HTTPServer) AgentJoin(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Fetch the ACL token, if any, and enforce agent policy.
	var token string
	s.parseToken(req, &token)
	rule, err := s.agent.resolveToken(token)
	if err != nil {
		return nil, err
	}
	if rule != nil && !rule.AgentWrite(s.agent.config.NodeName) {
		return nil, acl.ErrPermissionDenied
	}

	// Check if the WAN is being queried
	wan := false
	if other := req.URL.Query().Get("wan"); other != "" {
		wan = true
	}

	// Get the address
	addr := strings.TrimPrefix(req.URL.Path, "/v1/agent/join/")
	if wan {
		_, err = s.agent.JoinWAN([]string{addr})
	} else {
		_, err = s.agent.JoinLAN([]string{addr})
	}
	return nil, err
}
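
// AgentLeave gracefully leaves the cluster and then shuts the agent down.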
func (s *HTTPServer) AgentLeave(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Fetch the ACL token, if any, and enforce agent policy.
	var token string
	s.parseToken(req, &token)
	rule, err := s.agent.resolveToken(token)
	if err != nil {
		return nil, err
	}
	if rule != nil && !rule.AgentWrite(s.agent.config.NodeName) {
		return nil, acl.ErrPermissionDenied
	}

	if err := s.agent.Leave(); err != nil {
		return nil, err
	}
	return nil, s.agent.ShutdownAgent()
}
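
// AgentForceLeave forces removal of the member named in the URL from the
// cluster.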
func (s *HTTPServer) AgentForceLeave(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Fetch the ACL token, if any, and enforce agent policy.
	var token string
	s.parseToken(req, &token)
	rule, err := s.agent.resolveToken(token)
	if err != nil {
		return nil, err
	}
	if rule != nil && !rule.AgentWrite(s.agent.config.NodeName) {
		return nil, acl.ErrPermissionDenied
	}

	addr := strings.TrimPrefix(req.URL.Path, "/v1/agent/force-leave/")
	return nil, s.agent.ForceLeave(addr)
}

// syncChanges is a helper function which wraps a blocking call to sync
// services and checks to the server. If the operation fails, we only
// warn because the write did succeed and anti-entropy will sync later.
func (s *HTTPServer) syncChanges() {
	if err := s.agent.State.SyncChanges(); err != nil {
		s.agent.logger.Printf("[ERR] agent: failed to sync changes: %v", err)
	}
}
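
// AgentRegisterCheck registers a new local check from the request body and
// syncs it to the servers.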
func (s *HTTPServer) AgentRegisterCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	var args structs.CheckDefinition
	// Fixup the type decode of TTL or Interval.
	decodeCB := func(raw interface{}) error {
		return FixupCheckType(raw)
	}
	if err := decodeBody(req, &args, decodeCB); err != nil {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(resp, "Request decode failed: %v", err)
		return nil, nil
	}

	// Verify the check has a name.
	if args.Name == "" {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(resp, "Missing check name")
		return nil, nil
	}

	if args.Status != "" && !structs.ValidStatus(args.Status) {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(resp, "Bad check status")
		return nil, nil
	}

	// Construct the health check.
	health := args.HealthCheck(s.agent.config.NodeName)

	// Verify the check type.
	chkType := args.CheckType()
	err := chkType.Validate()
	if err != nil {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(resp, fmt.Errorf("Invalid check: %v", err))
		return nil, nil
	}

	// Get the provided token, if any, and vet against any ACL policies.
	var token string
	s.parseToken(req, &token)
	if err := s.agent.vetCheckRegister(token, health); err != nil {
		return nil, err
	}

	// Add the check.
	if err := s.agent.AddCheck(health, chkType, true, token); err != nil {
		return nil, err
	}
	s.syncChanges()
	return nil, nil
}
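
// AgentDeregisterCheck removes the check named in the URL from the local
// agent and syncs the change.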
func (s *HTTPServer) AgentDeregisterCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/deregister/"))

	// Get the provided token, if any, and vet against any ACL policies.
	var token string
	s.parseToken(req, &token)
	if err := s.agent.vetCheckUpdate(token, checkID); err != nil {
		return nil, err
	}

	if err := s.agent.RemoveCheck(checkID, true); err != nil {
		return nil, err
	}
	s.syncChanges()
	return nil, nil
}
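
// AgentCheckPass sets the given TTL check to passing.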
func (s *HTTPServer) AgentCheckPass(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/pass/"))
	note := req.URL.Query().Get("note")

	// Get the provided token, if any, and vet against any ACL policies.
	var token string
	s.parseToken(req, &token)
	if err := s.agent.vetCheckUpdate(token, checkID); err != nil {
		return nil, err
	}

	if err := s.agent.updateTTLCheck(checkID, api.HealthPassing, note); err != nil {
		return nil, err
	}
	s.syncChanges()
	return nil, nil
}
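
// AgentCheckWarn sets the given TTL check to warning.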
func (s *HTTPServer) AgentCheckWarn(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/warn/"))
	note := req.URL.Query().Get("note")

	// Get the provided token, if any, and vet against any ACL policies.
	var token string
	s.parseToken(req, &token)
	if err := s.agent.vetCheckUpdate(token, checkID); err != nil {
		return nil, err
	}

	if err := s.agent.updateTTLCheck(checkID, api.HealthWarning, note); err != nil {
		return nil, err
	}
	s.syncChanges()
	return nil, nil
}
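
// AgentCheckFail sets the given TTL check to critical.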
func (s *HTTPServer) AgentCheckFail(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/fail/"))
	note := req.URL.Query().Get("note")

	// Get the provided token, if any, and vet against any ACL policies.
	var token string
	s.parseToken(req, &token)
	if err := s.agent.vetCheckUpdate(token, checkID); err != nil {
		return nil, err
	}

	if err := s.agent.updateTTLCheck(checkID, api.HealthCritical, note); err != nil {
		return nil, err
	}
	s.syncChanges()
	return nil, nil
}

// checkUpdate is the payload for a PUT to AgentCheckUpdate.
type checkUpdate struct {
	// Status is one of the api.Health* states: "passing", "warning", or
	// "critical".
	Status string

	// Output is the information to post to the UI for operators as the
	// output of the process that decided to hit the TTL check. This is
	// different from the note field that's associated with the check
	// itself.
	Output string
}

// AgentCheckUpdate is a PUT-based alternative to the GET-based Pass/Warn/Fail
// APIs.
func (s *HTTPServer) AgentCheckUpdate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	var update checkUpdate
	if err := decodeBody(req, &update, nil); err != nil {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(resp, "Request decode failed: %v", err)
		return nil, nil
	}

	switch update.Status {
	case api.HealthPassing:
	case api.HealthWarning:
	case api.HealthCritical:
	default:
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(resp, "Invalid check status: '%s'", update.Status)
		return nil, nil
	}

	total := len(update.Output)
	if total > checks.BufSize {
		update.Output = fmt.Sprintf("%s ... (captured %d of %d bytes)",
			update.Output[:checks.BufSize], checks.BufSize, total)
	}

	checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/update/"))

	// Get the provided token, if any, and vet against any ACL policies.
	var token string
	s.parseToken(req, &token)
	if err := s.agent.vetCheckUpdate(token, checkID); err != nil {
		return nil, err
	}

	if err := s.agent.updateTTLCheck(checkID, update.Status, update.Output); err != nil {
		return nil, err
	}
	s.syncChanges()
	return nil, nil
}
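
// AgentRegisterService registers a new local service, along with any checks
// and managed proxy defined with it, and syncs the change to the servers.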
func (s *HTTPServer) AgentRegisterService(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	var args structs.ServiceDefinition
	// Fixup the type decode of TTL or Interval if a check is provided.
	decodeCB := func(raw interface{}) error {
		rawMap, ok := raw.(map[string]interface{})
		if !ok {
			return nil
		}

		// see https://github.com/hashicorp/consul/pull/3557 why we need this
		// and why we should get rid of it.
		config.TranslateKeys(rawMap, map[string]string{
			"enable_tag_override": "EnableTagOverride",
		})

		for k, v := range rawMap {
			switch strings.ToLower(k) {
			case "check":
				if err := FixupCheckType(v); err != nil {
					return err
				}
			case "checks":
				chkTypes, ok := v.([]interface{})
				if !ok {
					continue
				}
				for _, chkType := range chkTypes {
					if err := FixupCheckType(chkType); err != nil {
						return err
					}
				}
			}
		}
		return nil
	}
	if err := decodeBody(req, &args, decodeCB); err != nil {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(resp, "Request decode failed: %v", err)
		return nil, nil
	}

	// Verify the service has a name.
	if args.Name == "" {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(resp, "Missing service name")
		return nil, nil
	}

	// Check the service address here and in the catalog RPC endpoint
	// since service registration isn't synchronous.
	if ipaddr.IsAny(args.Address) {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(resp, "Invalid service address")
		return nil, nil
	}

	// Get the node service.
	ns := args.NodeService()
	if err := structs.ValidateMetadata(ns.Meta, false); err != nil {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(resp, fmt.Errorf("Invalid Service Meta: %v", err))
		return nil, nil
	}

	// Run validation. This is the same validation that would happen on
	// the catalog endpoint so it helps ensure the sync will work properly.
	if err := ns.Validate(); err != nil {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(resp, err.Error())
		return nil, nil
	}

	// Verify the check type.
	chkTypes, err := args.CheckTypes()
	if err != nil {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(resp, fmt.Errorf("Invalid check: %v", err))
		return nil, nil
	}
	for _, check := range chkTypes {
		if check.Status != "" && !structs.ValidStatus(check.Status) {
			resp.WriteHeader(http.StatusBadRequest)
			fmt.Fprint(resp, "Status for checks must be 'passing', 'warning', or 'critical'")
			return nil, nil
		}
	}

	// Get the provided token, if any, and vet against any ACL policies.
	var token string
	s.parseToken(req, &token)
	if err := s.agent.vetServiceRegister(token, ns); err != nil {
		return nil, err
	}

	// Get any proxy registrations
	proxy, err := args.ConnectManagedProxy()
	if err != nil {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(resp, err.Error())
		return nil, nil
	}

	// If we have a proxy, verify that we're allowed to add a proxy via the API
	if proxy != nil && !s.agent.config.ConnectProxyAllowManagedAPIRegistration {
		return nil, &BadRequestError{
			Reason: "Managed proxy registration via the API is disallowed."}
	}

	// Add the service.
	if err := s.agent.AddService(ns, chkTypes, true, token); err != nil {
		return nil, err
	}
	// Add proxy (which will add proxy service so do it before we trigger sync)
	if proxy != nil {
		if err := s.agent.AddProxy(proxy, true, ""); err != nil {
			return nil, err
		}
	}
	s.syncChanges()
	return nil, nil
}
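
// AgentDeregisterService removes the service named in the URL, and any
// managed proxy associated with it, from the local agent.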
func (s *HTTPServer) AgentDeregisterService(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	serviceID := strings.TrimPrefix(req.URL.Path, "/v1/agent/service/deregister/")

	// Get the provided token, if any, and vet against any ACL policies.
	var token string
	s.parseToken(req, &token)
	if err := s.agent.vetServiceUpdate(token, serviceID); err != nil {
		return nil, err
	}

	// Verify this isn't a proxy
	if s.agent.State.Proxy(serviceID) != nil {
		return nil, &BadRequestError{
			Reason: "Managed proxy service cannot be deregistered directly. " +
				"Deregister the service that has a managed proxy to automatically " +
				"deregister the managed proxy itself."}
	}

	if err := s.agent.RemoveService(serviceID, true); err != nil {
		return nil, err
	}

	// Remove the associated managed proxy if it exists
	for proxyID, p := range s.agent.State.Proxies() {
		if p.Proxy.TargetServiceID == serviceID {
			if err := s.agent.RemoveProxy(proxyID, true); err != nil {
				return nil, err
			}
		}
	}

	s.syncChanges()
	return nil, nil
}
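
// AgentServiceMaintenance toggles maintenance mode for a single service.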
func (s *HTTPServer) AgentServiceMaintenance(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Ensure we have a service ID
	serviceID := strings.TrimPrefix(req.URL.Path, "/v1/agent/service/maintenance/")
	if serviceID == "" {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(resp, "Missing service ID")
		return nil, nil
	}

	// Ensure we have some action
	params := req.URL.Query()
	if _, ok := params["enable"]; !ok {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(resp, "Missing value for enable")
		return nil, nil
	}

	raw := params.Get("enable")
	enable, err := strconv.ParseBool(raw)
	if err != nil {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(resp, "Invalid value for enable: %q", raw)
		return nil, nil
	}

	// Get the provided token, if any, and vet against any ACL policies.
	var token string
	s.parseToken(req, &token)
	if err := s.agent.vetServiceUpdate(token, serviceID); err != nil {
		return nil, err
	}

	if enable {
		reason := params.Get("reason")
		if err = s.agent.EnableServiceMaintenance(serviceID, reason, token); err != nil {
			resp.WriteHeader(http.StatusNotFound)
			fmt.Fprint(resp, err.Error())
			return nil, nil
		}
	} else {
		if err = s.agent.DisableServiceMaintenance(serviceID); err != nil {
			resp.WriteHeader(http.StatusNotFound)
			fmt.Fprint(resp, err.Error())
			return nil, nil
		}
	}
	s.syncChanges()
	return nil, nil
}
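
// AgentNodeMaintenance toggles maintenance mode for the whole node.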
func (s *HTTPServer) AgentNodeMaintenance(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Ensure we have some action
	params := req.URL.Query()
	if _, ok := params["enable"]; !ok {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(resp, "Missing value for enable")
		return nil, nil
	}

	raw := params.Get("enable")
	enable, err := strconv.ParseBool(raw)
	if err != nil {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(resp, "Invalid value for enable: %q", raw)
		return nil, nil
	}

	// Get the provided token, if any, and vet against any ACL policies.
	var token string
	s.parseToken(req, &token)
	rule, err := s.agent.resolveToken(token)
	if err != nil {
		return nil, err
	}
	if rule != nil && !rule.NodeWrite(s.agent.config.NodeName, nil) {
		return nil, acl.ErrPermissionDenied
	}

	if enable {
		s.agent.EnableNodeMaintenance(params.Get("reason"), token)
	} else {
		s.agent.DisableNodeMaintenance()
	}
	s.syncChanges()
	return nil, nil
}
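
// AgentMonitor streams the agent's logs at the requested log level until the
// client closes the connection.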
func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Fetch the ACL token, if any, and enforce agent policy.
	var token string
	s.parseToken(req, &token)
	rule, err := s.agent.resolveToken(token)
	if err != nil {
		return nil, err
	}
	if rule != nil && !rule.AgentRead(s.agent.config.NodeName) {
		return nil, acl.ErrPermissionDenied
	}

	// Get the provided loglevel.
	logLevel := req.URL.Query().Get("loglevel")
	if logLevel == "" {
		logLevel = "INFO"
	}

	// Upper case the level since that's required by the filter.
	logLevel = strings.ToUpper(logLevel)

	// Create a level filter and flusher.
	filter := logger.LevelFilter()
	filter.MinLevel = logutils.LogLevel(logLevel)
	if !logger.ValidateLevelFilter(filter.MinLevel, filter) {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(resp, "Unknown log level: %s", filter.MinLevel)
		return nil, nil
	}
	flusher, ok := resp.(http.Flusher)
	if !ok {
		return nil, fmt.Errorf("Streaming not supported")
	}

	// Set up a log handler.
	handler := &httpLogHandler{
		filter: filter,
		logCh:  make(chan string, 512),
		logger: s.agent.logger,
	}
	s.agent.LogWriter.RegisterHandler(handler)
	defer s.agent.LogWriter.DeregisterHandler(handler)
	notify := resp.(http.CloseNotifier).CloseNotify()

	// Send header so client can start streaming body
	resp.WriteHeader(http.StatusOK)

	// 0 byte write is needed before the Flush call so that if we are using
	// a gzip stream it will go ahead and write out the HTTP response header
	resp.Write([]byte(""))
	flusher.Flush()

	// Stream logs until the connection is closed.
	for {
		select {
		case <-notify:
			s.agent.LogWriter.DeregisterHandler(handler)
			if handler.droppedCount > 0 {
				s.agent.logger.Printf("[WARN] agent: Dropped %d logs during monitor request", handler.droppedCount)
			}
			return nil, nil
		case log := <-handler.logCh:
			fmt.Fprintln(resp, log)
			flusher.Flush()
		}
	}
}
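
// httpLogHandler is registered with the agent's LogWriter to forward log
// lines to a streaming monitor request.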
type httpLogHandler struct {
	filter       *logutils.LevelFilter
	logCh        chan string
	logger       *log.Logger
	droppedCount int
}

func (h *httpLogHandler) HandleLog(log string) {
	// Check the log level
	if !h.filter.Check([]byte(log)) {
		return
	}

	// Do a non-blocking send
	select {
	case h.logCh <- log:
	default:
		// Just increment a counter for dropped logs to this handler; we can't log now
		// because the lock is already held by the LogWriter invoking this
		h.droppedCount++
	}
}
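
// AgentToken updates one of the agent's ACL tokens (acl_token,
// acl_agent_token, acl_agent_master_token, or acl_replication_token) from
// the request body.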
func (s *HTTPServer) AgentToken(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	if s.checkACLDisabled(resp, req) {
		return nil, nil
	}

	// Fetch the ACL token, if any, and enforce agent policy.
	var token string
	s.parseToken(req, &token)
	rule, err := s.agent.resolveToken(token)
	if err != nil {
		return nil, err
	}
	if rule != nil && !rule.AgentWrite(s.agent.config.NodeName) {
		return nil, acl.ErrPermissionDenied
	}

	// The body is just the token, but it's in a JSON object so we can add
	// fields to this later if needed.
	var args api.AgentToken
	if err := decodeBody(req, &args, nil); err != nil {
		resp.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(resp, "Request decode failed: %v", err)
		return nil, nil
	}

	// Figure out the target token.
	target := strings.TrimPrefix(req.URL.Path, "/v1/agent/token/")
	switch target {
	case "acl_token":
		s.agent.tokens.UpdateUserToken(args.Token)

	case "acl_agent_token":
		s.agent.tokens.UpdateAgentToken(args.Token)

	case "acl_agent_master_token":
		s.agent.tokens.UpdateAgentMasterToken(args.Token)

	case "acl_replication_token":
		s.agent.tokens.UpdateACLReplicationToken(args.Token)

	default:
		resp.WriteHeader(http.StatusNotFound)
		fmt.Fprintf(resp, "Token %q is unknown", target)
		return nil, nil
	}

	s.agent.logger.Printf("[INFO] agent: Updated agent's ACL token %q", target)
	return nil, nil
}

// AgentConnectCARoots returns the trusted CA roots.
func (s *HTTPServer) AgentConnectCARoots(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	var args structs.DCSpecificRequest
	if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
		return nil, nil
	}

	raw, m, err := s.agent.cache.Get(cachetype.ConnectCARootName, &args)
	if err != nil {
		return nil, err
	}
	defer setCacheMeta(resp, &m)

	// Add cache hit

	reply, ok := raw.(*structs.IndexedCARoots)
	if !ok {
		// This should never happen, but we want to protect against panics
		return nil, fmt.Errorf("internal error: response type not correct")
	}
	defer setMeta(resp, &reply.QueryMeta)

	return *reply, nil
}

// AgentConnectCALeafCert returns the certificate bundle for a service
// instance. This supports blocking queries to update the returned bundle.
func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Get the service name. Note that this is the name of the service,
	// not the ID of the service instance.
	serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/ca/leaf/")

	args := cachetype.ConnectCALeafRequest{
		Service: serviceName, // Need name not ID
	}
	var qOpts structs.QueryOptions
	// Store DC in the ConnectCALeafRequest but query opts separately
	if done := s.parse(resp, req, &args.Datacenter, &qOpts); done {
		return nil, nil
	}
	args.MinQueryIndex = qOpts.MinQueryIndex

	// Verify the proxy token. This will check both the local proxy token
	// as well as the ACL if the token isn't local.
	effectiveToken, _, err := s.agent.verifyProxyToken(qOpts.Token, serviceName, "")
	if err != nil {
		return nil, err
	}
	args.Token = effectiveToken

	raw, m, err := s.agent.cache.Get(cachetype.ConnectCALeafName, &args)
	if err != nil {
		return nil, err
	}
	defer setCacheMeta(resp, &m)

	reply, ok := raw.(*structs.IssuedCert)
	if !ok {
		// This should never happen, but we want to protect against panics
		return nil, fmt.Errorf("internal error: response type not correct")
	}
	setIndex(resp, reply.ModifyIndex)

	return reply, nil
}
// GET /v1/agent/connect/proxy/:proxy_service_id
|
|
|
|
//
|
|
|
|
// Returns the local proxy config for the identified proxy. Requires token=
|
|
|
|
// param with the correct local ProxyToken (not ACL token).
|
2018-04-05 16:15:43 +00:00
|
|
|
func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
2018-04-18 20:05:30 +00:00
|
|
|
// Get the proxy ID. Note that this is the ID of a proxy's service instance.
|
|
|
|
id := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/proxy/")
|
|
|
|
|
|
|
|
// Maybe block
|
|
|
|
var queryOpts structs.QueryOptions
|
|
|
|
if parseWait(resp, req, &queryOpts) {
|
|
|
|
// parseWait returns an error itself
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2018-05-07 04:02:44 +00:00
|
|
|
// Parse the token
|
|
|
|
var token string
|
|
|
|
s.parseToken(req, &token)
|
|
|
|
|
2018-04-18 20:05:30 +00:00
|
|
|
// Parse hash specially since it's only this endpoint that uses it currently.
|
|
|
|
// Eventually this should happen in parseWait and end up in QueryOptions but I
|
|
|
|
// didn't want to make very general changes right away.
|
|
|
|
hash := req.URL.Query().Get("hash")
|
|
|
|
|
2018-04-18 20:48:58 +00:00
|
|
|
return s.agentLocalBlockingQuery(resp, hash, &queryOpts,
|
2018-04-19 10:15:32 +00:00
|
|
|
func(ws memdb.WatchSet) (string, interface{}, error) {
|
2018-04-18 20:05:30 +00:00
|
|
|
// Retrieve the proxy specified
|
|
|
|
proxy := s.agent.State.Proxy(id)
|
|
|
|
if proxy == nil {
|
|
|
|
resp.WriteHeader(http.StatusNotFound)
|
|
|
|
fmt.Fprintf(resp, "unknown proxy service ID: %s", id)
|
|
|
|
return "", nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the target service as a convenience
|
|
|
|
target := s.agent.State.Service(proxy.Proxy.TargetServiceID)
|
|
|
|
if target == nil {
|
|
|
|
// Not found since this endpoint is only useful for agent-managed proxies so
|
|
|
|
// service missing means the service was deregistered racily with this call.
|
|
|
|
resp.WriteHeader(http.StatusNotFound)
|
|
|
|
fmt.Fprintf(resp, "unknown target service ID: %s", proxy.Proxy.TargetServiceID)
|
|
|
|
return "", nil, nil
|
|
|
|
}
|
|
|
|
|
2018-05-19 06:27:02 +00:00
|
|
|
// Validate the ACL token
|
2018-06-18 19:37:00 +00:00
|
|
|
_, isProxyToken, err := s.agent.verifyProxyToken(token, target.Service, id)
|
2018-05-19 06:27:02 +00:00
|
|
|
if err != nil {
|
|
|
|
return "", nil, err
|
|
|
|
}
|
|
|
|
|
2018-04-19 10:15:32 +00:00
|
|
|
// Watch the proxy for changes
|
|
|
|
ws.Add(proxy.WatchCh)
|
2018-04-18 20:05:30 +00:00
|
|
|
|
|
|
|
hash, err := hashstructure.Hash(proxy.Proxy, nil)
|
|
|
|
if err != nil {
|
|
|
|
return "", nil, err
|
|
|
|
}
|
|
|
|
contentHash := fmt.Sprintf("%x", hash)
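// (hashstructure.Hash returns a uint64, so the hex string above is the
// opaque content hash that clients see and echo back via ?hash=.)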
|
|
|
|
|
2018-06-19 11:11:42 +00:00
|
|
|
// Set defaults
|
|
|
|
config, err := s.agent.applyProxyConfigDefaults(proxy.Proxy)
|
|
|
|
if err != nil {
|
|
|
|
return "", nil, err
|
2018-04-26 13:01:20 +00:00
|
|
|
}
|
|
|
|
|
2018-06-18 19:37:00 +00:00
|
|
|
// Only merge in telemetry config from the agent if the requester is
|
|
|
|
// authorized with a proxy token. This prevents us from leaking potentially
|
|
|
|
// sensitive config, such as a Circonus API token, via a public endpoint.
|
|
|
|
// Proxy tokens are only ever generated in-memory and passed via ENV to a
|
|
|
|
// child proxy process, so the potential for abuse here seems small. This
|
|
|
|
// endpoint is in general only useful for managed proxies now, so auth
|
|
|
|
// should _always_ be via a proxy token, but locking it down that strictly
|
|
|
|
// would be inconvenient for testing.
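// As a sketch of the merge behavior below: if the proxy's user-supplied
// config contains a "telemetry" map that decodes cleanly into
// lib.TelemetryConfig, the fields the user set are kept and the remaining
// fields are filled in from the agent's telemetry config (with a per-proxy
// metrics prefix); otherwise the user's "telemetry" value is passed through
// verbatim.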
|
|
|
|
if isProxyToken {
|
|
|
|
// Add telemetry config. Copy the global config so we can customize the
|
|
|
|
// prefix.
|
|
|
|
telemetryCfg := s.agent.config.Telemetry
|
|
|
|
telemetryCfg.MetricsPrefix = telemetryCfg.MetricsPrefix + ".proxy." + target.ID
|
|
|
|
|
|
|
|
// First see if the user has specified telemetry
|
|
|
|
if userRaw, ok := config["telemetry"]; ok {
|
|
|
|
// User specified something, see if it is compatible with agent
|
|
|
|
// telemetry config:
|
|
|
|
var uCfg lib.TelemetryConfig
|
|
|
|
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
|
|
|
|
Result: &uCfg,
|
|
|
|
// Make sure that if the user passes something that isn't just a
|
|
|
|
// simple override of a valid TelemetryConfig, we fail rather than
|
|
|
|
// silently clobbering their custom config.
|
|
|
|
ErrorUnused: true,
|
|
|
|
})
|
|
|
|
if err == nil {
|
|
|
|
if err = dec.Decode(userRaw); err == nil {
|
|
|
|
// It did decode! Merge any unspecified fields from agent config.
|
|
|
|
uCfg.MergeDefaults(&telemetryCfg)
|
|
|
|
config["telemetry"] = uCfg
|
|
|
|
}
|
2018-06-13 15:53:44 +00:00
|
|
|
}
|
2018-06-18 19:37:00 +00:00
|
|
|
// If decoding failed, just keep the user's config["telemetry"] verbatim
|
|
|
|
// with no agent merge.
|
|
|
|
} else {
|
|
|
|
// Add agent telemetry config.
|
|
|
|
config["telemetry"] = telemetryCfg
|
2018-06-13 15:53:44 +00:00
|
|
|
}
|
2018-06-07 13:11:06 +00:00
|
|
|
}
|
|
|
|
|
2018-04-20 13:24:24 +00:00
|
|
|
reply := &api.ConnectProxyConfig{
|
2018-04-18 20:05:30 +00:00
|
|
|
ProxyServiceID: proxy.Proxy.ProxyService.ID,
|
|
|
|
TargetServiceID: target.ID,
|
|
|
|
TargetServiceName: target.Service,
|
|
|
|
ContentHash: contentHash,
|
2018-05-03 17:44:10 +00:00
|
|
|
ExecMode: api.ProxyExecMode(proxy.Proxy.ExecMode.String()),
|
|
|
|
Command: proxy.Proxy.Command,
|
2018-04-26 13:01:20 +00:00
|
|
|
Config: config,
|
2018-04-18 20:05:30 +00:00
|
|
|
}
|
|
|
|
return contentHash, reply, nil
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2018-04-19 10:15:32 +00:00
|
|
|
type agentLocalBlockingFunc func(ws memdb.WatchSet) (string, interface{}, error)
|
2018-04-18 20:05:30 +00:00
|
|
|
|
2018-04-18 20:48:58 +00:00
|
|
|
// agentLocalBlockingQuery performs a blocking query in a generic way against
|
|
|
|
// local agent state that has no RPC or raft to back it. It uses a `hash`
|
|
|
|
// parameter instead of an `index`. The resp is only needed to write the
|
|
|
|
// `X-Consul-ContentHash` header back; no status or body is ever written to it.
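//
// Typical usage is a sketch like the call in AgentConnectProxyConfig above:
//
//   return s.agentLocalBlockingQuery(resp, hash, &queryOpts,
//       func(ws memdb.WatchSet) (string, interface{}, error) {
//           // register watch channels on ws, then compute the result
//           // (newHash and result here are placeholders)
//           return newHash, result, nil
//       })
//
// where the callback is re-run until its hash differs from the client's or
// the wait times out.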
|
|
|
|
func (s *HTTPServer) agentLocalBlockingQuery(resp http.ResponseWriter, hash string,
|
2018-04-18 20:05:30 +00:00
|
|
|
queryOpts *structs.QueryOptions, fn agentLocalBlockingFunc) (interface{}, error) {
|
|
|
|
|
2018-04-19 10:15:32 +00:00
|
|
|
// If we are not blocking we can skip tracking and allocating - nil WatchSet
|
|
|
|
// is still valid to call Add on and will just be a no-op.
|
|
|
|
var ws memdb.WatchSet
|
|
|
|
var timeout *time.Timer
|
2018-04-18 20:05:30 +00:00
|
|
|
|
|
|
|
if hash != "" {
|
|
|
|
// TODO(banks) at least define these defaults somewhere in a const. Would be
|
|
|
|
// nice not to duplicate the ones in consul/rpc.go too...
|
|
|
|
wait := queryOpts.MaxQueryTime
|
|
|
|
if wait == 0 {
|
|
|
|
wait = 5 * time.Minute
|
|
|
|
}
|
|
|
|
if wait > 10*time.Minute {
|
|
|
|
wait = 10 * time.Minute
|
|
|
|
}
|
|
|
|
// Apply a small amount of jitter to the request.
|
|
|
|
wait += lib.RandomStagger(wait / 16)
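// For example, a 60 second wait picks up at most an extra 3.75s (60s/16).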
|
2018-04-19 10:15:32 +00:00
|
|
|
timeout = time.NewTimer(wait)
|
2018-04-18 20:05:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for {
|
2018-04-26 13:01:20 +00:00
|
|
|
// Must reset this every loop in case the Watch set is already closed but
|
|
|
|
// hash remains the same. In that case we'll need to re-block on ws.Watch()
|
|
|
|
// again.
|
|
|
|
ws = memdb.NewWatchSet()
|
2018-04-19 10:15:32 +00:00
|
|
|
curHash, curResp, err := fn(ws)
|
2018-04-18 20:05:30 +00:00
|
|
|
if err != nil {
|
|
|
|
return curResp, err
|
|
|
|
}
|
2018-04-19 10:15:32 +00:00
|
|
|
// Return immediately if there is no timeout, the hash is different, or the
|
|
|
|
// Watch returns true (indicating the timeout fired). Note that Watch on a
|
|
|
|
// nil WatchSet immediately returns false, which would incorrectly cause this
|
|
|
|
// to loop and repeat again; however, we rely on the invariant that ws == nil
|
|
|
|
// IFF timeout == nil, in which case the Watch call is never invoked.
|
|
|
|
if timeout == nil || hash != curHash || ws.Watch(timeout.C) {
|
|
|
|
resp.Header().Set("X-Consul-ContentHash", curHash)
|
|
|
|
return curResp, err
|
2018-04-18 20:05:30 +00:00
|
|
|
}
|
2018-04-19 10:15:32 +00:00
|
|
|
// Watch returned false indicating a change was detected, loop and repeat
|
|
|
|
// the callback to load the new value.
|
2018-04-18 20:05:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-21 23:02:46 +00:00
|
|
|
// AgentConnectAuthorize
|
|
|
|
//
|
|
|
|
// POST /v1/agent/connect/authorize
|
2018-05-11 05:37:02 +00:00
|
|
|
//
|
2018-05-19 04:03:10 +00:00
|
|
|
// Note: when this logic changes, consider if the Intention.Check RPC method
|
2018-05-11 05:37:02 +00:00
|
|
|
// also needs to be updated.
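//
// Illustrative request and response (all values are hypothetical):
//
//   POST /v1/agent/connect/authorize
//   {
//     "Target": "db",
//     "ClientCertURI": "spiffe://<trust-domain>.consul/ns/default/dc/dc1/svc/web"
//   }
//
//   {
//     "Authorized": true,
//     "Reason": "Matched intention: <intention>"
//   }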
|
2018-03-21 23:02:46 +00:00
|
|
|
func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
2018-03-26 01:50:05 +00:00
|
|
|
// Fetch the token
|
|
|
|
var token string
|
|
|
|
s.parseToken(req, &token)
|
|
|
|
|
2018-03-26 00:52:26 +00:00
|
|
|
// Decode the request from the request body
|
|
|
|
var authReq structs.ConnectAuthorizeRequest
|
|
|
|
if err := decodeBody(req, &authReq, nil); err != nil {
|
|
|
|
resp.WriteHeader(http.StatusBadRequest)
|
|
|
|
fmt.Fprintf(resp, "Request decode failed: %v", err)
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// We need to have a target to check intentions
|
|
|
|
if authReq.Target == "" {
|
|
|
|
resp.WriteHeader(http.StatusBadRequest)
|
|
|
|
fmt.Fprintf(resp, "Target service must be specified")
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Parse the certificate URI from the client ID
|
2018-03-28 21:29:35 +00:00
|
|
|
uriRaw, err := url.Parse(authReq.ClientCertURI)
|
2018-03-26 00:52:26 +00:00
|
|
|
if err != nil {
|
|
|
|
return &connectAuthorizeResp{
|
|
|
|
Authorized: false,
|
|
|
|
Reason: fmt.Sprintf("Client ID must be a URI: %s", err),
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
uri, err := connect.ParseCertURI(uriRaw)
|
|
|
|
if err != nil {
|
|
|
|
return &connectAuthorizeResp{
|
|
|
|
Authorized: false,
|
|
|
|
Reason: fmt.Sprintf("Invalid client ID: %s", err),
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
uriService, ok := uri.(*connect.SpiffeIDService)
|
|
|
|
if !ok {
|
|
|
|
return &connectAuthorizeResp{
|
|
|
|
Authorized: false,
|
2018-03-26 01:06:10 +00:00
|
|
|
Reason: "Client ID must be a valid SPIFFE service URI",
|
2018-03-26 00:52:26 +00:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2018-03-26 01:50:05 +00:00
|
|
|
// We need to verify service:write permissions for the given token.
|
|
|
|
// We do this manually here since the RPC request below only verifies
|
|
|
|
// service:read.
|
|
|
|
rule, err := s.agent.resolveToken(token)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if rule != nil && !rule.ServiceWrite(authReq.Target, nil) {
|
|
|
|
return nil, acl.ErrPermissionDenied
|
|
|
|
}
|
|
|
|
|
2018-05-09 19:30:43 +00:00
|
|
|
// Validate that the trust domain matches ours. Explicit external
|
|
|
|
// federation may be supported later, but it is not built yet.
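// (Consul trust domains look like "<cluster-id>.consul"; the host portion
// of the client's SPIFFE URI must match it.)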
|
|
|
|
rootArgs := &structs.DCSpecificRequest{Datacenter: s.agent.config.Datacenter}
|
2018-06-15 12:13:54 +00:00
|
|
|
raw, _, err := s.agent.cache.Get(cachetype.ConnectCARootName, rootArgs)
|
2018-05-09 19:30:43 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
roots, ok := raw.(*structs.IndexedCARoots)
|
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("internal error: roots response type not correct")
|
|
|
|
}
|
|
|
|
if roots.TrustDomain == "" {
|
|
|
|
return nil, fmt.Errorf("connect CA not bootstrapped yet")
|
|
|
|
}
|
|
|
|
if roots.TrustDomain != strings.ToLower(uriService.Host) {
|
|
|
|
return &connectAuthorizeResp{
|
|
|
|
Authorized: false,
|
|
|
|
Reason: fmt.Sprintf("Identity from an external trust domain: %s",
|
|
|
|
uriService.Host),
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(banks): Implement revocation list checking here.
|
2018-03-27 17:09:13 +00:00
|
|
|
|
2018-03-26 01:50:05 +00:00
|
|
|
// Get the intentions for this target service.
|
2018-03-26 00:52:26 +00:00
|
|
|
args := &structs.IntentionQueryRequest{
|
|
|
|
Datacenter: s.agent.config.Datacenter,
|
|
|
|
Match: &structs.IntentionQueryMatch{
|
|
|
|
Type: structs.IntentionMatchDestination,
|
|
|
|
Entries: []structs.IntentionMatchEntry{
|
|
|
|
{
|
|
|
|
Namespace: structs.IntentionDefaultNamespace,
|
|
|
|
Name: authReq.Target,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2018-03-26 01:50:05 +00:00
|
|
|
args.Token = token
|
2018-04-17 23:26:58 +00:00
|
|
|
|
2018-06-15 12:13:54 +00:00
|
|
|
raw, m, err := s.agent.cache.Get(cachetype.IntentionMatchName, args)
|
2018-04-17 23:26:58 +00:00
|
|
|
if err != nil {
|
2018-03-26 00:52:26 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
2018-06-15 12:13:54 +00:00
|
|
|
setCacheMeta(resp, &m)
|
2018-04-17 23:26:58 +00:00
|
|
|
|
|
|
|
reply, ok := raw.(*structs.IndexedIntentionMatches)
|
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("internal error: response type not correct")
|
|
|
|
}
|
2018-03-26 00:52:26 +00:00
|
|
|
if len(reply.Matches) != 1 {
|
|
|
|
return nil, fmt.Errorf("Internal error loading matches")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test the authorization for each match
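// Matches are expected to be ordered by precedence, so the first intention
// whose source matches the client decides the outcome.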
|
|
|
|
for _, ixn := range reply.Matches[0] {
|
|
|
|
if auth, ok := uriService.Authorize(ixn); ok {
|
|
|
|
return &connectAuthorizeResp{
|
|
|
|
Authorized: auth,
|
2018-03-26 01:06:10 +00:00
|
|
|
Reason: fmt.Sprintf("Matched intention: %s", ixn.String()),
|
2018-03-26 00:52:26 +00:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-26 01:50:05 +00:00
|
|
|
// No match, so we need to determine the default behavior. We do this by
|
|
|
|
// resolving the anonymous token, which will give us that behavior.
|
|
|
|
// If ACLs are disabled the default is to allow the connection, which
|
|
|
|
// mimics the behavior of Consul itself: everything is allowed when ACLs
|
|
|
|
// are disabled.
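// In practice, with ACLs enabled this follows the agent's configured ACL
// default policy.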
|
|
|
|
rule, err = s.agent.resolveToken("")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
authz := true
|
|
|
|
reason := "ACLs disabled, access is allowed by default"
|
|
|
|
if rule != nil {
|
2018-03-27 17:08:20 +00:00
|
|
|
authz = rule.IntentionDefaultAllow()
|
2018-03-26 01:50:05 +00:00
|
|
|
reason = "Default behavior configured by ACLs"
|
|
|
|
}
|
|
|
|
|
2018-03-26 00:52:26 +00:00
|
|
|
return &connectAuthorizeResp{
|
2018-03-26 01:50:05 +00:00
|
|
|
Authorized: authz,
|
|
|
|
Reason: reason,
|
2018-03-26 00:52:26 +00:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2018-03-26 01:02:25 +00:00
|
|
|
// connectAuthorizeResp is the response format/structure for the
|
|
|
|
// /v1/agent/connect/authorize endpoint.
|
2018-03-26 00:52:26 +00:00
|
|
|
type connectAuthorizeResp struct {
|
2018-03-26 01:02:25 +00:00
|
|
|
Authorized bool // True if authorized, false if not
|
|
|
|
Reason string // Reason for the Authorized value (whether true or false)
|
2018-03-21 23:02:46 +00:00
|
|
|
}
|