Resolve conflicts
This commit is contained in:
commit
ec6e8021c0
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
connect: The Vault provider will now automatically renew the lease of the token used, if supported.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
connect: fix Vault provider not respecting IntermediateCertTTL
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
fixed a bug that caused logs to be flooded with `[WARN] agent.router: Non-server in server-only area`
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: show correct datacenter for gateways
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
checks: add health status to the failure message when gRPC healthchecks fail.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
agent: make the json/hcl decoding of ConnectProxyConfig fully work with CamelCase and snake_case
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
server: make sure that the various replication loggers use consistent logging
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
agent: when enable_central_service_config is enabled ensure agent reload doesn't revert check state to critical
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
api: support GetMeta() and GetNamespace() on all config entry kinds
|
||||
```
|
|
@ -16,6 +16,24 @@ function status {
|
|||
tput sgr0
|
||||
}
|
||||
|
||||
# Returns the latest GitHub "backport/*" label
|
||||
function get_latest_backport_label {
|
||||
local resp
|
||||
local ret
|
||||
local latest_backport_label
|
||||
|
||||
resp=$(curl -f -s -H "Authorization: token ${GITHUB_TOKEN}" "https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/labels")
|
||||
ret="$?"
|
||||
if [[ "$ret" -ne 0 ]]; then
|
||||
status "The GitHub API returned $ret which means it was probably rate limited."
|
||||
exit $ret
|
||||
fi
|
||||
|
||||
latest_backport_label=$(echo "$resp" | jq -r '.[] | select(.name | startswith("backport/")) | .name' | sort -rV | head -n1)
|
||||
echo "$latest_backport_label"
|
||||
return 0
|
||||
}
|
||||
|
||||
# This function will do the cherry-picking of a commit on a branch
|
||||
# Exit 1 if cherry-picking fails
|
||||
function cherry_pick_with_slack_notification {
|
||||
|
@ -110,10 +128,13 @@ if [[ "$count" -eq 0 ]]; then
|
|||
exit 0
|
||||
fi
|
||||
|
||||
# save PR number
|
||||
pr_number=$(echo "$resp" | jq '.items[].number')
|
||||
|
||||
# If the API returned a non-zero count, we have found a PR with that commit so we find
|
||||
# the labels from the PR
|
||||
|
||||
# sorts the labels from a PR via version sort
|
||||
# Sorts the labels from a PR via version sort
|
||||
labels=$(echo "$resp" | jq --raw-output '.items[].labels[] | .name' | sort -rV)
|
||||
ret="$?"
|
||||
pr_url=$(echo "$resp" | jq --raw-output '.items[].pull_request.html_url')
|
||||
|
@ -124,6 +145,18 @@ if [[ "$ret" -ne 0 ]]; then
|
|||
exit 0
|
||||
fi
|
||||
|
||||
# Attach label for latest release branch if 'docs-cherrypick' is present. Will noop if already applied.
|
||||
latest_backport_label=$(get_latest_backport_label)
|
||||
status "latest backport label is $latest_backport_label"
|
||||
if echo "$resp" | jq -e '.items[].labels[] | select(.name | contains("docs-cherrypick"))'; then
|
||||
labels=$(curl -f -s -H "Authorization: token ${GITHUB_TOKEN}" -X POST -d "{\"labels\":[\"$latest_backport_label\"]}" "https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/issues/${pr_number}/labels" | jq --raw-output '.[].name' | sort -rV)
|
||||
ret="$?"
|
||||
if [[ "$ret" -ne 0 ]]; then
|
||||
status "Error applying $latest_backport_label to $pr_url"
|
||||
exit $ret
|
||||
fi
|
||||
fi
|
||||
|
||||
backport_failures=0
|
||||
# loop through all labels on the PR
|
||||
for label in $labels; do
|
||||
|
@ -148,4 +181,4 @@ done
|
|||
if [ "$backport_failures" -ne 0 ]; then
|
||||
echo "$backport_failures backports failed"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
|
|
@ -26,7 +26,7 @@ BUGFIXES:
|
|||
* connect: `connect envoy` command now respects the `-ca-path` flag [[GH-8606](https://github.com/hashicorp/consul/issues/8606)]
|
||||
* connect: fix bug in preventing some namespaced config entry modifications [[GH-8601](https://github.com/hashicorp/consul/issues/8601)]
|
||||
* connect: fix renewing secondary intermediate certificates [[GH-8588](https://github.com/hashicorp/consul/issues/8588)]
|
||||
* ui: fixed a bug related to in-folder KV creation [GH-8613](https://github.com/hashicorp/consul/pull/8613)
|
||||
* ui: fixed a bug related to in-folder KV creation [[GH-8613](https://github.com/hashicorp/consul/issues/8613)]
|
||||
|
||||
## 1.8.3 (August 12, 2020)
|
||||
|
||||
|
|
15
agent/acl.go
15
agent/acl.go
|
@ -105,14 +105,16 @@ func (a *Agent) vetServiceRegisterWithAuthorizer(authz acl.Authorizer, service *
|
|||
service.FillAuthzContext(&authzContext)
|
||||
// Vet the service itself.
|
||||
if authz.ServiceWrite(service.Service, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
serviceName := service.CompoundServiceName()
|
||||
return acl.PermissionDenied("Missing service:write on %s", serviceName.String())
|
||||
}
|
||||
|
||||
// Vet any service that might be getting overwritten.
|
||||
if existing := a.State.Service(service.CompoundServiceID()); existing != nil {
|
||||
existing.FillAuthzContext(&authzContext)
|
||||
if authz.ServiceWrite(existing.Service, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
serviceName := service.CompoundServiceName()
|
||||
return acl.PermissionDenied("Missing service:write on %s", serviceName.String())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -121,7 +123,7 @@ func (a *Agent) vetServiceRegisterWithAuthorizer(authz acl.Authorizer, service *
|
|||
if service.Kind == structs.ServiceKindConnectProxy {
|
||||
service.FillAuthzContext(&authzContext)
|
||||
if authz.ServiceWrite(service.Proxy.DestinationServiceName, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
return acl.PermissionDenied("Missing service:write on %s", service.Proxy.DestinationServiceName)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -151,7 +153,8 @@ func (a *Agent) vetServiceUpdateWithAuthorizer(authz acl.Authorizer, serviceID s
|
|||
if existing := a.State.Service(serviceID); existing != nil {
|
||||
existing.FillAuthzContext(&authzContext)
|
||||
if authz.ServiceWrite(existing.Service, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
serviceName := existing.CompoundServiceName()
|
||||
return acl.PermissionDenied("Missing service:write on %s", serviceName.String())
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("Unknown service %q", serviceID)
|
||||
|
@ -229,11 +232,11 @@ func (a *Agent) vetCheckUpdateWithAuthorizer(authz acl.Authorizer, checkID struc
|
|||
if existing := a.State.Check(checkID); existing != nil {
|
||||
if len(existing.ServiceName) > 0 {
|
||||
if authz.ServiceWrite(existing.ServiceName, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
return acl.PermissionDenied("Missing service:write on %s", existing.ServiceName)
|
||||
}
|
||||
} else {
|
||||
if authz.NodeWrite(a.config.NodeName, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
return acl.PermissionDenied("Missing node:write on %s", a.config.NodeName)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -20,7 +20,7 @@ type aclBootstrapResponse struct {
|
|||
|
||||
// checkACLDisabled will return a standard response if ACLs are disabled. This
|
||||
// returns true if they are disabled and we should not continue.
|
||||
func (s *HTTPServer) checkACLDisabled(resp http.ResponseWriter, _req *http.Request) bool {
|
||||
func (s *HTTPHandlers) checkACLDisabled(resp http.ResponseWriter, _req *http.Request) bool {
|
||||
if s.agent.config.ACLsEnabled {
|
||||
return false
|
||||
}
|
||||
|
@ -32,7 +32,7 @@ func (s *HTTPServer) checkACLDisabled(resp http.ResponseWriter, _req *http.Reque
|
|||
|
||||
// ACLBootstrap is used to perform a one-time ACL bootstrap operation on
|
||||
// a cluster to get the first management token.
|
||||
func (s *HTTPServer) ACLBootstrap(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLBootstrap(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -76,7 +76,7 @@ func (s *HTTPServer) ACLBootstrap(resp http.ResponseWriter, req *http.Request) (
|
|||
}
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLReplicationStatus(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLReplicationStatus(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -97,7 +97,7 @@ func (s *HTTPServer) ACLReplicationStatus(resp http.ResponseWriter, req *http.Re
|
|||
return out, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLRulesTranslate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLRulesTranslate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -128,7 +128,7 @@ func (s *HTTPServer) ACLRulesTranslate(resp http.ResponseWriter, req *http.Reque
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLRulesTranslateLegacyToken(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLRulesTranslateLegacyToken(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -177,7 +177,7 @@ func (s *HTTPServer) ACLRulesTranslateLegacyToken(resp http.ResponseWriter, req
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLPolicyList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLPolicyList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -208,7 +208,7 @@ func (s *HTTPServer) ACLPolicyList(resp http.ResponseWriter, req *http.Request)
|
|||
return out.Policies, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLPolicyCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLPolicyCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -237,7 +237,7 @@ func (s *HTTPServer) ACLPolicyCRUD(resp http.ResponseWriter, req *http.Request)
|
|||
return fn(resp, req, policyID)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLPolicyRead(resp http.ResponseWriter, req *http.Request, policyID, policyName string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLPolicyRead(resp http.ResponseWriter, req *http.Request, policyID, policyName string) (interface{}, error) {
|
||||
args := structs.ACLPolicyGetRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
PolicyID: policyID,
|
||||
|
@ -269,7 +269,7 @@ func (s *HTTPServer) ACLPolicyRead(resp http.ResponseWriter, req *http.Request,
|
|||
return out.Policy, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLPolicyReadByName(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLPolicyReadByName(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -282,11 +282,11 @@ func (s *HTTPServer) ACLPolicyReadByName(resp http.ResponseWriter, req *http.Req
|
|||
return s.ACLPolicyRead(resp, req, "", policyName)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLPolicyReadByID(resp http.ResponseWriter, req *http.Request, policyID string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLPolicyReadByID(resp http.ResponseWriter, req *http.Request, policyID string) (interface{}, error) {
|
||||
return s.ACLPolicyRead(resp, req, policyID, "")
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLPolicyCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLPolicyCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -294,11 +294,11 @@ func (s *HTTPServer) ACLPolicyCreate(resp http.ResponseWriter, req *http.Request
|
|||
return s.aclPolicyWriteInternal(resp, req, "", true)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLPolicyWrite(resp http.ResponseWriter, req *http.Request, policyID string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLPolicyWrite(resp http.ResponseWriter, req *http.Request, policyID string) (interface{}, error) {
|
||||
return s.aclPolicyWriteInternal(resp, req, policyID, false)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) aclPolicyWriteInternal(_resp http.ResponseWriter, req *http.Request, policyID string, create bool) (interface{}, error) {
|
||||
func (s *HTTPHandlers) aclPolicyWriteInternal(_resp http.ResponseWriter, req *http.Request, policyID string, create bool) (interface{}, error) {
|
||||
args := structs.ACLPolicySetRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
}
|
||||
|
@ -333,7 +333,7 @@ func (s *HTTPServer) aclPolicyWriteInternal(_resp http.ResponseWriter, req *http
|
|||
return &out, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLPolicyDelete(resp http.ResponseWriter, req *http.Request, policyID string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLPolicyDelete(resp http.ResponseWriter, req *http.Request, policyID string) (interface{}, error) {
|
||||
args := structs.ACLPolicyDeleteRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
PolicyID: policyID,
|
||||
|
@ -351,7 +351,7 @@ func (s *HTTPServer) ACLPolicyDelete(resp http.ResponseWriter, req *http.Request
|
|||
return true, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLTokenList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLTokenList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -388,7 +388,7 @@ func (s *HTTPServer) ACLTokenList(resp http.ResponseWriter, req *http.Request) (
|
|||
return out.Tokens, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLTokenCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLTokenCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -421,7 +421,7 @@ func (s *HTTPServer) ACLTokenCRUD(resp http.ResponseWriter, req *http.Request) (
|
|||
return fn(resp, req, tokenID)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLTokenSelf(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLTokenSelf(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -454,7 +454,7 @@ func (s *HTTPServer) ACLTokenSelf(resp http.ResponseWriter, req *http.Request) (
|
|||
return out.Token, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLTokenCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLTokenCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -462,7 +462,7 @@ func (s *HTTPServer) ACLTokenCreate(resp http.ResponseWriter, req *http.Request)
|
|||
return s.aclTokenSetInternal(resp, req, "", true)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLTokenGet(resp http.ResponseWriter, req *http.Request, tokenID string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLTokenGet(resp http.ResponseWriter, req *http.Request, tokenID string) (interface{}, error) {
|
||||
args := structs.ACLTokenGetRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
TokenID: tokenID,
|
||||
|
@ -494,11 +494,11 @@ func (s *HTTPServer) ACLTokenGet(resp http.ResponseWriter, req *http.Request, to
|
|||
return out.Token, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLTokenSet(resp http.ResponseWriter, req *http.Request, tokenID string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLTokenSet(resp http.ResponseWriter, req *http.Request, tokenID string) (interface{}, error) {
|
||||
return s.aclTokenSetInternal(resp, req, tokenID, false)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) aclTokenSetInternal(_resp http.ResponseWriter, req *http.Request, tokenID string, create bool) (interface{}, error) {
|
||||
func (s *HTTPHandlers) aclTokenSetInternal(_resp http.ResponseWriter, req *http.Request, tokenID string, create bool) (interface{}, error) {
|
||||
args := structs.ACLTokenSetRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
Create: create,
|
||||
|
@ -528,7 +528,7 @@ func (s *HTTPServer) aclTokenSetInternal(_resp http.ResponseWriter, req *http.Re
|
|||
return &out, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLTokenDelete(resp http.ResponseWriter, req *http.Request, tokenID string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLTokenDelete(resp http.ResponseWriter, req *http.Request, tokenID string) (interface{}, error) {
|
||||
args := structs.ACLTokenDeleteRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
TokenID: tokenID,
|
||||
|
@ -545,7 +545,7 @@ func (s *HTTPServer) ACLTokenDelete(resp http.ResponseWriter, req *http.Request,
|
|||
return true, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLTokenClone(resp http.ResponseWriter, req *http.Request, tokenID string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLTokenClone(resp http.ResponseWriter, req *http.Request, tokenID string) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -574,7 +574,7 @@ func (s *HTTPServer) ACLTokenClone(resp http.ResponseWriter, req *http.Request,
|
|||
return &out, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLRoleList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLRoleList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -607,7 +607,7 @@ func (s *HTTPServer) ACLRoleList(resp http.ResponseWriter, req *http.Request) (i
|
|||
return out.Roles, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLRoleCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLRoleCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -636,7 +636,7 @@ func (s *HTTPServer) ACLRoleCRUD(resp http.ResponseWriter, req *http.Request) (i
|
|||
return fn(resp, req, roleID)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLRoleReadByName(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLRoleReadByName(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -649,11 +649,11 @@ func (s *HTTPServer) ACLRoleReadByName(resp http.ResponseWriter, req *http.Reque
|
|||
return s.ACLRoleRead(resp, req, "", roleName)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLRoleReadByID(resp http.ResponseWriter, req *http.Request, roleID string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLRoleReadByID(resp http.ResponseWriter, req *http.Request, roleID string) (interface{}, error) {
|
||||
return s.ACLRoleRead(resp, req, roleID, "")
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLRoleRead(resp http.ResponseWriter, req *http.Request, roleID, roleName string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLRoleRead(resp http.ResponseWriter, req *http.Request, roleID, roleName string) (interface{}, error) {
|
||||
args := structs.ACLRoleGetRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
RoleID: roleID,
|
||||
|
@ -684,7 +684,7 @@ func (s *HTTPServer) ACLRoleRead(resp http.ResponseWriter, req *http.Request, ro
|
|||
return out.Role, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLRoleCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLRoleCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -692,7 +692,7 @@ func (s *HTTPServer) ACLRoleCreate(resp http.ResponseWriter, req *http.Request)
|
|||
return s.ACLRoleWrite(resp, req, "")
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLRoleWrite(resp http.ResponseWriter, req *http.Request, roleID string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLRoleWrite(resp http.ResponseWriter, req *http.Request, roleID string) (interface{}, error) {
|
||||
args := structs.ACLRoleSetRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
}
|
||||
|
@ -719,7 +719,7 @@ func (s *HTTPServer) ACLRoleWrite(resp http.ResponseWriter, req *http.Request, r
|
|||
return &out, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLRoleDelete(resp http.ResponseWriter, req *http.Request, roleID string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLRoleDelete(resp http.ResponseWriter, req *http.Request, roleID string) (interface{}, error) {
|
||||
args := structs.ACLRoleDeleteRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
RoleID: roleID,
|
||||
|
@ -737,7 +737,7 @@ func (s *HTTPServer) ACLRoleDelete(resp http.ResponseWriter, req *http.Request,
|
|||
return true, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLBindingRuleList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLBindingRuleList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -771,7 +771,7 @@ func (s *HTTPServer) ACLBindingRuleList(resp http.ResponseWriter, req *http.Requ
|
|||
return out.BindingRules, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLBindingRuleCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLBindingRuleCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -800,7 +800,7 @@ func (s *HTTPServer) ACLBindingRuleCRUD(resp http.ResponseWriter, req *http.Requ
|
|||
return fn(resp, req, bindingRuleID)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLBindingRuleRead(resp http.ResponseWriter, req *http.Request, bindingRuleID string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLBindingRuleRead(resp http.ResponseWriter, req *http.Request, bindingRuleID string) (interface{}, error) {
|
||||
args := structs.ACLBindingRuleGetRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
BindingRuleID: bindingRuleID,
|
||||
|
@ -831,7 +831,7 @@ func (s *HTTPServer) ACLBindingRuleRead(resp http.ResponseWriter, req *http.Requ
|
|||
return out.BindingRule, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLBindingRuleCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLBindingRuleCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -839,7 +839,7 @@ func (s *HTTPServer) ACLBindingRuleCreate(resp http.ResponseWriter, req *http.Re
|
|||
return s.ACLBindingRuleWrite(resp, req, "")
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLBindingRuleWrite(resp http.ResponseWriter, req *http.Request, bindingRuleID string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLBindingRuleWrite(resp http.ResponseWriter, req *http.Request, bindingRuleID string) (interface{}, error) {
|
||||
args := structs.ACLBindingRuleSetRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
}
|
||||
|
@ -866,7 +866,7 @@ func (s *HTTPServer) ACLBindingRuleWrite(resp http.ResponseWriter, req *http.Req
|
|||
return &out, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLBindingRuleDelete(resp http.ResponseWriter, req *http.Request, bindingRuleID string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLBindingRuleDelete(resp http.ResponseWriter, req *http.Request, bindingRuleID string) (interface{}, error) {
|
||||
args := structs.ACLBindingRuleDeleteRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
BindingRuleID: bindingRuleID,
|
||||
|
@ -884,7 +884,7 @@ func (s *HTTPServer) ACLBindingRuleDelete(resp http.ResponseWriter, req *http.Re
|
|||
return true, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLAuthMethodList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLAuthMethodList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -915,7 +915,7 @@ func (s *HTTPServer) ACLAuthMethodList(resp http.ResponseWriter, req *http.Reque
|
|||
return out.AuthMethods, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLAuthMethodCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLAuthMethodCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -944,7 +944,7 @@ func (s *HTTPServer) ACLAuthMethodCRUD(resp http.ResponseWriter, req *http.Reque
|
|||
return fn(resp, req, methodName)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLAuthMethodRead(resp http.ResponseWriter, req *http.Request, methodName string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLAuthMethodRead(resp http.ResponseWriter, req *http.Request, methodName string) (interface{}, error) {
|
||||
args := structs.ACLAuthMethodGetRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
AuthMethodName: methodName,
|
||||
|
@ -975,7 +975,7 @@ func (s *HTTPServer) ACLAuthMethodRead(resp http.ResponseWriter, req *http.Reque
|
|||
return out.AuthMethod, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLAuthMethodCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLAuthMethodCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -983,7 +983,7 @@ func (s *HTTPServer) ACLAuthMethodCreate(resp http.ResponseWriter, req *http.Req
|
|||
return s.ACLAuthMethodWrite(resp, req, "")
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLAuthMethodWrite(resp http.ResponseWriter, req *http.Request, methodName string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLAuthMethodWrite(resp http.ResponseWriter, req *http.Request, methodName string) (interface{}, error) {
|
||||
args := structs.ACLAuthMethodSetRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
}
|
||||
|
@ -1013,7 +1013,7 @@ func (s *HTTPServer) ACLAuthMethodWrite(resp http.ResponseWriter, req *http.Requ
|
|||
return &out, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLAuthMethodDelete(resp http.ResponseWriter, req *http.Request, methodName string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLAuthMethodDelete(resp http.ResponseWriter, req *http.Request, methodName string) (interface{}, error) {
|
||||
args := structs.ACLAuthMethodDeleteRequest{
|
||||
Datacenter: s.agent.config.Datacenter,
|
||||
AuthMethodName: methodName,
|
||||
|
@ -1031,7 +1031,7 @@ func (s *HTTPServer) ACLAuthMethodDelete(resp http.ResponseWriter, req *http.Req
|
|||
return true, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLLogin(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLLogin(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -1057,7 +1057,7 @@ func (s *HTTPServer) ACLLogin(resp http.ResponseWriter, req *http.Request) (inte
|
|||
return &out, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLLogout(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLLogout(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -1093,7 +1093,7 @@ func fixupAuthMethodConfig(method *structs.ACLAuthMethod) {
|
|||
}
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLAuthorize(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLAuthorize(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// At first glance it may appear like this endpoint is going to leak security relevant information.
|
||||
// There are a number of reason why this is okay.
|
||||
//
|
||||
|
|
|
@ -13,7 +13,7 @@ type aclCreateResponse struct {
|
|||
ID string
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLDestroy(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLDestroy(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -39,21 +39,21 @@ func (s *HTTPServer) ACLDestroy(resp http.ResponseWriter, req *http.Request) (in
|
|||
return true, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
return s.aclSet(resp, req, false)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLUpdate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLUpdate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
return s.aclSet(resp, req, true)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) aclSet(resp http.ResponseWriter, req *http.Request, update bool) (interface{}, error) {
|
||||
func (s *HTTPHandlers) aclSet(resp http.ResponseWriter, req *http.Request, update bool) (interface{}, error) {
|
||||
args := structs.ACLRequest{
|
||||
Datacenter: s.agent.config.ACLDatacenter,
|
||||
Op: structs.ACLSet,
|
||||
|
@ -90,7 +90,7 @@ func (s *HTTPServer) aclSet(resp http.ResponseWriter, req *http.Request, update
|
|||
return aclCreateResponse{out}, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLClone(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLClone(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -142,7 +142,7 @@ func (s *HTTPServer) ACLClone(resp http.ResponseWriter, req *http.Request) (inte
|
|||
return aclCreateResponse{outID}, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLGet(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLGet(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -176,7 +176,7 @@ func (s *HTTPServer) ACLGet(resp http.ResponseWriter, req *http.Request) (interf
|
|||
return out.ACLs, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) ACLList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ACLList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
|
|
@ -49,7 +49,7 @@ func TestACL_Legacy_Disabled_Response(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func makeTestACL(t *testing.T, srv *HTTPServer) string {
|
||||
func makeTestACL(t *testing.T, srv *HTTPHandlers) string {
|
||||
body := bytes.NewBuffer(nil)
|
||||
enc := json.NewEncoder(body)
|
||||
raw := map[string]interface{}{
|
||||
|
|
|
@ -18,7 +18,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/agent/dns"
|
||||
"github.com/hashicorp/consul/agent/router"
|
||||
"github.com/hashicorp/consul/agent/token"
|
||||
"github.com/hashicorp/go-connlimit"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
@ -29,14 +28,12 @@ import (
|
|||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/ae"
|
||||
autoconf "github.com/hashicorp/consul/agent/auto-config"
|
||||
"github.com/hashicorp/consul/agent/cache"
|
||||
cachetype "github.com/hashicorp/consul/agent/cache-types"
|
||||
"github.com/hashicorp/consul/agent/checks"
|
||||
"github.com/hashicorp/consul/agent/config"
|
||||
"github.com/hashicorp/consul/agent/consul"
|
||||
"github.com/hashicorp/consul/agent/local"
|
||||
"github.com/hashicorp/consul/agent/pool"
|
||||
"github.com/hashicorp/consul/agent/proxycfg"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/agent/systemd"
|
||||
|
@ -156,7 +153,8 @@ type notifier interface {
|
|||
// mode, it runs a full Consul server. In client-only mode, it only forwards
|
||||
// requests to other Consul servers.
|
||||
type Agent struct {
|
||||
autoConf *autoconf.AutoConfig
|
||||
// TODO: remove fields that are already in BaseDeps
|
||||
baseDeps BaseDeps
|
||||
|
||||
// config is the agent configuration.
|
||||
config *config.RuntimeConfig
|
||||
|
@ -164,9 +162,6 @@ type Agent struct {
|
|||
// Used for writing our logs
|
||||
logger hclog.InterceptLogger
|
||||
|
||||
// In-memory sink used for collecting metrics
|
||||
MemSink MetricsHandler
|
||||
|
||||
// delegate is either a *consul.Server or *consul.Client
|
||||
// depending on the configuration
|
||||
delegate delegate
|
||||
|
@ -295,12 +290,6 @@ type Agent struct {
|
|||
// IP.
|
||||
httpConnLimiter connlimit.Limiter
|
||||
|
||||
// Connection Pool
|
||||
connPool *pool.ConnPool
|
||||
|
||||
// Shared RPC Router
|
||||
router *router.Router
|
||||
|
||||
// enterpriseAgent embeds fields that we only access in consul-enterprise builds
|
||||
enterpriseAgent
|
||||
}
|
||||
|
@ -337,16 +326,12 @@ func New(bd BaseDeps) (*Agent, error) {
|
|||
shutdownCh: make(chan struct{}),
|
||||
endpoints: make(map[string]string),
|
||||
|
||||
// TODO: store the BaseDeps instead of copying them over to Agent
|
||||
baseDeps: bd,
|
||||
tokens: bd.Tokens,
|
||||
logger: bd.Logger,
|
||||
tlsConfigurator: bd.TLSConfigurator,
|
||||
config: bd.RuntimeConfig,
|
||||
cache: bd.Cache,
|
||||
MemSink: bd.MetricsHandler,
|
||||
connPool: bd.ConnPool,
|
||||
autoConf: bd.AutoConfig,
|
||||
router: bd.Router,
|
||||
}
|
||||
|
||||
a.serviceManager = NewServiceManager(&a)
|
||||
|
@ -407,7 +392,7 @@ func (a *Agent) Start(ctx context.Context) error {
|
|||
|
||||
// This needs to be done early on as it will potentially alter the configuration
|
||||
// and then how other bits are brought up
|
||||
c, err := a.autoConf.InitialConfiguration(ctx)
|
||||
c, err := a.baseDeps.AutoConfig.InitialConfiguration(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -454,23 +439,15 @@ func (a *Agent) Start(ctx context.Context) error {
|
|||
return fmt.Errorf("failed to start Consul enterprise component: %v", err)
|
||||
}
|
||||
|
||||
options := []consul.ConsulOption{
|
||||
consul.WithLogger(a.logger),
|
||||
consul.WithTokenStore(a.tokens),
|
||||
consul.WithTLSConfigurator(a.tlsConfigurator),
|
||||
consul.WithConnectionPool(a.connPool),
|
||||
consul.WithRouter(a.router),
|
||||
}
|
||||
|
||||
// Setup either the client or the server.
|
||||
if c.ServerMode {
|
||||
server, err := consul.NewServer(consulCfg, options...)
|
||||
server, err := consul.NewServer(consulCfg, a.baseDeps.Deps)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to start Consul server: %v", err)
|
||||
}
|
||||
a.delegate = server
|
||||
} else {
|
||||
client, err := consul.NewClient(consulCfg, options...)
|
||||
client, err := consul.NewClient(consulCfg, a.baseDeps.Deps)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to start Consul client: %v", err)
|
||||
}
|
||||
|
@ -487,7 +464,7 @@ func (a *Agent) Start(ctx context.Context) error {
|
|||
a.State.Delegate = a.delegate
|
||||
a.State.TriggerSyncChanges = a.sync.SyncChanges.Trigger
|
||||
|
||||
if err := a.autoConf.Start(&lib.StopChannelContext{StopCh: a.shutdownCh}); err != nil {
|
||||
if err := a.baseDeps.AutoConfig.Start(&lib.StopChannelContext{StopCh: a.shutdownCh}); err != nil {
|
||||
return fmt.Errorf("AutoConf failed to start certificate monitor: %w", err)
|
||||
}
|
||||
a.serviceManager.Start()
|
||||
|
@ -754,7 +731,7 @@ func (a *Agent) listenHTTP() ([]apiServer, error) {
|
|||
l = tls.NewListener(l, tlscfg)
|
||||
}
|
||||
|
||||
srv := &HTTPServer{
|
||||
srv := &HTTPHandlers{
|
||||
agent: a,
|
||||
denylist: NewDenylist(a.config.HTTPBlockEndpoints),
|
||||
}
|
||||
|
@ -1297,7 +1274,7 @@ func (a *Agent) ShutdownAgent() error {
|
|||
|
||||
// this would be cancelled anyways (by the closing of the shutdown ch) but
|
||||
// this should help them to be stopped more quickly
|
||||
a.autoConf.Stop()
|
||||
a.baseDeps.AutoConfig.Stop()
|
||||
|
||||
// Stop the service manager (must happen before we take the stateLock to avoid deadlock)
|
||||
if a.serviceManager != nil {
|
||||
|
@ -1863,7 +1840,8 @@ func (a *Agent) AddServiceAndReplaceChecks(service *structs.NodeService, chkType
|
|||
token: token,
|
||||
replaceExistingChecks: true,
|
||||
source: source,
|
||||
}, a.snapshotCheckState())
|
||||
snap: a.snapshotCheckState(),
|
||||
})
|
||||
}
|
||||
|
||||
// AddService is used to add a service entry.
|
||||
|
@ -1882,12 +1860,13 @@ func (a *Agent) AddService(service *structs.NodeService, chkTypes []*structs.Che
|
|||
token: token,
|
||||
replaceExistingChecks: false,
|
||||
source: source,
|
||||
}, a.snapshotCheckState())
|
||||
snap: a.snapshotCheckState(),
|
||||
})
|
||||
}
|
||||
|
||||
// addServiceLocked adds a service entry to the service manager if enabled, or directly
|
||||
// to the local state if it is not. This function assumes the state lock is already held.
|
||||
func (a *Agent) addServiceLocked(req *addServiceRequest, snap map[structs.CheckID]*structs.HealthCheck) error {
|
||||
func (a *Agent) addServiceLocked(req *addServiceRequest) error {
|
||||
req.fixupForAddServiceLocked()
|
||||
|
||||
req.service.EnterpriseMeta.Normalize()
|
||||
|
@ -1905,7 +1884,7 @@ func (a *Agent) addServiceLocked(req *addServiceRequest, snap map[structs.CheckI
|
|||
req.persistDefaults = nil
|
||||
req.persistServiceConfig = false
|
||||
|
||||
return a.addServiceInternal(req, snap)
|
||||
return a.addServiceInternal(req)
|
||||
}
|
||||
|
||||
// addServiceRequest is the union of arguments for calling both
|
||||
|
@ -1930,6 +1909,7 @@ type addServiceRequest struct {
|
|||
token string
|
||||
replaceExistingChecks bool
|
||||
source configSource
|
||||
snap map[structs.CheckID]*structs.HealthCheck
|
||||
}
|
||||
|
||||
func (r *addServiceRequest) fixupForAddServiceLocked() {
|
||||
|
@ -1943,7 +1923,7 @@ func (r *addServiceRequest) fixupForAddServiceInternal() {
|
|||
}
|
||||
|
||||
// addServiceInternal adds the given service and checks to the local state.
|
||||
func (a *Agent) addServiceInternal(req *addServiceRequest, snap map[structs.CheckID]*structs.HealthCheck) error {
|
||||
func (a *Agent) addServiceInternal(req *addServiceRequest) error {
|
||||
req.fixupForAddServiceInternal()
|
||||
var (
|
||||
service = req.service
|
||||
|
@ -1955,6 +1935,7 @@ func (a *Agent) addServiceInternal(req *addServiceRequest, snap map[structs.Chec
|
|||
token = req.token
|
||||
replaceExistingChecks = req.replaceExistingChecks
|
||||
source = req.source
|
||||
snap = req.snap
|
||||
)
|
||||
|
||||
// Pause the service syncs during modification
|
||||
|
@ -3089,7 +3070,8 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
|
|||
token: service.Token,
|
||||
replaceExistingChecks: false, // do default behavior
|
||||
source: ConfigSourceLocal,
|
||||
}, snap)
|
||||
snap: snap,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to register service %q: %v", service.Name, err)
|
||||
}
|
||||
|
@ -3107,7 +3089,8 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
|
|||
token: sidecarToken,
|
||||
replaceExistingChecks: false, // do default behavior
|
||||
source: ConfigSourceLocal,
|
||||
}, snap)
|
||||
snap: snap,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to register sidecar for service %q: %v", service.Name, err)
|
||||
}
|
||||
|
@ -3199,7 +3182,8 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
|
|||
token: p.Token,
|
||||
replaceExistingChecks: false, // do default behavior
|
||||
source: source,
|
||||
}, snap)
|
||||
snap: snap,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed adding service %q: %s", serviceID, err)
|
||||
}
|
||||
|
@ -3472,7 +3456,7 @@ func (a *Agent) loadLimits(conf *config.RuntimeConfig) {
|
|||
// all services, checks, tokens, metadata, dnsServer configs, etc.
|
||||
// It will also reload all ongoing watches.
|
||||
func (a *Agent) ReloadConfig() error {
|
||||
newCfg, err := a.autoConf.ReadConfig()
|
||||
newCfg, err := a.baseDeps.AutoConfig.ReadConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -43,7 +43,7 @@ type xdsSelf struct {
|
|||
SupportedProxies map[string][]string
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentSelf(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentSelf(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Fetch the ACL token, if any, and enforce agent policy.
|
||||
var token string
|
||||
s.parseToken(req, &token)
|
||||
|
@ -124,7 +124,7 @@ func enablePrometheusOutput(req *http.Request) bool {
|
|||
return acceptsOpenMetricsMimeType(req.Header.Get("Accept"))
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Fetch the ACL token, if any, and enforce agent policy.
|
||||
var token string
|
||||
s.parseToken(req, &token)
|
||||
|
@ -152,10 +152,10 @@ func (s *HTTPServer) AgentMetrics(resp http.ResponseWriter, req *http.Request) (
|
|||
handler.ServeHTTP(resp, req)
|
||||
return nil, nil
|
||||
}
|
||||
return s.agent.MemSink.DisplayMetrics(resp, req)
|
||||
return s.agent.baseDeps.MetricsHandler.DisplayMetrics(resp, req)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentReload(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentReload(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Fetch the ACL token, if any, and enforce agent policy.
|
||||
var token string
|
||||
s.parseToken(req, &token)
|
||||
|
@ -224,7 +224,7 @@ func buildAgentService(s *structs.NodeService) api.AgentService {
|
|||
return as
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Fetch the ACL token, if any.
|
||||
var token string
|
||||
s.parseToken(req, &token)
|
||||
|
@ -271,7 +271,7 @@ func (s *HTTPServer) AgentServices(resp http.ResponseWriter, req *http.Request)
|
|||
//
|
||||
// Returns the service definition for a single local services and allows
|
||||
// blocking watch using hash-based blocking.
|
||||
func (s *HTTPServer) AgentService(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentService(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Get the proxy ID. Note that this is the ID of a proxy's service instance.
|
||||
id := strings.TrimPrefix(req.URL.Path, "/v1/agent/service/")
|
||||
|
||||
|
@ -350,7 +350,7 @@ func (s *HTTPServer) AgentService(resp http.ResponseWriter, req *http.Request) (
|
|||
return service, err
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentChecks(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentChecks(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Fetch the ACL token, if any.
|
||||
var token string
|
||||
s.parseToken(req, &token)
|
||||
|
@ -393,7 +393,7 @@ func (s *HTTPServer) AgentChecks(resp http.ResponseWriter, req *http.Request) (i
|
|||
return filter.Execute(agentChecks)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentMembers(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentMembers(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Fetch the ACL token, if any.
|
||||
var token string
|
||||
s.parseToken(req, &token)
|
||||
|
@ -438,7 +438,7 @@ func (s *HTTPServer) AgentMembers(resp http.ResponseWriter, req *http.Request) (
|
|||
return members, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentJoin(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentJoin(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Fetch the ACL token, if any, and enforce agent policy.
|
||||
var token string
|
||||
s.parseToken(req, &token)
|
||||
|
@ -470,7 +470,7 @@ func (s *HTTPServer) AgentJoin(resp http.ResponseWriter, req *http.Request) (int
|
|||
return nil, err
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentLeave(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentLeave(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Fetch the ACL token, if any, and enforce agent policy.
|
||||
var token string
|
||||
s.parseToken(req, &token)
|
||||
|
@ -488,7 +488,7 @@ func (s *HTTPServer) AgentLeave(resp http.ResponseWriter, req *http.Request) (in
|
|||
return nil, s.agent.ShutdownAgent()
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentForceLeave(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentForceLeave(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Fetch the ACL token, if any, and enforce agent policy.
|
||||
var token string
|
||||
s.parseToken(req, &token)
|
||||
|
@ -510,13 +510,13 @@ func (s *HTTPServer) AgentForceLeave(resp http.ResponseWriter, req *http.Request
|
|||
// syncChanges is a helper function which wraps a blocking call to sync
|
||||
// services and checks to the server. If the operation fails, we only
|
||||
// only warn because the write did succeed and anti-entropy will sync later.
|
||||
func (s *HTTPServer) syncChanges() {
|
||||
func (s *HTTPHandlers) syncChanges() {
|
||||
if err := s.agent.State.SyncChanges(); err != nil {
|
||||
s.agent.logger.Error("failed to sync changes", "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentRegisterCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentRegisterCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var token string
|
||||
s.parseToken(req, &token)
|
||||
|
||||
|
@ -585,7 +585,7 @@ func (s *HTTPServer) AgentRegisterCheck(resp http.ResponseWriter, req *http.Requ
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentDeregisterCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentDeregisterCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
checkID := structs.NewCheckID(types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/deregister/")), nil)
|
||||
|
||||
// Get the provided token, if any, and vet against any ACL policies.
|
||||
|
@ -614,13 +614,13 @@ func (s *HTTPServer) AgentDeregisterCheck(resp http.ResponseWriter, req *http.Re
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentCheckPass(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentCheckPass(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/pass/"))
|
||||
note := req.URL.Query().Get("note")
|
||||
return s.agentCheckUpdate(resp, req, checkID, api.HealthPassing, note)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentCheckWarn(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentCheckWarn(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/warn/"))
|
||||
note := req.URL.Query().Get("note")
|
||||
|
||||
|
@ -628,7 +628,7 @@ func (s *HTTPServer) AgentCheckWarn(resp http.ResponseWriter, req *http.Request)
|
|||
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentCheckFail(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentCheckFail(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/fail/"))
|
||||
note := req.URL.Query().Get("note")
|
||||
|
||||
|
@ -650,7 +650,7 @@ type checkUpdate struct {
|
|||
|
||||
// AgentCheckUpdate is a PUT-based alternative to the GET-based Pass/Warn/Fail
|
||||
// APIs.
|
||||
func (s *HTTPServer) AgentCheckUpdate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentCheckUpdate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var update checkUpdate
|
||||
if err := decodeBody(req.Body, &update); err != nil {
|
||||
resp.WriteHeader(http.StatusBadRequest)
|
||||
|
@ -673,7 +673,7 @@ func (s *HTTPServer) AgentCheckUpdate(resp http.ResponseWriter, req *http.Reques
|
|||
return s.agentCheckUpdate(resp, req, checkID, update.Status, update.Output)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) agentCheckUpdate(_resp http.ResponseWriter, req *http.Request, checkID types.CheckID, status string, output string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) agentCheckUpdate(_resp http.ResponseWriter, req *http.Request, checkID types.CheckID, status string, output string) (interface{}, error) {
|
||||
cid := structs.NewCheckID(checkID, nil)
|
||||
|
||||
// Get the provided token, if any, and vet against any ACL policies.
|
||||
|
@ -703,7 +703,7 @@ func (s *HTTPServer) agentCheckUpdate(_resp http.ResponseWriter, req *http.Reque
|
|||
}
|
||||
|
||||
// agentHealthService Returns Health for a given service ID
|
||||
func agentHealthService(serviceID structs.ServiceID, s *HTTPServer) (int, string, api.HealthChecks) {
|
||||
func agentHealthService(serviceID structs.ServiceID, s *HTTPHandlers) (int, string, api.HealthChecks) {
|
||||
checks := s.agent.State.ChecksForService(serviceID, true)
|
||||
serviceChecks := make(api.HealthChecks, 0)
|
||||
for _, c := range checks {
|
||||
|
@ -744,7 +744,7 @@ func returnTextPlain(req *http.Request) bool {
|
|||
}
|
||||
|
||||
// AgentHealthServiceByID return the local Service Health given its ID
|
||||
func (s *HTTPServer) AgentHealthServiceByID(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentHealthServiceByID(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Pull out the service id (service id since there may be several instance of the same service on this host)
|
||||
serviceID := strings.TrimPrefix(req.URL.Path, "/v1/agent/health/service/id/")
|
||||
if serviceID == "" {
|
||||
|
@ -796,7 +796,7 @@ func (s *HTTPServer) AgentHealthServiceByID(resp http.ResponseWriter, req *http.
|
|||
}
|
||||
|
||||
// AgentHealthServiceByName return the worse status of all the services with given name on an agent
|
||||
func (s *HTTPServer) AgentHealthServiceByName(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentHealthServiceByName(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Pull out the service name
|
||||
serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/health/service/name/")
|
||||
if serviceName == "" {
|
||||
|
@ -857,7 +857,7 @@ func (s *HTTPServer) AgentHealthServiceByName(resp http.ResponseWriter, req *htt
|
|||
return result, CodeWithPayloadError{StatusCode: code, Reason: status, ContentType: "application/json"}
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentRegisterService(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.ServiceDefinition
|
||||
// Fixup the type decode of TTL or Interval if a check if provided.
|
||||
|
||||
|
@ -1007,7 +1007,7 @@ func (s *HTTPServer) AgentRegisterService(resp http.ResponseWriter, req *http.Re
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentDeregisterService(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentDeregisterService(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
sid := structs.NewServiceID(strings.TrimPrefix(req.URL.Path, "/v1/agent/service/deregister/"), nil)
|
||||
|
||||
// Get the provided token, if any, and vet against any ACL policies.
|
||||
|
@ -1037,7 +1037,7 @@ func (s *HTTPServer) AgentDeregisterService(resp http.ResponseWriter, req *http.
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentServiceMaintenance(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentServiceMaintenance(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Ensure we have a service ID
|
||||
sid := structs.NewServiceID(strings.TrimPrefix(req.URL.Path, "/v1/agent/service/maintenance/"), nil)
|
||||
|
||||
|
@ -1100,7 +1100,7 @@ func (s *HTTPServer) AgentServiceMaintenance(resp http.ResponseWriter, req *http
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentNodeMaintenance(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentNodeMaintenance(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Ensure we have some action
|
||||
params := req.URL.Query()
|
||||
if _, ok := params["enable"]; !ok {
|
||||
|
@ -1137,7 +1137,7 @@ func (s *HTTPServer) AgentNodeMaintenance(resp http.ResponseWriter, req *http.Re
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentMonitor(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Fetch the ACL token, if any, and enforce agent policy.
|
||||
var token string
|
||||
s.parseToken(req, &token)
|
||||
|
@ -1205,7 +1205,7 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) (
|
|||
}
|
||||
}
|
||||
|
||||
func (s *HTTPServer) AgentToken(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentToken(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkACLDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -1273,7 +1273,7 @@ func (s *HTTPServer) AgentToken(resp http.ResponseWriter, req *http.Request) (in
|
|||
}
|
||||
|
||||
// AgentConnectCARoots returns the trusted CA roots.
|
||||
func (s *HTTPServer) AgentConnectCARoots(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentConnectCARoots(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.DCSpecificRequest
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
@ -1299,7 +1299,7 @@ func (s *HTTPServer) AgentConnectCARoots(resp http.ResponseWriter, req *http.Req
|
|||
|
||||
// AgentConnectCALeafCert returns the certificate bundle for a service
|
||||
// instance. This supports blocking queries to update the returned bundle.
|
||||
func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Get the service name. Note that this is the name of the service,
|
||||
// not the ID of the service instance.
|
||||
serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/ca/leaf/")
|
||||
|
@ -1343,7 +1343,7 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.
|
|||
//
|
||||
// Note: when this logic changes, consider if the Intention.Check RPC method
|
||||
// also needs to be updated.
|
||||
func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentConnectAuthorize(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Fetch the token
|
||||
var token string
|
||||
s.parseToken(req, &token)
|
||||
|
@ -1384,7 +1384,7 @@ type connectAuthorizeResp struct {
|
|||
// Retrieves information about resources available and in-use for the
|
||||
// host the agent is running on such as CPU, memory, and disk usage. Requires
|
||||
// a operator:read ACL token.
|
||||
func (s *HTTPServer) AgentHost(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) AgentHost(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Fetch the ACL token, if any, and enforce agent policy.
|
||||
var token string
|
||||
s.parseToken(req, &token)
|
||||
|
|
|
@ -39,7 +39,7 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func makeReadOnlyAgentACL(t *testing.T, srv *HTTPServer) string {
|
||||
func makeReadOnlyAgentACL(t *testing.T, srv *HTTPHandlers) string {
|
||||
args := map[string]interface{}{
|
||||
"Name": "User Token",
|
||||
"Type": "client",
|
||||
|
@ -5615,7 +5615,7 @@ func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func waitForActiveCARoot(t *testing.T, srv *HTTPServer, expect *structs.CARoot) {
|
||||
func waitForActiveCARoot(t *testing.T, srv *HTTPHandlers, expect *structs.CARoot) {
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil)
|
||||
resp := httptest.NewRecorder()
|
||||
|
|
|
@ -1917,13 +1917,15 @@ func TestAgent_HTTPCheck_EnableAgentTLSForChecks(t *testing.T) {
|
|||
Status: api.HealthCritical,
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("https://%s/v1/agent/self", a.HTTPAddr())
|
||||
addr, err := firstAddr(a.Agent.apiServers, "https")
|
||||
require.NoError(t, err)
|
||||
url := fmt.Sprintf("https://%s/v1/agent/self", addr.String())
|
||||
chk := &structs.CheckType{
|
||||
HTTP: url,
|
||||
Interval: 20 * time.Millisecond,
|
||||
}
|
||||
|
||||
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
||||
err = a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
@ -3395,14 +3397,24 @@ func TestAgent_ReloadConfigOutgoingRPCConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAgent_ReloadConfigAndKeepChecksStatus(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("normal", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
testAgent_ReloadConfigAndKeepChecksStatus(t, "")
|
||||
})
|
||||
t.Run("service manager", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
testAgent_ReloadConfigAndKeepChecksStatus(t, "enable_central_service_config = true")
|
||||
})
|
||||
}
|
||||
|
||||
func testAgent_ReloadConfigAndKeepChecksStatus(t *testing.T, extraHCL string) {
|
||||
dataDir := testutil.TempDir(t, "agent") // we manage the data dir
|
||||
hcl := `data_dir = "` + dataDir + `"
|
||||
enable_local_script_checks=true
|
||||
services=[{
|
||||
name="webserver1",
|
||||
check{id="check1", ttl="30s"}
|
||||
}]`
|
||||
}] ` + extraHCL
|
||||
a := NewTestAgent(t, hcl)
|
||||
defer a.Shutdown()
|
||||
|
||||
|
@ -3417,6 +3429,7 @@ func TestAgent_ReloadConfigAndKeepChecksStatus(t *testing.T) {
|
|||
|
||||
c := TestConfig(testutil.Logger(t), config.FileSource{Name: t.Name(), Format: "hcl", Data: hcl})
|
||||
require.NoError(t, a.reloadConfigInternal(c))
|
||||
|
||||
// After reload, should be passing directly (no critical state)
|
||||
for id, check := range a.State.Checks(nil) {
|
||||
require.Equal(t, "passing", check.Status, "check %q is wrong", id)
|
||||
|
@ -4607,7 +4620,7 @@ func TestSharedRPCRouter(t *testing.T) {
|
|||
|
||||
testrpc.WaitForTestAgent(t, srv.RPC, "dc1")
|
||||
|
||||
mgr, server := srv.Agent.router.FindLANRoute()
|
||||
mgr, server := srv.Agent.baseDeps.Router.FindLANRoute()
|
||||
require.NotNil(t, mgr)
|
||||
require.NotNil(t, server)
|
||||
|
||||
|
@ -4619,7 +4632,7 @@ func TestSharedRPCRouter(t *testing.T) {
|
|||
|
||||
testrpc.WaitForTestAgent(t, client.RPC, "dc1")
|
||||
|
||||
mgr, server = client.Agent.router.FindLANRoute()
|
||||
mgr, server = client.Agent.baseDeps.Router.FindLANRoute()
|
||||
require.NotNil(t, mgr)
|
||||
require.NotNil(t, server)
|
||||
}
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -10,7 +10,7 @@ import (
|
|||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
func (s *HTTPServer) CatalogRegister(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CatalogRegister(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_register"}, 1,
|
||||
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
|
||||
|
||||
|
@ -43,7 +43,7 @@ func (s *HTTPServer) CatalogRegister(resp http.ResponseWriter, req *http.Request
|
|||
return true, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) CatalogDeregister(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CatalogDeregister(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_deregister"}, 1,
|
||||
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
|
||||
|
||||
|
@ -75,7 +75,7 @@ func (s *HTTPServer) CatalogDeregister(resp http.ResponseWriter, req *http.Reque
|
|||
return true, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) CatalogDatacenters(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CatalogDatacenters(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_datacenters"}, 1,
|
||||
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
|
||||
|
||||
|
@ -111,7 +111,7 @@ func (s *HTTPServer) CatalogDatacenters(resp http.ResponseWriter, req *http.Requ
|
|||
return out, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) CatalogNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CatalogNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_nodes"}, 1,
|
||||
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
|
||||
|
||||
|
@ -149,7 +149,7 @@ RETRY_ONCE:
|
|||
return out.Nodes, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) CatalogServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CatalogServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_services"}, 1,
|
||||
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
|
||||
|
||||
|
@ -205,15 +205,15 @@ func (s *HTTPServer) CatalogServices(resp http.ResponseWriter, req *http.Request
|
|||
return out.Services, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) CatalogConnectServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CatalogConnectServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
return s.catalogServiceNodes(resp, req, true)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) CatalogServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CatalogServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
return s.catalogServiceNodes(resp, req, false)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) catalogServiceNodes(resp http.ResponseWriter, req *http.Request, connect bool) (interface{}, error) {
|
||||
func (s *HTTPHandlers) catalogServiceNodes(resp http.ResponseWriter, req *http.Request, connect bool) (interface{}, error) {
|
||||
metricsKey := "catalog_service_nodes"
|
||||
pathPrefix := "/v1/catalog/service/"
|
||||
if connect {
|
||||
|
@ -302,7 +302,7 @@ func (s *HTTPServer) catalogServiceNodes(resp http.ResponseWriter, req *http.Req
|
|||
return out.ServiceNodes, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) CatalogNodeServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CatalogNodeServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_node_services"}, 1,
|
||||
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
|
||||
|
||||
|
@ -365,7 +365,7 @@ RETRY_ONCE:
|
|||
return out.NodeServices, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) CatalogNodeServiceList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CatalogNodeServiceList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_node_service_list"}, 1,
|
||||
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
|
||||
|
||||
|
@ -415,7 +415,7 @@ RETRY_ONCE:
|
|||
return &out.NodeServices, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) CatalogGatewayServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CatalogGatewayServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_gateway_services"}, 1,
|
||||
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
|
||||
|
||||
|
|
|
@ -13,8 +13,6 @@ import (
|
|||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
var ErrGRPCUnhealthy = fmt.Errorf("gRPC application didn't report service healthy")
|
||||
|
||||
// GrpcHealthProbe connects to gRPC application and queries health service for application/service status.
|
||||
type GrpcHealthProbe struct {
|
||||
server string
|
||||
|
@ -69,8 +67,8 @@ func (probe *GrpcHealthProbe) Check(target string) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if response == nil || response.Status != hv1.HealthCheckResponse_SERVING {
|
||||
return ErrGRPCUnhealthy
|
||||
if response.Status != hv1.HealthCheckResponse_SERVING {
|
||||
return fmt.Errorf("gRPC %s serving status: %s", target, response.Status)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
@ -12,7 +12,7 @@ import (
|
|||
const ConfigEntryNotFoundErr string = "Config entry not found"
|
||||
|
||||
// Config switches on the different CRUD operations for config entries.
|
||||
func (s *HTTPServer) Config(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) Config(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
switch req.Method {
|
||||
case "GET":
|
||||
return s.configGet(resp, req)
|
||||
|
@ -27,7 +27,7 @@ func (s *HTTPServer) Config(resp http.ResponseWriter, req *http.Request) (interf
|
|||
|
||||
// configGet gets either a specific config entry, or lists all config entries
|
||||
// of a kind if no name is provided.
|
||||
func (s *HTTPServer) configGet(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) configGet(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.ConfigEntryQuery
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
@ -74,7 +74,7 @@ func (s *HTTPServer) configGet(resp http.ResponseWriter, req *http.Request) (int
|
|||
}
|
||||
|
||||
// configDelete deletes the given config entry.
|
||||
func (s *HTTPServer) configDelete(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) configDelete(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.ConfigEntryRequest
|
||||
s.parseDC(req, &args.Datacenter)
|
||||
s.parseToken(req, &args.Token)
|
||||
|
@ -108,7 +108,7 @@ func (s *HTTPServer) configDelete(resp http.ResponseWriter, req *http.Request) (
|
|||
}
|
||||
|
||||
// ConfigCreate applies the given config entry update.
|
||||
func (s *HTTPServer) ConfigApply(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ConfigApply(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.ConfigEntryRequest{
|
||||
Op: structs.ConfigEntryUpsert,
|
||||
}
|
||||
|
|
|
@ -164,3 +164,11 @@ type NeedsLogger interface {
|
|||
// SetLogger will pass a configured Logger to the provider.
|
||||
SetLogger(logger hclog.Logger)
|
||||
}
|
||||
|
||||
// NeedsStop is an optional interface that allows a CA to define a function
|
||||
// to be called when the CA instance is no longer in use. This is different
|
||||
// from Cleanup(), as only the local provider instance is being shut down
|
||||
// such as in the case of a leader change.
|
||||
type NeedsStop interface {
|
||||
Stop()
|
||||
}
|
||||
|
|
|
@ -21,6 +21,13 @@ import (
|
|||
"github.com/hashicorp/go-hclog"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
// NotBefore will be CertificateTimeDriftBuffer in the past to account for
|
||||
// time drift between different servers.
|
||||
CertificateTimeDriftBuffer = time.Minute
|
||||
)
|
||||
|
||||
var ErrNotInitialized = errors.New("provider not initialized")
|
||||
|
||||
type ConsulProvider struct {
|
||||
|
@ -474,7 +481,7 @@ func (c *ConsulProvider) SignIntermediate(csr *x509.CertificateRequest) (string,
|
|||
// Sign the certificate valid from 1 minute in the past, this helps it be
|
||||
// accepted right away even when nodes are not in close time sync across the
|
||||
// cluster. A minute is more than enough for typical DC clock drift.
|
||||
effectiveNow := time.Now().Add(-1 * time.Minute)
|
||||
effectiveNow := time.Now().Add(-1 * CertificateTimeDriftBuffer)
|
||||
template := x509.Certificate{
|
||||
SerialNumber: sn,
|
||||
Subject: csr.Subject,
|
||||
|
|
|
@ -44,13 +44,13 @@ func ParseConsulCAConfig(raw map[string]interface{}) (*structs.ConsulCAProviderC
|
|||
func defaultConsulCAProviderConfig() structs.ConsulCAProviderConfig {
|
||||
return structs.ConsulCAProviderConfig{
|
||||
CommonCAProviderConfig: defaultCommonConfig(),
|
||||
IntermediateCertTTL: 24 * 365 * time.Hour,
|
||||
}
|
||||
}
|
||||
func defaultCommonConfig() structs.CommonCAProviderConfig {
|
||||
return structs.CommonCAProviderConfig{
|
||||
LeafCertTTL: 3 * 24 * time.Hour,
|
||||
PrivateKeyType: connect.DefaultPrivateKeyType,
|
||||
PrivateKeyBits: connect.DefaultPrivateKeyBits,
|
||||
LeafCertTTL: 3 * 24 * time.Hour,
|
||||
IntermediateCertTTL: 24 * 365 * time.Hour,
|
||||
PrivateKeyType: connect.DefaultPrivateKeyType,
|
||||
PrivateKeyBits: connect.DefaultPrivateKeyBits,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -26,12 +26,13 @@ func TestStructs_CAConfiguration_MsgpackEncodeDecode(t *testing.T) {
|
|||
"PrivateKeyBits": int64(4096),
|
||||
}
|
||||
expectCommonBase := &structs.CommonCAProviderConfig{
|
||||
LeafCertTTL: 30 * time.Hour,
|
||||
SkipValidate: true,
|
||||
CSRMaxPerSecond: 5.25,
|
||||
CSRMaxConcurrent: 55,
|
||||
PrivateKeyType: "rsa",
|
||||
PrivateKeyBits: 4096,
|
||||
LeafCertTTL: 30 * time.Hour,
|
||||
IntermediateCertTTL: 90 * time.Hour,
|
||||
SkipValidate: true,
|
||||
CSRMaxPerSecond: 5.25,
|
||||
CSRMaxConcurrent: 55,
|
||||
PrivateKeyType: "rsa",
|
||||
PrivateKeyBits: 4096,
|
||||
}
|
||||
|
||||
cases := map[string]testcase{
|
||||
|
@ -60,7 +61,6 @@ func TestStructs_CAConfiguration_MsgpackEncodeDecode(t *testing.T) {
|
|||
PrivateKey: "key",
|
||||
RootCert: "cert",
|
||||
RotationPeriod: 5 * time.Minute,
|
||||
IntermediateCertTTL: 90 * time.Hour,
|
||||
DisableCrossSigning: true,
|
||||
},
|
||||
parseFunc: func(t *testing.T, raw map[string]interface{}) interface{} {
|
||||
|
@ -86,6 +86,7 @@ func TestStructs_CAConfiguration_MsgpackEncodeDecode(t *testing.T) {
|
|||
"Token": "token",
|
||||
"RootPKIPath": "root-pki/",
|
||||
"IntermediatePKIPath": "im-pki/",
|
||||
"IntermediateCertTTL": "90h",
|
||||
"CAFile": "ca-file",
|
||||
"CAPath": "ca-path",
|
||||
"CertFile": "cert-file",
|
||||
|
@ -126,8 +127,9 @@ func TestStructs_CAConfiguration_MsgpackEncodeDecode(t *testing.T) {
|
|||
ModifyIndex: 99,
|
||||
},
|
||||
Config: map[string]interface{}{
|
||||
"ExistingARN": "arn://foo",
|
||||
"DeleteOnExit": true,
|
||||
"ExistingARN": "arn://foo",
|
||||
"DeleteOnExit": true,
|
||||
"IntermediateCertTTL": "90h",
|
||||
},
|
||||
},
|
||||
expectConfig: &structs.AWSCAProviderConfig{
|
||||
|
|
|
@ -2,6 +2,7 @@ package ca
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
|
@ -11,6 +12,8 @@ import (
|
|||
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/logging"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
vaultapi "github.com/hashicorp/vault/api"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
@ -21,12 +24,20 @@ var ErrBackendNotMounted = fmt.Errorf("backend not mounted")
|
|||
var ErrBackendNotInitialized = fmt.Errorf("backend not initialized")
|
||||
|
||||
type VaultProvider struct {
|
||||
config *structs.VaultCAProviderConfig
|
||||
client *vaultapi.Client
|
||||
config *structs.VaultCAProviderConfig
|
||||
client *vaultapi.Client
|
||||
|
||||
shutdown func()
|
||||
|
||||
isPrimary bool
|
||||
clusterID string
|
||||
spiffeID *connect.SpiffeIDSigning
|
||||
setupIntermediatePKIPathDone bool
|
||||
logger hclog.Logger
|
||||
}
|
||||
|
||||
func NewVaultProvider() *VaultProvider {
|
||||
return &VaultProvider{shutdown: func() {}}
|
||||
}
|
||||
|
||||
func vaultTLSConfig(config *structs.VaultCAProviderConfig) *vaultapi.TLSConfig {
|
||||
|
@ -66,9 +77,76 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error {
|
|||
v.clusterID = cfg.ClusterID
|
||||
v.spiffeID = connect.SpiffeIDSigningForCluster(&structs.CAConfiguration{ClusterID: v.clusterID})
|
||||
|
||||
// Look up the token to see if we can auto-renew its lease.
|
||||
secret, err := client.Auth().Token().Lookup(config.Token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var token struct {
|
||||
Renewable bool
|
||||
TTL int
|
||||
}
|
||||
if err := mapstructure.Decode(secret.Data, &token); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set up a renewer to renew the token automatically, if supported.
|
||||
if token.Renewable {
|
||||
lifetimeWatcher, err := client.NewLifetimeWatcher(&vaultapi.LifetimeWatcherInput{
|
||||
Secret: &vaultapi.Secret{
|
||||
Auth: &vaultapi.SecretAuth{
|
||||
ClientToken: config.Token,
|
||||
Renewable: token.Renewable,
|
||||
LeaseDuration: secret.LeaseDuration,
|
||||
},
|
||||
},
|
||||
Increment: token.TTL,
|
||||
RenewBehavior: vaultapi.RenewBehaviorIgnoreErrors,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error beginning Vault provider token renewal: %v", err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
v.shutdown = cancel
|
||||
go v.renewToken(ctx, lifetimeWatcher)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// renewToken uses a vaultapi.Renewer to repeatedly renew our token's lease.
|
||||
func (v *VaultProvider) renewToken(ctx context.Context, watcher *vaultapi.LifetimeWatcher) {
|
||||
go watcher.Start()
|
||||
defer watcher.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
|
||||
case err := <-watcher.DoneCh():
|
||||
if err != nil {
|
||||
v.logger.Error("Error renewing token for Vault provider", "error", err)
|
||||
}
|
||||
|
||||
// Watcher routine has finished, so start it again.
|
||||
go watcher.Start()
|
||||
|
||||
case <-watcher.RenewCh():
|
||||
v.logger.Error("Successfully renewed token for Vault provider")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetLogger implements the NeedsLogger interface so the provider can log important messages.
|
||||
func (v *VaultProvider) SetLogger(logger hclog.Logger) {
|
||||
v.logger = logger.
|
||||
ResetNamed(logging.Connect).
|
||||
Named(logging.CA).
|
||||
Named(logging.Vault)
|
||||
}
|
||||
|
||||
// State implements Provider. Vault provider needs no state other than the
|
||||
// user-provided config currently.
|
||||
func (v *VaultProvider) State() (map[string]string, error) {
|
||||
|
@ -153,7 +231,7 @@ func (v *VaultProvider) setupIntermediatePKIPath() error {
|
|||
Type: "pki",
|
||||
Description: "intermediate CA backend for Consul Connect",
|
||||
Config: vaultapi.MountConfigInput{
|
||||
MaxLeaseTTL: "2160h",
|
||||
MaxLeaseTTL: v.config.IntermediateCertTTL.String(),
|
||||
},
|
||||
})
|
||||
|
||||
|
@ -431,9 +509,16 @@ func (c *VaultProvider) SupportsCrossSigning() (bool, error) {
|
|||
// this down and recreate it on small config changes because the intermediate
|
||||
// certs get bundled with the leaf certs, so there's no cost to the CA changing.
|
||||
func (v *VaultProvider) Cleanup() error {
|
||||
v.Stop()
|
||||
|
||||
return v.client.Sys().Unmount(v.config.IntermediatePKIPath)
|
||||
}
|
||||
|
||||
// Stop shuts down the token renew goroutine.
|
||||
func (v *VaultProvider) Stop() {
|
||||
v.shutdown()
|
||||
}
|
||||
|
||||
func ParseVaultCAConfig(raw map[string]interface{}) (*structs.VaultCAProviderConfig, error) {
|
||||
config := structs.VaultCAProviderConfig{
|
||||
CommonCAProviderConfig: defaultCommonConfig(),
|
||||
|
|
|
@ -2,6 +2,7 @@ package ca
|
|||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
@ -14,6 +15,7 @@ import (
|
|||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/sdk/freeport"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
vaultapi "github.com/hashicorp/vault/api"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
@ -51,6 +53,42 @@ func TestVaultCAProvider_SecondaryActiveIntermediate(t *testing.T) {
|
|||
require.NoError(err)
|
||||
}
|
||||
|
||||
func TestVaultCAProvider_RenewToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
skipIfVaultNotPresent(t)
|
||||
|
||||
testVault, err := runTestVault(t)
|
||||
require.NoError(t, err)
|
||||
testVault.WaitUntilReady(t)
|
||||
|
||||
// Create a token with a short TTL to be renewed by the provider.
|
||||
ttl := 1 * time.Second
|
||||
tcr := &vaultapi.TokenCreateRequest{
|
||||
TTL: ttl.String(),
|
||||
}
|
||||
secret, err := testVault.client.Auth().Token().Create(tcr)
|
||||
require.NoError(t, err)
|
||||
providerToken := secret.Auth.ClientToken
|
||||
|
||||
_, err = createVaultProvider(t, true, testVault.addr, providerToken, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check the last renewal time.
|
||||
secret, err = testVault.client.Auth().Token().Lookup(providerToken)
|
||||
require.NoError(t, err)
|
||||
firstRenewal, err := secret.Data["last_renewal_time"].(json.Number).Int64()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait past the TTL and make sure the token has been renewed.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
secret, err = testVault.client.Auth().Token().Lookup(providerToken)
|
||||
require.NoError(r, err)
|
||||
lastRenewal, err := secret.Data["last_renewal_time"].(json.Number).Int64()
|
||||
require.NoError(r, err)
|
||||
require.Greater(r, lastRenewal, firstRenewal)
|
||||
})
|
||||
}
|
||||
|
||||
func TestVaultCAProvider_Bootstrap(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
@ -349,16 +387,25 @@ func testVaultProvider(t *testing.T) (*VaultProvider, *testVaultServer) {
|
|||
}
|
||||
|
||||
func testVaultProviderWithConfig(t *testing.T, isPrimary bool, rawConf map[string]interface{}) (*VaultProvider, *testVaultServer) {
|
||||
testVault, err := runTestVault()
|
||||
testVault, err := runTestVault(t)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testVault.WaitUntilReady(t)
|
||||
|
||||
provider, err := createVaultProvider(t, isPrimary, testVault.addr, testVault.rootToken, rawConf)
|
||||
if err != nil {
|
||||
testVault.Stop()
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
return provider, testVault
|
||||
}
|
||||
|
||||
func createVaultProvider(t *testing.T, isPrimary bool, addr, token string, rawConf map[string]interface{}) (*VaultProvider, error) {
|
||||
conf := map[string]interface{}{
|
||||
"Address": testVault.addr,
|
||||
"Token": testVault.rootToken,
|
||||
"Address": addr,
|
||||
"Token": token,
|
||||
"RootPKIPath": "pki-root/",
|
||||
"IntermediatePKIPath": "pki-intermediate/",
|
||||
// Tests duration parsing after msgpack type mangling during raft apply.
|
||||
|
@ -368,7 +415,7 @@ func testVaultProviderWithConfig(t *testing.T, isPrimary bool, rawConf map[strin
|
|||
conf[k] = v
|
||||
}
|
||||
|
||||
provider := &VaultProvider{}
|
||||
provider := NewVaultProvider()
|
||||
|
||||
cfg := ProviderConfig{
|
||||
ClusterID: connect.TestClusterID,
|
||||
|
@ -377,26 +424,24 @@ func testVaultProviderWithConfig(t *testing.T, isPrimary bool, rawConf map[strin
|
|||
RawConfig: conf,
|
||||
}
|
||||
|
||||
logger := hclog.New(&hclog.LoggerOptions{
|
||||
Output: ioutil.Discard,
|
||||
})
|
||||
provider.SetLogger(logger)
|
||||
|
||||
if !isPrimary {
|
||||
cfg.IsPrimary = false
|
||||
cfg.Datacenter = "dc2"
|
||||
}
|
||||
|
||||
if err := provider.Configure(cfg); err != nil {
|
||||
testVault.Stop()
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, provider.Configure(cfg))
|
||||
if isPrimary {
|
||||
if err = provider.GenerateRoot(); err != nil {
|
||||
testVault.Stop()
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if _, err := provider.GenerateIntermediate(); err != nil {
|
||||
testVault.Stop()
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, provider.GenerateRoot())
|
||||
_, err := provider.GenerateIntermediate()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
return provider, testVault
|
||||
|
||||
return provider, nil
|
||||
}
|
||||
|
||||
// skipIfVaultNotPresent skips the test if the vault binary is not in PATH.
|
||||
|
@ -415,7 +460,7 @@ func skipIfVaultNotPresent(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func runTestVault() (*testVaultServer, error) {
|
||||
func runTestVault(t *testing.T) (*testVaultServer, error) {
|
||||
vaultBinaryName := os.Getenv("VAULT_BINARY_NAME")
|
||||
if vaultBinaryName == "" {
|
||||
vaultBinaryName = "vault"
|
||||
|
@ -466,13 +511,17 @@ func runTestVault() (*testVaultServer, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return &testVaultServer{
|
||||
testVault := &testVaultServer{
|
||||
rootToken: token,
|
||||
addr: "http://" + clientAddr,
|
||||
cmd: cmd,
|
||||
client: client,
|
||||
returnPortsFn: returnPortsFn,
|
||||
}, nil
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
testVault.Stop()
|
||||
})
|
||||
return testVault, nil
|
||||
}
|
||||
|
||||
type testVaultServer struct {
|
||||
|
|
|
@ -38,9 +38,10 @@ var badParams = []KeyConfig{
|
|||
|
||||
func makeConfig(kc KeyConfig) structs.CommonCAProviderConfig {
|
||||
return structs.CommonCAProviderConfig{
|
||||
LeafCertTTL: 3 * 24 * time.Hour,
|
||||
PrivateKeyType: kc.keyType,
|
||||
PrivateKeyBits: kc.keyBits,
|
||||
LeafCertTTL: 3 * 24 * time.Hour,
|
||||
IntermediateCertTTL: 365 * 24 * time.Hour,
|
||||
PrivateKeyType: kc.keyType,
|
||||
PrivateKeyBits: kc.keyBits,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
)
|
||||
|
||||
// GET /v1/connect/ca/roots
|
||||
func (s *HTTPServer) ConnectCARoots(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ConnectCARoots(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.DCSpecificRequest
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
@ -25,7 +25,7 @@ func (s *HTTPServer) ConnectCARoots(resp http.ResponseWriter, req *http.Request)
|
|||
}
|
||||
|
||||
// /v1/connect/ca/configuration
|
||||
func (s *HTTPServer) ConnectCAConfiguration(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ConnectCAConfiguration(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
switch req.Method {
|
||||
case "GET":
|
||||
return s.ConnectCAConfigurationGet(resp, req)
|
||||
|
@ -39,7 +39,7 @@ func (s *HTTPServer) ConnectCAConfiguration(resp http.ResponseWriter, req *http.
|
|||
}
|
||||
|
||||
// GEt /v1/connect/ca/configuration
|
||||
func (s *HTTPServer) ConnectCAConfigurationGet(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ConnectCAConfigurationGet(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Method is tested in ConnectCAConfiguration
|
||||
var args structs.DCSpecificRequest
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
|
@ -56,7 +56,7 @@ func (s *HTTPServer) ConnectCAConfigurationGet(resp http.ResponseWriter, req *ht
|
|||
}
|
||||
|
||||
// PUT /v1/connect/ca/configuration
|
||||
func (s *HTTPServer) ConnectCAConfigurationSet(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) ConnectCAConfigurationSet(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Method is tested in ConnectCAConfiguration
|
||||
|
||||
var args structs.CARequest
|
||||
|
|
|
@ -35,7 +35,7 @@ func TestCatalog_Register(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 8000,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
|
@ -135,7 +135,7 @@ func TestCatalog_Register_NodeID(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 8000,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
|
@ -199,7 +199,7 @@ node "foo" {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 8000,
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: id},
|
||||
|
@ -285,7 +285,7 @@ func TestCatalog_Register_ForwardLeader(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 8000,
|
||||
},
|
||||
}
|
||||
|
@ -318,7 +318,7 @@ func TestCatalog_Register_ForwardDC(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 8000,
|
||||
},
|
||||
}
|
||||
|
@ -1615,7 +1615,7 @@ func TestCatalog_ListServiceNodes(t *testing.T) {
|
|||
args := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "db",
|
||||
ServiceTags: []string{"slave"},
|
||||
ServiceTags: []string{"replica"},
|
||||
TagFilter: false,
|
||||
}
|
||||
var out structs.IndexedServiceNodes
|
||||
|
|
|
@ -22,18 +22,6 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
// clientRPCConnMaxIdle controls how long we keep an idle connection
|
||||
// open to a server. 127s was chosen as the first prime above 120s
|
||||
// (arbitrarily chose to use a prime) with the intent of reusing
|
||||
// connections who are used by once-a-minute cron(8) jobs *and* who
|
||||
// use a 60s jitter window (e.g. in vixie cron job execution can
|
||||
// drift by up to 59s per job, or 119s for a once-a-minute cron job).
|
||||
clientRPCConnMaxIdle = 127 * time.Second
|
||||
|
||||
// clientMaxStreams controls how many idle streams we keep
|
||||
// open to a server
|
||||
clientMaxStreams = 32
|
||||
|
||||
// serfEventBacklog is the maximum number of unprocessed Serf Events
|
||||
// that will be held in queue before new serf events block. A
|
||||
// blocking serf event queue is a bad thing.
|
||||
|
@ -68,8 +56,7 @@ type Client struct {
|
|||
// from an agent.
|
||||
rpcLimiter atomic.Value
|
||||
|
||||
// eventCh is used to receive events from the
|
||||
// serf cluster in the datacenter
|
||||
// eventCh is used to receive events from the serf cluster in the datacenter
|
||||
eventCh chan serf.Event
|
||||
|
||||
// Logger uses the provided LogOutput
|
||||
|
@ -90,12 +77,7 @@ type Client struct {
|
|||
}
|
||||
|
||||
// NewClient creates and returns a Client
|
||||
func NewClient(config *Config, options ...ConsulOption) (*Client, error) {
|
||||
flat := flattenConsulOptions(options)
|
||||
|
||||
tlsConfigurator := flat.tlsConfigurator
|
||||
connPool := flat.connPool
|
||||
|
||||
func NewClient(config *Config, deps Deps) (*Client, error) {
|
||||
if err := config.CheckProtocolVersion(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -105,32 +87,14 @@ func NewClient(config *Config, options ...ConsulOption) (*Client, error) {
|
|||
if err := config.CheckACL(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if flat.logger == nil {
|
||||
return nil, fmt.Errorf("logger is required")
|
||||
}
|
||||
|
||||
if connPool == nil {
|
||||
connPool = &pool.ConnPool{
|
||||
Server: false,
|
||||
SrcAddr: config.RPCSrcAddr,
|
||||
Logger: flat.logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true}),
|
||||
MaxTime: clientRPCConnMaxIdle,
|
||||
MaxStreams: clientMaxStreams,
|
||||
TLSConfigurator: tlsConfigurator,
|
||||
Datacenter: config.Datacenter,
|
||||
}
|
||||
}
|
||||
|
||||
logger := flat.logger.NamedIntercept(logging.ConsulClient)
|
||||
|
||||
// Create client
|
||||
c := &Client{
|
||||
config: config,
|
||||
connPool: connPool,
|
||||
connPool: deps.ConnPool,
|
||||
eventCh: make(chan serf.Event, serfEventBacklog),
|
||||
logger: logger,
|
||||
logger: deps.Logger.NamedIntercept(logging.ConsulClient),
|
||||
shutdownCh: make(chan struct{}),
|
||||
tlsConfigurator: tlsConfigurator,
|
||||
tlsConfigurator: deps.TLSConfigurator,
|
||||
}
|
||||
|
||||
c.rpcLimiter.Store(rate.NewLimiter(config.RPCRate, config.RPCMaxBurst))
|
||||
|
@ -156,23 +120,17 @@ func NewClient(config *Config, options ...ConsulOption) (*Client, error) {
|
|||
}
|
||||
|
||||
// Initialize the LAN Serf
|
||||
c.serf, err = c.setupSerf(config.SerfLANConfig,
|
||||
c.eventCh, serfLANSnapshot)
|
||||
c.serf, err = c.setupSerf(config.SerfLANConfig, c.eventCh, serfLANSnapshot)
|
||||
if err != nil {
|
||||
c.Shutdown()
|
||||
return nil, fmt.Errorf("Failed to start lan serf: %v", err)
|
||||
}
|
||||
|
||||
rpcRouter := flat.router
|
||||
if rpcRouter == nil {
|
||||
rpcRouter = router.NewRouter(logger, config.Datacenter, fmt.Sprintf("%s.%s", config.NodeName, config.Datacenter))
|
||||
}
|
||||
|
||||
if err := rpcRouter.AddArea(types.AreaLAN, c.serf, c.connPool); err != nil {
|
||||
if err := deps.Router.AddArea(types.AreaLAN, c.serf, c.connPool); err != nil {
|
||||
c.Shutdown()
|
||||
return nil, fmt.Errorf("Failed to add LAN area to the RPC router: %w", err)
|
||||
}
|
||||
c.router = rpcRouter
|
||||
c.router = deps.Router
|
||||
|
||||
// Start LAN event handlers after the router is complete since the event
|
||||
// handlers depend on the router and the router depends on Serf.
|
||||
|
|
|
@ -2,13 +2,17 @@ package consul
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/agent/pool"
|
||||
"github.com/hashicorp/consul/agent/router"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/agent/token"
|
||||
"github.com/hashicorp/consul/sdk/freeport"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
|
@ -64,18 +68,8 @@ func testClientWithConfigWithErr(t *testing.T, cb func(c *Config)) (string, *Cli
|
|||
if cb != nil {
|
||||
cb(config)
|
||||
}
|
||||
logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
|
||||
Name: config.NodeName,
|
||||
Level: hclog.Debug,
|
||||
Output: testutil.NewLogBuffer(t),
|
||||
})
|
||||
|
||||
tlsConf, err := tlsutil.NewConfigurator(config.ToTLSUtilConfig(), logger)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
client, err := NewClient(config, WithLogger(logger), WithTLSConfigurator(tlsConf))
|
||||
client, err := NewClient(config, newDefaultDeps(t, config))
|
||||
return dir, client, err
|
||||
}
|
||||
|
||||
|
@ -466,14 +460,7 @@ func TestClient_RPC_TLS(t *testing.T) {
|
|||
func newClient(t *testing.T, config *Config) *Client {
|
||||
t.Helper()
|
||||
|
||||
c, err := tlsutil.NewConfigurator(config.ToTLSUtilConfig(), nil)
|
||||
require.NoError(t, err, "failed to create tls configuration")
|
||||
|
||||
logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
|
||||
Level: hclog.Debug,
|
||||
Output: testutil.NewLogBuffer(t),
|
||||
})
|
||||
client, err := NewClient(config, WithLogger(logger), WithTLSConfigurator(c))
|
||||
client, err := NewClient(config, newDefaultDeps(t, config))
|
||||
require.NoError(t, err, "failed to create client")
|
||||
t.Cleanup(func() {
|
||||
client.Shutdown()
|
||||
|
@ -481,6 +468,39 @@ func newClient(t *testing.T, config *Config) *Client {
|
|||
return client
|
||||
}
|
||||
|
||||
func newDefaultDeps(t *testing.T, c *Config) Deps {
|
||||
t.Helper()
|
||||
|
||||
logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
|
||||
Name: c.NodeName,
|
||||
Level: hclog.Debug,
|
||||
Output: testutil.NewLogBuffer(t),
|
||||
})
|
||||
|
||||
tls, err := tlsutil.NewConfigurator(c.ToTLSUtilConfig(), logger)
|
||||
require.NoError(t, err, "failed to create tls configuration")
|
||||
|
||||
r := router.NewRouter(logger, c.Datacenter, fmt.Sprintf("%s.%s", c.NodeName, c.Datacenter), nil)
|
||||
|
||||
connPool := &pool.ConnPool{
|
||||
Server: false,
|
||||
SrcAddr: c.RPCSrcAddr,
|
||||
Logger: logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true}),
|
||||
MaxTime: 2 * time.Minute,
|
||||
MaxStreams: 4,
|
||||
TLSConfigurator: tls,
|
||||
Datacenter: c.Datacenter,
|
||||
}
|
||||
|
||||
return Deps{
|
||||
Logger: logger,
|
||||
TLSConfigurator: tls,
|
||||
Tokens: new(token.Store),
|
||||
Router: r,
|
||||
ConnPool: connPool,
|
||||
}
|
||||
}
|
||||
|
||||
func TestClient_RPC_RateLimit(t *testing.T) {
|
||||
t.Parallel()
|
||||
_, conf1 := testServerConfig(t)
|
||||
|
|
|
@ -152,6 +152,17 @@ func (s *ConnectCA) ConfigurationSet(
|
|||
if err := newProvider.Configure(pCfg); err != nil {
|
||||
return fmt.Errorf("error configuring provider: %v", err)
|
||||
}
|
||||
|
||||
// Set up a defer to clean up the new provider if we exit early due to an error.
|
||||
cleanupNewProvider := true
|
||||
defer func() {
|
||||
if cleanupNewProvider {
|
||||
if err := newProvider.Cleanup(); err != nil {
|
||||
s.logger.Warn("failed to clean up CA provider while handling startup failure", "provider", newProvider, "error", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if err := newProvider.GenerateRoot(); err != nil {
|
||||
return fmt.Errorf("error generating CA root certificate: %v", err)
|
||||
}
|
||||
|
@ -195,6 +206,7 @@ func (s *ConnectCA) ConfigurationSet(
|
|||
}
|
||||
|
||||
// If the config has been committed, update the local provider instance
|
||||
cleanupNewProvider = false
|
||||
s.srv.setCAProvider(newProvider, newActiveRoot)
|
||||
|
||||
s.logger.Info("CA provider config updated")
|
||||
|
@ -291,6 +303,7 @@ func (s *ConnectCA) ConfigurationSet(
|
|||
|
||||
// If the config has been committed, update the local provider instance
|
||||
// and call teardown on the old provider
|
||||
cleanupNewProvider = false
|
||||
s.srv.setCAProvider(newProvider, newActiveRoot)
|
||||
|
||||
if err := oldProvider.Cleanup(); err != nil {
|
||||
|
|
|
@ -539,7 +539,7 @@ func TestHealth_ServiceNodes(t *testing.T) {
|
|||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
|
@ -559,7 +559,7 @@ func TestHealth_ServiceNodes(t *testing.T) {
|
|||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
Tags: []string{"slave"},
|
||||
Tags: []string{"replica"},
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
|
@ -575,7 +575,7 @@ func TestHealth_ServiceNodes(t *testing.T) {
|
|||
req := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "db",
|
||||
ServiceTags: []string{"master"},
|
||||
ServiceTags: []string{"primary"},
|
||||
TagFilter: false,
|
||||
}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil {
|
||||
|
@ -592,10 +592,10 @@ func TestHealth_ServiceNodes(t *testing.T) {
|
|||
if nodes[1].Node.Node != "foo" {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
if !stringslice.Contains(nodes[0].Service.Tags, "slave") {
|
||||
if !stringslice.Contains(nodes[0].Service.Tags, "replica") {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if !stringslice.Contains(nodes[1].Service.Tags, "master") {
|
||||
if !stringslice.Contains(nodes[1].Service.Tags, "primary") {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
if nodes[0].Checks[0].Status != api.HealthWarning {
|
||||
|
@ -613,7 +613,7 @@ func TestHealth_ServiceNodes(t *testing.T) {
|
|||
req := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "db",
|
||||
ServiceTag: "master",
|
||||
ServiceTag: "primary",
|
||||
TagFilter: false,
|
||||
}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil {
|
||||
|
@ -630,10 +630,10 @@ func TestHealth_ServiceNodes(t *testing.T) {
|
|||
if nodes[1].Node.Node != "foo" {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
if !stringslice.Contains(nodes[0].Service.Tags, "slave") {
|
||||
if !stringslice.Contains(nodes[0].Service.Tags, "replica") {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if !stringslice.Contains(nodes[1].Service.Tags, "master") {
|
||||
if !stringslice.Contains(nodes[1].Service.Tags, "primary") {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
if nodes[0].Checks[0].Status != api.HealthWarning {
|
||||
|
@ -662,7 +662,7 @@ func TestHealth_ServiceNodes_MultipleServiceTags(t *testing.T) {
|
|||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
Tags: []string{"master", "v2"},
|
||||
Tags: []string{"primary", "v2"},
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
|
@ -680,7 +680,7 @@ func TestHealth_ServiceNodes_MultipleServiceTags(t *testing.T) {
|
|||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
Tags: []string{"slave", "v2"},
|
||||
Tags: []string{"replica", "v2"},
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
|
@ -694,7 +694,7 @@ func TestHealth_ServiceNodes_MultipleServiceTags(t *testing.T) {
|
|||
req := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "db",
|
||||
ServiceTags: []string{"master", "v2"},
|
||||
ServiceTags: []string{"primary", "v2"},
|
||||
TagFilter: true,
|
||||
}
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2))
|
||||
|
@ -703,7 +703,7 @@ func TestHealth_ServiceNodes_MultipleServiceTags(t *testing.T) {
|
|||
require.Len(t, nodes, 1)
|
||||
require.Equal(t, nodes[0].Node.Node, "foo")
|
||||
require.Contains(t, nodes[0].Service.Tags, "v2")
|
||||
require.Contains(t, nodes[0].Service.Tags, "master")
|
||||
require.Contains(t, nodes[0].Service.Tags, "primary")
|
||||
require.Equal(t, nodes[0].Checks[0].Status, api.HealthPassing)
|
||||
}
|
||||
|
||||
|
|
|
@ -34,7 +34,7 @@ func TestInternal_NodeInfo(t *testing.T) {
|
|||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
|
@ -63,7 +63,7 @@ func TestInternal_NodeInfo(t *testing.T) {
|
|||
if nodes[0].Node != "foo" {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if !stringslice.Contains(nodes[0].Services[0].Tags, "master") {
|
||||
if !stringslice.Contains(nodes[0].Services[0].Tags, "primary") {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if nodes[0].Checks[0].Status != api.HealthPassing {
|
||||
|
@ -88,7 +88,7 @@ func TestInternal_NodeDump(t *testing.T) {
|
|||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
|
@ -108,7 +108,7 @@ func TestInternal_NodeDump(t *testing.T) {
|
|||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
Tags: []string{"slave"},
|
||||
Tags: []string{"replica"},
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
|
@ -138,7 +138,7 @@ func TestInternal_NodeDump(t *testing.T) {
|
|||
switch node.Node {
|
||||
case "foo":
|
||||
foundFoo = true
|
||||
if !stringslice.Contains(node.Services[0].Tags, "master") {
|
||||
if !stringslice.Contains(node.Services[0].Tags, "primary") {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if node.Checks[0].Status != api.HealthPassing {
|
||||
|
@ -147,7 +147,7 @@ func TestInternal_NodeDump(t *testing.T) {
|
|||
|
||||
case "bar":
|
||||
foundBar = true
|
||||
if !stringslice.Contains(node.Services[0].Tags, "slave") {
|
||||
if !stringslice.Contains(node.Services[0].Tags, "replica") {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
if node.Checks[0].Status != api.HealthWarning {
|
||||
|
@ -180,7 +180,7 @@ func TestInternal_NodeDump_Filter(t *testing.T) {
|
|||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
|
@ -198,7 +198,7 @@ func TestInternal_NodeDump_Filter(t *testing.T) {
|
|||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
Tags: []string{"slave"},
|
||||
Tags: []string{"replica"},
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
|
@ -212,7 +212,7 @@ func TestInternal_NodeDump_Filter(t *testing.T) {
|
|||
var out2 structs.IndexedNodeDump
|
||||
req := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{Filter: "master in Services.Tags"},
|
||||
QueryOptions: structs.QueryOptions{Filter: "primary in Services.Tags"},
|
||||
}
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &out2))
|
||||
|
||||
|
|
|
@ -116,7 +116,7 @@ func (s *Server) createCAProvider(conf *structs.CAConfiguration) (ca.Provider, e
|
|||
case structs.ConsulCAProvider:
|
||||
p = &ca.ConsulProvider{Delegate: &consulCADelegate{s}}
|
||||
case structs.VaultCAProvider:
|
||||
p = &ca.VaultProvider{}
|
||||
p = ca.NewVaultProvider()
|
||||
case structs.AWSCAProvider:
|
||||
p = &ca.AWSProvider{}
|
||||
default:
|
||||
|
@ -571,6 +571,16 @@ func (s *Server) stopConnectLeader() {
|
|||
s.leaderRoutineManager.Stop(intentionReplicationRoutineName)
|
||||
s.leaderRoutineManager.Stop(caRootPruningRoutineName)
|
||||
s.stopConnectLeaderEnterprise()
|
||||
|
||||
// If the provider implements NeedsStop, we call Stop to perform any shutdown actions.
|
||||
s.caProviderReconfigurationLock.Lock()
|
||||
defer s.caProviderReconfigurationLock.Unlock()
|
||||
provider, _ := s.getCAProvider()
|
||||
if provider != nil {
|
||||
if needsStop, ok := provider.(ca.NeedsStop); ok {
|
||||
needsStop.Stop()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) runCARootPruning(ctx context.Context) error {
|
||||
|
@ -686,7 +696,7 @@ func (s *Server) secondaryIntermediateCertRenewalWatch(ctx context.Context) erro
|
|||
return fmt.Errorf("error parsing active intermediate cert: %v", err)
|
||||
}
|
||||
|
||||
if lessThanHalfTimePassed(time.Now(), intermediateCert.NotBefore,
|
||||
if lessThanHalfTimePassed(time.Now(), intermediateCert.NotBefore.Add(ca.CertificateTimeDriftBuffer),
|
||||
intermediateCert.NotAfter) {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -100,8 +100,8 @@ func TestLeader_SecondaryCA_Initialize(t *testing.T) {
|
|||
err error
|
||||
)
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, caRoot = s1.getCAProvider()
|
||||
secondaryProvider, _ = s2.getCAProvider()
|
||||
_, caRoot = getCAProviderWithLock(s1)
|
||||
secondaryProvider, _ = getCAProviderWithLock(s2)
|
||||
intermediatePEM, err = secondaryProvider.ActiveIntermediate()
|
||||
require.NoError(r, err)
|
||||
|
||||
|
@ -165,7 +165,7 @@ func TestLeader_SecondaryCA_Initialize(t *testing.T) {
|
|||
|
||||
func waitForActiveCARoot(t *testing.T, srv *Server, expect *structs.CARoot) {
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, root := srv.getCAProvider()
|
||||
_, root := getCAProviderWithLock(srv)
|
||||
if root == nil {
|
||||
r.Fatal("no root")
|
||||
}
|
||||
|
@ -175,6 +175,12 @@ func waitForActiveCARoot(t *testing.T, srv *Server, expect *structs.CARoot) {
|
|||
})
|
||||
}
|
||||
|
||||
func getCAProviderWithLock(s *Server) (ca.Provider, *structs.CARoot) {
|
||||
s.caProviderReconfigurationLock.Lock()
|
||||
defer s.caProviderReconfigurationLock.Unlock()
|
||||
return s.getCAProvider()
|
||||
}
|
||||
|
||||
func TestLeader_SecondaryCA_IntermediateRenew(t *testing.T) {
|
||||
// no parallel execution because we change globals
|
||||
origInterval := structs.IntermediateCertRenewInterval
|
||||
|
@ -227,7 +233,8 @@ func TestLeader_SecondaryCA_IntermediateRenew(t *testing.T) {
|
|||
testrpc.WaitForLeader(t, s2.RPC, "dc2")
|
||||
|
||||
// Get the original intermediate
|
||||
secondaryProvider, _ := s2.getCAProvider()
|
||||
// TODO: Wait for intermediate instead of wait for leader
|
||||
secondaryProvider, _ := getCAProviderWithLock(s2)
|
||||
intermediatePEM, err := secondaryProvider.ActiveIntermediate()
|
||||
require.NoError(err)
|
||||
cert, err := connect.ParseCert(intermediatePEM)
|
||||
|
@ -253,7 +260,7 @@ func TestLeader_SecondaryCA_IntermediateRenew(t *testing.T) {
|
|||
// however, defaultQueryTime will be configurable and we con lower it
|
||||
// so that it returns for sure.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
secondaryProvider, _ := s2.getCAProvider()
|
||||
secondaryProvider, _ = getCAProviderWithLock(s2)
|
||||
intermediatePEM, err = secondaryProvider.ActiveIntermediate()
|
||||
r.Check(err)
|
||||
cert, err := connect.ParseCert(intermediatePEM)
|
||||
|
@ -266,9 +273,9 @@ func TestLeader_SecondaryCA_IntermediateRenew(t *testing.T) {
|
|||
})
|
||||
require.NoError(err)
|
||||
|
||||
// Get the new root from dc1 and validate a chain of:
|
||||
// Get the root from dc1 and validate a chain of:
|
||||
// dc2 leaf -> dc2 intermediate -> dc1 root
|
||||
_, caRoot := s1.getCAProvider()
|
||||
_, caRoot := getCAProviderWithLock(s1)
|
||||
|
||||
// Have dc2 sign a leaf cert and make sure the chain is correct.
|
||||
spiffeService := &connect.SpiffeIDService{
|
||||
|
@ -329,7 +336,7 @@ func TestLeader_SecondaryCA_IntermediateRefresh(t *testing.T) {
|
|||
testrpc.WaitForLeader(t, s2.RPC, "dc2")
|
||||
|
||||
// Get the original intermediate
|
||||
secondaryProvider, _ := s2.getCAProvider()
|
||||
secondaryProvider, _ := getCAProviderWithLock(s2)
|
||||
oldIntermediatePEM, err := secondaryProvider.ActiveIntermediate()
|
||||
require.NoError(err)
|
||||
require.NotEmpty(oldIntermediatePEM)
|
||||
|
@ -415,7 +422,7 @@ func TestLeader_SecondaryCA_IntermediateRefresh(t *testing.T) {
|
|||
|
||||
// Get the new root from dc1 and validate a chain of:
|
||||
// dc2 leaf -> dc2 intermediate -> dc1 root
|
||||
_, caRoot := s1.getCAProvider()
|
||||
_, caRoot := getCAProviderWithLock(s1)
|
||||
|
||||
// Have dc2 sign a leaf cert and make sure the chain is correct.
|
||||
spiffeService := &connect.SpiffeIDService{
|
||||
|
@ -524,7 +531,7 @@ func TestLeader_SecondaryCA_FixSigningKeyID_via_IntermediateRefresh(t *testing.T
|
|||
// the CA provider anyway.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
// verify that the root is now corrected
|
||||
provider, activeRoot := s2.getCAProvider()
|
||||
provider, activeRoot := getCAProviderWithLock(s2)
|
||||
require.NotNil(r, provider)
|
||||
require.NotNil(r, activeRoot)
|
||||
|
||||
|
@ -709,7 +716,7 @@ func TestLeader_SecondaryCA_UpgradeBeforePrimary(t *testing.T) {
|
|||
|
||||
// Wait for the secondary transition to happen and then verify the secondary DC
|
||||
// has both roots present.
|
||||
secondaryProvider, _ := s2.getCAProvider()
|
||||
secondaryProvider, _ := getCAProviderWithLock(s2)
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
state1 := s1.fsm.State()
|
||||
_, roots1, err := state1.CARoots(nil)
|
||||
|
@ -730,7 +737,7 @@ func TestLeader_SecondaryCA_UpgradeBeforePrimary(t *testing.T) {
|
|||
require.NotEmpty(r, inter, "should have valid intermediate")
|
||||
})
|
||||
|
||||
_, caRoot := s1.getCAProvider()
|
||||
_, caRoot := getCAProviderWithLock(s1)
|
||||
intermediatePEM, err := secondaryProvider.ActiveIntermediate()
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -1325,7 +1332,7 @@ func TestLeader_PersistIntermediateCAs(t *testing.T) {
|
|||
}
|
||||
|
||||
// Get the active root before leader change.
|
||||
_, root := s1.getCAProvider()
|
||||
_, root := getCAProviderWithLock(s1)
|
||||
require.Len(root.IntermediateCerts, 1)
|
||||
|
||||
// Force a leader change and make sure the root CA values are preserved.
|
||||
|
@ -1344,7 +1351,7 @@ func TestLeader_PersistIntermediateCAs(t *testing.T) {
|
|||
r.Fatal("no leader")
|
||||
}
|
||||
|
||||
_, newLeaderRoot := leader.getCAProvider()
|
||||
_, newLeaderRoot := getCAProviderWithLock(leader)
|
||||
if !reflect.DeepEqual(newLeaderRoot, root) {
|
||||
r.Fatalf("got %v, want %v", newLeaderRoot, root)
|
||||
}
|
||||
|
|
|
@ -10,12 +10,10 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/agent/token"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/consul/tlsutil"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
|
@ -1303,12 +1301,11 @@ func TestLeader_ConfigEntryBootstrap_Fail(t *testing.T) {
|
|||
Level: hclog.Debug,
|
||||
Output: io.MultiWriter(pw, testutil.NewLogBuffer(t)),
|
||||
})
|
||||
tlsConf, err := tlsutil.NewConfigurator(config.ToTLSUtilConfig(), logger)
|
||||
require.NoError(t, err)
|
||||
srv, err := NewServer(config,
|
||||
WithLogger(logger),
|
||||
WithTokenStore(new(token.Store)),
|
||||
WithTLSConfigurator(tlsConf))
|
||||
|
||||
deps := newDefaultDeps(t, config)
|
||||
deps.Logger = logger
|
||||
|
||||
srv, err := NewServer(config, deps)
|
||||
require.NoError(t, err)
|
||||
defer srv.Shutdown()
|
||||
|
||||
|
|
|
@ -8,50 +8,10 @@ import (
|
|||
"github.com/hashicorp/go-hclog"
|
||||
)
|
||||
|
||||
type consulOptions struct {
|
||||
logger hclog.InterceptLogger
|
||||
tlsConfigurator *tlsutil.Configurator
|
||||
connPool *pool.ConnPool
|
||||
tokens *token.Store
|
||||
router *router.Router
|
||||
}
|
||||
|
||||
type ConsulOption func(*consulOptions)
|
||||
|
||||
func WithLogger(logger hclog.InterceptLogger) ConsulOption {
|
||||
return func(opt *consulOptions) {
|
||||
opt.logger = logger
|
||||
}
|
||||
}
|
||||
|
||||
func WithTLSConfigurator(tlsConfigurator *tlsutil.Configurator) ConsulOption {
|
||||
return func(opt *consulOptions) {
|
||||
opt.tlsConfigurator = tlsConfigurator
|
||||
}
|
||||
}
|
||||
|
||||
func WithConnectionPool(connPool *pool.ConnPool) ConsulOption {
|
||||
return func(opt *consulOptions) {
|
||||
opt.connPool = connPool
|
||||
}
|
||||
}
|
||||
|
||||
func WithTokenStore(tokens *token.Store) ConsulOption {
|
||||
return func(opt *consulOptions) {
|
||||
opt.tokens = tokens
|
||||
}
|
||||
}
|
||||
|
||||
func WithRouter(router *router.Router) ConsulOption {
|
||||
return func(opt *consulOptions) {
|
||||
opt.router = router
|
||||
}
|
||||
}
|
||||
|
||||
func flattenConsulOptions(options []ConsulOption) consulOptions {
|
||||
var flat consulOptions
|
||||
for _, opt := range options {
|
||||
opt(&flat)
|
||||
}
|
||||
return flat
|
||||
type Deps struct {
|
||||
Logger hclog.InterceptLogger
|
||||
TLSConfigurator *tlsutil.Configurator
|
||||
Tokens *token.Store
|
||||
Router *router.Router
|
||||
ConnPool *pool.ConnPool
|
||||
}
|
||||
|
|
|
@ -70,14 +70,6 @@ const (
|
|||
raftState = "raft/"
|
||||
snapshotsRetained = 2
|
||||
|
||||
// serverRPCCache controls how long we keep an idle connection
|
||||
// open to a server
|
||||
serverRPCCache = 2 * time.Minute
|
||||
|
||||
// serverMaxStreams controls how many idle streams we keep
|
||||
// open to a server
|
||||
serverMaxStreams = 64
|
||||
|
||||
// raftLogCacheSize is the maximum number of logs to cache in-memory.
|
||||
// This is used to reduce disk I/O for the recently committed entries.
|
||||
raftLogCacheSize = 512
|
||||
|
@ -324,15 +316,8 @@ type connHandler interface {
|
|||
|
||||
// NewServer is used to construct a new Consul server from the configuration
|
||||
// and extra options, potentially returning an error.
|
||||
func NewServer(config *Config, options ...ConsulOption) (*Server, error) {
|
||||
flat := flattenConsulOptions(options)
|
||||
|
||||
logger := flat.logger
|
||||
tokens := flat.tokens
|
||||
tlsConfigurator := flat.tlsConfigurator
|
||||
connPool := flat.connPool
|
||||
rpcRouter := flat.router
|
||||
|
||||
func NewServer(config *Config, flat Deps) (*Server, error) {
|
||||
logger := flat.Logger
|
||||
if err := config.CheckProtocolVersion(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -342,9 +327,6 @@ func NewServer(config *Config, options ...ConsulOption) (*Server, error) {
|
|||
if err := config.CheckACL(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if logger == nil {
|
||||
return nil, fmt.Errorf("logger is required")
|
||||
}
|
||||
|
||||
// Check if TLS is enabled
|
||||
if config.CAFile != "" || config.CAPath != "" {
|
||||
|
@ -373,40 +355,24 @@ func NewServer(config *Config, options ...ConsulOption) (*Server, error) {
|
|||
// Create the shutdown channel - this is closed but never written to.
|
||||
shutdownCh := make(chan struct{})
|
||||
|
||||
if connPool == nil {
|
||||
connPool = &pool.ConnPool{
|
||||
Server: true,
|
||||
SrcAddr: config.RPCSrcAddr,
|
||||
Logger: logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true}),
|
||||
MaxTime: serverRPCCache,
|
||||
MaxStreams: serverMaxStreams,
|
||||
TLSConfigurator: tlsConfigurator,
|
||||
Datacenter: config.Datacenter,
|
||||
}
|
||||
}
|
||||
|
||||
serverLogger := logger.NamedIntercept(logging.ConsulServer)
|
||||
serverLogger := flat.Logger.NamedIntercept(logging.ConsulServer)
|
||||
loggers := newLoggerStore(serverLogger)
|
||||
|
||||
if rpcRouter == nil {
|
||||
rpcRouter = router.NewRouter(serverLogger, config.Datacenter, fmt.Sprintf("%s.%s", config.NodeName, config.Datacenter))
|
||||
}
|
||||
|
||||
// Create server.
|
||||
s := &Server{
|
||||
config: config,
|
||||
tokens: tokens,
|
||||
connPool: connPool,
|
||||
tokens: flat.Tokens,
|
||||
connPool: flat.ConnPool,
|
||||
eventChLAN: make(chan serf.Event, serfEventChSize),
|
||||
eventChWAN: make(chan serf.Event, serfEventChSize),
|
||||
logger: serverLogger,
|
||||
loggers: loggers,
|
||||
leaveCh: make(chan struct{}),
|
||||
reconcileCh: make(chan serf.Member, reconcileChSize),
|
||||
router: rpcRouter,
|
||||
router: flat.Router,
|
||||
rpcServer: rpc.NewServer(),
|
||||
insecureRPCServer: rpc.NewServer(),
|
||||
tlsConfigurator: tlsConfigurator,
|
||||
tlsConfigurator: flat.TLSConfigurator,
|
||||
reassertLeaderCh: make(chan chan error),
|
||||
segmentLAN: make(map[string]*serf.Serf, len(config.Segments)),
|
||||
sessionTimers: NewSessionTimers(),
|
||||
|
@ -455,11 +421,11 @@ func NewServer(config *Config, options ...ConsulOption) (*Server, error) {
|
|||
srv: s,
|
||||
gatewayLocator: s.gatewayLocator,
|
||||
},
|
||||
Logger: s.logger,
|
||||
Logger: s.loggers.Named(logging.Replication).Named(logging.FederationState),
|
||||
},
|
||||
Rate: s.config.FederationStateReplicationRate,
|
||||
Burst: s.config.FederationStateReplicationBurst,
|
||||
Logger: logger,
|
||||
Logger: s.logger,
|
||||
SuppressErrorLog: isErrFederationStatesNotSupported,
|
||||
}
|
||||
s.federationStateReplicator, err = NewReplicator(&federationStateReplicatorConfig)
|
||||
|
|
|
@ -30,7 +30,6 @@ import (
|
|||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/consul/tlsutil"
|
||||
"github.com/hashicorp/consul/types"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
|
@ -292,19 +291,7 @@ func newServer(t *testing.T, c *Config) (*Server, error) {
|
|||
}
|
||||
}
|
||||
|
||||
logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
|
||||
Name: c.NodeName,
|
||||
Level: hclog.Debug,
|
||||
Output: testutil.NewLogBuffer(t),
|
||||
})
|
||||
tlsConf, err := tlsutil.NewConfigurator(c.ToTLSUtilConfig(), logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srv, err := NewServer(c,
|
||||
WithLogger(logger),
|
||||
WithTokenStore(new(token.Store)),
|
||||
WithTLSConfigurator(tlsConf))
|
||||
srv, err := NewServer(c, newDefaultDeps(t, c))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -1488,16 +1475,11 @@ func TestServer_CALogging(t *testing.T) {
|
|||
var buf bytes.Buffer
|
||||
logger := testutil.LoggerWithOutput(t, &buf)
|
||||
|
||||
c, err := tlsutil.NewConfigurator(conf1.ToTLSUtilConfig(), logger)
|
||||
require.NoError(t, err)
|
||||
deps := newDefaultDeps(t, conf1)
|
||||
deps.Logger = logger
|
||||
|
||||
s1, err := NewServer(conf1,
|
||||
WithLogger(logger),
|
||||
WithTokenStore(new(token.Store)),
|
||||
WithTLSConfigurator(c))
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
s1, err := NewServer(conf1, deps)
|
||||
require.NoError(t, err)
|
||||
defer s1.Shutdown()
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
|
|
|
@ -199,7 +199,7 @@ func TestStateStore_EnsureRegistration(t *testing.T) {
|
|||
Address: "1.1.1.1",
|
||||
Port: 8080,
|
||||
Meta: map[string]string{strings.Repeat("a", 129): "somevalue"},
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
}
|
||||
if err := s.EnsureRegistration(9, req); err == nil {
|
||||
t.Fatalf("Service should not have been registered since Meta is invalid")
|
||||
|
@ -211,7 +211,7 @@ func TestStateStore_EnsureRegistration(t *testing.T) {
|
|||
Service: "redis",
|
||||
Address: "1.1.1.1",
|
||||
Port: 8080,
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Weights: &structs.Weights{Passing: 1, Warning: 1},
|
||||
}
|
||||
if err := s.EnsureRegistration(2, req); err != nil {
|
||||
|
@ -226,7 +226,7 @@ func TestStateStore_EnsureRegistration(t *testing.T) {
|
|||
Service: "redis",
|
||||
Address: "1.1.1.1",
|
||||
Port: 8080,
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Weights: &structs.Weights{Passing: 1, Warning: 1},
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 2, ModifyIndex: 2},
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
|
@ -321,7 +321,7 @@ func TestStateStore_EnsureRegistration(t *testing.T) {
|
|||
Status: "critical",
|
||||
ServiceID: "redis1",
|
||||
ServiceName: "redis",
|
||||
ServiceTags: []string{"master"},
|
||||
ServiceTags: []string{"primary"},
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 4, ModifyIndex: 4},
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
|
@ -543,7 +543,7 @@ func deprecatedEnsureNodeWithoutIDCanRegister(t *testing.T, s *Store, nodeName s
|
|||
Node: nodeName,
|
||||
Address: "1.1.1.9",
|
||||
Meta: map[string]string{
|
||||
"version": string(txIdx),
|
||||
"version": fmt.Sprint(txIdx),
|
||||
},
|
||||
}
|
||||
if err := s.EnsureNode(txIdx, in); err != nil {
|
||||
|
@ -1568,7 +1568,7 @@ func TestStateStore_Services(t *testing.T) {
|
|||
ns1 := &structs.NodeService{
|
||||
ID: "service1",
|
||||
Service: "redis",
|
||||
Tags: []string{"prod", "master"},
|
||||
Tags: []string{"prod", "primary"},
|
||||
Address: "1.1.1.1",
|
||||
Port: 1111,
|
||||
}
|
||||
|
@ -1580,7 +1580,7 @@ func TestStateStore_Services(t *testing.T) {
|
|||
ns2 := &structs.NodeService{
|
||||
ID: "service3",
|
||||
Service: "redis",
|
||||
Tags: []string{"prod", "slave"},
|
||||
Tags: []string{"prod", "replica"},
|
||||
Address: "1.1.1.1",
|
||||
Port: 1111,
|
||||
}
|
||||
|
@ -1604,7 +1604,7 @@ func TestStateStore_Services(t *testing.T) {
|
|||
// Verify the result. We sort the lists since the order is
|
||||
// non-deterministic (it's built using a map internally).
|
||||
expected := structs.Services{
|
||||
"redis": []string{"prod", "master", "slave"},
|
||||
"redis": []string{"prod", "primary", "replica"},
|
||||
"dogs": []string{},
|
||||
}
|
||||
sort.Strings(expected["redis"])
|
||||
|
@ -1648,7 +1648,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
|
|||
ns1 := &structs.NodeService{
|
||||
ID: "service1",
|
||||
Service: "redis",
|
||||
Tags: []string{"prod", "master"},
|
||||
Tags: []string{"prod", "primary"},
|
||||
Address: "1.1.1.1",
|
||||
Port: 1111,
|
||||
}
|
||||
|
@ -1658,7 +1658,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
|
|||
ns2 := &structs.NodeService{
|
||||
ID: "service1",
|
||||
Service: "redis",
|
||||
Tags: []string{"prod", "slave"},
|
||||
Tags: []string{"prod", "replica"},
|
||||
Address: "1.1.1.1",
|
||||
Port: 1111,
|
||||
}
|
||||
|
@ -1677,7 +1677,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
|
|||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
expected := structs.Services{
|
||||
"redis": []string{"master", "prod"},
|
||||
"redis": []string{"primary", "prod"},
|
||||
}
|
||||
sort.Strings(res["redis"])
|
||||
require.Equal(t, expected, res)
|
||||
|
@ -1689,7 +1689,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
|
|||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
expected := structs.Services{
|
||||
"redis": []string{"master", "prod", "slave"},
|
||||
"redis": []string{"primary", "prod", "replica"},
|
||||
}
|
||||
sort.Strings(res["redis"])
|
||||
require.Equal(t, expected, res)
|
||||
|
@ -1710,7 +1710,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
|
|||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
expected := structs.Services{
|
||||
"redis": []string{"master", "prod"},
|
||||
"redis": []string{"primary", "prod"},
|
||||
}
|
||||
sort.Strings(res["redis"])
|
||||
require.Equal(t, expected, res)
|
||||
|
@ -1788,13 +1788,13 @@ func TestStateStore_ServiceNodes(t *testing.T) {
|
|||
if err := s.EnsureService(13, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := s.EnsureService(14, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"master"}, Address: "", Port: 8000}); err != nil {
|
||||
if err := s.EnsureService(14, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"primary"}, Address: "", Port: 8000}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := s.EnsureService(15, "bar", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"slave"}, Address: "", Port: 8000}); err != nil {
|
||||
if err := s.EnsureService(15, "bar", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8000}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := s.EnsureService(16, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"slave"}, Address: "", Port: 8001}); err != nil {
|
||||
if err := s.EnsureService(16, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8001}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !watchFired(ws) {
|
||||
|
@ -1822,7 +1822,7 @@ func TestStateStore_ServiceNodes(t *testing.T) {
|
|||
if nodes[0].ServiceID != "db" {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if !stringslice.Contains(nodes[0].ServiceTags, "slave") {
|
||||
if !stringslice.Contains(nodes[0].ServiceTags, "replica") {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if nodes[0].ServicePort != 8000 {
|
||||
|
@ -1837,7 +1837,7 @@ func TestStateStore_ServiceNodes(t *testing.T) {
|
|||
if nodes[1].ServiceID != "db2" {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if !stringslice.Contains(nodes[1].ServiceTags, "slave") {
|
||||
if !stringslice.Contains(nodes[1].ServiceTags, "replica") {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if nodes[1].ServicePort != 8001 {
|
||||
|
@ -1852,7 +1852,7 @@ func TestStateStore_ServiceNodes(t *testing.T) {
|
|||
if nodes[2].ServiceID != "db" {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if !stringslice.Contains(nodes[2].ServiceTags, "master") {
|
||||
if !stringslice.Contains(nodes[2].ServiceTags, "primary") {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if nodes[2].ServicePort != 8000 {
|
||||
|
@ -1907,7 +1907,7 @@ func TestStateStore_ServiceTagNodes(t *testing.T) {
|
|||
|
||||
// Listing with no results returns an empty list.
|
||||
ws := memdb.NewWatchSet()
|
||||
idx, nodes, err := s.ServiceTagNodes(ws, "db", []string{"master"}, nil)
|
||||
idx, nodes, err := s.ServiceTagNodes(ws, "db", []string{"primary"}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
@ -1925,13 +1925,13 @@ func TestStateStore_ServiceTagNodes(t *testing.T) {
|
|||
if err := s.EnsureNode(16, &structs.Node{Node: "bar", Address: "127.0.0.2"}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := s.EnsureService(17, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"master"}, Address: "", Port: 8000}); err != nil {
|
||||
if err := s.EnsureService(17, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"primary"}, Address: "", Port: 8000}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := s.EnsureService(18, "foo", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"slave"}, Address: "", Port: 8001}); err != nil {
|
||||
if err := s.EnsureService(18, "foo", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8001}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := s.EnsureService(19, "bar", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"slave"}, Address: "", Port: 8000}); err != nil {
|
||||
if err := s.EnsureService(19, "bar", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8000}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !watchFired(ws) {
|
||||
|
@ -1940,7 +1940,7 @@ func TestStateStore_ServiceTagNodes(t *testing.T) {
|
|||
|
||||
// Read everything back.
|
||||
ws = memdb.NewWatchSet()
|
||||
idx, nodes, err = s.ServiceTagNodes(ws, "db", []string{"master"}, nil)
|
||||
idx, nodes, err = s.ServiceTagNodes(ws, "db", []string{"primary"}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
@ -1956,7 +1956,7 @@ func TestStateStore_ServiceTagNodes(t *testing.T) {
|
|||
if nodes[0].Address != "127.0.0.1" {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if !stringslice.Contains(nodes[0].ServiceTags, "master") {
|
||||
if !stringslice.Contains(nodes[0].ServiceTags, "primary") {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if nodes[0].ServicePort != 8000 {
|
||||
|
@ -1969,7 +1969,7 @@ func TestStateStore_ServiceTagNodes(t *testing.T) {
|
|||
t.Fatalf("bad")
|
||||
}
|
||||
|
||||
// But removing a node with the "db:master" service should fire the watch.
|
||||
// But removing a node with the "db:primary" service should fire the watch.
|
||||
if err := s.DeleteNode(21, "foo"); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
@ -1989,25 +1989,25 @@ func TestStateStore_ServiceTagNodes_MultipleTags(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := s.EnsureService(17, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"master", "v2"}, Address: "", Port: 8000}); err != nil {
|
||||
if err := s.EnsureService(17, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"primary", "v2"}, Address: "", Port: 8000}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := s.EnsureService(18, "foo", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"slave", "v2", "dev"}, Address: "", Port: 8001}); err != nil {
|
||||
if err := s.EnsureService(18, "foo", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica", "v2", "dev"}, Address: "", Port: 8001}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := s.EnsureService(19, "bar", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"slave", "v2"}, Address: "", Port: 8000}); err != nil {
|
||||
if err := s.EnsureService(19, "bar", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"replica", "v2"}, Address: "", Port: 8000}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
idx, nodes, err := s.ServiceTagNodes(nil, "db", []string{"master"}, nil)
|
||||
idx, nodes, err := s.ServiceTagNodes(nil, "db", []string{"primary"}, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int(idx), 19)
|
||||
require.Len(t, nodes, 1)
|
||||
require.Equal(t, nodes[0].Node, "foo")
|
||||
require.Equal(t, nodes[0].Address, "127.0.0.1")
|
||||
require.Contains(t, nodes[0].ServiceTags, "master")
|
||||
require.Contains(t, nodes[0].ServiceTags, "primary")
|
||||
require.Equal(t, nodes[0].ServicePort, 8000)
|
||||
|
||||
idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2"}, nil)
|
||||
|
@ -2016,14 +2016,14 @@ func TestStateStore_ServiceTagNodes_MultipleTags(t *testing.T) {
|
|||
require.Len(t, nodes, 3)
|
||||
|
||||
// Test filtering on multiple tags
|
||||
idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2", "slave"}, nil)
|
||||
idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2", "replica"}, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int(idx), 19)
|
||||
require.Len(t, nodes, 2)
|
||||
require.Contains(t, nodes[0].ServiceTags, "v2")
|
||||
require.Contains(t, nodes[0].ServiceTags, "slave")
|
||||
require.Contains(t, nodes[0].ServiceTags, "replica")
|
||||
require.Contains(t, nodes[1].ServiceTags, "v2")
|
||||
require.Contains(t, nodes[1].ServiceTags, "slave")
|
||||
require.Contains(t, nodes[1].ServiceTags, "replica")
|
||||
|
||||
idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"dev"}, nil)
|
||||
require.NoError(t, err)
|
||||
|
@ -2110,7 +2110,7 @@ func TestStateStore_ConnectServiceNodes(t *testing.T) {
|
|||
assert.Nil(s.EnsureService(14, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
|
||||
assert.Nil(s.EnsureService(15, "bar", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
|
||||
assert.Nil(s.EnsureService(16, "bar", &structs.NodeService{ID: "native-db", Service: "db", Connect: structs.ServiceConnect{Native: true}}))
|
||||
assert.Nil(s.EnsureService(17, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"slave"}, Address: "", Port: 8001}))
|
||||
assert.Nil(s.EnsureService(17, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8001}))
|
||||
assert.True(watchFired(ws))
|
||||
|
||||
// Read everything back.
|
||||
|
@ -3562,7 +3562,7 @@ func TestStateStore_CheckConnectServiceNodes(t *testing.T) {
|
|||
assert.Nil(s.EnsureService(13, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}))
|
||||
assert.Nil(s.EnsureService(14, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
|
||||
assert.Nil(s.EnsureService(15, "bar", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
|
||||
assert.Nil(s.EnsureService(16, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"slave"}, Address: "", Port: 8001}))
|
||||
assert.Nil(s.EnsureService(16, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8001}))
|
||||
assert.True(watchFired(ws))
|
||||
|
||||
// Register node checks
|
||||
|
@ -3718,7 +3718,7 @@ func BenchmarkCheckServiceNodes(b *testing.B) {
|
|||
if err := s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
|
||||
b.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := s.EnsureService(2, "foo", &structs.NodeService{ID: "db1", Service: "db", Tags: []string{"master"}, Address: "", Port: 8000}); err != nil {
|
||||
if err := s.EnsureService(2, "foo", &structs.NodeService{ID: "db1", Service: "db", Tags: []string{"primary"}, Address: "", Port: 8000}); err != nil {
|
||||
b.Fatalf("err: %v", err)
|
||||
}
|
||||
check := &structs.HealthCheck{
|
||||
|
@ -3753,7 +3753,7 @@ func TestStateStore_CheckServiceTagNodes(t *testing.T) {
|
|||
if err := s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := s.EnsureService(2, "foo", &structs.NodeService{ID: "db1", Service: "db", Tags: []string{"master"}, Address: "", Port: 8000}); err != nil {
|
||||
if err := s.EnsureService(2, "foo", &structs.NodeService{ID: "db1", Service: "db", Tags: []string{"primary"}, Address: "", Port: 8000}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
check := &structs.HealthCheck{
|
||||
|
@ -3777,7 +3777,7 @@ func TestStateStore_CheckServiceTagNodes(t *testing.T) {
|
|||
}
|
||||
|
||||
ws := memdb.NewWatchSet()
|
||||
idx, nodes, err := s.CheckServiceTagNodes(ws, "db", []string{"master"}, nil)
|
||||
idx, nodes, err := s.CheckServiceTagNodes(ws, "db", []string{"primary"}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
|
|
@ -65,7 +65,7 @@ func testRegisterNode(t *testing.T, s *Store, idx uint64, nodeID string) {
|
|||
// testRegisterNodeWithChange registers a node and ensures it gets different from previous registration
|
||||
func testRegisterNodeWithChange(t *testing.T, s *Store, idx uint64, nodeID string) {
|
||||
testRegisterNodeWithMeta(t, s, idx, nodeID, map[string]string{
|
||||
"version": string(idx),
|
||||
"version": fmt.Sprint(idx),
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -92,7 +92,7 @@ func testRegisterNodeWithMeta(t *testing.T, s *Store, idx uint64, nodeID string,
|
|||
func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool) {
|
||||
meta := make(map[string]string)
|
||||
if modifyAccordingIndex {
|
||||
meta["version"] = string(idx)
|
||||
meta["version"] = fmt.Sprint(idx)
|
||||
}
|
||||
svc := &structs.NodeService{
|
||||
ID: serviceID,
|
||||
|
|
|
@ -11,7 +11,7 @@ import (
|
|||
|
||||
// checkCoordinateDisabled will return a standard response if coordinates are
|
||||
// disabled. This returns true if they are disabled and we should not continue.
|
||||
func (s *HTTPServer) checkCoordinateDisabled(resp http.ResponseWriter, req *http.Request) bool {
|
||||
func (s *HTTPHandlers) checkCoordinateDisabled(resp http.ResponseWriter, req *http.Request) bool {
|
||||
if !s.agent.config.DisableCoordinates {
|
||||
return false
|
||||
}
|
||||
|
@ -44,7 +44,7 @@ func (s *sorter) Less(i, j int) bool {
|
|||
|
||||
// CoordinateDatacenters returns the WAN nodes in each datacenter, along with
|
||||
// raw network coordinates.
|
||||
func (s *HTTPServer) CoordinateDatacenters(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CoordinateDatacenters(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkCoordinateDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -73,7 +73,7 @@ func (s *HTTPServer) CoordinateDatacenters(resp http.ResponseWriter, req *http.R
|
|||
|
||||
// CoordinateNodes returns the LAN nodes in the given datacenter, along with
|
||||
// raw network coordinates.
|
||||
func (s *HTTPServer) CoordinateNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CoordinateNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkCoordinateDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -95,7 +95,7 @@ func (s *HTTPServer) CoordinateNodes(resp http.ResponseWriter, req *http.Request
|
|||
|
||||
// CoordinateNode returns the LAN node in the given datacenter, along with
|
||||
// raw network coordinates.
|
||||
func (s *HTTPServer) CoordinateNode(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CoordinateNode(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkCoordinateDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -144,7 +144,7 @@ func filterCoordinates(req *http.Request, in structs.Coordinates) structs.Coordi
|
|||
}
|
||||
|
||||
// CoordinateUpdate inserts or updates the LAN coordinate of a node.
|
||||
func (s *HTTPServer) CoordinateUpdate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) CoordinateUpdate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if s.checkCoordinateDisabled(resp, req) {
|
||||
return nil, nil
|
||||
}
|
||||
|
|
|
@ -12,7 +12,7 @@ import (
|
|||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
func (s *HTTPServer) DiscoveryChainRead(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) DiscoveryChainRead(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.DiscoveryChainRequest
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
|
|
@ -799,7 +799,7 @@ func TestDNS_EDNS0_ECS(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -1013,7 +1013,7 @@ func TestDNS_ServiceReverseLookup(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
Address: "127.0.0.2",
|
||||
},
|
||||
|
@ -1061,7 +1061,7 @@ func TestDNS_ServiceReverseLookup_IPV6(t *testing.T) {
|
|||
Address: "2001:db8::1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
Address: "2001:db8::ff00:42:8329",
|
||||
},
|
||||
|
@ -1111,7 +1111,7 @@ func TestDNS_ServiceReverseLookup_CustomDomain(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
Address: "127.0.0.2",
|
||||
},
|
||||
|
@ -1192,7 +1192,7 @@ func TestDNS_ServiceReverseLookupNodeAddress(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
Address: "127.0.0.1",
|
||||
},
|
||||
|
@ -1427,7 +1427,7 @@ func TestDNS_ServiceLookup(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -2164,7 +2164,7 @@ func TestDNS_ServiceLookup_ServiceAddress_A(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Address: "127.0.0.2",
|
||||
Port: 12345,
|
||||
},
|
||||
|
@ -2267,7 +2267,7 @@ func TestDNS_ServiceLookup_ServiceAddress_SRV(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Address: "www.google.com",
|
||||
Port: 12345,
|
||||
},
|
||||
|
@ -2364,7 +2364,7 @@ func TestDNS_ServiceLookup_ServiceAddressIPV6(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Address: "2607:20:4005:808::200e",
|
||||
Port: 12345,
|
||||
},
|
||||
|
@ -2855,7 +2855,7 @@ func TestDNS_CaseInsensitiveServiceLookup(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "Db",
|
||||
Tags: []string{"Master"},
|
||||
Tags: []string{"Primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -2886,9 +2886,9 @@ func TestDNS_CaseInsensitiveServiceLookup(t *testing.T) {
|
|||
|
||||
// Try some variations to make sure case doesn't matter.
|
||||
questions := []string{
|
||||
"master.db.service.consul.",
|
||||
"mASTER.dB.service.consul.",
|
||||
"MASTER.dB.service.consul.",
|
||||
"primary.db.service.consul.",
|
||||
"pRIMARY.dB.service.consul.",
|
||||
"PRIMARY.dB.service.consul.",
|
||||
"db.service.consul.",
|
||||
"DB.service.consul.",
|
||||
"Db.service.consul.",
|
||||
|
@ -2925,7 +2925,7 @@ func TestDNS_ServiceLookup_TagPeriod(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"v1.master"},
|
||||
Tags: []string{"v1.primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -2936,7 +2936,7 @@ func TestDNS_ServiceLookup_TagPeriod(t *testing.T) {
|
|||
}
|
||||
|
||||
m1 := new(dns.Msg)
|
||||
m1.SetQuestion("v1.master2.db.service.consul.", dns.TypeSRV)
|
||||
m1.SetQuestion("v1.primary2.db.service.consul.", dns.TypeSRV)
|
||||
|
||||
c1 := new(dns.Client)
|
||||
in, _, err := c1.Exchange(m1, a.DNSAddr())
|
||||
|
@ -2949,7 +2949,7 @@ func TestDNS_ServiceLookup_TagPeriod(t *testing.T) {
|
|||
}
|
||||
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion("v1.master.db.service.consul.", dns.TypeSRV)
|
||||
m.SetQuestion("v1.primary.db.service.consul.", dns.TypeSRV)
|
||||
|
||||
c := new(dns.Client)
|
||||
in, _, err = c.Exchange(m, a.DNSAddr())
|
||||
|
@ -3320,7 +3320,7 @@ func TestDNS_ServiceLookup_Dedup(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -3337,7 +3337,7 @@ func TestDNS_ServiceLookup_Dedup(t *testing.T) {
|
|||
Service: &structs.NodeService{
|
||||
ID: "db2",
|
||||
Service: "db",
|
||||
Tags: []string{"slave"},
|
||||
Tags: []string{"replica"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -3352,7 +3352,7 @@ func TestDNS_ServiceLookup_Dedup(t *testing.T) {
|
|||
Service: &structs.NodeService{
|
||||
ID: "db3",
|
||||
Service: "db",
|
||||
Tags: []string{"slave"},
|
||||
Tags: []string{"replica"},
|
||||
Port: 12346,
|
||||
},
|
||||
}
|
||||
|
@ -3423,7 +3423,7 @@ func TestDNS_ServiceLookup_Dedup_SRV(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -3440,7 +3440,7 @@ func TestDNS_ServiceLookup_Dedup_SRV(t *testing.T) {
|
|||
Service: &structs.NodeService{
|
||||
ID: "db2",
|
||||
Service: "db",
|
||||
Tags: []string{"slave"},
|
||||
Tags: []string{"replica"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -3455,7 +3455,7 @@ func TestDNS_ServiceLookup_Dedup_SRV(t *testing.T) {
|
|||
Service: &structs.NodeService{
|
||||
ID: "db3",
|
||||
Service: "db",
|
||||
Tags: []string{"slave"},
|
||||
Tags: []string{"replica"},
|
||||
Port: 12346,
|
||||
},
|
||||
}
|
||||
|
@ -3671,7 +3671,7 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
|
@ -3692,7 +3692,7 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
|
|||
Address: "127.0.0.2",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
|
@ -3711,7 +3711,7 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
|
|||
Address: "127.0.0.2",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
|
@ -3731,7 +3731,7 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
|
|||
Address: "127.0.0.3",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -3745,7 +3745,7 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
|
|||
Address: "127.0.0.4",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
|
@ -3827,7 +3827,7 @@ func TestDNS_ServiceLookup_OnlyFailing(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
|
@ -3848,7 +3848,7 @@ func TestDNS_ServiceLookup_OnlyFailing(t *testing.T) {
|
|||
Address: "127.0.0.2",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
|
@ -3867,7 +3867,7 @@ func TestDNS_ServiceLookup_OnlyFailing(t *testing.T) {
|
|||
Address: "127.0.0.2",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
|
@ -3944,7 +3944,7 @@ func TestDNS_ServiceLookup_OnlyPassing(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
|
@ -3966,7 +3966,7 @@ func TestDNS_ServiceLookup_OnlyPassing(t *testing.T) {
|
|||
Address: "127.0.0.2",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
|
@ -3987,7 +3987,7 @@ func TestDNS_ServiceLookup_OnlyPassing(t *testing.T) {
|
|||
Address: "127.0.0.3",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
|
@ -4394,7 +4394,7 @@ func TestDNS_ServiceLookup_LargeResponses(t *testing.T) {
|
|||
Address: fmt.Sprintf("127.0.0.%d", i+1),
|
||||
Service: &structs.NodeService{
|
||||
Service: longServiceName,
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -4414,7 +4414,7 @@ func TestDNS_ServiceLookup_LargeResponses(t *testing.T) {
|
|||
Name: longServiceName,
|
||||
Service: structs.ServiceQuery{
|
||||
Service: longServiceName,
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -4426,7 +4426,7 @@ func TestDNS_ServiceLookup_LargeResponses(t *testing.T) {
|
|||
|
||||
// Look up the service directly and via prepared query.
|
||||
questions := []string{
|
||||
"_" + longServiceName + "._master.service.consul.",
|
||||
"_" + longServiceName + "._primary.service.consul.",
|
||||
longServiceName + ".query.consul.",
|
||||
}
|
||||
for _, question := range questions {
|
||||
|
@ -5140,7 +5140,7 @@ func TestDNS_ServiceLookup_TTL(t *testing.T) {
|
|||
Address: address,
|
||||
Service: &structs.NodeService{
|
||||
Service: service,
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345 + idx,
|
||||
},
|
||||
}
|
||||
|
@ -5219,7 +5219,7 @@ func TestDNS_PreparedQuery_TTL(t *testing.T) {
|
|||
Address: address,
|
||||
Service: &structs.NodeService{
|
||||
Service: service,
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345 + idx,
|
||||
},
|
||||
}
|
||||
|
@ -5438,7 +5438,7 @@ func TestDNS_ServiceLookup_SRV_RFC(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -5449,10 +5449,10 @@ func TestDNS_ServiceLookup_SRV_RFC(t *testing.T) {
|
|||
}
|
||||
|
||||
questions := []string{
|
||||
"_db._master.service.dc1.consul.",
|
||||
"_db._master.service.consul.",
|
||||
"_db._master.dc1.consul.",
|
||||
"_db._master.consul.",
|
||||
"_db._primary.service.dc1.consul.",
|
||||
"_db._primary.service.consul.",
|
||||
"_db._primary.dc1.consul.",
|
||||
"_db._primary.consul.",
|
||||
}
|
||||
|
||||
for _, question := range questions {
|
||||
|
@ -5513,7 +5513,7 @@ func TestDNS_ServiceLookup_SRV_RFC_TCP_Default(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -5641,7 +5641,7 @@ func TestDNS_ServiceLookup_MetaTXT(t *testing.T) {
|
|||
},
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -5688,7 +5688,7 @@ func TestDNS_ServiceLookup_SuppressTXT(t *testing.T) {
|
|||
},
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -6035,7 +6035,7 @@ func TestDNS_AltDomains_Service(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
@ -6789,7 +6789,7 @@ func TestDNS_Compression_Query(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Tags: []string{"master"},
|
||||
Tags: []string{"primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
|
|
@ -14,7 +14,7 @@ import (
|
|||
)
|
||||
|
||||
// EventFire is used to fire a new event
|
||||
func (s *HTTPServer) EventFire(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) EventFire(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
|
||||
// Get the datacenter
|
||||
var dc string
|
||||
|
@ -68,7 +68,7 @@ func (s *HTTPServer) EventFire(resp http.ResponseWriter, req *http.Request) (int
|
|||
}
|
||||
|
||||
// EventList is used to retrieve the recent list of events
|
||||
func (s *HTTPServer) EventList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) EventList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Parse the query options, since we simulate a blocking query
|
||||
var b structs.QueryOptions
|
||||
if parseWait(resp, req, &b) {
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
)
|
||||
|
||||
// GET /v1/internal/federation-state/<datacenter>
|
||||
func (s *HTTPServer) FederationStateGet(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) FederationStateGet(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
datacenterName := strings.TrimPrefix(req.URL.Path, "/v1/internal/federation-state/")
|
||||
if datacenterName == "" {
|
||||
return nil, BadRequestError{Reason: "Missing datacenter name"}
|
||||
|
@ -36,7 +36,7 @@ func (s *HTTPServer) FederationStateGet(resp http.ResponseWriter, req *http.Requ
|
|||
}
|
||||
|
||||
// GET /v1/internal/federation-states
|
||||
func (s *HTTPServer) FederationStateList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) FederationStateList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.DCSpecificRequest
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
@ -61,7 +61,7 @@ func (s *HTTPServer) FederationStateList(resp http.ResponseWriter, req *http.Req
|
|||
}
|
||||
|
||||
// GET /v1/internal/federation-states/mesh-gateways
|
||||
func (s *HTTPServer) FederationStateListMeshGateways(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) FederationStateListMeshGateways(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.DCSpecificRequest
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
|
|
@ -0,0 +1,121 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/hashicorp/consul/agent/metadata"
|
||||
"github.com/hashicorp/consul/agent/pool"
|
||||
)
|
||||
|
||||
// ClientConnPool creates and stores a connection for each datacenter.
|
||||
type ClientConnPool struct {
|
||||
dialer dialer
|
||||
servers ServerLocator
|
||||
conns map[string]*grpc.ClientConn
|
||||
connsLock sync.Mutex
|
||||
}
|
||||
|
||||
type ServerLocator interface {
|
||||
// ServerForAddr is used to look up server metadata from an address.
|
||||
ServerForAddr(addr string) (*metadata.Server, error)
|
||||
// Scheme returns the url scheme to use to dial the server. This is primarily
|
||||
// needed for testing multiple agents in parallel, because gRPC requires the
|
||||
// resolver to be registered globally.
|
||||
Scheme() string
|
||||
}
|
||||
|
||||
// TLSWrapper wraps a non-TLS connection and returns a connection with TLS
|
||||
// enabled.
|
||||
type TLSWrapper func(dc string, conn net.Conn) (net.Conn, error)
|
||||
|
||||
type dialer func(context.Context, string) (net.Conn, error)
|
||||
|
||||
func NewClientConnPool(servers ServerLocator, tls TLSWrapper) *ClientConnPool {
|
||||
return &ClientConnPool{
|
||||
dialer: newDialer(servers, tls),
|
||||
servers: servers,
|
||||
conns: make(map[string]*grpc.ClientConn),
|
||||
}
|
||||
}
|
||||
|
||||
// ClientConn returns a grpc.ClientConn for the datacenter. If there are no
|
||||
// existing connections in the pool, a new one will be created, stored in the pool,
|
||||
// then returned.
|
||||
func (c *ClientConnPool) ClientConn(datacenter string) (*grpc.ClientConn, error) {
|
||||
c.connsLock.Lock()
|
||||
defer c.connsLock.Unlock()
|
||||
|
||||
if conn, ok := c.conns[datacenter]; ok {
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
conn, err := grpc.Dial(
|
||||
fmt.Sprintf("%s:///server.%s", c.servers.Scheme(), datacenter),
|
||||
// use WithInsecure mode here because we handle the TLS wrapping in the
|
||||
// custom dialer based on logic around whether the server has TLS enabled.
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithContextDialer(c.dialer),
|
||||
grpc.WithDisableRetry(),
|
||||
// TODO: previously this statsHandler was shared with the Handler. Is that necessary?
|
||||
grpc.WithStatsHandler(newStatsHandler()),
|
||||
// nolint:staticcheck // there is no other supported alternative to WithBalancerName
|
||||
grpc.WithBalancerName("pick_first"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.conns[datacenter] = conn
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// newDialer returns a gRPC dialer function that conditionally wraps the connection
|
||||
// with TLS based on the Server.useTLS value.
|
||||
func newDialer(servers ServerLocator, wrapper TLSWrapper) func(context.Context, string) (net.Conn, error) {
|
||||
return func(ctx context.Context, addr string) (net.Conn, error) {
|
||||
d := net.Dialer{}
|
||||
conn, err := d.DialContext(ctx, "tcp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
server, err := servers.ServerForAddr(addr)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if server.UseTLS {
|
||||
if wrapper == nil {
|
||||
conn.Close()
|
||||
return nil, fmt.Errorf("TLS enabled but got nil TLS wrapper")
|
||||
}
|
||||
|
||||
// Switch the connection into TLS mode
|
||||
if _, err := conn.Write([]byte{byte(pool.RPCTLS)}); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Wrap the connection in a TLS client
|
||||
tlsConn, err := wrapper(server.Datacenter, conn)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
conn = tlsConn
|
||||
}
|
||||
|
||||
_, err = conn.Write([]byte{pool.RPCGRPC})
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
}
|
|
@ -0,0 +1,157 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/agent/grpc/internal/testservice"
|
||||
"github.com/hashicorp/consul/agent/grpc/resolver"
|
||||
"github.com/hashicorp/consul/agent/metadata"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNewDialer_WithTLSWrapper verifies that the dialer invokes the TLSWrapper
// when dialing a server registered with UseTLS=true.
func TestNewDialer_WithTLSWrapper(t *testing.T) {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	t.Cleanup(logError(t, lis.Close))

	builder := resolver.NewServerResolverBuilder(resolver.Config{})
	builder.AddServer(&metadata.Server{
		Name:       "server-1",
		ID:         "ID1",
		Datacenter: "dc1",
		Addr:       lis.Addr(),
		UseTLS:     true, // must trigger the wrapper in newDialer
	})

	// The wrapper records that it was called and passes the conn through
	// unchanged, so no real TLS handshake is required.
	var called bool
	wrapper := func(_ string, conn net.Conn) (net.Conn, error) {
		called = true
		return conn, nil
	}
	dial := newDialer(builder, wrapper)
	ctx := context.Background()
	conn, err := dial(ctx, lis.Addr().String())
	require.NoError(t, err)
	require.NoError(t, conn.Close())
	require.True(t, called, "expected TLSWrapper to be called")
}
|
||||
|
||||
// TODO: integration test TestNewDialer with TLS and rcp server, when the rpc
|
||||
// exists as an isolated component.
|
||||
|
||||
// TestClientConnPool_IntegrationWithGRPCResolver_Failover verifies that when
// the server currently serving requests is removed from the resolver, later
// requests on the same connection fail over to a different server.
func TestClientConnPool_IntegrationWithGRPCResolver_Failover(t *testing.T) {
	count := 4
	// Use a per-test scheme because gRPC registers resolvers globally.
	cfg := resolver.Config{Scheme: newScheme(t.Name())}
	res := resolver.NewServerResolverBuilder(cfg)
	resolver.RegisterWithGRPC(res)
	pool := NewClientConnPool(res, nil)

	for i := 0; i < count; i++ {
		name := fmt.Sprintf("server-%d", i)
		srv := newTestServer(t, name, "dc1")
		res.AddServer(srv.Metadata())
		t.Cleanup(srv.shutdown)
	}

	conn, err := pool.ClientConn("dc1")
	require.NoError(t, err)
	client := testservice.NewSimpleClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	t.Cleanup(cancel)

	first, err := client.Something(ctx, &testservice.Req{})
	require.NoError(t, err)

	// Remove the server that answered; the next request must reach another.
	res.RemoveServer(&metadata.Server{ID: first.ServerName, Datacenter: "dc1"})

	resp, err := client.Something(ctx, &testservice.Req{})
	require.NoError(t, err)
	require.NotEqual(t, resp.ServerName, first.ServerName)
}
|
||||
|
||||
// newScheme normalizes a test name (t.Name()) into a valid gRPC resolver
// scheme by stripping "/" and "_" separators and lower-casing the result.
// gRPC registers resolver schemes globally, so each test needs a unique one.
func newScheme(n string) string {
	// strings.ReplaceAll replaces the chained strings.Replace(..., -1) calls.
	s := strings.ReplaceAll(n, "/", "")
	s = strings.ReplaceAll(s, "_", "")
	return strings.ToLower(s)
}
|
||||
|
||||
// TestClientConnPool_IntegrationWithGRPCResolver_Rebalance verifies that
// rebalancing shuffles the server list for the matching datacenter only:
// rebalancing another DC leaves the active server untouched, while
// rebalancing the connection's own DC eventually selects a new server.
func TestClientConnPool_IntegrationWithGRPCResolver_Rebalance(t *testing.T) {
	count := 4
	// Per-test scheme: gRPC resolver registration is global.
	cfg := resolver.Config{Scheme: newScheme(t.Name())}
	res := resolver.NewServerResolverBuilder(cfg)
	resolver.RegisterWithGRPC(res)
	pool := NewClientConnPool(res, nil)

	for i := 0; i < count; i++ {
		name := fmt.Sprintf("server-%d", i)
		srv := newTestServer(t, name, "dc1")
		res.AddServer(srv.Metadata())
		t.Cleanup(srv.shutdown)
	}

	conn, err := pool.ClientConn("dc1")
	require.NoError(t, err)
	client := testservice.NewSimpleClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	t.Cleanup(cancel)

	// Record which server currently answers so shuffles can be detected.
	first, err := client.Something(ctx, &testservice.Req{})
	require.NoError(t, err)

	t.Run("rebalance a different DC, does nothing", func(t *testing.T) {
		res.NewRebalancer("dc-other")()

		resp, err := client.Something(ctx, &testservice.Req{})
		require.NoError(t, err)
		require.Equal(t, resp.ServerName, first.ServerName)
	})

	t.Run("rebalance the dc", func(t *testing.T) {
		// Rebalance is random, but if we repeat it a few times it should give us a
		// new server.
		retry.RunWith(fastRetry, t, func(r *retry.R) {
			res.NewRebalancer("dc1")()

			resp, err := client.Something(ctx, &testservice.Req{})
			require.NoError(r, err)
			require.NotEqual(r, resp.ServerName, first.ServerName)
		})
	})
}
|
||||
|
||||
// TestClientConnPool_IntegrationWithGRPCResolver_MultiDC verifies that the
// pool routes each datacenter's connection to a server in that datacenter.
func TestClientConnPool_IntegrationWithGRPCResolver_MultiDC(t *testing.T) {
	dcs := []string{"dc1", "dc2", "dc3"}

	// Per-test scheme: gRPC resolver registration is global.
	cfg := resolver.Config{Scheme: newScheme(t.Name())}
	res := resolver.NewServerResolverBuilder(cfg)
	resolver.RegisterWithGRPC(res)
	pool := NewClientConnPool(res, nil)

	// One server per datacenter.
	for _, dc := range dcs {
		name := "server-0-" + dc
		srv := newTestServer(t, name, dc)
		res.AddServer(srv.Metadata())
		t.Cleanup(srv.shutdown)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	t.Cleanup(cancel)

	// Each DC's connection must be answered by a server reporting that DC.
	for _, dc := range dcs {
		conn, err := pool.ClientConn(dc)
		require.NoError(t, err)
		client := testservice.NewSimpleClient(conn)

		resp, err := client.Something(ctx, &testservice.Req{})
		require.NoError(t, err)
		require.Equal(t, resp.Datacenter, dc)
	}
}
|
|
@ -15,16 +15,14 @@ func NewHandler(addr net.Addr) *Handler {
|
|||
// We don't need to pass tls.Config to the server since it's multiplexed
|
||||
// behind the RPC listener, which already has TLS configured.
|
||||
srv := grpc.NewServer(
|
||||
grpc.StatsHandler(&statsHandler{}),
|
||||
grpc.StatsHandler(newStatsHandler()),
|
||||
grpc.StreamInterceptor((&activeStreamCounter{}).Intercept),
|
||||
)
|
||||
|
||||
// TODO(streaming): add gRPC services to srv here
|
||||
|
||||
return &Handler{
|
||||
srv: srv,
|
||||
listener: &chanListener{addr: addr, conns: make(chan net.Conn)},
|
||||
}
|
||||
lis := &chanListener{addr: addr, conns: make(chan net.Conn)}
|
||||
return &Handler{srv: srv, listener: lis}
|
||||
}
|
||||
|
||||
// Handler implements a handler for the rpc server listener, and the
|
||||
|
@ -57,15 +55,26 @@ type chanListener struct {
|
|||
// Accept blocks until a connection is received from Handle, and then returns the
|
||||
// connection. Accept implements part of the net.Listener interface for grpc.Server.
|
||||
func (l *chanListener) Accept() (net.Conn, error) {
|
||||
return <-l.conns, nil
|
||||
select {
|
||||
case c, ok := <-l.conns:
|
||||
if !ok {
|
||||
return nil, &net.OpError{
|
||||
Op: "accept",
|
||||
Net: l.addr.Network(),
|
||||
Addr: l.addr,
|
||||
Err: fmt.Errorf("listener closed"),
|
||||
}
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Addr returns the listener's address. Addr implements part of the
// net.Listener interface for grpc.Server.
func (l *chanListener) Addr() net.Addr {
	return l.addr
}
|
||||
|
||||
// Close closes the conns channel so that a blocked Accept returns an error.
// The connections themselves are managed (and closed) by the caller, not by
// this listener.
func (l *chanListener) Close() error {
	close(l.conns)
	return nil
}
|
||||
|
||||
|
|
|
@ -75,6 +75,7 @@ func (m *Req) GetDatacenter() string {
|
|||
|
||||
type Resp struct {
|
||||
ServerName string `protobuf:"bytes,1,opt,name=ServerName,proto3" json:"ServerName,omitempty"`
|
||||
Datacenter string `protobuf:"bytes,2,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
|
@ -120,6 +121,13 @@ func (m *Resp) GetServerName() string {
|
|||
return ""
|
||||
}
|
||||
|
||||
// GetDatacenter returns the Datacenter field, or "" for a nil receiver.
// Generated protobuf accessor; do not edit by hand.
func (m *Resp) GetDatacenter() string {
	if m != nil {
		return m.Datacenter
	}
	return ""
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Req)(nil), "testservice.Req")
|
||||
proto.RegisterType((*Resp)(nil), "testservice.Resp")
|
||||
|
@ -130,20 +138,20 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptor_3009a77c573f826d = []byte{
|
||||
// 200 bytes of a gzipped FileDescriptorProto
|
||||
// 206 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x49, 0x4c, 0x4f, 0xcd,
|
||||
0x2b, 0xd1, 0x4f, 0x2f, 0x2a, 0x48, 0xd6, 0xcf, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, 0xcc, 0xd1,
|
||||
0x2f, 0x49, 0x2d, 0x2e, 0x29, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2f, 0xce, 0xcc, 0x2d,
|
||||
0xc8, 0x49, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x46, 0x92, 0x51, 0x52, 0xe5, 0x62,
|
||||
0x0e, 0x4a, 0x2d, 0x14, 0x92, 0xe3, 0xe2, 0x72, 0x49, 0x2c, 0x49, 0x4c, 0x4e, 0x05, 0xe9, 0x96,
|
||||
0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x42, 0x12, 0x51, 0x52, 0xe3, 0x62, 0x09, 0x4a, 0x2d, 0x2e,
|
||||
0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x42, 0x12, 0x51, 0x72, 0xe3, 0x62, 0x09, 0x4a, 0x2d, 0x2e,
|
||||
0x00, 0xa9, 0x0b, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0xf2, 0x4b, 0xcc, 0x4d, 0x85, 0xa9, 0x43, 0x88,
|
||||
0x18, 0xe5, 0x72, 0xb1, 0x05, 0x83, 0xed, 0x12, 0x32, 0xe2, 0xe2, 0x0c, 0xce, 0xcf, 0x4d, 0x2d,
|
||||
0xc9, 0xc8, 0xcc, 0x4b, 0x17, 0x12, 0xd0, 0x43, 0xb2, 0x53, 0x2f, 0x28, 0xb5, 0x50, 0x4a, 0x10,
|
||||
0x4d, 0xa4, 0xb8, 0x40, 0x89, 0x41, 0x48, 0x9f, 0x8b, 0xc5, 0x2d, 0x27, 0xbf, 0x9c, 0x48, 0xe5,
|
||||
0x06, 0x8c, 0x4e, 0x02, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c,
|
||||
0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0x60, 0x3f, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
|
||||
0x61, 0xd3, 0x5e, 0xba, 0x13, 0x01, 0x00, 0x00,
|
||||
0xa0, 0x99, 0xc3, 0x84, 0x6e, 0x8e, 0x51, 0x2e, 0x17, 0x5b, 0x30, 0xd8, 0x2d, 0x42, 0x46, 0x5c,
|
||||
0x9c, 0xc1, 0xf9, 0xb9, 0xa9, 0x25, 0x19, 0x99, 0x79, 0xe9, 0x42, 0x02, 0x7a, 0x48, 0x6e, 0xd2,
|
||||
0x0b, 0x4a, 0x2d, 0x94, 0x12, 0x44, 0x13, 0x29, 0x2e, 0x50, 0x62, 0x10, 0xd2, 0xe7, 0x62, 0x71,
|
||||
0xcb, 0xc9, 0x2f, 0x27, 0x52, 0xb9, 0x01, 0xa3, 0x93, 0xc0, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e,
|
||||
0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe3, 0xb1, 0x1c, 0x43, 0x12, 0x1b, 0x38, 0x0c, 0x8c,
|
||||
0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x4b, 0x16, 0x40, 0x33, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
|
@ -348,6 +356,13 @@ func (m *Resp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if len(m.Datacenter) > 0 {
|
||||
i -= len(m.Datacenter)
|
||||
copy(dAtA[i:], m.Datacenter)
|
||||
i = encodeVarintSimple(dAtA, i, uint64(len(m.Datacenter)))
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
if len(m.ServerName) > 0 {
|
||||
i -= len(m.ServerName)
|
||||
copy(dAtA[i:], m.ServerName)
|
||||
|
@ -395,6 +410,10 @@ func (m *Resp) Size() (n int) {
|
|||
if l > 0 {
|
||||
n += 1 + l + sovSimple(uint64(l))
|
||||
}
|
||||
l = len(m.Datacenter)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovSimple(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
|
@ -554,6 +573,38 @@ func (m *Resp) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
m.ServerName = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Datacenter", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSimple
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthSimple
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthSimple
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Datacenter = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipSimple(dAtA[iNdEx:])
|
||||
|
|
|
@ -14,4 +14,5 @@ message Req {
|
|||
|
||||
message Resp {
|
||||
string ServerName = 1;
|
||||
string Datacenter = 2;
|
||||
}
|
|
@ -0,0 +1,233 @@
|
|||
package resolver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/agent/metadata"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
var registerLock sync.Mutex
|
||||
|
||||
// RegisterWithGRPC registers the ServerResolverBuilder as a grpc/resolver.
// This function exists to synchronize registrations with a lock.
// grpc/resolver.Register expects all registration to happen at init and does
// not allow for concurrent registration. This function exists to support
// parallel testing.
func RegisterWithGRPC(b *ServerResolverBuilder) {
	registerLock.Lock()
	defer registerLock.Unlock()
	resolver.Register(b)
}
|
||||
|
||||
// ServerResolverBuilder tracks the current server list and keeps any
// ServerResolvers updated when changes occur.
type ServerResolverBuilder struct {
	// scheme used to query the server. Defaults to consul. Used to support
	// parallel testing because gRPC registers resolvers globally.
	scheme string
	// servers is an index of Servers by Server.ID. The map contains server IDs
	// for all datacenters, so it assumes the ID is globally unique.
	servers map[string]*metadata.Server
	// resolvers is an index of connections to the serverResolver which manages
	// addresses of servers for that connection.
	resolvers map[resolver.ClientConn]*serverResolver
	// lock guards both servers and resolvers.
	lock sync.RWMutex
}

// Compile-time check that ServerResolverBuilder satisfies resolver.Builder.
var _ resolver.Builder = (*ServerResolverBuilder)(nil)

// Config holds the settings for a ServerResolverBuilder.
type Config struct {
	// Scheme used to connect to the server. Defaults to consul.
	Scheme string
}
|
||||
|
||||
func NewServerResolverBuilder(cfg Config) *ServerResolverBuilder {
|
||||
if cfg.Scheme == "" {
|
||||
cfg.Scheme = "consul"
|
||||
}
|
||||
return &ServerResolverBuilder{
|
||||
scheme: cfg.Scheme,
|
||||
servers: make(map[string]*metadata.Server),
|
||||
resolvers: make(map[resolver.ClientConn]*serverResolver),
|
||||
}
|
||||
}
|
||||
|
||||
// NewRebalancer returns a function that shuffles the server list for
// resolvers in the given datacenter only; resolvers for other datacenters
// are left untouched.
func (s *ServerResolverBuilder) NewRebalancer(dc string) func() {
	shuffler := rand.New(rand.NewSource(time.Now().UnixNano()))
	return func() {
		s.lock.RLock()
		defer s.lock.RUnlock()

		for _, resolver := range s.resolvers {
			if resolver.datacenter != dc {
				continue
			}
			// Shuffle the list of addresses using the last list given to the resolver.
			resolver.addrLock.Lock()
			addrs := resolver.addrs
			shuffler.Shuffle(len(addrs), func(i, j int) {
				addrs[i], addrs[j] = addrs[j], addrs[i]
			})
			// Pass the shuffled list to the resolver.
			resolver.updateAddrsLocked(addrs)
			resolver.addrLock.Unlock()
		}
	}
}
|
||||
|
||||
// ServerForAddr returns server metadata for a server with the specified address.
|
||||
func (s *ServerResolverBuilder) ServerForAddr(addr string) (*metadata.Server, error) {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
|
||||
for _, server := range s.servers {
|
||||
if server.Addr.String() == addr {
|
||||
return server, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("failed to find Consul server for address %q", addr)
|
||||
}
|
||||
|
||||
// Build returns a new serverResolver for the given ClientConn. The resolver
|
||||
// will keep the ClientConn's state updated based on updates from Serf.
|
||||
func (s *ServerResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
// If there's already a resolver for this connection, return it.
|
||||
// TODO(streaming): how would this happen since we already cache connections in ClientConnPool?
|
||||
if resolver, ok := s.resolvers[cc]; ok {
|
||||
return resolver, nil
|
||||
}
|
||||
|
||||
// Make a new resolver for the dc and add it to the list of active ones.
|
||||
datacenter := strings.TrimPrefix(target.Endpoint, "server.")
|
||||
resolver := &serverResolver{
|
||||
datacenter: datacenter,
|
||||
clientConn: cc,
|
||||
close: func() {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
delete(s.resolvers, cc)
|
||||
},
|
||||
}
|
||||
resolver.updateAddrs(s.getDCAddrs(datacenter))
|
||||
|
||||
s.resolvers[cc] = resolver
|
||||
return resolver, nil
|
||||
}
|
||||
|
||||
// Scheme returns the gRPC URL scheme served by this builder.
func (s *ServerResolverBuilder) Scheme() string { return s.scheme }
|
||||
|
||||
// AddServer updates the resolvers' states to include the new server's address.
|
||||
func (s *ServerResolverBuilder) AddServer(server *metadata.Server) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
s.servers[server.ID] = server
|
||||
|
||||
addrs := s.getDCAddrs(server.Datacenter)
|
||||
for _, resolver := range s.resolvers {
|
||||
if resolver.datacenter == server.Datacenter {
|
||||
resolver.updateAddrs(addrs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveServer updates the resolvers' states with the given server removed.
|
||||
func (s *ServerResolverBuilder) RemoveServer(server *metadata.Server) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
delete(s.servers, server.ID)
|
||||
|
||||
addrs := s.getDCAddrs(server.Datacenter)
|
||||
for _, resolver := range s.resolvers {
|
||||
if resolver.datacenter == server.Datacenter {
|
||||
resolver.updateAddrs(addrs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getDCAddrs returns a list of the server addresses for the given datacenter.
|
||||
// This method requires that lock is held for reads.
|
||||
func (s *ServerResolverBuilder) getDCAddrs(dc string) []resolver.Address {
|
||||
var addrs []resolver.Address
|
||||
for _, server := range s.servers {
|
||||
if server.Datacenter != dc {
|
||||
continue
|
||||
}
|
||||
|
||||
addrs = append(addrs, resolver.Address{
|
||||
Addr: server.Addr.String(),
|
||||
Type: resolver.Backend,
|
||||
ServerName: server.Name,
|
||||
})
|
||||
}
|
||||
return addrs
|
||||
}
|
||||
|
||||
// serverResolver is a grpc Resolver that will keep a grpc.ClientConn up to date
// on the list of server addresses to use.
type serverResolver struct {
	// datacenter that can be reached by the clientConn. Used by ServerResolverBuilder
	// to filter resolvers for those in a specific datacenter.
	datacenter string

	// clientConn that this resolver is providing addresses for.
	clientConn resolver.ClientConn

	// close is used by ServerResolverBuilder to remove this resolver from the
	// index of resolvers. It is called by grpc when the connection is closed.
	close func()

	// addrs stores the list of addresses passed to updateAddrs, so that they
	// can be rebalanced periodically by ServerResolverBuilder.
	addrs []resolver.Address
	// addrLock guards addrs and must be held across updateAddrsLocked.
	addrLock sync.Mutex
}
|
||||
|
||||
// Compile-time check that serverResolver satisfies resolver.Resolver.
var _ resolver.Resolver = (*serverResolver)(nil)

// updateAddrs updates this serverResolver's ClientConn to use the given set of
// addrs, acquiring addrLock on behalf of the caller.
func (r *serverResolver) updateAddrs(addrs []resolver.Address) {
	r.addrLock.Lock()
	defer r.addrLock.Unlock()
	r.updateAddrsLocked(addrs)
}
|
||||
|
||||
// updateAddrsLocked updates this serverResolver's ClientConn to use the given
// set of addrs. addrLock must be held by caller.
func (r *serverResolver) updateAddrsLocked(addrs []resolver.Address) {
	// Only pass the first address initially, which will cause the
	// balancer to spin down the connection for its previous first address
	// if it is different. If we don't do this, it will keep using the old
	// first address as long as it is still in the list, making it impossible to
	// rebalance until that address is removed.
	var firstAddr []resolver.Address
	if len(addrs) > 0 {
		firstAddr = []resolver.Address{addrs[0]}
	}
	r.clientConn.UpdateState(resolver.State{Addresses: firstAddr})

	// Call UpdateState again with the entire list of addrs in case we need them
	// for failover.
	r.clientConn.UpdateState(resolver.State{Addresses: addrs})

	// Retain the full list so NewRebalancer can reshuffle it later.
	r.addrs = addrs
}
|
||||
|
||||
// Close removes this resolver from the ServerResolverBuilder's index via the
// close callback. gRPC calls it when the connection is closed.
func (r *serverResolver) Close() {
	r.close()
}
|
||||
|
||||
// ResolveNow is a no-op; addresses are pushed to the ClientConn proactively
// via updateAddrs rather than pulled on demand.
func (*serverResolver) ResolveNow(_ resolver.ResolveNowOption) {}
|
|
@ -0,0 +1,125 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/agent/grpc/internal/testservice"
|
||||
"github.com/hashicorp/consul/agent/metadata"
|
||||
"github.com/hashicorp/consul/agent/pool"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// testServer is an in-process gRPC server used by tests, plus the metadata
// needed to register it with a ServerResolverBuilder.
type testServer struct {
	addr net.Addr
	name string
	dc   string
	// shutdown stops the listener and handler started by newTestServer.
	shutdown func()
}

// Metadata returns the metadata.Server describing this test server to the
// resolver. The name serves as the (assumed globally unique) server ID.
func (s testServer) Metadata() *metadata.Server {
	return &metadata.Server{ID: s.name, Datacenter: s.dc, Addr: s.addr}
}
|
||||
|
||||
// newTestServer starts a gRPC Handler behind a fakeRPCListener on a random
// local port, serving the Simple test service. The returned testServer's
// shutdown func closes the listener, stops the handler, and waits for both
// goroutines to exit, logging (not failing on) any errors.
func newTestServer(t *testing.T, name string, dc string) testServer {
	addr := &net.IPAddr{IP: net.ParseIP("127.0.0.1")}
	handler := NewHandler(addr)

	testservice.RegisterSimpleServer(handler.srv, &simple{name: name, dc: dc})

	lis, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)

	// fakeRPCListener strips the RPCType byte before handing conns to handler.
	rpc := &fakeRPCListener{t: t, handler: handler}

	g := errgroup.Group{}
	g.Go(func() error {
		return rpc.listen(lis)
	})
	g.Go(func() error {
		return handler.Run()
	})
	return testServer{
		addr: lis.Addr(),
		name: name,
		dc:   dc,
		shutdown: func() {
			if err := lis.Close(); err != nil {
				t.Logf("listener closed with error: %v", err)
			}
			if err := handler.Shutdown(); err != nil {
				t.Logf("grpc server shutdown: %v", err)
			}
			// Wait for both goroutines so the test leaves nothing running.
			if err := g.Wait(); err != nil {
				t.Logf("grpc server error: %v", err)
			}
		},
	}
}
|
||||
|
||||
// simple implements testservice.SimpleServer for tests, reporting its
// configured server name and datacenter in responses.
type simple struct {
	name string
	dc   string
}

// Flow streams a response every millisecond until the client's context is
// done. Note the streamed ServerName is the literal "one", not s.name.
func (s *simple) Flow(_ *testservice.Req, flow testservice.Simple_FlowServer) error {
	for flow.Context().Err() == nil {
		resp := &testservice.Resp{ServerName: "one", Datacenter: s.dc}
		if err := flow.Send(resp); err != nil {
			return err
		}
		time.Sleep(time.Millisecond)
	}
	return nil
}

// Something returns a single response identifying this server instance.
func (s *simple) Something(_ context.Context, _ *testservice.Req) (*testservice.Resp, error) {
	return &testservice.Resp{ServerName: s.name, Datacenter: s.dc}, nil
}
|
||||
|
||||
// fakeRPCListener mimics agent/consul.Server.listen to handle the RPCType byte.
// In the future we should be able to refactor Server and extract this RPC
// handling logic so that we don't need to use a fake.
// For now, since this logic is in agent/consul, we can't easily use Server.listen
// so we fake it.
type fakeRPCListener struct {
	t       *testing.T
	handler *Handler
}
|
||||
|
||||
// listen accepts connections from the listener and dispatches each one to
// handleConn on its own goroutine, until Accept fails (e.g. the listener is
// closed by the test's shutdown func).
func (f *fakeRPCListener) listen(listener net.Listener) error {
	for {
		conn, err := listener.Accept()
		if err != nil {
			return err
		}

		go f.handleConn(conn)
	}
}
|
||||
|
||||
func (f *fakeRPCListener) handleConn(conn net.Conn) {
|
||||
buf := make([]byte, 1)
|
||||
|
||||
if _, err := conn.Read(buf); err != nil {
|
||||
if err != io.EOF {
|
||||
fmt.Println("ERROR", err.Error())
|
||||
}
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
typ := pool.RPCType(buf[0])
|
||||
|
||||
if typ == pool.RPCGRPC {
|
||||
f.handler.Handle(conn)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("ERROR: unexpected byte", typ)
|
||||
conn.Close()
|
||||
}
|
|
@ -9,12 +9,19 @@ import (
|
|||
"google.golang.org/grpc/stats"
|
||||
)
|
||||
|
||||
// defaultMetrics is the sink used by statsHandlers from newStatsHandler;
// tests swap it out to capture emitted metrics (see patchGlobalMetrics).
var defaultMetrics = metrics.Default()

// statsHandler is a grpc/stats.StatsHandler which emits connection and
// request metrics to go-metrics.
type statsHandler struct {
	metrics     *metrics.Metrics
	activeConns uint64 // must be 8-byte aligned for atomic access
}

// newStatsHandler returns a statsHandler wired to the package-level
// defaultMetrics sink.
func newStatsHandler() *statsHandler {
	return &statsHandler{metrics: defaultMetrics}
}
|
||||
|
||||
// TagRPC implements grpcStats.StatsHandler
|
||||
func (c *statsHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
|
||||
// No-op
|
||||
|
@ -29,7 +36,7 @@ func (c *statsHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
|
|||
}
|
||||
switch s.(type) {
|
||||
case *stats.InHeader:
|
||||
metrics.IncrCounter([]string{"grpc", label, "request"}, 1)
|
||||
c.metrics.IncrCounter([]string{"grpc", label, "request"}, 1)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -53,7 +60,7 @@ func (c *statsHandler) HandleConn(_ context.Context, s stats.ConnStats) {
|
|||
// Decrement!
|
||||
count = atomic.AddUint64(&c.activeConns, ^uint64(0))
|
||||
}
|
||||
metrics.SetGauge([]string{"grpc", label, "active_conns"}, float32(count))
|
||||
c.metrics.SetGauge([]string{"grpc", label, "active_conns"}, float32(count))
|
||||
}
|
||||
|
||||
type activeStreamCounter struct {
|
||||
|
@ -71,10 +78,10 @@ func (i *activeStreamCounter) Intercept(
|
|||
handler grpc.StreamHandler,
|
||||
) error {
|
||||
count := atomic.AddUint64(&i.count, 1)
|
||||
metrics.SetGauge([]string{"grpc", "server", "active_streams"}, float32(count))
|
||||
defaultMetrics.SetGauge([]string{"grpc", "server", "active_streams"}, float32(count))
|
||||
defer func() {
|
||||
count := atomic.AddUint64(&i.count, ^uint64(0))
|
||||
metrics.SetGauge([]string{"grpc", "server", "active_streams"}, float32(count))
|
||||
defaultMetrics.SetGauge([]string{"grpc", "server", "active_streams"}, float32(count))
|
||||
}()
|
||||
|
||||
return handler(srv, ss)
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/consul/agent/grpc/internal/testservice"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"google.golang.org/grpc"
|
||||
|
@ -18,12 +19,11 @@ func TestHandler_EmitsStats(t *testing.T) {
|
|||
|
||||
addr := &net.IPAddr{IP: net.ParseIP("127.0.0.1")}
|
||||
handler := NewHandler(addr)
|
||||
|
||||
testservice.RegisterSimpleServer(handler.srv, &simple{})
|
||||
|
||||
lis, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
defer lis.Close()
|
||||
t.Cleanup(logError(t, lis.Close))
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
t.Cleanup(cancel)
|
||||
|
@ -43,7 +43,7 @@ func TestHandler_EmitsStats(t *testing.T) {
|
|||
|
||||
conn, err := grpc.DialContext(ctx, lis.Addr().String(), grpc.WithInsecure())
|
||||
require.NoError(t, err)
|
||||
defer conn.Close()
|
||||
t.Cleanup(logError(t, conn.Close))
|
||||
|
||||
client := testservice.NewSimpleClient(conn)
|
||||
fClient, err := client.Flow(ctx, &testservice.Req{Datacenter: "mine"})
|
||||
|
@ -53,36 +53,24 @@ func TestHandler_EmitsStats(t *testing.T) {
|
|||
_, err = fClient.Recv()
|
||||
require.NoError(t, err)
|
||||
|
||||
cancel()
|
||||
// Wait for the server to stop so that active_streams is predictable.
|
||||
retry.RunWith(fastRetry, t, func(r *retry.R) {
|
||||
expectedGauge := []metricCall{
|
||||
{key: []string{"testing", "grpc", "server", "active_conns"}, val: 1},
|
||||
{key: []string{"testing", "grpc", "server", "active_streams"}, val: 1},
|
||||
{key: []string{"testing", "grpc", "server", "active_streams"}, val: 0},
|
||||
}
|
||||
require.Equal(r, expectedGauge, sink.gaugeCalls)
|
||||
})
|
||||
|
||||
expectedCounter := []metricCall{
|
||||
{key: []string{"testing", "grpc", "server", "request"}, val: 1},
|
||||
}
|
||||
require.Equal(t, expectedCounter, sink.incrCounterCalls)
|
||||
expectedGauge := []metricCall{
|
||||
{key: []string{"testing", "grpc", "server", "active_conns"}, val: 1},
|
||||
{key: []string{"testing", "grpc", "server", "active_streams"}, val: 1},
|
||||
// TODO: why is the count reset to 0 before the client receives the second message?
|
||||
{key: []string{"testing", "grpc", "server", "active_streams"}, val: 0},
|
||||
}
|
||||
require.Equal(t, expectedGauge, sink.gaugeCalls)
|
||||
}
|
||||
|
||||
type simple struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func (s *simple) Flow(_ *testservice.Req, flow testservice.Simple_FlowServer) error {
|
||||
if err := flow.Send(&testservice.Resp{ServerName: "one"}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := flow.Send(&testservice.Resp{ServerName: "two"}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *simple) Something(_ context.Context, _ *testservice.Req) (*testservice.Resp, error) {
|
||||
return &testservice.Resp{ServerName: "the-fake-service-name"}, nil
|
||||
}
|
||||
var fastRetry = &retry.Timer{Timeout: 7 * time.Second, Wait: 2 * time.Millisecond}
|
||||
|
||||
func patchGlobalMetrics(t *testing.T) *fakeMetricsSink {
|
||||
t.Helper()
|
||||
|
@ -94,7 +82,8 @@ func patchGlobalMetrics(t *testing.T) *fakeMetricsSink {
|
|||
ProfileInterval: time.Second, // Poll runtime every second
|
||||
FilterDefault: true,
|
||||
}
|
||||
_, err := metrics.NewGlobal(cfg, sink)
|
||||
var err error
|
||||
defaultMetrics, err = metrics.New(cfg, sink)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
_, err = metrics.NewGlobal(cfg, &metrics.BlackholeSink{})
|
||||
|
@ -122,3 +111,11 @@ type metricCall struct {
|
|||
val float32
|
||||
labels []metrics.Label
|
||||
}
|
||||
|
||||
func logError(t *testing.T, f func() error) func() {
|
||||
return func() {
|
||||
if err := f(); err != nil {
|
||||
t.Logf(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,7 +18,7 @@ const (
|
|||
ingressHealth = "ingress"
|
||||
)
|
||||
|
||||
func (s *HTTPServer) HealthChecksInState(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) HealthChecksInState(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Set default DC
|
||||
args := structs.ChecksInStateRequest{}
|
||||
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
|
||||
|
@ -66,7 +66,7 @@ RETRY_ONCE:
|
|||
return out.HealthChecks, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) HealthNodeChecks(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) HealthNodeChecks(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Set default DC
|
||||
args := structs.NodeSpecificRequest{}
|
||||
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
|
||||
|
@ -112,7 +112,7 @@ RETRY_ONCE:
|
|||
return out.HealthChecks, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) HealthServiceChecks(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) HealthServiceChecks(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Set default DC
|
||||
args := structs.ServiceSpecificRequest{}
|
||||
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
|
||||
|
@ -162,24 +162,24 @@ RETRY_ONCE:
|
|||
|
||||
// HealthIngressServiceNodes should return "all the healthy ingress gateway instances
|
||||
// that I can use to access this connect-enabled service without mTLS".
|
||||
func (s *HTTPServer) HealthIngressServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) HealthIngressServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
return s.healthServiceNodes(resp, req, ingressHealth)
|
||||
}
|
||||
|
||||
// HealthConnectServiceNodes should return "all healthy connect-enabled
|
||||
// endpoints (e.g. could be side car proxies or native instances) for this
|
||||
// service so I can connect with mTLS".
|
||||
func (s *HTTPServer) HealthConnectServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) HealthConnectServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
return s.healthServiceNodes(resp, req, connectHealth)
|
||||
}
|
||||
|
||||
// HealthServiceNodes should return "all the healthy instances of this service
|
||||
// registered so I can connect directly to them".
|
||||
func (s *HTTPServer) HealthServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) HealthServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
return s.healthServiceNodes(resp, req, serviceHealth)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) healthServiceNodes(resp http.ResponseWriter, req *http.Request, healthType string) (interface{}, error) {
|
||||
func (s *HTTPHandlers) healthServiceNodes(resp http.ResponseWriter, req *http.Request, healthType string) (interface{}, error) {
|
||||
// Set default DC
|
||||
args := structs.ServiceSpecificRequest{}
|
||||
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
|
||||
|
|
152
agent/http.go
152
agent/http.go
|
@ -15,7 +15,6 @@ import (
|
|||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/NYTimes/gziphandler"
|
||||
|
@ -79,73 +78,70 @@ func (e ForbiddenError) Error() string {
|
|||
return "Access is restricted"
|
||||
}
|
||||
|
||||
// HTTPServer provides an HTTP api for an agent.
|
||||
//
|
||||
// TODO: rename this struct to something more appropriate. It is an http.Handler,
|
||||
// request router or multiplexer, but it is not a Server.
|
||||
type HTTPServer struct {
|
||||
// HTTPHandlers provides http.Handler functions for the HTTP APi.
|
||||
type HTTPHandlers struct {
|
||||
agent *Agent
|
||||
denylist *Denylist
|
||||
}
|
||||
|
||||
type templatedFile struct {
|
||||
// bufferedFile implements os.File and allows us to modify a file from disk by
|
||||
// writing out the new version into a buffer and then serving file reads from
|
||||
// that. It assumes you are modifying a real file and presents the actual file's
|
||||
// info when queried.
|
||||
type bufferedFile struct {
|
||||
templated *bytes.Reader
|
||||
name string
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
info os.FileInfo
|
||||
}
|
||||
|
||||
func newTemplatedFile(buf *bytes.Buffer, raw http.File) *templatedFile {
|
||||
func newBufferedFile(buf *bytes.Buffer, raw http.File) *bufferedFile {
|
||||
info, _ := raw.Stat()
|
||||
return &templatedFile{
|
||||
return &bufferedFile{
|
||||
templated: bytes.NewReader(buf.Bytes()),
|
||||
name: info.Name(),
|
||||
mode: info.Mode(),
|
||||
modTime: info.ModTime(),
|
||||
info: info,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *templatedFile) Read(p []byte) (n int, err error) {
|
||||
func (t *bufferedFile) Read(p []byte) (n int, err error) {
|
||||
return t.templated.Read(p)
|
||||
}
|
||||
|
||||
func (t *templatedFile) Seek(offset int64, whence int) (int64, error) {
|
||||
func (t *bufferedFile) Seek(offset int64, whence int) (int64, error) {
|
||||
return t.templated.Seek(offset, whence)
|
||||
}
|
||||
|
||||
func (t *templatedFile) Close() error {
|
||||
func (t *bufferedFile) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *templatedFile) Readdir(count int) ([]os.FileInfo, error) {
|
||||
func (t *bufferedFile) Readdir(count int) ([]os.FileInfo, error) {
|
||||
return nil, errors.New("not a directory")
|
||||
}
|
||||
|
||||
func (t *templatedFile) Stat() (os.FileInfo, error) {
|
||||
func (t *bufferedFile) Stat() (os.FileInfo, error) {
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (t *templatedFile) Name() string {
|
||||
return t.name
|
||||
func (t *bufferedFile) Name() string {
|
||||
return t.info.Name()
|
||||
}
|
||||
|
||||
func (t *templatedFile) Size() int64 {
|
||||
func (t *bufferedFile) Size() int64 {
|
||||
return int64(t.templated.Len())
|
||||
}
|
||||
|
||||
func (t *templatedFile) Mode() os.FileMode {
|
||||
return t.mode
|
||||
func (t *bufferedFile) Mode() os.FileMode {
|
||||
return t.info.Mode()
|
||||
}
|
||||
|
||||
func (t *templatedFile) ModTime() time.Time {
|
||||
return t.modTime
|
||||
func (t *bufferedFile) ModTime() time.Time {
|
||||
return t.info.ModTime()
|
||||
}
|
||||
|
||||
func (t *templatedFile) IsDir() bool {
|
||||
func (t *bufferedFile) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *templatedFile) Sys() interface{} {
|
||||
func (t *bufferedFile) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -161,28 +157,56 @@ func (fs *redirectFS) Open(name string) (http.File, error) {
|
|||
return file, err
|
||||
}
|
||||
|
||||
type templatedIndexFS struct {
|
||||
fs http.FileSystem
|
||||
templateVars func() map[string]interface{}
|
||||
type settingsInjectedIndexFS struct {
|
||||
fs http.FileSystem
|
||||
UISettings map[string]interface{}
|
||||
}
|
||||
|
||||
func (fs *templatedIndexFS) Open(name string) (http.File, error) {
|
||||
func (fs *settingsInjectedIndexFS) Open(name string) (http.File, error) {
|
||||
file, err := fs.fs.Open(name)
|
||||
if err != nil || name != "/index.html" {
|
||||
return file, err
|
||||
}
|
||||
|
||||
content, _ := ioutil.ReadAll(file)
|
||||
file.Seek(0, 0)
|
||||
t, err := template.New("fmtedindex").Parse(string(content))
|
||||
content, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed reading index.html: %s", err)
|
||||
}
|
||||
var out bytes.Buffer
|
||||
if err := t.Execute(&out, fs.templateVars()); err != nil {
|
||||
return nil, err
|
||||
file.Seek(0, 0)
|
||||
|
||||
// Replace the placeholder in the meta ENV with the actual UI config settings.
|
||||
// Ember passes the ENV with URL encoded JSON in a meta tag. We are replacing
|
||||
// a key and value that is the encoded version of
|
||||
// `"CONSUL_UI_SETTINGS_PLACEHOLDER":"__CONSUL_UI_SETTINGS_GO_HERE__"`
|
||||
// with a URL-encoded JSON blob representing the actual config.
|
||||
|
||||
// First built an escaped, JSON blob from the settings passed.
|
||||
bs, err := json.Marshal(fs.UISettings)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed marshalling UI settings JSON: %s", err)
|
||||
}
|
||||
return newTemplatedFile(&out, file), nil
|
||||
// We want to remove the first and last chars which will be the { and } since
|
||||
// we are injecting these variabled into the middle of an existing object.
|
||||
bs = bytes.Trim(bs, "{}")
|
||||
|
||||
// We use PathEscape because we don't want spaces to be turned into "+" like
|
||||
// QueryEscape does.
|
||||
escaped := url.PathEscape(string(bs))
|
||||
|
||||
content = bytes.Replace(content,
|
||||
[]byte("%22CONSUL_UI_SETTINGS_PLACEHOLDER%22%3A%22__CONSUL_UI_SETTINGS_GO_HERE__%22"),
|
||||
[]byte(escaped), 1)
|
||||
|
||||
// We also need to inject the content path. This used to be a go template
|
||||
// hence the syntax but for now simple string replacement is fine esp. since
|
||||
// all the other templated stuff above can't easily be done that was as we are
|
||||
// replacing an entire placeholder element in an encoded JSON blob with
|
||||
// multiple encoded JSON elements.
|
||||
if path, ok := fs.UISettings["CONSUL_CONTENT_PATH"].(string); ok {
|
||||
content = bytes.Replace(content, []byte("{{.ContentPath}}"), []byte(path), -1)
|
||||
}
|
||||
|
||||
return newBufferedFile(bytes.NewBuffer(content), file), nil
|
||||
}
|
||||
|
||||
// endpoint is a Consul-specific HTTP handler that takes the usual arguments in
|
||||
|
@ -191,7 +215,7 @@ func (fs *templatedIndexFS) Open(name string) (http.File, error) {
|
|||
type endpoint func(resp http.ResponseWriter, req *http.Request) (interface{}, error)
|
||||
|
||||
// unboundEndpoint is an endpoint method on a server.
|
||||
type unboundEndpoint func(s *HTTPServer, resp http.ResponseWriter, req *http.Request) (interface{}, error)
|
||||
type unboundEndpoint func(s *HTTPHandlers, resp http.ResponseWriter, req *http.Request) (interface{}, error)
|
||||
|
||||
// endpoints is a map from URL pattern to unbound endpoint.
|
||||
var endpoints map[string]unboundEndpoint
|
||||
|
@ -226,7 +250,7 @@ func (w *wrappedMux) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
|
|||
}
|
||||
|
||||
// handler is used to attach our handlers to the mux
|
||||
func (s *HTTPServer) handler(enableDebug bool) http.Handler {
|
||||
func (s *HTTPHandlers) handler(enableDebug bool) http.Handler {
|
||||
mux := http.NewServeMux()
|
||||
|
||||
// handleFuncMetrics takes the given pattern and handler and wraps to produce
|
||||
|
@ -332,7 +356,7 @@ func (s *HTTPServer) handler(enableDebug bool) http.Handler {
|
|||
uifs = fs
|
||||
}
|
||||
|
||||
uifs = &redirectFS{fs: &templatedIndexFS{fs: uifs, templateVars: s.GenerateHTMLTemplateVars}}
|
||||
uifs = &redirectFS{fs: &settingsInjectedIndexFS{fs: uifs, UISettings: s.GetUIENVFromConfig()}}
|
||||
// create a http handler using the ui file system
|
||||
// and the headers specified by the http_config.response_headers user config
|
||||
uifsWithHeaders := serveHandlerWithHeaders(
|
||||
|
@ -366,19 +390,19 @@ func (s *HTTPServer) handler(enableDebug bool) http.Handler {
|
|||
}
|
||||
}
|
||||
|
||||
func (s *HTTPServer) GenerateHTMLTemplateVars() map[string]interface{} {
|
||||
func (s *HTTPHandlers) GetUIENVFromConfig() map[string]interface{} {
|
||||
vars := map[string]interface{}{
|
||||
"ContentPath": s.agent.config.UIContentPath,
|
||||
"ACLsEnabled": s.agent.config.ACLsEnabled,
|
||||
"CONSUL_CONTENT_PATH": s.agent.config.UIContentPath,
|
||||
"CONSUL_ACLS_ENABLED": s.agent.config.ACLsEnabled,
|
||||
}
|
||||
|
||||
s.addEnterpriseHTMLTemplateVars(vars)
|
||||
s.addEnterpriseUIENVVars(vars)
|
||||
|
||||
return vars
|
||||
}
|
||||
|
||||
// nodeName returns the node name of the agent
|
||||
func (s *HTTPServer) nodeName() string {
|
||||
func (s *HTTPHandlers) nodeName() string {
|
||||
return s.agent.config.NodeName
|
||||
}
|
||||
|
||||
|
@ -406,7 +430,7 @@ var (
|
|||
)
|
||||
|
||||
// wrap is used to wrap functions to make them more convenient
|
||||
func (s *HTTPServer) wrap(handler endpoint, methods []string) http.HandlerFunc {
|
||||
func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc {
|
||||
httpLogger := s.agent.logger.Named(logging.HTTP)
|
||||
return func(resp http.ResponseWriter, req *http.Request) {
|
||||
setHeaders(resp, s.agent.config.HTTPResponseHeaders)
|
||||
|
@ -593,7 +617,7 @@ func (s *HTTPServer) wrap(handler endpoint, methods []string) http.HandlerFunc {
|
|||
|
||||
// marshalJSON marshals the object into JSON, respecting the user's pretty-ness
|
||||
// configuration.
|
||||
func (s *HTTPServer) marshalJSON(req *http.Request, obj interface{}) ([]byte, error) {
|
||||
func (s *HTTPHandlers) marshalJSON(req *http.Request, obj interface{}) ([]byte, error) {
|
||||
if _, ok := req.URL.Query()["pretty"]; ok || s.agent.config.DevMode {
|
||||
buf, err := json.MarshalIndent(obj, "", " ")
|
||||
if err != nil {
|
||||
|
@ -611,12 +635,12 @@ func (s *HTTPServer) marshalJSON(req *http.Request, obj interface{}) ([]byte, er
|
|||
}
|
||||
|
||||
// Returns true if the UI is enabled.
|
||||
func (s *HTTPServer) IsUIEnabled() bool {
|
||||
func (s *HTTPHandlers) IsUIEnabled() bool {
|
||||
return s.agent.config.UIDir != "" || s.agent.config.EnableUI
|
||||
}
|
||||
|
||||
// Renders a simple index page
|
||||
func (s *HTTPServer) Index(resp http.ResponseWriter, req *http.Request) {
|
||||
func (s *HTTPHandlers) Index(resp http.ResponseWriter, req *http.Request) {
|
||||
// Check if this is a non-index path
|
||||
if req.URL.Path != "/" {
|
||||
resp.WriteHeader(http.StatusNotFound)
|
||||
|
@ -871,7 +895,7 @@ func parseCacheControl(resp http.ResponseWriter, req *http.Request, b structs.Qu
|
|||
|
||||
// parseConsistency is used to parse the ?stale and ?consistent query params.
|
||||
// Returns true on error
|
||||
func (s *HTTPServer) parseConsistency(resp http.ResponseWriter, req *http.Request, b structs.QueryOptionsCompat) bool {
|
||||
func (s *HTTPHandlers) parseConsistency(resp http.ResponseWriter, req *http.Request, b structs.QueryOptionsCompat) bool {
|
||||
query := req.URL.Query()
|
||||
defaults := true
|
||||
if _, ok := query["stale"]; ok {
|
||||
|
@ -926,7 +950,7 @@ func (s *HTTPServer) parseConsistency(resp http.ResponseWriter, req *http.Reques
|
|||
}
|
||||
|
||||
// parseDC is used to parse the ?dc query param
|
||||
func (s *HTTPServer) parseDC(req *http.Request, dc *string) {
|
||||
func (s *HTTPHandlers) parseDC(req *http.Request, dc *string) {
|
||||
if other := req.URL.Query().Get("dc"); other != "" {
|
||||
*dc = other
|
||||
} else if *dc == "" {
|
||||
|
@ -936,7 +960,7 @@ func (s *HTTPServer) parseDC(req *http.Request, dc *string) {
|
|||
|
||||
// parseTokenInternal is used to parse the ?token query param or the X-Consul-Token header or
|
||||
// Authorization Bearer token (RFC6750).
|
||||
func (s *HTTPServer) parseTokenInternal(req *http.Request, token *string) {
|
||||
func (s *HTTPHandlers) parseTokenInternal(req *http.Request, token *string) {
|
||||
tok := ""
|
||||
if other := req.URL.Query().Get("token"); other != "" {
|
||||
tok = other
|
||||
|
@ -970,7 +994,7 @@ func (s *HTTPServer) parseTokenInternal(req *http.Request, token *string) {
|
|||
// parseTokenWithDefault passes through to parseTokenInternal and optionally resolves proxy tokens to real ACL tokens.
|
||||
// If the token is invalid or not specified it will populate the token with the agents UserToken (acl_token in the
|
||||
// consul configuration)
|
||||
func (s *HTTPServer) parseTokenWithDefault(req *http.Request, token *string) {
|
||||
func (s *HTTPHandlers) parseTokenWithDefault(req *http.Request, token *string) {
|
||||
s.parseTokenInternal(req, token) // parseTokenInternal modifies *token
|
||||
if token != nil && *token == "" {
|
||||
*token = s.agent.tokens.UserToken()
|
||||
|
@ -981,7 +1005,7 @@ func (s *HTTPServer) parseTokenWithDefault(req *http.Request, token *string) {
|
|||
|
||||
// parseToken is used to parse the ?token query param or the X-Consul-Token header or
|
||||
// Authorization Bearer token header (RFC6750). This function is used widely in Consul's endpoints
|
||||
func (s *HTTPServer) parseToken(req *http.Request, token *string) {
|
||||
func (s *HTTPHandlers) parseToken(req *http.Request, token *string) {
|
||||
s.parseTokenWithDefault(req, token)
|
||||
}
|
||||
|
||||
|
@ -1011,7 +1035,7 @@ func sourceAddrFromRequest(req *http.Request) string {
|
|||
// parseSource is used to parse the ?near=<node> query parameter, used for
|
||||
// sorting by RTT based on a source node. We set the source's DC to the target
|
||||
// DC in the request, if given, or else the agent's DC.
|
||||
func (s *HTTPServer) parseSource(req *http.Request, source *structs.QuerySource) {
|
||||
func (s *HTTPHandlers) parseSource(req *http.Request, source *structs.QuerySource) {
|
||||
s.parseDC(req, &source.Datacenter)
|
||||
source.Ip = sourceAddrFromRequest(req)
|
||||
if node := req.URL.Query().Get("near"); node != "" {
|
||||
|
@ -1025,7 +1049,7 @@ func (s *HTTPServer) parseSource(req *http.Request, source *structs.QuerySource)
|
|||
|
||||
// parseMetaFilter is used to parse the ?node-meta=key:value query parameter, used for
|
||||
// filtering results to nodes with the given metadata key/value
|
||||
func (s *HTTPServer) parseMetaFilter(req *http.Request) map[string]string {
|
||||
func (s *HTTPHandlers) parseMetaFilter(req *http.Request) map[string]string {
|
||||
if filterList, ok := req.URL.Query()["node-meta"]; ok {
|
||||
filters := make(map[string]string)
|
||||
for _, filter := range filterList {
|
||||
|
@ -1047,7 +1071,7 @@ func parseMetaPair(raw string) (string, string) {
|
|||
|
||||
// parseInternal is a convenience method for endpoints that need
|
||||
// to use both parseWait and parseDC.
|
||||
func (s *HTTPServer) parseInternal(resp http.ResponseWriter, req *http.Request, dc *string, b structs.QueryOptionsCompat) bool {
|
||||
func (s *HTTPHandlers) parseInternal(resp http.ResponseWriter, req *http.Request, dc *string, b structs.QueryOptionsCompat) bool {
|
||||
s.parseDC(req, dc)
|
||||
var token string
|
||||
s.parseTokenWithDefault(req, &token)
|
||||
|
@ -1066,11 +1090,11 @@ func (s *HTTPServer) parseInternal(resp http.ResponseWriter, req *http.Request,
|
|||
|
||||
// parse is a convenience method for endpoints that need
|
||||
// to use both parseWait and parseDC.
|
||||
func (s *HTTPServer) parse(resp http.ResponseWriter, req *http.Request, dc *string, b structs.QueryOptionsCompat) bool {
|
||||
func (s *HTTPHandlers) parse(resp http.ResponseWriter, req *http.Request, dc *string, b structs.QueryOptionsCompat) bool {
|
||||
return s.parseInternal(resp, req, dc, b)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) checkWriteAccess(req *http.Request) error {
|
||||
func (s *HTTPHandlers) checkWriteAccess(req *http.Request) error {
|
||||
if req.Method == http.MethodGet || req.Method == http.MethodHead || req.Method == http.MethodOptions {
|
||||
return nil
|
||||
}
|
||||
|
@ -1096,7 +1120,7 @@ func (s *HTTPServer) checkWriteAccess(req *http.Request) error {
|
|||
return ForbiddenError{}
|
||||
}
|
||||
|
||||
func (s *HTTPServer) parseFilter(req *http.Request, filter *string) {
|
||||
func (s *HTTPHandlers) parseFilter(req *http.Request, filter *string) {
|
||||
if other := req.URL.Query().Get("filter"); other != "" {
|
||||
*filter = other
|
||||
}
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
func (s *HTTPServer) parseEntMeta(req *http.Request, entMeta *structs.EnterpriseMeta) error {
|
||||
func (s *HTTPHandlers) parseEntMeta(req *http.Request, entMeta *structs.EnterpriseMeta) error {
|
||||
if headerNS := req.Header.Get("X-Consul-Namespace"); headerNS != "" {
|
||||
return BadRequestError{Reason: "Invalid header: \"X-Consul-Namespace\" - Namespaces are a Consul Enterprise feature"}
|
||||
}
|
||||
|
@ -20,7 +20,7 @@ func (s *HTTPServer) parseEntMeta(req *http.Request, entMeta *structs.Enterprise
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) validateEnterpriseIntentionNamespace(logName, ns string, _ bool) error {
|
||||
func (s *HTTPHandlers) validateEnterpriseIntentionNamespace(logName, ns string, _ bool) error {
|
||||
if ns == "" {
|
||||
return nil
|
||||
} else if strings.ToLower(ns) == structs.IntentionDefaultNamespace {
|
||||
|
@ -32,11 +32,11 @@ func (s *HTTPServer) validateEnterpriseIntentionNamespace(logName, ns string, _
|
|||
return BadRequestError{Reason: "Invalid " + logName + "(" + ns + ")" + ": Namespaces is a Consul Enterprise feature"}
|
||||
}
|
||||
|
||||
func (s *HTTPServer) parseEntMetaNoWildcard(req *http.Request, _ *structs.EnterpriseMeta) error {
|
||||
func (s *HTTPHandlers) parseEntMetaNoWildcard(req *http.Request, _ *structs.EnterpriseMeta) error {
|
||||
return s.parseEntMeta(req, nil)
|
||||
}
|
||||
|
||||
func (s *HTTPServer) rewordUnknownEnterpriseFieldError(err error) error {
|
||||
func (s *HTTPHandlers) rewordUnknownEnterpriseFieldError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -55,7 +55,7 @@ func (s *HTTPServer) rewordUnknownEnterpriseFieldError(err error) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (s *HTTPServer) addEnterpriseHTMLTemplateVars(vars map[string]interface{}) {}
|
||||
func (s *HTTPHandlers) addEnterpriseUIENVVars(_ map[string]interface{}) {}
|
||||
|
||||
func parseACLAuthMethodEnterpriseMeta(req *http.Request, _ *structs.ACLAuthMethodEnterpriseMeta) error {
|
||||
if methodNS := req.URL.Query().Get("authmethod-ns"); methodNS != "" {
|
||||
|
@ -66,6 +66,6 @@ func parseACLAuthMethodEnterpriseMeta(req *http.Request, _ *structs.ACLAuthMetho
|
|||
}
|
||||
|
||||
// enterpriseHandler is a noop for the enterprise implementation. we pass the original back
|
||||
func (s *HTTPServer) enterpriseHandler(next http.Handler) http.Handler {
|
||||
func (s *HTTPHandlers) enterpriseHandler(next http.Handler) http.Handler {
|
||||
return next
|
||||
}
|
||||
|
|
|
@ -1,123 +1,123 @@
|
|||
package agent
|
||||
|
||||
func init() {
|
||||
registerEndpoint("/v1/acl/bootstrap", []string{"PUT"}, (*HTTPServer).ACLBootstrap)
|
||||
registerEndpoint("/v1/acl/create", []string{"PUT"}, (*HTTPServer).ACLCreate)
|
||||
registerEndpoint("/v1/acl/update", []string{"PUT"}, (*HTTPServer).ACLUpdate)
|
||||
registerEndpoint("/v1/acl/destroy/", []string{"PUT"}, (*HTTPServer).ACLDestroy)
|
||||
registerEndpoint("/v1/acl/info/", []string{"GET"}, (*HTTPServer).ACLGet)
|
||||
registerEndpoint("/v1/acl/clone/", []string{"PUT"}, (*HTTPServer).ACLClone)
|
||||
registerEndpoint("/v1/acl/list", []string{"GET"}, (*HTTPServer).ACLList)
|
||||
registerEndpoint("/v1/acl/login", []string{"POST"}, (*HTTPServer).ACLLogin)
|
||||
registerEndpoint("/v1/acl/logout", []string{"POST"}, (*HTTPServer).ACLLogout)
|
||||
registerEndpoint("/v1/acl/replication", []string{"GET"}, (*HTTPServer).ACLReplicationStatus)
|
||||
registerEndpoint("/v1/acl/policies", []string{"GET"}, (*HTTPServer).ACLPolicyList)
|
||||
registerEndpoint("/v1/acl/policy", []string{"PUT"}, (*HTTPServer).ACLPolicyCreate)
|
||||
registerEndpoint("/v1/acl/policy/", []string{"GET", "PUT", "DELETE"}, (*HTTPServer).ACLPolicyCRUD)
|
||||
registerEndpoint("/v1/acl/policy/name/", []string{"GET"}, (*HTTPServer).ACLPolicyReadByName)
|
||||
registerEndpoint("/v1/acl/roles", []string{"GET"}, (*HTTPServer).ACLRoleList)
|
||||
registerEndpoint("/v1/acl/role", []string{"PUT"}, (*HTTPServer).ACLRoleCreate)
|
||||
registerEndpoint("/v1/acl/role/name/", []string{"GET"}, (*HTTPServer).ACLRoleReadByName)
|
||||
registerEndpoint("/v1/acl/role/", []string{"GET", "PUT", "DELETE"}, (*HTTPServer).ACLRoleCRUD)
|
||||
registerEndpoint("/v1/acl/binding-rules", []string{"GET"}, (*HTTPServer).ACLBindingRuleList)
|
||||
registerEndpoint("/v1/acl/binding-rule", []string{"PUT"}, (*HTTPServer).ACLBindingRuleCreate)
|
||||
registerEndpoint("/v1/acl/binding-rule/", []string{"GET", "PUT", "DELETE"}, (*HTTPServer).ACLBindingRuleCRUD)
|
||||
registerEndpoint("/v1/acl/auth-methods", []string{"GET"}, (*HTTPServer).ACLAuthMethodList)
|
||||
registerEndpoint("/v1/acl/auth-method", []string{"PUT"}, (*HTTPServer).ACLAuthMethodCreate)
|
||||
registerEndpoint("/v1/acl/auth-method/", []string{"GET", "PUT", "DELETE"}, (*HTTPServer).ACLAuthMethodCRUD)
|
||||
registerEndpoint("/v1/acl/rules/translate", []string{"POST"}, (*HTTPServer).ACLRulesTranslate)
|
||||
registerEndpoint("/v1/acl/rules/translate/", []string{"GET"}, (*HTTPServer).ACLRulesTranslateLegacyToken)
|
||||
registerEndpoint("/v1/acl/tokens", []string{"GET"}, (*HTTPServer).ACLTokenList)
|
||||
registerEndpoint("/v1/acl/token", []string{"PUT"}, (*HTTPServer).ACLTokenCreate)
|
||||
registerEndpoint("/v1/acl/token/self", []string{"GET"}, (*HTTPServer).ACLTokenSelf)
|
||||
registerEndpoint("/v1/acl/token/", []string{"GET", "PUT", "DELETE"}, (*HTTPServer).ACLTokenCRUD)
|
||||
registerEndpoint("/v1/agent/token/", []string{"PUT"}, (*HTTPServer).AgentToken)
|
||||
registerEndpoint("/v1/agent/self", []string{"GET"}, (*HTTPServer).AgentSelf)
|
||||
registerEndpoint("/v1/agent/host", []string{"GET"}, (*HTTPServer).AgentHost)
|
||||
registerEndpoint("/v1/agent/maintenance", []string{"PUT"}, (*HTTPServer).AgentNodeMaintenance)
|
||||
registerEndpoint("/v1/agent/reload", []string{"PUT"}, (*HTTPServer).AgentReload)
|
||||
registerEndpoint("/v1/agent/monitor", []string{"GET"}, (*HTTPServer).AgentMonitor)
|
||||
registerEndpoint("/v1/agent/metrics", []string{"GET"}, (*HTTPServer).AgentMetrics)
|
||||
registerEndpoint("/v1/agent/services", []string{"GET"}, (*HTTPServer).AgentServices)
|
||||
registerEndpoint("/v1/agent/service/", []string{"GET"}, (*HTTPServer).AgentService)
|
||||
registerEndpoint("/v1/agent/checks", []string{"GET"}, (*HTTPServer).AgentChecks)
|
||||
registerEndpoint("/v1/agent/members", []string{"GET"}, (*HTTPServer).AgentMembers)
|
||||
registerEndpoint("/v1/agent/join/", []string{"PUT"}, (*HTTPServer).AgentJoin)
|
||||
registerEndpoint("/v1/agent/leave", []string{"PUT"}, (*HTTPServer).AgentLeave)
|
||||
registerEndpoint("/v1/agent/force-leave/", []string{"PUT"}, (*HTTPServer).AgentForceLeave)
|
||||
registerEndpoint("/v1/agent/health/service/id/", []string{"GET"}, (*HTTPServer).AgentHealthServiceByID)
|
||||
registerEndpoint("/v1/agent/health/service/name/", []string{"GET"}, (*HTTPServer).AgentHealthServiceByName)
|
||||
registerEndpoint("/v1/agent/check/register", []string{"PUT"}, (*HTTPServer).AgentRegisterCheck)
|
||||
registerEndpoint("/v1/agent/check/deregister/", []string{"PUT"}, (*HTTPServer).AgentDeregisterCheck)
|
||||
registerEndpoint("/v1/agent/check/pass/", []string{"PUT"}, (*HTTPServer).AgentCheckPass)
|
||||
registerEndpoint("/v1/agent/check/warn/", []string{"PUT"}, (*HTTPServer).AgentCheckWarn)
|
||||
registerEndpoint("/v1/agent/check/fail/", []string{"PUT"}, (*HTTPServer).AgentCheckFail)
|
||||
registerEndpoint("/v1/agent/check/update/", []string{"PUT"}, (*HTTPServer).AgentCheckUpdate)
|
||||
registerEndpoint("/v1/agent/connect/authorize", []string{"POST"}, (*HTTPServer).AgentConnectAuthorize)
|
||||
registerEndpoint("/v1/agent/connect/ca/roots", []string{"GET"}, (*HTTPServer).AgentConnectCARoots)
|
||||
registerEndpoint("/v1/agent/connect/ca/leaf/", []string{"GET"}, (*HTTPServer).AgentConnectCALeafCert)
|
||||
registerEndpoint("/v1/agent/service/register", []string{"PUT"}, (*HTTPServer).AgentRegisterService)
|
||||
registerEndpoint("/v1/agent/service/deregister/", []string{"PUT"}, (*HTTPServer).AgentDeregisterService)
|
||||
registerEndpoint("/v1/agent/service/maintenance/", []string{"PUT"}, (*HTTPServer).AgentServiceMaintenance)
|
||||
registerEndpoint("/v1/catalog/register", []string{"PUT"}, (*HTTPServer).CatalogRegister)
|
||||
registerEndpoint("/v1/catalog/connect/", []string{"GET"}, (*HTTPServer).CatalogConnectServiceNodes)
|
||||
registerEndpoint("/v1/catalog/deregister", []string{"PUT"}, (*HTTPServer).CatalogDeregister)
|
||||
registerEndpoint("/v1/catalog/datacenters", []string{"GET"}, (*HTTPServer).CatalogDatacenters)
|
||||
registerEndpoint("/v1/catalog/nodes", []string{"GET"}, (*HTTPServer).CatalogNodes)
|
||||
registerEndpoint("/v1/catalog/services", []string{"GET"}, (*HTTPServer).CatalogServices)
|
||||
registerEndpoint("/v1/catalog/service/", []string{"GET"}, (*HTTPServer).CatalogServiceNodes)
|
||||
registerEndpoint("/v1/catalog/node/", []string{"GET"}, (*HTTPServer).CatalogNodeServices)
|
||||
registerEndpoint("/v1/catalog/node-services/", []string{"GET"}, (*HTTPServer).CatalogNodeServiceList)
|
||||
registerEndpoint("/v1/catalog/gateway-services/", []string{"GET"}, (*HTTPServer).CatalogGatewayServices)
|
||||
registerEndpoint("/v1/config/", []string{"GET", "DELETE"}, (*HTTPServer).Config)
|
||||
registerEndpoint("/v1/config", []string{"PUT"}, (*HTTPServer).ConfigApply)
|
||||
registerEndpoint("/v1/connect/ca/configuration", []string{"GET", "PUT"}, (*HTTPServer).ConnectCAConfiguration)
|
||||
registerEndpoint("/v1/connect/ca/roots", []string{"GET"}, (*HTTPServer).ConnectCARoots)
|
||||
registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPServer).IntentionEndpoint)
|
||||
registerEndpoint("/v1/connect/intentions/match", []string{"GET"}, (*HTTPServer).IntentionMatch)
|
||||
registerEndpoint("/v1/connect/intentions/check", []string{"GET"}, (*HTTPServer).IntentionCheck)
|
||||
registerEndpoint("/v1/connect/intentions/exact", []string{"GET"}, (*HTTPServer).IntentionGetExact)
|
||||
registerEndpoint("/v1/connect/intentions/", []string{"GET", "PUT", "DELETE"}, (*HTTPServer).IntentionSpecific)
|
||||
registerEndpoint("/v1/coordinate/datacenters", []string{"GET"}, (*HTTPServer).CoordinateDatacenters)
|
||||
registerEndpoint("/v1/coordinate/nodes", []string{"GET"}, (*HTTPServer).CoordinateNodes)
|
||||
registerEndpoint("/v1/coordinate/node/", []string{"GET"}, (*HTTPServer).CoordinateNode)
|
||||
registerEndpoint("/v1/coordinate/update", []string{"PUT"}, (*HTTPServer).CoordinateUpdate)
|
||||
registerEndpoint("/v1/internal/federation-states", []string{"GET"}, (*HTTPServer).FederationStateList)
|
||||
registerEndpoint("/v1/internal/federation-states/mesh-gateways", []string{"GET"}, (*HTTPServer).FederationStateListMeshGateways)
|
||||
registerEndpoint("/v1/internal/federation-state/", []string{"GET"}, (*HTTPServer).FederationStateGet)
|
||||
registerEndpoint("/v1/discovery-chain/", []string{"GET", "POST"}, (*HTTPServer).DiscoveryChainRead)
|
||||
registerEndpoint("/v1/event/fire/", []string{"PUT"}, (*HTTPServer).EventFire)
|
||||
registerEndpoint("/v1/event/list", []string{"GET"}, (*HTTPServer).EventList)
|
||||
registerEndpoint("/v1/health/node/", []string{"GET"}, (*HTTPServer).HealthNodeChecks)
|
||||
registerEndpoint("/v1/health/checks/", []string{"GET"}, (*HTTPServer).HealthServiceChecks)
|
||||
registerEndpoint("/v1/health/state/", []string{"GET"}, (*HTTPServer).HealthChecksInState)
|
||||
registerEndpoint("/v1/health/service/", []string{"GET"}, (*HTTPServer).HealthServiceNodes)
|
||||
registerEndpoint("/v1/health/connect/", []string{"GET"}, (*HTTPServer).HealthConnectServiceNodes)
|
||||
registerEndpoint("/v1/health/ingress/", []string{"GET"}, (*HTTPServer).HealthIngressServiceNodes)
|
||||
registerEndpoint("/v1/internal/ui/nodes", []string{"GET"}, (*HTTPServer).UINodes)
|
||||
registerEndpoint("/v1/internal/ui/node/", []string{"GET"}, (*HTTPServer).UINodeInfo)
|
||||
registerEndpoint("/v1/internal/ui/services", []string{"GET"}, (*HTTPServer).UIServices)
|
||||
registerEndpoint("/v1/internal/ui/gateway-services-nodes/", []string{"GET"}, (*HTTPServer).UIGatewayServicesNodes)
|
||||
registerEndpoint("/v1/internal/ui/gateway-intentions/", []string{"GET"}, (*HTTPServer).UIGatewayIntentions)
|
||||
registerEndpoint("/v1/internal/acl/authorize", []string{"POST"}, (*HTTPServer).ACLAuthorize)
|
||||
registerEndpoint("/v1/kv/", []string{"GET", "PUT", "DELETE"}, (*HTTPServer).KVSEndpoint)
|
||||
registerEndpoint("/v1/operator/raft/configuration", []string{"GET"}, (*HTTPServer).OperatorRaftConfiguration)
|
||||
registerEndpoint("/v1/operator/raft/peer", []string{"DELETE"}, (*HTTPServer).OperatorRaftPeer)
|
||||
registerEndpoint("/v1/operator/keyring", []string{"GET", "POST", "PUT", "DELETE"}, (*HTTPServer).OperatorKeyringEndpoint)
|
||||
registerEndpoint("/v1/operator/autopilot/configuration", []string{"GET", "PUT"}, (*HTTPServer).OperatorAutopilotConfiguration)
|
||||
registerEndpoint("/v1/operator/autopilot/health", []string{"GET"}, (*HTTPServer).OperatorServerHealth)
|
||||
registerEndpoint("/v1/query", []string{"GET", "POST"}, (*HTTPServer).PreparedQueryGeneral)
|
||||
registerEndpoint("/v1/acl/bootstrap", []string{"PUT"}, (*HTTPHandlers).ACLBootstrap)
|
||||
registerEndpoint("/v1/acl/create", []string{"PUT"}, (*HTTPHandlers).ACLCreate)
|
||||
registerEndpoint("/v1/acl/update", []string{"PUT"}, (*HTTPHandlers).ACLUpdate)
|
||||
registerEndpoint("/v1/acl/destroy/", []string{"PUT"}, (*HTTPHandlers).ACLDestroy)
|
||||
registerEndpoint("/v1/acl/info/", []string{"GET"}, (*HTTPHandlers).ACLGet)
|
||||
registerEndpoint("/v1/acl/clone/", []string{"PUT"}, (*HTTPHandlers).ACLClone)
|
||||
registerEndpoint("/v1/acl/list", []string{"GET"}, (*HTTPHandlers).ACLList)
|
||||
registerEndpoint("/v1/acl/login", []string{"POST"}, (*HTTPHandlers).ACLLogin)
|
||||
registerEndpoint("/v1/acl/logout", []string{"POST"}, (*HTTPHandlers).ACLLogout)
|
||||
registerEndpoint("/v1/acl/replication", []string{"GET"}, (*HTTPHandlers).ACLReplicationStatus)
|
||||
registerEndpoint("/v1/acl/policies", []string{"GET"}, (*HTTPHandlers).ACLPolicyList)
|
||||
registerEndpoint("/v1/acl/policy", []string{"PUT"}, (*HTTPHandlers).ACLPolicyCreate)
|
||||
registerEndpoint("/v1/acl/policy/", []string{"GET", "PUT", "DELETE"}, (*HTTPHandlers).ACLPolicyCRUD)
|
||||
registerEndpoint("/v1/acl/policy/name/", []string{"GET"}, (*HTTPHandlers).ACLPolicyReadByName)
|
||||
registerEndpoint("/v1/acl/roles", []string{"GET"}, (*HTTPHandlers).ACLRoleList)
|
||||
registerEndpoint("/v1/acl/role", []string{"PUT"}, (*HTTPHandlers).ACLRoleCreate)
|
||||
registerEndpoint("/v1/acl/role/name/", []string{"GET"}, (*HTTPHandlers).ACLRoleReadByName)
|
||||
registerEndpoint("/v1/acl/role/", []string{"GET", "PUT", "DELETE"}, (*HTTPHandlers).ACLRoleCRUD)
|
||||
registerEndpoint("/v1/acl/binding-rules", []string{"GET"}, (*HTTPHandlers).ACLBindingRuleList)
|
||||
registerEndpoint("/v1/acl/binding-rule", []string{"PUT"}, (*HTTPHandlers).ACLBindingRuleCreate)
|
||||
registerEndpoint("/v1/acl/binding-rule/", []string{"GET", "PUT", "DELETE"}, (*HTTPHandlers).ACLBindingRuleCRUD)
|
||||
registerEndpoint("/v1/acl/auth-methods", []string{"GET"}, (*HTTPHandlers).ACLAuthMethodList)
|
||||
registerEndpoint("/v1/acl/auth-method", []string{"PUT"}, (*HTTPHandlers).ACLAuthMethodCreate)
|
||||
registerEndpoint("/v1/acl/auth-method/", []string{"GET", "PUT", "DELETE"}, (*HTTPHandlers).ACLAuthMethodCRUD)
|
||||
registerEndpoint("/v1/acl/rules/translate", []string{"POST"}, (*HTTPHandlers).ACLRulesTranslate)
|
||||
registerEndpoint("/v1/acl/rules/translate/", []string{"GET"}, (*HTTPHandlers).ACLRulesTranslateLegacyToken)
|
||||
registerEndpoint("/v1/acl/tokens", []string{"GET"}, (*HTTPHandlers).ACLTokenList)
|
||||
registerEndpoint("/v1/acl/token", []string{"PUT"}, (*HTTPHandlers).ACLTokenCreate)
|
||||
registerEndpoint("/v1/acl/token/self", []string{"GET"}, (*HTTPHandlers).ACLTokenSelf)
|
||||
registerEndpoint("/v1/acl/token/", []string{"GET", "PUT", "DELETE"}, (*HTTPHandlers).ACLTokenCRUD)
|
||||
registerEndpoint("/v1/agent/token/", []string{"PUT"}, (*HTTPHandlers).AgentToken)
|
||||
registerEndpoint("/v1/agent/self", []string{"GET"}, (*HTTPHandlers).AgentSelf)
|
||||
registerEndpoint("/v1/agent/host", []string{"GET"}, (*HTTPHandlers).AgentHost)
|
||||
registerEndpoint("/v1/agent/maintenance", []string{"PUT"}, (*HTTPHandlers).AgentNodeMaintenance)
|
||||
registerEndpoint("/v1/agent/reload", []string{"PUT"}, (*HTTPHandlers).AgentReload)
|
||||
registerEndpoint("/v1/agent/monitor", []string{"GET"}, (*HTTPHandlers).AgentMonitor)
|
||||
registerEndpoint("/v1/agent/metrics", []string{"GET"}, (*HTTPHandlers).AgentMetrics)
|
||||
registerEndpoint("/v1/agent/services", []string{"GET"}, (*HTTPHandlers).AgentServices)
|
||||
registerEndpoint("/v1/agent/service/", []string{"GET"}, (*HTTPHandlers).AgentService)
|
||||
registerEndpoint("/v1/agent/checks", []string{"GET"}, (*HTTPHandlers).AgentChecks)
|
||||
registerEndpoint("/v1/agent/members", []string{"GET"}, (*HTTPHandlers).AgentMembers)
|
||||
registerEndpoint("/v1/agent/join/", []string{"PUT"}, (*HTTPHandlers).AgentJoin)
|
||||
registerEndpoint("/v1/agent/leave", []string{"PUT"}, (*HTTPHandlers).AgentLeave)
|
||||
registerEndpoint("/v1/agent/force-leave/", []string{"PUT"}, (*HTTPHandlers).AgentForceLeave)
|
||||
registerEndpoint("/v1/agent/health/service/id/", []string{"GET"}, (*HTTPHandlers).AgentHealthServiceByID)
|
||||
registerEndpoint("/v1/agent/health/service/name/", []string{"GET"}, (*HTTPHandlers).AgentHealthServiceByName)
|
||||
registerEndpoint("/v1/agent/check/register", []string{"PUT"}, (*HTTPHandlers).AgentRegisterCheck)
|
||||
registerEndpoint("/v1/agent/check/deregister/", []string{"PUT"}, (*HTTPHandlers).AgentDeregisterCheck)
|
||||
registerEndpoint("/v1/agent/check/pass/", []string{"PUT"}, (*HTTPHandlers).AgentCheckPass)
|
||||
registerEndpoint("/v1/agent/check/warn/", []string{"PUT"}, (*HTTPHandlers).AgentCheckWarn)
|
||||
registerEndpoint("/v1/agent/check/fail/", []string{"PUT"}, (*HTTPHandlers).AgentCheckFail)
|
||||
registerEndpoint("/v1/agent/check/update/", []string{"PUT"}, (*HTTPHandlers).AgentCheckUpdate)
|
||||
registerEndpoint("/v1/agent/connect/authorize", []string{"POST"}, (*HTTPHandlers).AgentConnectAuthorize)
|
||||
registerEndpoint("/v1/agent/connect/ca/roots", []string{"GET"}, (*HTTPHandlers).AgentConnectCARoots)
|
||||
registerEndpoint("/v1/agent/connect/ca/leaf/", []string{"GET"}, (*HTTPHandlers).AgentConnectCALeafCert)
|
||||
registerEndpoint("/v1/agent/service/register", []string{"PUT"}, (*HTTPHandlers).AgentRegisterService)
|
||||
registerEndpoint("/v1/agent/service/deregister/", []string{"PUT"}, (*HTTPHandlers).AgentDeregisterService)
|
||||
registerEndpoint("/v1/agent/service/maintenance/", []string{"PUT"}, (*HTTPHandlers).AgentServiceMaintenance)
|
||||
registerEndpoint("/v1/catalog/register", []string{"PUT"}, (*HTTPHandlers).CatalogRegister)
|
||||
registerEndpoint("/v1/catalog/connect/", []string{"GET"}, (*HTTPHandlers).CatalogConnectServiceNodes)
|
||||
registerEndpoint("/v1/catalog/deregister", []string{"PUT"}, (*HTTPHandlers).CatalogDeregister)
|
||||
registerEndpoint("/v1/catalog/datacenters", []string{"GET"}, (*HTTPHandlers).CatalogDatacenters)
|
||||
registerEndpoint("/v1/catalog/nodes", []string{"GET"}, (*HTTPHandlers).CatalogNodes)
|
||||
registerEndpoint("/v1/catalog/services", []string{"GET"}, (*HTTPHandlers).CatalogServices)
|
||||
registerEndpoint("/v1/catalog/service/", []string{"GET"}, (*HTTPHandlers).CatalogServiceNodes)
|
||||
registerEndpoint("/v1/catalog/node/", []string{"GET"}, (*HTTPHandlers).CatalogNodeServices)
|
||||
registerEndpoint("/v1/catalog/node-services/", []string{"GET"}, (*HTTPHandlers).CatalogNodeServiceList)
|
||||
registerEndpoint("/v1/catalog/gateway-services/", []string{"GET"}, (*HTTPHandlers).CatalogGatewayServices)
|
||||
registerEndpoint("/v1/config/", []string{"GET", "DELETE"}, (*HTTPHandlers).Config)
|
||||
registerEndpoint("/v1/config", []string{"PUT"}, (*HTTPHandlers).ConfigApply)
|
||||
registerEndpoint("/v1/connect/ca/configuration", []string{"GET", "PUT"}, (*HTTPHandlers).ConnectCAConfiguration)
|
||||
registerEndpoint("/v1/connect/ca/roots", []string{"GET"}, (*HTTPHandlers).ConnectCARoots)
|
||||
registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPHandlers).IntentionEndpoint)
|
||||
registerEndpoint("/v1/connect/intentions/match", []string{"GET"}, (*HTTPHandlers).IntentionMatch)
|
||||
registerEndpoint("/v1/connect/intentions/check", []string{"GET"}, (*HTTPHandlers).IntentionCheck)
|
||||
registerEndpoint("/v1/connect/intentions/exact", []string{"GET"}, (*HTTPHandlers).IntentionGetExact)
|
||||
registerEndpoint("/v1/connect/intentions/", []string{"GET", "PUT", "DELETE"}, (*HTTPHandlers).IntentionSpecific)
|
||||
registerEndpoint("/v1/coordinate/datacenters", []string{"GET"}, (*HTTPHandlers).CoordinateDatacenters)
|
||||
registerEndpoint("/v1/coordinate/nodes", []string{"GET"}, (*HTTPHandlers).CoordinateNodes)
|
||||
registerEndpoint("/v1/coordinate/node/", []string{"GET"}, (*HTTPHandlers).CoordinateNode)
|
||||
registerEndpoint("/v1/coordinate/update", []string{"PUT"}, (*HTTPHandlers).CoordinateUpdate)
|
||||
registerEndpoint("/v1/internal/federation-states", []string{"GET"}, (*HTTPHandlers).FederationStateList)
|
||||
registerEndpoint("/v1/internal/federation-states/mesh-gateways", []string{"GET"}, (*HTTPHandlers).FederationStateListMeshGateways)
|
||||
registerEndpoint("/v1/internal/federation-state/", []string{"GET"}, (*HTTPHandlers).FederationStateGet)
|
||||
registerEndpoint("/v1/discovery-chain/", []string{"GET", "POST"}, (*HTTPHandlers).DiscoveryChainRead)
|
||||
registerEndpoint("/v1/event/fire/", []string{"PUT"}, (*HTTPHandlers).EventFire)
|
||||
registerEndpoint("/v1/event/list", []string{"GET"}, (*HTTPHandlers).EventList)
|
||||
registerEndpoint("/v1/health/node/", []string{"GET"}, (*HTTPHandlers).HealthNodeChecks)
|
||||
registerEndpoint("/v1/health/checks/", []string{"GET"}, (*HTTPHandlers).HealthServiceChecks)
|
||||
registerEndpoint("/v1/health/state/", []string{"GET"}, (*HTTPHandlers).HealthChecksInState)
|
||||
registerEndpoint("/v1/health/service/", []string{"GET"}, (*HTTPHandlers).HealthServiceNodes)
|
||||
registerEndpoint("/v1/health/connect/", []string{"GET"}, (*HTTPHandlers).HealthConnectServiceNodes)
|
||||
registerEndpoint("/v1/health/ingress/", []string{"GET"}, (*HTTPHandlers).HealthIngressServiceNodes)
|
||||
registerEndpoint("/v1/internal/ui/nodes", []string{"GET"}, (*HTTPHandlers).UINodes)
|
||||
registerEndpoint("/v1/internal/ui/node/", []string{"GET"}, (*HTTPHandlers).UINodeInfo)
|
||||
registerEndpoint("/v1/internal/ui/services", []string{"GET"}, (*HTTPHandlers).UIServices)
|
||||
registerEndpoint("/v1/internal/ui/gateway-services-nodes/", []string{"GET"}, (*HTTPHandlers).UIGatewayServicesNodes)
|
||||
registerEndpoint("/v1/internal/ui/gateway-intentions/", []string{"GET"}, (*HTTPHandlers).UIGatewayIntentions)
|
||||
registerEndpoint("/v1/internal/acl/authorize", []string{"POST"}, (*HTTPHandlers).ACLAuthorize)
|
||||
registerEndpoint("/v1/kv/", []string{"GET", "PUT", "DELETE"}, (*HTTPHandlers).KVSEndpoint)
|
||||
registerEndpoint("/v1/operator/raft/configuration", []string{"GET"}, (*HTTPHandlers).OperatorRaftConfiguration)
|
||||
registerEndpoint("/v1/operator/raft/peer", []string{"DELETE"}, (*HTTPHandlers).OperatorRaftPeer)
|
||||
registerEndpoint("/v1/operator/keyring", []string{"GET", "POST", "PUT", "DELETE"}, (*HTTPHandlers).OperatorKeyringEndpoint)
|
||||
registerEndpoint("/v1/operator/autopilot/configuration", []string{"GET", "PUT"}, (*HTTPHandlers).OperatorAutopilotConfiguration)
|
||||
registerEndpoint("/v1/operator/autopilot/health", []string{"GET"}, (*HTTPHandlers).OperatorServerHealth)
|
||||
registerEndpoint("/v1/query", []string{"GET", "POST"}, (*HTTPHandlers).PreparedQueryGeneral)
|
||||
// specific prepared query endpoints have more complex rules for allowed methods, so
|
||||
// the prefix is registered with no methods.
|
||||
registerEndpoint("/v1/query/", []string{}, (*HTTPServer).PreparedQuerySpecific)
|
||||
registerEndpoint("/v1/session/create", []string{"PUT"}, (*HTTPServer).SessionCreate)
|
||||
registerEndpoint("/v1/session/destroy/", []string{"PUT"}, (*HTTPServer).SessionDestroy)
|
||||
registerEndpoint("/v1/session/renew/", []string{"PUT"}, (*HTTPServer).SessionRenew)
|
||||
registerEndpoint("/v1/session/info/", []string{"GET"}, (*HTTPServer).SessionGet)
|
||||
registerEndpoint("/v1/session/node/", []string{"GET"}, (*HTTPServer).SessionsForNode)
|
||||
registerEndpoint("/v1/session/list", []string{"GET"}, (*HTTPServer).SessionList)
|
||||
registerEndpoint("/v1/status/leader", []string{"GET"}, (*HTTPServer).StatusLeader)
|
||||
registerEndpoint("/v1/status/peers", []string{"GET"}, (*HTTPServer).StatusPeers)
|
||||
registerEndpoint("/v1/snapshot", []string{"GET", "PUT"}, (*HTTPServer).Snapshot)
|
||||
registerEndpoint("/v1/txn", []string{"PUT"}, (*HTTPServer).Txn)
|
||||
registerEndpoint("/v1/query/", []string{}, (*HTTPHandlers).PreparedQuerySpecific)
|
||||
registerEndpoint("/v1/session/create", []string{"PUT"}, (*HTTPHandlers).SessionCreate)
|
||||
registerEndpoint("/v1/session/destroy/", []string{"PUT"}, (*HTTPHandlers).SessionDestroy)
|
||||
registerEndpoint("/v1/session/renew/", []string{"PUT"}, (*HTTPHandlers).SessionRenew)
|
||||
registerEndpoint("/v1/session/info/", []string{"GET"}, (*HTTPHandlers).SessionGet)
|
||||
registerEndpoint("/v1/session/node/", []string{"GET"}, (*HTTPHandlers).SessionsForNode)
|
||||
registerEndpoint("/v1/session/list", []string{"GET"}, (*HTTPHandlers).SessionList)
|
||||
registerEndpoint("/v1/status/leader", []string{"GET"}, (*HTTPHandlers).StatusLeader)
|
||||
registerEndpoint("/v1/status/peers", []string{"GET"}, (*HTTPHandlers).StatusPeers)
|
||||
registerEndpoint("/v1/snapshot", []string{"GET", "PUT"}, (*HTTPHandlers).Snapshot)
|
||||
registerEndpoint("/v1/txn", []string{"PUT"}, (*HTTPHandlers).Txn)
|
||||
}
|
||||
|
|
|
@ -831,7 +831,7 @@ func TestHTTPServer_PProfHandlers_EnableDebug(t *testing.T) {
|
|||
resp := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest("GET", "/debug/pprof/profile?seconds=1", nil)
|
||||
|
||||
httpServer := &HTTPServer{agent: a.Agent}
|
||||
httpServer := &HTTPHandlers{agent: a.Agent}
|
||||
httpServer.handler(true).ServeHTTP(resp, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, resp.Code)
|
||||
|
@ -845,7 +845,7 @@ func TestHTTPServer_PProfHandlers_DisableDebugNoACLs(t *testing.T) {
|
|||
resp := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest("GET", "/debug/pprof/profile", nil)
|
||||
|
||||
httpServer := &HTTPServer{agent: a.Agent}
|
||||
httpServer := &HTTPHandlers{agent: a.Agent}
|
||||
httpServer.handler(false).ServeHTTP(resp, req)
|
||||
|
||||
require.Equal(t, http.StatusUnauthorized, resp.Code)
|
||||
|
@ -1351,9 +1351,11 @@ func TestHTTPServer_HandshakeTimeout(t *testing.T) {
|
|||
})
|
||||
defer a.Shutdown()
|
||||
|
||||
addr, err := firstAddr(a.Agent.apiServers, "https")
|
||||
require.NoError(t, err)
|
||||
// Connect to it with a plain TCP client that doesn't attempt to send HTTP or
|
||||
// complete a TLS handshake.
|
||||
conn, err := net.Dial("tcp", a.HTTPAddr())
|
||||
conn, err := net.Dial("tcp", addr.String())
|
||||
require.NoError(t, err)
|
||||
defer conn.Close()
|
||||
|
||||
|
@ -1413,7 +1415,8 @@ func TestRPC_HTTPSMaxConnsPerClient(t *testing.T) {
|
|||
})
|
||||
defer a.Shutdown()
|
||||
|
||||
addr := a.HTTPAddr()
|
||||
addr, err := firstAddr(a.Agent.apiServers, strings.ToLower(tc.name))
|
||||
require.NoError(t, err)
|
||||
|
||||
assertConn := func(conn net.Conn, wantOpen bool) {
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
|
@ -1433,21 +1436,21 @@ func TestRPC_HTTPSMaxConnsPerClient(t *testing.T) {
|
|||
}
|
||||
|
||||
// Connect to the server with bare TCP
|
||||
conn1, err := net.DialTimeout("tcp", addr, time.Second)
|
||||
conn1, err := net.DialTimeout("tcp", addr.String(), time.Second)
|
||||
require.NoError(t, err)
|
||||
defer conn1.Close()
|
||||
|
||||
assertConn(conn1, true)
|
||||
|
||||
// Two conns should succeed
|
||||
conn2, err := net.DialTimeout("tcp", addr, time.Second)
|
||||
conn2, err := net.DialTimeout("tcp", addr.String(), time.Second)
|
||||
require.NoError(t, err)
|
||||
defer conn2.Close()
|
||||
|
||||
assertConn(conn2, true)
|
||||
|
||||
// Third should succeed negotiating TCP handshake...
|
||||
conn3, err := net.DialTimeout("tcp", addr, time.Second)
|
||||
conn3, err := net.DialTimeout("tcp", addr.String(), time.Second)
|
||||
require.NoError(t, err)
|
||||
defer conn3.Close()
|
||||
|
||||
|
@ -1460,7 +1463,7 @@ func TestRPC_HTTPSMaxConnsPerClient(t *testing.T) {
|
|||
require.NoError(t, a.reloadConfigInternal(&newCfg))
|
||||
|
||||
// Now another conn should be allowed
|
||||
conn4, err := net.DialTimeout("tcp", addr, time.Second)
|
||||
conn4, err := net.DialTimeout("tcp", addr.String(), time.Second)
|
||||
require.NoError(t, err)
|
||||
defer conn4.Close()
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
)
|
||||
|
||||
// /v1/connect/intentions
|
||||
func (s *HTTPServer) IntentionEndpoint(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) IntentionEndpoint(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
switch req.Method {
|
||||
case "GET":
|
||||
return s.IntentionList(resp, req)
|
||||
|
@ -24,7 +24,7 @@ func (s *HTTPServer) IntentionEndpoint(resp http.ResponseWriter, req *http.Reque
|
|||
}
|
||||
|
||||
// GET /v1/connect/intentions
|
||||
func (s *HTTPServer) IntentionList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) IntentionList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Method is tested in IntentionEndpoint
|
||||
|
||||
var args structs.DCSpecificRequest
|
||||
|
@ -46,7 +46,7 @@ func (s *HTTPServer) IntentionList(resp http.ResponseWriter, req *http.Request)
|
|||
}
|
||||
|
||||
// POST /v1/connect/intentions
|
||||
func (s *HTTPServer) IntentionCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) IntentionCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Method is tested in IntentionEndpoint
|
||||
|
||||
var entMeta structs.EnterpriseMeta
|
||||
|
@ -77,7 +77,7 @@ func (s *HTTPServer) IntentionCreate(resp http.ResponseWriter, req *http.Request
|
|||
return intentionCreateResponse{reply}, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) validateEnterpriseIntention(ixn *structs.Intention) error {
|
||||
func (s *HTTPHandlers) validateEnterpriseIntention(ixn *structs.Intention) error {
|
||||
if err := s.validateEnterpriseIntentionNamespace("SourceNS", ixn.SourceNS, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -88,7 +88,7 @@ func (s *HTTPServer) validateEnterpriseIntention(ixn *structs.Intention) error {
|
|||
}
|
||||
|
||||
// GET /v1/connect/intentions/match
|
||||
func (s *HTTPServer) IntentionMatch(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) IntentionMatch(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Prepare args
|
||||
args := &structs.IntentionQueryRequest{Match: &structs.IntentionQueryMatch{}}
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
|
@ -152,7 +152,7 @@ func (s *HTTPServer) IntentionMatch(resp http.ResponseWriter, req *http.Request)
|
|||
}
|
||||
|
||||
// GET /v1/connect/intentions/check
|
||||
func (s *HTTPServer) IntentionCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) IntentionCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Prepare args
|
||||
args := &structs.IntentionQueryRequest{Check: &structs.IntentionQueryCheck{}}
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
|
@ -210,7 +210,7 @@ func (s *HTTPServer) IntentionCheck(resp http.ResponseWriter, req *http.Request)
|
|||
}
|
||||
|
||||
// GET /v1/connect/intentions/exact
|
||||
func (s *HTTPServer) IntentionGetExact(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) IntentionGetExact(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var entMeta structs.EnterpriseMeta
|
||||
if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil {
|
||||
return nil, err
|
||||
|
@ -284,7 +284,7 @@ func (s *HTTPServer) IntentionGetExact(resp http.ResponseWriter, req *http.Reque
|
|||
}
|
||||
|
||||
// IntentionSpecific handles the endpoint for /v1/connect/intentions/:id
|
||||
func (s *HTTPServer) IntentionSpecific(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) IntentionSpecific(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
id := strings.TrimPrefix(req.URL.Path, "/v1/connect/intentions/")
|
||||
|
||||
switch req.Method {
|
||||
|
@ -303,7 +303,7 @@ func (s *HTTPServer) IntentionSpecific(resp http.ResponseWriter, req *http.Reque
|
|||
}
|
||||
|
||||
// GET /v1/connect/intentions/:id
|
||||
func (s *HTTPServer) IntentionSpecificGet(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) IntentionSpecificGet(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Method is tested in IntentionEndpoint
|
||||
|
||||
args := structs.IntentionQueryRequest{
|
||||
|
@ -344,7 +344,7 @@ func (s *HTTPServer) IntentionSpecificGet(id string, resp http.ResponseWriter, r
|
|||
}
|
||||
|
||||
// PUT /v1/connect/intentions/:id
|
||||
func (s *HTTPServer) IntentionSpecificUpdate(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) IntentionSpecificUpdate(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Method is tested in IntentionEndpoint
|
||||
|
||||
var entMeta structs.EnterpriseMeta
|
||||
|
@ -377,7 +377,7 @@ func (s *HTTPServer) IntentionSpecificUpdate(id string, resp http.ResponseWriter
|
|||
}
|
||||
|
||||
// DELETE /v1/connect/intentions/:id
|
||||
func (s *HTTPServer) IntentionSpecificDelete(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) IntentionSpecificDelete(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Method is tested in IntentionEndpoint
|
||||
|
||||
args := structs.IntentionRequest{
|
||||
|
|
|
@ -12,7 +12,7 @@ import (
|
|||
"github.com/hashicorp/consul/api"
|
||||
)
|
||||
|
||||
func (s *HTTPServer) KVSEndpoint(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) KVSEndpoint(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Set default DC
|
||||
args := structs.KeyRequest{}
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
|
@ -46,7 +46,7 @@ func (s *HTTPServer) KVSEndpoint(resp http.ResponseWriter, req *http.Request) (i
|
|||
}
|
||||
|
||||
// KVSGet handles a GET request
|
||||
func (s *HTTPServer) KVSGet(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) (interface{}, error) {
|
||||
func (s *HTTPHandlers) KVSGet(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) (interface{}, error) {
|
||||
// Check for recurse
|
||||
method := "KVS.Get"
|
||||
params := req.URL.Query()
|
||||
|
@ -93,7 +93,7 @@ func (s *HTTPServer) KVSGet(resp http.ResponseWriter, req *http.Request, args *s
|
|||
}
|
||||
|
||||
// KVSGetKeys handles a GET request for keys
|
||||
func (s *HTTPServer) KVSGetKeys(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) (interface{}, error) {
|
||||
func (s *HTTPHandlers) KVSGetKeys(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) (interface{}, error) {
|
||||
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -140,7 +140,7 @@ func (s *HTTPServer) KVSGetKeys(resp http.ResponseWriter, req *http.Request, arg
|
|||
}
|
||||
|
||||
// KVSPut handles a PUT request
|
||||
func (s *HTTPServer) KVSPut(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) (interface{}, error) {
|
||||
func (s *HTTPHandlers) KVSPut(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) (interface{}, error) {
|
||||
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -226,7 +226,7 @@ func (s *HTTPServer) KVSPut(resp http.ResponseWriter, req *http.Request, args *s
|
|||
}
|
||||
|
||||
// KVSPut handles a DELETE request
|
||||
func (s *HTTPServer) KVSDelete(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) (interface{}, error) {
|
||||
func (s *HTTPHandlers) KVSDelete(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) (interface{}, error) {
|
||||
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -15,7 +15,7 @@ import (
|
|||
|
||||
// OperatorRaftConfiguration is used to inspect the current Raft configuration.
|
||||
// This supports the stale query mode in case the cluster doesn't have a leader.
|
||||
func (s *HTTPServer) OperatorRaftConfiguration(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) OperatorRaftConfiguration(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.DCSpecificRequest
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
@ -31,7 +31,7 @@ func (s *HTTPServer) OperatorRaftConfiguration(resp http.ResponseWriter, req *ht
|
|||
|
||||
// OperatorRaftPeer supports actions on Raft peers. Currently we only support
|
||||
// removing peers by address.
|
||||
func (s *HTTPServer) OperatorRaftPeer(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) OperatorRaftPeer(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.RaftRemovePeerRequest
|
||||
s.parseDC(req, &args.Datacenter)
|
||||
s.parseToken(req, &args.Token)
|
||||
|
@ -73,7 +73,7 @@ type keyringArgs struct {
|
|||
}
|
||||
|
||||
// OperatorKeyringEndpoint handles keyring operations (install, list, use, remove)
|
||||
func (s *HTTPServer) OperatorKeyringEndpoint(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) OperatorKeyringEndpoint(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args keyringArgs
|
||||
if req.Method == "POST" || req.Method == "PUT" || req.Method == "DELETE" {
|
||||
if err := decodeBody(req.Body, &args); err != nil {
|
||||
|
@ -125,7 +125,7 @@ func (s *HTTPServer) OperatorKeyringEndpoint(resp http.ResponseWriter, req *http
|
|||
}
|
||||
|
||||
// KeyringInstall is used to install a new gossip encryption key into the cluster
|
||||
func (s *HTTPServer) KeyringInstall(resp http.ResponseWriter, req *http.Request, args *keyringArgs) (interface{}, error) {
|
||||
func (s *HTTPHandlers) KeyringInstall(resp http.ResponseWriter, req *http.Request, args *keyringArgs) (interface{}, error) {
|
||||
responses, err := s.agent.InstallKey(args.Key, args.Token, args.RelayFactor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -135,7 +135,7 @@ func (s *HTTPServer) KeyringInstall(resp http.ResponseWriter, req *http.Request,
|
|||
}
|
||||
|
||||
// KeyringList is used to list the keys installed in the cluster
|
||||
func (s *HTTPServer) KeyringList(resp http.ResponseWriter, req *http.Request, args *keyringArgs) (interface{}, error) {
|
||||
func (s *HTTPHandlers) KeyringList(resp http.ResponseWriter, req *http.Request, args *keyringArgs) (interface{}, error) {
|
||||
responses, err := s.agent.ListKeys(args.Token, args.LocalOnly, args.RelayFactor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -145,7 +145,7 @@ func (s *HTTPServer) KeyringList(resp http.ResponseWriter, req *http.Request, ar
|
|||
}
|
||||
|
||||
// KeyringRemove is used to list the keys installed in the cluster
|
||||
func (s *HTTPServer) KeyringRemove(resp http.ResponseWriter, req *http.Request, args *keyringArgs) (interface{}, error) {
|
||||
func (s *HTTPHandlers) KeyringRemove(resp http.ResponseWriter, req *http.Request, args *keyringArgs) (interface{}, error) {
|
||||
responses, err := s.agent.RemoveKey(args.Key, args.Token, args.RelayFactor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -155,7 +155,7 @@ func (s *HTTPServer) KeyringRemove(resp http.ResponseWriter, req *http.Request,
|
|||
}
|
||||
|
||||
// KeyringUse is used to change the primary gossip encryption key
|
||||
func (s *HTTPServer) KeyringUse(resp http.ResponseWriter, req *http.Request, args *keyringArgs) (interface{}, error) {
|
||||
func (s *HTTPHandlers) KeyringUse(resp http.ResponseWriter, req *http.Request, args *keyringArgs) (interface{}, error) {
|
||||
responses, err := s.agent.UseKey(args.Key, args.Token, args.RelayFactor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -183,7 +183,7 @@ func keyringErrorsOrNil(responses []*structs.KeyringResponse) error {
|
|||
|
||||
// OperatorAutopilotConfiguration is used to inspect the current Autopilot configuration.
|
||||
// This supports the stale query mode in case the cluster doesn't have a leader.
|
||||
func (s *HTTPServer) OperatorAutopilotConfiguration(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) OperatorAutopilotConfiguration(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Switch on the method
|
||||
switch req.Method {
|
||||
case "GET":
|
||||
|
@ -261,7 +261,7 @@ func (s *HTTPServer) OperatorAutopilotConfiguration(resp http.ResponseWriter, re
|
|||
}
|
||||
|
||||
// OperatorServerHealth is used to get the health of the servers in the local DC
|
||||
func (s *HTTPServer) OperatorServerHealth(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) OperatorServerHealth(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.DCSpecificRequest
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
|
|
@ -16,7 +16,7 @@ type preparedQueryCreateResponse struct {
|
|||
}
|
||||
|
||||
// preparedQueryCreate makes a new prepared query.
|
||||
func (s *HTTPServer) preparedQueryCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) preparedQueryCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.PreparedQueryRequest{
|
||||
Op: structs.PreparedQueryCreate,
|
||||
}
|
||||
|
@ -36,7 +36,7 @@ func (s *HTTPServer) preparedQueryCreate(resp http.ResponseWriter, req *http.Req
|
|||
}
|
||||
|
||||
// preparedQueryList returns all the prepared queries.
|
||||
func (s *HTTPServer) preparedQueryList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) preparedQueryList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.DCSpecificRequest
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
@ -63,7 +63,7 @@ RETRY_ONCE:
|
|||
}
|
||||
|
||||
// PreparedQueryGeneral handles all the general prepared query requests.
|
||||
func (s *HTTPServer) PreparedQueryGeneral(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) PreparedQueryGeneral(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
switch req.Method {
|
||||
case "POST":
|
||||
return s.preparedQueryCreate(resp, req)
|
||||
|
@ -90,7 +90,7 @@ func parseLimit(req *http.Request, limit *int) error {
|
|||
}
|
||||
|
||||
// preparedQueryExecute executes a prepared query.
|
||||
func (s *HTTPServer) preparedQueryExecute(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) preparedQueryExecute(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.PreparedQueryExecuteRequest{
|
||||
QueryIDOrName: id,
|
||||
Agent: structs.QuerySource{
|
||||
|
@ -174,7 +174,7 @@ func (s *HTTPServer) preparedQueryExecute(id string, resp http.ResponseWriter, r
|
|||
// preparedQueryExplain shows which query a name resolves to, the fully
|
||||
// interpolated template (if it's a template), as well as additional info
|
||||
// about the execution of a query.
|
||||
func (s *HTTPServer) preparedQueryExplain(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) preparedQueryExplain(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.PreparedQueryExecuteRequest{
|
||||
QueryIDOrName: id,
|
||||
Agent: structs.QuerySource{
|
||||
|
@ -214,7 +214,7 @@ RETRY_ONCE:
|
|||
}
|
||||
|
||||
// preparedQueryGet returns a single prepared query.
|
||||
func (s *HTTPServer) preparedQueryGet(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) preparedQueryGet(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.PreparedQuerySpecificRequest{
|
||||
QueryID: id,
|
||||
}
|
||||
|
@ -245,7 +245,7 @@ RETRY_ONCE:
|
|||
}
|
||||
|
||||
// preparedQueryUpdate updates a prepared query.
|
||||
func (s *HTTPServer) preparedQueryUpdate(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) preparedQueryUpdate(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.PreparedQueryRequest{
|
||||
Op: structs.PreparedQueryUpdate,
|
||||
}
|
||||
|
@ -274,7 +274,7 @@ func (s *HTTPServer) preparedQueryUpdate(id string, resp http.ResponseWriter, re
|
|||
}
|
||||
|
||||
// preparedQueryDelete deletes prepared query.
|
||||
func (s *HTTPServer) preparedQueryDelete(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) preparedQueryDelete(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.PreparedQueryRequest{
|
||||
Op: structs.PreparedQueryDelete,
|
||||
Query: &structs.PreparedQuery{
|
||||
|
@ -292,7 +292,7 @@ func (s *HTTPServer) preparedQueryDelete(id string, resp http.ResponseWriter, re
|
|||
}
|
||||
|
||||
// PreparedQuerySpecificOptions handles OPTIONS requests to prepared query endpoints.
|
||||
func (s *HTTPServer) preparedQuerySpecificOptions(resp http.ResponseWriter, req *http.Request) interface{} {
|
||||
func (s *HTTPHandlers) preparedQuerySpecificOptions(resp http.ResponseWriter, req *http.Request) interface{} {
|
||||
path := req.URL.Path
|
||||
switch {
|
||||
case strings.HasSuffix(path, "/execute"):
|
||||
|
@ -311,7 +311,7 @@ func (s *HTTPServer) preparedQuerySpecificOptions(resp http.ResponseWriter, req
|
|||
|
||||
// PreparedQuerySpecific handles all the prepared query requests specific to a
|
||||
// particular query.
|
||||
func (s *HTTPServer) PreparedQuerySpecific(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) PreparedQuerySpecific(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
if req.Method == "OPTIONS" {
|
||||
return s.preparedQuerySpecificOptions(resp, req), nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,30 @@
|
|||
package router
|
||||
|
||||
import "github.com/hashicorp/consul/agent/metadata"
|
||||
|
||||
// ServerTracker is called when Router is notified of a server being added or
|
||||
// removed.
|
||||
type ServerTracker interface {
|
||||
NewRebalancer(dc string) func()
|
||||
AddServer(*metadata.Server)
|
||||
RemoveServer(*metadata.Server)
|
||||
}
|
||||
|
||||
// Rebalancer is called periodically to re-order the servers so that the load on the
|
||||
// servers is evenly balanced.
|
||||
type Rebalancer func()
|
||||
|
||||
// NoOpServerTracker is a ServerTracker that does nothing. Used when gRPC is not
|
||||
// enabled.
|
||||
type NoOpServerTracker struct{}
|
||||
|
||||
// Rebalance does nothing
|
||||
func (NoOpServerTracker) NewRebalancer(string) func() {
|
||||
return func() {}
|
||||
}
|
||||
|
||||
// AddServer does nothing
|
||||
func (NoOpServerTracker) AddServer(*metadata.Server) {}
|
||||
|
||||
// RemoveServer does nothing
|
||||
func (NoOpServerTracker) RemoveServer(*metadata.Server) {}
|
|
@ -98,6 +98,8 @@ type Manager struct {
|
|||
// client.ConnPool.
|
||||
connPoolPinger Pinger
|
||||
|
||||
rebalancer Rebalancer
|
||||
|
||||
// serverName has the name of the managers's server. This is used to
|
||||
// short-circuit pinging to itself.
|
||||
serverName string
|
||||
|
@ -267,7 +269,7 @@ func (m *Manager) saveServerList(l serverList) {
|
|||
}
|
||||
|
||||
// New is the only way to safely create a new Manager struct.
|
||||
func New(logger hclog.Logger, shutdownCh chan struct{}, clusterInfo ManagerSerfCluster, connPoolPinger Pinger, serverName string) (m *Manager) {
|
||||
func New(logger hclog.Logger, shutdownCh chan struct{}, clusterInfo ManagerSerfCluster, connPoolPinger Pinger, serverName string, rb Rebalancer) (m *Manager) {
|
||||
if logger == nil {
|
||||
logger = hclog.New(&hclog.LoggerOptions{})
|
||||
}
|
||||
|
@ -278,6 +280,7 @@ func New(logger hclog.Logger, shutdownCh chan struct{}, clusterInfo ManagerSerfC
|
|||
m.connPoolPinger = connPoolPinger // can't pass *consul.ConnPool: import cycle
|
||||
m.rebalanceTimer = time.NewTimer(clientRPCMinReuseDuration)
|
||||
m.shutdownCh = shutdownCh
|
||||
m.rebalancer = rb
|
||||
m.serverName = serverName
|
||||
atomic.StoreInt32(&m.offline, 1)
|
||||
|
||||
|
@ -529,6 +532,7 @@ func (m *Manager) Start() {
|
|||
for {
|
||||
select {
|
||||
case <-m.rebalanceTimer.C:
|
||||
m.rebalancer()
|
||||
m.RebalanceServers()
|
||||
m.refreshServerRebalanceTimer()
|
||||
|
||||
|
|
|
@ -54,14 +54,16 @@ func (s *fauxSerf) NumNodes() int {
|
|||
func testManager() (m *Manager) {
|
||||
logger := GetBufferedLogger()
|
||||
shutdownCh := make(chan struct{})
|
||||
m = New(logger, shutdownCh, &fauxSerf{numNodes: 16384}, &fauxConnPool{}, "")
|
||||
m = New(logger, shutdownCh, &fauxSerf{numNodes: 16384}, &fauxConnPool{}, "", noopRebalancer)
|
||||
return m
|
||||
}
|
||||
|
||||
func noopRebalancer() {}
|
||||
|
||||
func testManagerFailProb(failPct float64) (m *Manager) {
|
||||
logger := GetBufferedLogger()
|
||||
shutdownCh := make(chan struct{})
|
||||
m = New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{failPct: failPct}, "")
|
||||
m = New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{failPct: failPct}, "", noopRebalancer)
|
||||
return m
|
||||
}
|
||||
|
||||
|
@ -300,7 +302,7 @@ func TestManagerInternal_refreshServerRebalanceTimer(t *testing.T) {
|
|||
shutdownCh := make(chan struct{})
|
||||
|
||||
for _, s := range clusters {
|
||||
m := New(logger, shutdownCh, &fauxSerf{numNodes: s.numNodes}, &fauxConnPool{}, "")
|
||||
m := New(logger, shutdownCh, &fauxSerf{numNodes: s.numNodes}, &fauxConnPool{}, "", noopRebalancer)
|
||||
for i := 0; i < s.numServers; i++ {
|
||||
nodeName := fmt.Sprintf("s%02d", i)
|
||||
m.AddServer(&metadata.Server{Name: nodeName})
|
||||
|
|
|
@ -57,21 +57,23 @@ func (s *fauxSerf) NumNodes() int {
|
|||
func testManager(t testing.TB) (m *router.Manager) {
|
||||
logger := testutil.Logger(t)
|
||||
shutdownCh := make(chan struct{})
|
||||
m = router.New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{}, "")
|
||||
m = router.New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{}, "", noopRebalancer)
|
||||
return m
|
||||
}
|
||||
|
||||
func noopRebalancer() {}
|
||||
|
||||
func testManagerFailProb(t testing.TB, failPct float64) (m *router.Manager) {
|
||||
logger := testutil.Logger(t)
|
||||
shutdownCh := make(chan struct{})
|
||||
m = router.New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{failPct: failPct}, "")
|
||||
m = router.New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{failPct: failPct}, "", noopRebalancer)
|
||||
return m
|
||||
}
|
||||
|
||||
func testManagerFailAddr(t testing.TB, failAddr net.Addr) (m *router.Manager) {
|
||||
logger := testutil.Logger(t)
|
||||
shutdownCh := make(chan struct{})
|
||||
m = router.New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{failAddr: failAddr}, "")
|
||||
m = router.New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{failAddr: failAddr}, "", noopRebalancer)
|
||||
return m
|
||||
}
|
||||
|
||||
|
@ -195,7 +197,7 @@ func TestServers_FindServer(t *testing.T) {
|
|||
func TestServers_New(t *testing.T) {
|
||||
logger := testutil.Logger(t)
|
||||
shutdownCh := make(chan struct{})
|
||||
m := router.New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{}, "")
|
||||
m := router.New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{}, "", noopRebalancer)
|
||||
if m == nil {
|
||||
t.Fatalf("Manager nil")
|
||||
}
|
||||
|
|
|
@ -41,6 +41,10 @@ type Router struct {
|
|||
// routeFn is a hook to actually do the routing.
|
||||
routeFn func(datacenter string) (*Manager, *metadata.Server, bool)
|
||||
|
||||
// grpcServerTracker is used to balance grpc connections across servers,
|
||||
// and has callbacks for adding or removing a server.
|
||||
grpcServerTracker ServerTracker
|
||||
|
||||
// isShutdown prevents adding new routes to a router after it is shut
|
||||
// down.
|
||||
isShutdown bool
|
||||
|
@ -87,17 +91,21 @@ type areaInfo struct {
|
|||
}
|
||||
|
||||
// NewRouter returns a new Router with the given configuration.
|
||||
func NewRouter(logger hclog.Logger, localDatacenter, serverName string) *Router {
|
||||
func NewRouter(logger hclog.Logger, localDatacenter, serverName string, tracker ServerTracker) *Router {
|
||||
if logger == nil {
|
||||
logger = hclog.New(&hclog.LoggerOptions{})
|
||||
}
|
||||
if tracker == nil {
|
||||
tracker = NoOpServerTracker{}
|
||||
}
|
||||
|
||||
router := &Router{
|
||||
logger: logger.Named(logging.Router),
|
||||
localDatacenter: localDatacenter,
|
||||
serverName: serverName,
|
||||
areas: make(map[types.AreaID]*areaInfo),
|
||||
managers: make(map[string][]*Manager),
|
||||
logger: logger.Named(logging.Router),
|
||||
localDatacenter: localDatacenter,
|
||||
serverName: serverName,
|
||||
areas: make(map[types.AreaID]*areaInfo),
|
||||
managers: make(map[string][]*Manager),
|
||||
grpcServerTracker: tracker,
|
||||
}
|
||||
|
||||
// Hook the direct route lookup by default.
|
||||
|
@ -251,7 +259,8 @@ func (r *Router) maybeInitializeManager(area *areaInfo, dc string) *Manager {
|
|||
}
|
||||
|
||||
shutdownCh := make(chan struct{})
|
||||
manager := New(r.logger, shutdownCh, area.cluster, area.pinger, r.serverName)
|
||||
rb := r.grpcServerTracker.NewRebalancer(dc)
|
||||
manager := New(r.logger, shutdownCh, area.cluster, area.pinger, r.serverName, rb)
|
||||
info = &managerInfo{
|
||||
manager: manager,
|
||||
shutdownCh: shutdownCh,
|
||||
|
@ -278,6 +287,7 @@ func (r *Router) addServer(area *areaInfo, s *metadata.Server) error {
|
|||
}
|
||||
|
||||
manager.AddServer(s)
|
||||
r.grpcServerTracker.AddServer(s)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -313,6 +323,7 @@ func (r *Router) RemoveServer(areaID types.AreaID, s *metadata.Server) error {
|
|||
return nil
|
||||
}
|
||||
info.manager.RemoveServer(s)
|
||||
r.grpcServerTracker.RemoveServer(s)
|
||||
|
||||
// If this manager is empty then remove it so we don't accumulate cruft
|
||||
// and waste time during request routing.
|
||||
|
@ -536,10 +547,13 @@ func (r *Router) GetDatacentersByDistance() ([]string, error) {
|
|||
for _, m := range info.cluster.Members() {
|
||||
ok, parts := metadata.IsConsulServer(m)
|
||||
if !ok {
|
||||
r.logger.Warn("Non-server in server-only area",
|
||||
"non_server", m.Name,
|
||||
"area", areaID,
|
||||
)
|
||||
if areaID != types.AreaLAN {
|
||||
r.logger.Warn("Non-server in server-only area",
|
||||
"non_server", m.Name,
|
||||
"area", areaID,
|
||||
"func", "GetDatacentersByDistance",
|
||||
)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -547,6 +561,7 @@ func (r *Router) GetDatacentersByDistance() ([]string, error) {
|
|||
r.logger.Debug("server in area left, skipping",
|
||||
"server", m.Name,
|
||||
"area", areaID,
|
||||
"func", "GetDatacentersByDistance",
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
@ -607,10 +622,13 @@ func (r *Router) GetDatacenterMaps() ([]structs.DatacenterMap, error) {
|
|||
for _, m := range info.cluster.Members() {
|
||||
ok, parts := metadata.IsConsulServer(m)
|
||||
if !ok {
|
||||
r.logger.Warn("Non-server in server-only area",
|
||||
"non_server", m.Name,
|
||||
"area", areaID,
|
||||
)
|
||||
if areaID != types.AreaLAN {
|
||||
r.logger.Warn("Non-server in server-only area",
|
||||
"non_server", m.Name,
|
||||
"area", areaID,
|
||||
"func", "GetDatacenterMaps",
|
||||
)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -618,6 +636,7 @@ func (r *Router) GetDatacenterMaps() ([]structs.DatacenterMap, error) {
|
|||
r.logger.Debug("server in area left, skipping",
|
||||
"server", m.Name,
|
||||
"area", areaID,
|
||||
"func", "GetDatacenterMaps",
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -117,7 +117,7 @@ func testCluster(self string) *mockCluster {
|
|||
|
||||
func testRouter(t testing.TB, dc string) *Router {
|
||||
logger := testutil.Logger(t)
|
||||
return NewRouter(logger, dc, "")
|
||||
return NewRouter(logger, dc, "", nil)
|
||||
}
|
||||
|
||||
func TestRouter_Shutdown(t *testing.T) {
|
||||
|
|
|
@ -87,7 +87,11 @@ func (s *ServiceManager) registerOnce(args *addServiceRequest) error {
|
|||
s.agent.stateLock.Lock()
|
||||
defer s.agent.stateLock.Unlock()
|
||||
|
||||
err := s.agent.addServiceInternal(args, s.agent.snapshotCheckState())
|
||||
if args.snap == nil {
|
||||
args.snap = s.agent.snapshotCheckState()
|
||||
}
|
||||
|
||||
err := s.agent.addServiceInternal(args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error updating service registration: %v", err)
|
||||
}
|
||||
|
@ -128,7 +132,7 @@ func (s *ServiceManager) AddService(req *addServiceRequest) error {
|
|||
req.persistService = nil
|
||||
req.persistDefaults = nil
|
||||
req.persistServiceConfig = false
|
||||
return s.agent.addServiceInternal(req, s.agent.snapshotCheckState())
|
||||
return s.agent.addServiceInternal(req)
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -273,7 +277,8 @@ func (w *serviceConfigWatch) RegisterAndStart(
|
|||
token: w.registration.token,
|
||||
replaceExistingChecks: w.registration.replaceExistingChecks,
|
||||
source: w.registration.source,
|
||||
}, w.agent.snapshotCheckState())
|
||||
snap: w.agent.snapshotCheckState(),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error updating service registration: %v", err)
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ type sessionCreateResponse struct {
|
|||
}
|
||||
|
||||
// SessionCreate is used to create a new session
|
||||
func (s *HTTPServer) SessionCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) SessionCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Default the session to our node + serf check + release session
|
||||
// invalidate behavior.
|
||||
args := structs.SessionRequest{
|
||||
|
@ -60,7 +60,7 @@ func (s *HTTPServer) SessionCreate(resp http.ResponseWriter, req *http.Request)
|
|||
}
|
||||
|
||||
// SessionDestroy is used to destroy an existing session
|
||||
func (s *HTTPServer) SessionDestroy(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) SessionDestroy(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.SessionRequest{
|
||||
Op: structs.SessionDestroy,
|
||||
}
|
||||
|
@ -87,7 +87,7 @@ func (s *HTTPServer) SessionDestroy(resp http.ResponseWriter, req *http.Request)
|
|||
}
|
||||
|
||||
// SessionRenew is used to renew the TTL on an existing TTL session
|
||||
func (s *HTTPServer) SessionRenew(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) SessionRenew(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.SessionSpecificRequest{}
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
@ -118,7 +118,7 @@ func (s *HTTPServer) SessionRenew(resp http.ResponseWriter, req *http.Request) (
|
|||
}
|
||||
|
||||
// SessionGet is used to get info for a particular session
|
||||
func (s *HTTPServer) SessionGet(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) SessionGet(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.SessionSpecificRequest{}
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
@ -150,7 +150,7 @@ func (s *HTTPServer) SessionGet(resp http.ResponseWriter, req *http.Request) (in
|
|||
}
|
||||
|
||||
// SessionList is used to list all the sessions
|
||||
func (s *HTTPServer) SessionList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) SessionList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.SessionSpecificRequest{}
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
@ -173,7 +173,7 @@ func (s *HTTPServer) SessionList(resp http.ResponseWriter, req *http.Request) (i
|
|||
}
|
||||
|
||||
// SessionsForNode returns all the nodes belonging to a node
|
||||
func (s *HTTPServer) SessionsForNode(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) SessionsForNode(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.NodeSpecificRequest{}
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
|
|
@ -372,7 +372,7 @@ func TestSessionCreate_NoCheck(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func makeTestSession(t *testing.T, srv *HTTPServer) string {
|
||||
func makeTestSession(t *testing.T, srv *HTTPHandlers) string {
|
||||
t.Helper()
|
||||
url := "/v1/session/create"
|
||||
req, _ := http.NewRequest("PUT", url, nil)
|
||||
|
@ -385,7 +385,7 @@ func makeTestSession(t *testing.T, srv *HTTPServer) string {
|
|||
return sessResp.ID
|
||||
}
|
||||
|
||||
func makeTestSessionDelete(t *testing.T, srv *HTTPServer) string {
|
||||
func makeTestSessionDelete(t *testing.T, srv *HTTPHandlers) string {
|
||||
t.Helper()
|
||||
// Create Session with delete behavior
|
||||
body := bytes.NewBuffer(nil)
|
||||
|
@ -406,7 +406,7 @@ func makeTestSessionDelete(t *testing.T, srv *HTTPServer) string {
|
|||
return sessResp.ID
|
||||
}
|
||||
|
||||
func makeTestSessionTTL(t *testing.T, srv *HTTPServer, ttl string) string {
|
||||
func makeTestSessionTTL(t *testing.T, srv *HTTPHandlers, ttl string) string {
|
||||
t.Helper()
|
||||
// Create Session with TTL
|
||||
body := bytes.NewBuffer(nil)
|
||||
|
|
|
@ -10,6 +10,8 @@ import (
|
|||
autoconf "github.com/hashicorp/consul/agent/auto-config"
|
||||
"github.com/hashicorp/consul/agent/cache"
|
||||
"github.com/hashicorp/consul/agent/config"
|
||||
"github.com/hashicorp/consul/agent/consul"
|
||||
"github.com/hashicorp/consul/agent/grpc/resolver"
|
||||
"github.com/hashicorp/consul/agent/pool"
|
||||
"github.com/hashicorp/consul/agent/router"
|
||||
"github.com/hashicorp/consul/agent/token"
|
||||
|
@ -25,15 +27,12 @@ import (
|
|||
// has been moved out in front of Agent.New, and we can better see the setup
|
||||
// dependencies.
|
||||
type BaseDeps struct {
|
||||
Logger hclog.InterceptLogger
|
||||
TLSConfigurator *tlsutil.Configurator // TODO: use an interface
|
||||
MetricsHandler MetricsHandler
|
||||
RuntimeConfig *config.RuntimeConfig
|
||||
Tokens *token.Store
|
||||
Cache *cache.Cache
|
||||
AutoConfig *autoconf.AutoConfig // TODO: use an interface
|
||||
ConnPool *pool.ConnPool // TODO: use an interface
|
||||
Router *router.Router
|
||||
consul.Deps // TODO: un-embed
|
||||
|
||||
RuntimeConfig *config.RuntimeConfig
|
||||
MetricsHandler MetricsHandler
|
||||
AutoConfig *autoconf.AutoConfig // TODO: use an interface
|
||||
Cache *cache.Cache
|
||||
}
|
||||
|
||||
// MetricsHandler provides an http.Handler for displaying metrics.
|
||||
|
@ -84,7 +83,10 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error)
|
|||
d.Cache = cache.New(cfg.Cache)
|
||||
d.ConnPool = newConnPool(cfg, d.Logger, d.TLSConfigurator)
|
||||
|
||||
d.Router = router.NewRouter(d.Logger, cfg.Datacenter, fmt.Sprintf("%s.%s", cfg.NodeName, cfg.Datacenter))
|
||||
// TODO(streaming): setConfig.Scheme name for tests
|
||||
builder := resolver.NewServerResolverBuilder(resolver.Config{})
|
||||
resolver.RegisterWithGRPC(builder)
|
||||
d.Router = router.NewRouter(d.Logger, cfg.Datacenter, fmt.Sprintf("%s.%s", cfg.NodeName, cfg.Datacenter), builder)
|
||||
|
||||
acConf := autoconf.Config{
|
||||
DirectRPC: d.ConnPool,
|
||||
|
@ -120,6 +122,12 @@ func newConnPool(config *config.RuntimeConfig, logger hclog.Logger, tls *tlsutil
|
|||
pool.MaxTime = 2 * time.Minute
|
||||
pool.MaxStreams = 64
|
||||
} else {
|
||||
// MaxTime controls how long we keep an idle connection open to a server.
|
||||
// 127s was chosen as the first prime above 120s
|
||||
// (arbitrarily chose to use a prime) with the intent of reusing
|
||||
// connections who are used by once-a-minute cron(8) jobs *and* who
|
||||
// use a 60s jitter window (e.g. in vixie cron job execution can
|
||||
// drift by up to 59s per job, or 119s for a once-a-minute cron job).
|
||||
pool.MaxTime = 127 * time.Second
|
||||
pool.MaxStreams = 32
|
||||
}
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
// Snapshot handles requests to take and restore snapshots. This uses a special
|
||||
// mechanism to make the RPC since we potentially stream large amounts of data
|
||||
// as part of these requests.
|
||||
func (s *HTTPServer) Snapshot(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) Snapshot(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.SnapshotRequest
|
||||
s.parseDC(req, &args.Datacenter)
|
||||
s.parseToken(req, &args.Token)
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
|
|||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
func (s *HTTPServer) StatusLeader(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) StatusLeader(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.DCSpecificRequest{}
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
@ -19,7 +19,7 @@ func (s *HTTPServer) StatusLeader(resp http.ResponseWriter, req *http.Request) (
|
|||
return out, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) StatusPeers(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) StatusPeers(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.DCSpecificRequest{}
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
|
|
@ -314,7 +314,8 @@ func (c *CAConfiguration) GetCommonConfig() (*CommonCAProviderConfig, error) {
|
|||
}
|
||||
|
||||
type CommonCAProviderConfig struct {
|
||||
LeafCertTTL time.Duration
|
||||
LeafCertTTL time.Duration
|
||||
IntermediateCertTTL time.Duration
|
||||
|
||||
SkipValidate bool
|
||||
|
||||
|
@ -360,6 +361,10 @@ type CommonCAProviderConfig struct {
|
|||
var MinLeafCertTTL = time.Hour
|
||||
var MaxLeafCertTTL = 365 * 24 * time.Hour
|
||||
|
||||
// intermediateCertRenewInterval is the interval at which the expiration
|
||||
// of the intermediate cert is checked and renewed if necessary.
|
||||
var IntermediateCertRenewInterval = time.Hour
|
||||
|
||||
func (c CommonCAProviderConfig) Validate() error {
|
||||
if c.SkipValidate {
|
||||
return nil
|
||||
|
@ -373,6 +378,33 @@ func (c CommonCAProviderConfig) Validate() error {
|
|||
return fmt.Errorf("leaf cert TTL must be less than %s", MaxLeafCertTTL)
|
||||
}
|
||||
|
||||
if c.IntermediateCertTTL < (3 * IntermediateCertRenewInterval) {
|
||||
// Intermediate Certificates are checked every
|
||||
// hour(intermediateCertRenewInterval) if they are about to
|
||||
// expire. Recreating an intermediate certs is started once
|
||||
// more than half its lifetime has passed.
|
||||
// If it would be 2h, worst case is that the check happens
|
||||
// right before half time and when the check happens again, the
|
||||
// certificate is very close to expiring, leaving only a small
|
||||
// timeframe to renew. 3h leaves more than 30min to recreate.
|
||||
// Right now the minimum LeafCertTTL is 1h, which means this
|
||||
// check not strictly needed, because the same thing is covered
|
||||
// in the next check too. But just in case minimum LeafCertTTL
|
||||
// changes at some point, this validation must still be
|
||||
// performed.
|
||||
return fmt.Errorf("Intermediate Cert TTL must be greater or equal than %dh", 3*int(IntermediateCertRenewInterval.Hours()))
|
||||
}
|
||||
if c.IntermediateCertTTL < (3 * c.LeafCertTTL) {
|
||||
// Intermediate Certificates are being sent to the proxy when
|
||||
// the Leaf Certificate changes because they are bundled
|
||||
// together.
|
||||
// That means that the Intermediate Certificate TTL must be at
|
||||
// a minimum of 3 * Leaf Certificate TTL to ensure that the new
|
||||
// Intermediate is being set together with the Leaf Certificate
|
||||
// before it expires.
|
||||
return fmt.Errorf("Intermediate Cert TTL must be greater or equal than 3 * LeafCertTTL (>=%s).", 3*c.LeafCertTTL)
|
||||
}
|
||||
|
||||
switch c.PrivateKeyType {
|
||||
case "ec":
|
||||
if c.PrivateKeyBits != 224 && c.PrivateKeyBits != 256 && c.PrivateKeyBits != 384 && c.PrivateKeyBits != 521 {
|
||||
|
@ -392,10 +424,9 @@ func (c CommonCAProviderConfig) Validate() error {
|
|||
type ConsulCAProviderConfig struct {
|
||||
CommonCAProviderConfig `mapstructure:",squash"`
|
||||
|
||||
PrivateKey string
|
||||
RootCert string
|
||||
RotationPeriod time.Duration
|
||||
IntermediateCertTTL time.Duration
|
||||
PrivateKey string
|
||||
RootCert string
|
||||
RotationPeriod time.Duration
|
||||
|
||||
// DisableCrossSigning is really only useful in test code to use the built in
|
||||
// provider while exercising logic that depends on the CA provider ability to
|
||||
|
@ -404,37 +435,7 @@ type ConsulCAProviderConfig struct {
|
|||
DisableCrossSigning bool
|
||||
}
|
||||
|
||||
// intermediateCertRenewInterval is the interval at which the expiration
|
||||
// of the intermediate cert is checked and renewed if necessary.
|
||||
var IntermediateCertRenewInterval = time.Hour
|
||||
|
||||
func (c *ConsulCAProviderConfig) Validate() error {
|
||||
if c.IntermediateCertTTL < (3 * IntermediateCertRenewInterval) {
|
||||
// Intermediate Certificates are checked every
|
||||
// hour(intermediateCertRenewInterval) if they are about to
|
||||
// expire. Recreating an intermediate certs is started once
|
||||
// more than half its lifetime has passed.
|
||||
// If it would be 2h, worst case is that the check happens
|
||||
// right before half time and when the check happens again, the
|
||||
// certificate is very close to expiring, leaving only a small
|
||||
// timeframe to renew. 3h leaves more than 30min to recreate.
|
||||
// Right now the minimum LeafCertTTL is 1h, which means this
|
||||
// check not strictly needed, because the same thing is covered
|
||||
// in the next check too. But just in case minimum LeafCertTTL
|
||||
// changes at some point, this validation must still be
|
||||
// performed.
|
||||
return fmt.Errorf("Intermediate Cert TTL must be greater or equal than %dh", 3*int(IntermediateCertRenewInterval.Hours()))
|
||||
}
|
||||
if c.IntermediateCertTTL < (3 * c.CommonCAProviderConfig.LeafCertTTL) {
|
||||
// Intermediate Certificates are being sent to the proxy when
|
||||
// the Leaf Certificate changes because they are bundled
|
||||
// together.
|
||||
// That means that the Intermediate Certificate TTL must be at
|
||||
// a minimum of 3 * Leaf Certificate TTL to ensure that the new
|
||||
// Intermediate is being set together with the Leaf Certificate
|
||||
// before it expires.
|
||||
return fmt.Errorf("Intermediate Cert TTL must be greater or equal than 3 * LeafCertTTL (>=%s).", 3*c.CommonCAProviderConfig.LeafCertTTL)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -18,14 +18,16 @@ func TestCAConfiguration_GetCommonConfig(t *testing.T) {
|
|||
name: "basic defaults",
|
||||
cfg: &CAConfiguration{
|
||||
Config: map[string]interface{}{
|
||||
"RotationPeriod": "2160h",
|
||||
"LeafCertTTL": "72h",
|
||||
"CSRMaxPerSecond": "50",
|
||||
"RotationPeriod": "2160h",
|
||||
"LeafCertTTL": "72h",
|
||||
"IntermediateCertTTL": "4320h",
|
||||
"CSRMaxPerSecond": "50",
|
||||
},
|
||||
},
|
||||
want: &CommonCAProviderConfig{
|
||||
LeafCertTTL: 72 * time.Hour,
|
||||
CSRMaxPerSecond: 50,
|
||||
LeafCertTTL: 72 * time.Hour,
|
||||
IntermediateCertTTL: 4320 * time.Hour,
|
||||
CSRMaxPerSecond: 50,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -38,13 +40,15 @@ func TestCAConfiguration_GetCommonConfig(t *testing.T) {
|
|||
name: "basic defaults after encoding fun",
|
||||
cfg: &CAConfiguration{
|
||||
Config: map[string]interface{}{
|
||||
"RotationPeriod": []uint8("2160h"),
|
||||
"LeafCertTTL": []uint8("72h"),
|
||||
"RotationPeriod": []uint8("2160h"),
|
||||
"LeafCertTTL": []uint8("72h"),
|
||||
"IntermediateCertTTL": []uint8("4320h"),
|
||||
},
|
||||
},
|
||||
want: &CommonCAProviderConfig{
|
||||
LeafCertTTL: 72 * time.Hour,
|
||||
CSRMaxPerSecond: 50, // The default value
|
||||
LeafCertTTL: 72 * time.Hour,
|
||||
IntermediateCertTTL: 4320 * time.Hour,
|
||||
CSRMaxPerSecond: 50, // The default value
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -63,39 +67,60 @@ func TestCAConfiguration_GetCommonConfig(t *testing.T) {
|
|||
func TestCAProviderConfig_Validate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
cfg *ConsulCAProviderConfig
|
||||
cfg *CommonCAProviderConfig
|
||||
wantErr bool
|
||||
wantMsg string
|
||||
}{
|
||||
{
|
||||
name: "defaults",
|
||||
cfg: &ConsulCAProviderConfig{},
|
||||
cfg: &CommonCAProviderConfig{},
|
||||
wantErr: true,
|
||||
wantMsg: "Intermediate Cert TTL must be greater or equal than 3h",
|
||||
wantMsg: "leaf cert TTL must be greater or equal than 1h0m0s",
|
||||
},
|
||||
{
|
||||
name: "intermediate cert ttl too short",
|
||||
cfg: &ConsulCAProviderConfig{
|
||||
CommonCAProviderConfig: CommonCAProviderConfig{LeafCertTTL: 2 * time.Hour},
|
||||
IntermediateCertTTL: 4 * time.Hour,
|
||||
cfg: &CommonCAProviderConfig{
|
||||
LeafCertTTL: 2 * time.Hour,
|
||||
IntermediateCertTTL: 4 * time.Hour,
|
||||
},
|
||||
wantErr: true,
|
||||
wantMsg: "Intermediate Cert TTL must be greater or equal than 3 * LeafCertTTL (>=6h0m0s).",
|
||||
},
|
||||
{
|
||||
name: "intermediate cert ttl too short",
|
||||
cfg: &ConsulCAProviderConfig{
|
||||
CommonCAProviderConfig: CommonCAProviderConfig{LeafCertTTL: 5 * time.Hour},
|
||||
IntermediateCertTTL: 15*time.Hour - 1,
|
||||
cfg: &CommonCAProviderConfig{
|
||||
LeafCertTTL: 5 * time.Hour,
|
||||
IntermediateCertTTL: 15*time.Hour - 1,
|
||||
},
|
||||
wantErr: true,
|
||||
wantMsg: "Intermediate Cert TTL must be greater or equal than 3 * LeafCertTTL (>=15h0m0s).",
|
||||
},
|
||||
{
|
||||
name: "good intermediate and leaf cert TTL",
|
||||
cfg: &ConsulCAProviderConfig{
|
||||
CommonCAProviderConfig: CommonCAProviderConfig{LeafCertTTL: 1 * time.Hour},
|
||||
IntermediateCertTTL: 4 * time.Hour,
|
||||
name: "good intermediate and leaf cert TTL, missing key type",
|
||||
cfg: &CommonCAProviderConfig{
|
||||
LeafCertTTL: 1 * time.Hour,
|
||||
IntermediateCertTTL: 4 * time.Hour,
|
||||
},
|
||||
wantErr: true,
|
||||
wantMsg: "private key type must be either 'ec' or 'rsa'",
|
||||
},
|
||||
{
|
||||
name: "good intermediate/leaf cert TTL/key type, missing bits",
|
||||
cfg: &CommonCAProviderConfig{
|
||||
LeafCertTTL: 1 * time.Hour,
|
||||
IntermediateCertTTL: 4 * time.Hour,
|
||||
PrivateKeyType: "ec",
|
||||
},
|
||||
wantErr: true,
|
||||
wantMsg: "EC key length must be one of (224, 256, 384, 521) bits",
|
||||
},
|
||||
{
|
||||
name: "good intermediate/leaf cert TTL/key type/bits",
|
||||
cfg: &CommonCAProviderConfig{
|
||||
LeafCertTTL: 1 * time.Hour,
|
||||
IntermediateCertTTL: 4 * time.Hour,
|
||||
PrivateKeyType: "ec",
|
||||
PrivateKeyBits: 256,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
|
|
|
@ -83,7 +83,7 @@ func (c *MeshGatewayConfig) ToAPI() api.MeshGatewayConfig {
|
|||
type ConnectProxyConfig struct {
|
||||
// DestinationServiceName is required and is the name of the service to accept
|
||||
// traffic for.
|
||||
DestinationServiceName string `json:",omitempty"`
|
||||
DestinationServiceName string `json:",omitempty" alias:"destination_service_name"`
|
||||
|
||||
// DestinationServiceID is optional and should only be specified for
|
||||
// "side-car" style proxies where the proxy is in front of just a single
|
||||
|
@ -91,19 +91,19 @@ type ConnectProxyConfig struct {
|
|||
// being represented which must be registered to the same agent. It's valid to
|
||||
// provide a service ID that does not yet exist to avoid timing issues when
|
||||
// bootstrapping a service with a proxy.
|
||||
DestinationServiceID string `json:",omitempty"`
|
||||
DestinationServiceID string `json:",omitempty" alias:"destination_service_id"`
|
||||
|
||||
// LocalServiceAddress is the address of the local service instance. It is
|
||||
// optional and should only be specified for "side-car" style proxies. It will
|
||||
// default to 127.0.0.1 if the proxy is a "side-car" (DestinationServiceID is
|
||||
// set) but otherwise will be ignored.
|
||||
LocalServiceAddress string `json:",omitempty"`
|
||||
LocalServiceAddress string `json:",omitempty" alias:"local_service_address"`
|
||||
|
||||
// LocalServicePort is the port of the local service instance. It is optional
|
||||
// and should only be specified for "side-car" style proxies. It will default
|
||||
// to the registered port for the instance if the proxy is a "side-car"
|
||||
// (DestinationServiceID is set) but otherwise will be ignored.
|
||||
LocalServicePort int `json:",omitempty"`
|
||||
LocalServicePort int `json:",omitempty" alias:"local_service_port"`
|
||||
|
||||
// Config is the arbitrary configuration data provided with the proxy
|
||||
// registration.
|
||||
|
@ -123,10 +123,11 @@ type ConnectProxyConfig struct {
|
|||
func (t *ConnectProxyConfig) UnmarshalJSON(data []byte) (err error) {
|
||||
type Alias ConnectProxyConfig
|
||||
aux := &struct {
|
||||
DestinationServiceNameSnake string `json:"destination_service_name"`
|
||||
DestinationServiceIDSnake string `json:"destination_service_id"`
|
||||
LocalServiceAddressSnake string `json:"local_service_address"`
|
||||
LocalServicePortSnake int `json:"local_service_port"`
|
||||
DestinationServiceNameSnake string `json:"destination_service_name"`
|
||||
DestinationServiceIDSnake string `json:"destination_service_id"`
|
||||
LocalServiceAddressSnake string `json:"local_service_address"`
|
||||
LocalServicePortSnake int `json:"local_service_port"`
|
||||
MeshGatewaySnake MeshGatewayConfig `json:"mesh_gateway"`
|
||||
|
||||
*Alias
|
||||
}{
|
||||
|
@ -147,6 +148,9 @@ func (t *ConnectProxyConfig) UnmarshalJSON(data []byte) (err error) {
|
|||
if t.LocalServicePort == 0 {
|
||||
t.LocalServicePort = aux.LocalServicePortSnake
|
||||
}
|
||||
if t.MeshGateway.Mode == "" {
|
||||
t.MeshGateway.Mode = aux.MeshGatewaySnake.Mode
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
|
@ -223,9 +227,9 @@ type Upstream struct {
|
|||
// DestinationType would be better as an int constant but even with custom
|
||||
// JSON marshallers it causes havoc with all the mapstructure mangling we do
|
||||
// on service definitions in various places.
|
||||
DestinationType string
|
||||
DestinationNamespace string `json:",omitempty"`
|
||||
DestinationName string
|
||||
DestinationType string `alias:"destination_type"`
|
||||
DestinationNamespace string `json:",omitempty" alias:"destination_namespace"`
|
||||
DestinationName string `alias:"destination_name"`
|
||||
|
||||
// Datacenter that the service discovery request should be run against. Note
|
||||
// for prepared queries, the actual results might be from a different
|
||||
|
@ -234,19 +238,19 @@ type Upstream struct {
|
|||
|
||||
// LocalBindAddress is the ip address a side-car proxy should listen on for
|
||||
// traffic destined for this upstream service. Default if empty is 127.0.0.1.
|
||||
LocalBindAddress string `json:",omitempty"`
|
||||
LocalBindAddress string `json:",omitempty" alias:"local_bind_address"`
|
||||
|
||||
// LocalBindPort is the ip address a side-car proxy should listen on for traffic
|
||||
// destined for this upstream service. Required.
|
||||
LocalBindPort int
|
||||
LocalBindPort int `alias:"local_bind_port"`
|
||||
|
||||
// Config is an opaque config that is specific to the proxy process being run.
|
||||
// It can be used to pass arbitrary configuration for this specific upstream
|
||||
// to the proxy.
|
||||
Config map[string]interface{} `bexpr:"-"`
|
||||
Config map[string]interface{} `json:",omitempty" bexpr:"-"`
|
||||
|
||||
// MeshGateway is the configuration for mesh gateway usage of this upstream
|
||||
MeshGateway MeshGatewayConfig `json:",omitempty"`
|
||||
MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"`
|
||||
|
||||
// IngressHosts are a list of hosts that should route to this upstream from
|
||||
// an ingress gateway. This cannot and should not be set by a user, it is
|
||||
|
@ -260,8 +264,11 @@ func (t *Upstream) UnmarshalJSON(data []byte) (err error) {
|
|||
DestinationTypeSnake string `json:"destination_type"`
|
||||
DestinationNamespaceSnake string `json:"destination_namespace"`
|
||||
DestinationNameSnake string `json:"destination_name"`
|
||||
LocalBindPortSnake int `json:"local_bind_port"`
|
||||
LocalBindAddressSnake string `json:"local_bind_address"`
|
||||
|
||||
LocalBindAddressSnake string `json:"local_bind_address"`
|
||||
LocalBindPortSnake int `json:"local_bind_port"`
|
||||
|
||||
MeshGatewaySnake MeshGatewayConfig `json:"mesh_gateway"`
|
||||
|
||||
*Alias
|
||||
}{
|
||||
|
@ -279,11 +286,14 @@ func (t *Upstream) UnmarshalJSON(data []byte) (err error) {
|
|||
if t.DestinationName == "" {
|
||||
t.DestinationName = aux.DestinationNameSnake
|
||||
}
|
||||
if t.LocalBindAddress == "" {
|
||||
t.LocalBindAddress = aux.LocalBindAddressSnake
|
||||
}
|
||||
if t.LocalBindPort == 0 {
|
||||
t.LocalBindPort = aux.LocalBindPortSnake
|
||||
}
|
||||
if t.LocalBindAddress == "" {
|
||||
t.LocalBindAddress = aux.LocalBindAddressSnake
|
||||
if t.MeshGateway.Mode == "" {
|
||||
t.MeshGateway.Mode = aux.MeshGatewaySnake.Mode
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -412,7 +422,7 @@ type ExposePath struct {
|
|||
// ListenerPort defines the port of the proxy's listener for exposed paths.
|
||||
ListenerPort int `json:",omitempty" alias:"listener_port"`
|
||||
|
||||
// ExposePath is the path to expose through the proxy, ie. "/metrics."
|
||||
// Path is the path to expose through the proxy, ie. "/metrics."
|
||||
Path string `json:",omitempty"`
|
||||
|
||||
// LocalPathPort is the port that the service is listening on for the given path.
|
||||
|
@ -423,14 +433,15 @@ type ExposePath struct {
|
|||
Protocol string `json:",omitempty"`
|
||||
|
||||
// ParsedFromCheck is set if this path was parsed from a registered check
|
||||
ParsedFromCheck bool
|
||||
ParsedFromCheck bool `json:",omitempty" alias:"parsed_from_check"`
|
||||
}
|
||||
|
||||
func (t *ExposePath) UnmarshalJSON(data []byte) (err error) {
|
||||
type Alias ExposePath
|
||||
aux := &struct {
|
||||
LocalPathPortSnake int `json:"local_path_port"`
|
||||
ListenerPortSnake int `json:"listener_port"`
|
||||
ListenerPortSnake int `json:"listener_port"`
|
||||
LocalPathPortSnake int `json:"local_path_port"`
|
||||
ParsedFromCheckSnake bool `json:"parsed_from_check"`
|
||||
|
||||
*Alias
|
||||
}{
|
||||
|
@ -445,6 +456,9 @@ func (t *ExposePath) UnmarshalJSON(data []byte) (err error) {
|
|||
if t.ListenerPort == 0 {
|
||||
t.ListenerPort = aux.ListenerPortSnake
|
||||
}
|
||||
if aux.ParsedFromCheckSnake {
|
||||
t.ParsedFromCheck = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -108,8 +108,7 @@ func TestUpstream_MarshalJSON(t *testing.T) {
|
|||
"DestinationName": "foo",
|
||||
"Datacenter": "dc1",
|
||||
"LocalBindPort": 1234,
|
||||
"MeshGateway": {},
|
||||
"Config": null
|
||||
"MeshGateway": {}
|
||||
}`,
|
||||
wantErr: false,
|
||||
},
|
||||
|
@ -126,8 +125,7 @@ func TestUpstream_MarshalJSON(t *testing.T) {
|
|||
"DestinationName": "foo",
|
||||
"Datacenter": "dc1",
|
||||
"LocalBindPort": 1234,
|
||||
"MeshGateway": {},
|
||||
"Config": null
|
||||
"MeshGateway": {}
|
||||
}`,
|
||||
wantErr: false,
|
||||
},
|
||||
|
@ -148,10 +146,11 @@ func TestUpstream_MarshalJSON(t *testing.T) {
|
|||
|
||||
func TestUpstream_UnmarshalJSON(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
json string
|
||||
want Upstream
|
||||
wantErr bool
|
||||
name string
|
||||
json string
|
||||
jsonSnake string
|
||||
want Upstream
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "service",
|
||||
|
@ -197,18 +196,303 @@ func TestUpstream_UnmarshalJSON(t *testing.T) {
|
|||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "kitchen sink",
|
||||
json: `
|
||||
{
|
||||
"DestinationType": "service",
|
||||
"DestinationNamespace": "default",
|
||||
"DestinationName": "bar1",
|
||||
"Datacenter": "dc1",
|
||||
"LocalBindAddress": "127.0.0.2",
|
||||
"LocalBindPort": 6060,
|
||||
"Config": {
|
||||
"x": "y",
|
||||
"z": -2
|
||||
},
|
||||
"MeshGateway": {
|
||||
"Mode": "local"
|
||||
}
|
||||
}
|
||||
`,
|
||||
jsonSnake: `
|
||||
{
|
||||
"destination_type": "service",
|
||||
"destination_namespace": "default",
|
||||
"destination_name": "bar1",
|
||||
"datacenter": "dc1",
|
||||
"local_bind_address": "127.0.0.2",
|
||||
"local_bind_port": 6060,
|
||||
"config": {
|
||||
"x": "y",
|
||||
"z": -2
|
||||
},
|
||||
"mesh_gateway": {
|
||||
"mode": "local"
|
||||
}
|
||||
}
|
||||
`,
|
||||
want: Upstream{
|
||||
DestinationType: UpstreamDestTypeService,
|
||||
DestinationNamespace: "default",
|
||||
DestinationName: "bar1",
|
||||
Datacenter: "dc1",
|
||||
LocalBindAddress: "127.0.0.2",
|
||||
LocalBindPort: 6060,
|
||||
Config: map[string]interface{}{
|
||||
"x": "y",
|
||||
"z": float64(-2),
|
||||
},
|
||||
MeshGateway: MeshGatewayConfig{
|
||||
Mode: "local",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
require := require.New(t)
|
||||
var got Upstream
|
||||
err := json.Unmarshal([]byte(tt.json), &got)
|
||||
if tt.wantErr {
|
||||
require.Error(err)
|
||||
return
|
||||
t.Run("camel", func(t *testing.T) {
|
||||
var got Upstream
|
||||
err := json.Unmarshal([]byte(tt.json), &got)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.want, got, "%+v", got)
|
||||
}
|
||||
})
|
||||
|
||||
if tt.jsonSnake != "" {
|
||||
t.Run("snake", func(t *testing.T) {
|
||||
var got Upstream
|
||||
err := json.Unmarshal([]byte(tt.jsonSnake), &got)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnectProxyConfig_UnmarshalJSON(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
json string
|
||||
jsonSnake string
|
||||
want ConnectProxyConfig
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "kitchen sink",
|
||||
json: `
|
||||
{
|
||||
"DestinationServiceName": "foo-name",
|
||||
"DestinationServiceID": "foo-id",
|
||||
"LocalServiceAddress": "127.0.0.1",
|
||||
"LocalServicePort": 5050,
|
||||
"Config": {
|
||||
"a": "b",
|
||||
"v": 42
|
||||
},
|
||||
"Upstreams": [
|
||||
{
|
||||
"DestinationType": "service",
|
||||
"DestinationNamespace": "default",
|
||||
"DestinationName": "bar1",
|
||||
"Datacenter": "dc1",
|
||||
"LocalBindAddress": "127.0.0.2",
|
||||
"LocalBindPort": 6060,
|
||||
"Config": {
|
||||
"x": "y",
|
||||
"z": -2
|
||||
},
|
||||
"MeshGateway": {
|
||||
"Mode": "local"
|
||||
}
|
||||
},
|
||||
{
|
||||
"DestinationType": "service",
|
||||
"DestinationNamespace": "default",
|
||||
"DestinationName": "bar2",
|
||||
"Datacenter": "dc2",
|
||||
"LocalBindAddress": "127.0.0.2",
|
||||
"LocalBindPort": 6161
|
||||
}
|
||||
],
|
||||
"MeshGateway": {
|
||||
"Mode": "remote"
|
||||
},
|
||||
"Expose": {
|
||||
"Checks": true,
|
||||
"Paths": [
|
||||
{
|
||||
"ListenerPort": 8080,
|
||||
"Path": "/foo",
|
||||
"LocalPathPort": 7070,
|
||||
"Protocol": "http2",
|
||||
"ParsedFromCheck": true
|
||||
},
|
||||
{
|
||||
"ListenerPort": 8181,
|
||||
"Path": "/foo2",
|
||||
"LocalPathPort": 7171,
|
||||
"Protocol": "http",
|
||||
"ParsedFromCheck": false
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
`,
|
||||
jsonSnake: `
|
||||
{
|
||||
"destination_service_name": "foo-name",
|
||||
"destination_service_id": "foo-id",
|
||||
"local_service_address": "127.0.0.1",
|
||||
"local_service_port": 5050,
|
||||
"config": {
|
||||
"a": "b",
|
||||
"v": 42
|
||||
},
|
||||
"upstreams": [
|
||||
{
|
||||
"destination_type": "service",
|
||||
"destination_namespace": "default",
|
||||
"destination_name": "bar1",
|
||||
"datacenter": "dc1",
|
||||
"local_bind_address": "127.0.0.2",
|
||||
"local_bind_port": 6060,
|
||||
"config": {
|
||||
"x": "y",
|
||||
"z": -2
|
||||
},
|
||||
"mesh_gateway": {
|
||||
"mode": "local"
|
||||
}
|
||||
},
|
||||
{
|
||||
"destination_type": "service",
|
||||
"destination_namespace": "default",
|
||||
"destination_name": "bar2",
|
||||
"datacenter": "dc2",
|
||||
"local_bind_address": "127.0.0.2",
|
||||
"local_bind_port": 6161
|
||||
}
|
||||
],
|
||||
"mesh_gateway": {
|
||||
"mode": "remote"
|
||||
},
|
||||
"expose": {
|
||||
"checks": true,
|
||||
"paths": [
|
||||
{
|
||||
"listener_port": 8080,
|
||||
"path": "/foo",
|
||||
"local_path_port": 7070,
|
||||
"protocol": "http2",
|
||||
"parsed_from_check": true
|
||||
},
|
||||
{
|
||||
"listener_port": 8181,
|
||||
"path": "/foo2",
|
||||
"local_path_port": 7171,
|
||||
"protocol": "http",
|
||||
"parsed_from_check": false
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
`,
|
||||
want: ConnectProxyConfig{
|
||||
DestinationServiceName: "foo-name",
|
||||
DestinationServiceID: "foo-id",
|
||||
LocalServiceAddress: "127.0.0.1",
|
||||
LocalServicePort: 5050,
|
||||
Config: map[string]interface{}{
|
||||
"a": "b",
|
||||
"v": float64(42),
|
||||
},
|
||||
Upstreams: []Upstream{
|
||||
{
|
||||
DestinationType: UpstreamDestTypeService,
|
||||
DestinationNamespace: "default",
|
||||
DestinationName: "bar1",
|
||||
Datacenter: "dc1",
|
||||
LocalBindAddress: "127.0.0.2",
|
||||
LocalBindPort: 6060,
|
||||
Config: map[string]interface{}{
|
||||
"x": "y",
|
||||
"z": float64(-2),
|
||||
},
|
||||
MeshGateway: MeshGatewayConfig{
|
||||
Mode: "local",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
DestinationType: UpstreamDestTypeService,
|
||||
DestinationNamespace: "default",
|
||||
DestinationName: "bar2",
|
||||
Datacenter: "dc2",
|
||||
LocalBindAddress: "127.0.0.2",
|
||||
LocalBindPort: 6161,
|
||||
},
|
||||
},
|
||||
|
||||
MeshGateway: MeshGatewayConfig{
|
||||
Mode: "remote",
|
||||
},
|
||||
Expose: ExposeConfig{
|
||||
Checks: true,
|
||||
Paths: []ExposePath{
|
||||
{
|
||||
ListenerPort: 8080,
|
||||
Path: "/foo",
|
||||
LocalPathPort: 7070,
|
||||
Protocol: "http2",
|
||||
ParsedFromCheck: true,
|
||||
},
|
||||
{
|
||||
ListenerPort: 8181,
|
||||
Path: "/foo2",
|
||||
LocalPathPort: 7171,
|
||||
Protocol: "http",
|
||||
ParsedFromCheck: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Run("camel", func(t *testing.T) {
|
||||
//
|
||||
var got ConnectProxyConfig
|
||||
err := json.Unmarshal([]byte(tt.json), &got)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.want, got)
|
||||
}
|
||||
})
|
||||
if tt.jsonSnake != "" {
|
||||
t.Run("snake", func(t *testing.T) {
|
||||
//
|
||||
var got ConnectProxyConfig
|
||||
err := json.Unmarshal([]byte(tt.json), &got)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
require.NoError(err)
|
||||
require.Equal(tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1315,7 +1315,7 @@ func TestStructs_DirEntry_Clone(t *testing.T) {
|
|||
func TestStructs_ValidateServiceAndNodeMetadata(t *testing.T) {
|
||||
tooMuchMeta := make(map[string]string)
|
||||
for i := 0; i < metaMaxKeyPairs+1; i++ {
|
||||
tooMuchMeta[string(i)] = "value"
|
||||
tooMuchMeta[fmt.Sprint(i)] = "value"
|
||||
}
|
||||
type testcase struct {
|
||||
Meta map[string]string
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http/httptest"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
@ -73,8 +74,8 @@ type TestAgent struct {
|
|||
// It is valid after Start().
|
||||
dns *DNSServer
|
||||
|
||||
// srv is an HTTPServer that may be used to test http endpoints.
|
||||
srv *HTTPServer
|
||||
// srv is an HTTPHandlers that may be used to test http endpoints.
|
||||
srv *HTTPHandlers
|
||||
|
||||
// overrides is an hcl config source to use to override otherwise
|
||||
// non-user settable configurations
|
||||
|
@ -212,7 +213,7 @@ func (a *TestAgent) Start(t *testing.T) (err error) {
|
|||
// Start the anti-entropy syncer
|
||||
a.Agent.StartSync()
|
||||
|
||||
a.srv = &HTTPServer{agent: agent, denylist: NewDenylist(a.config.HTTPBlockEndpoints)}
|
||||
a.srv = &HTTPHandlers{agent: agent, denylist: NewDenylist(a.config.HTTPBlockEndpoints)}
|
||||
|
||||
if err := a.waitForUp(); err != nil {
|
||||
a.Shutdown()
|
||||
|
@ -313,13 +314,23 @@ func (a *TestAgent) DNSAddr() string {
|
|||
}
|
||||
|
||||
func (a *TestAgent) HTTPAddr() string {
|
||||
var srv apiServer
|
||||
for _, srv = range a.Agent.apiServers.servers {
|
||||
if srv.Protocol == "http" {
|
||||
break
|
||||
addr, err := firstAddr(a.Agent.apiServers, "http")
|
||||
if err != nil {
|
||||
// TODO: t.Fatal instead of panic
|
||||
panic("no http server registered")
|
||||
}
|
||||
return addr.String()
|
||||
}
|
||||
|
||||
// firstAddr is used by tests to look up the address for the first server which
|
||||
// matches the protocol
|
||||
func firstAddr(s *apiServers, protocol string) (net.Addr, error) {
|
||||
for _, srv := range s.servers {
|
||||
if srv.Protocol == protocol {
|
||||
return srv.Addr, nil
|
||||
}
|
||||
}
|
||||
return srv.Addr.String()
|
||||
return nil, fmt.Errorf("no server registered with protocol %v", protocol)
|
||||
}
|
||||
|
||||
func (a *TestAgent) SegmentAddr(name string) string {
|
||||
|
|
|
@ -63,7 +63,7 @@ func isWrite(op api.KVOp) bool {
|
|||
// internal RPC format. This returns a count of the number of write ops, and
|
||||
// a boolean, that if false means an error response has been generated and
|
||||
// processing should stop.
|
||||
func (s *HTTPServer) convertOps(resp http.ResponseWriter, req *http.Request) (structs.TxnOps, int, bool) {
|
||||
func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (structs.TxnOps, int, bool) {
|
||||
// The TxnMaxReqLen limit and KVMaxValueSize limit both default to the
|
||||
// suggested raft data size and can be configured independently. The
|
||||
// TxnMaxReqLen is enforced on the cumulative size of the transaction,
|
||||
|
@ -291,7 +291,7 @@ func (s *HTTPServer) convertOps(resp http.ResponseWriter, req *http.Request) (st
|
|||
// transaction. A transaction consisting of only read operations will be fast-
|
||||
// pathed to an endpoint that supports consistency modes (but not blocking),
|
||||
// and everything else will be routed through Raft like a normal write.
|
||||
func (s *HTTPServer) Txn(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) Txn(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Convert the ops from the API format to the internal format.
|
||||
ops, writes, ok := s.convertOps(resp, req)
|
||||
if !ok {
|
||||
|
|
|
@ -44,7 +44,7 @@ type ServiceSummary struct {
|
|||
|
||||
// UINodes is used to list the nodes in a given datacenter. We return a
|
||||
// NodeDump which provides overview information for all the nodes
|
||||
func (s *HTTPServer) UINodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) UINodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Parse arguments
|
||||
args := structs.DCSpecificRequest{}
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
|
@ -87,7 +87,7 @@ RPC:
|
|||
|
||||
// UINodeInfo is used to get info on a single node in a given datacenter. We return a
|
||||
// NodeInfo which provides overview information for the node
|
||||
func (s *HTTPServer) UINodeInfo(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) UINodeInfo(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Parse arguments
|
||||
args := structs.NodeSpecificRequest{}
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
|
@ -137,7 +137,7 @@ RPC:
|
|||
|
||||
// UIServices is used to list the services in a given datacenter. We return a
|
||||
// ServiceSummary which provides overview information for the service
|
||||
func (s *HTTPServer) UIServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) UIServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Parse arguments
|
||||
args := structs.ServiceDumpRequest{}
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
|
@ -165,11 +165,11 @@ RPC:
|
|||
|
||||
// Generate the summary
|
||||
// TODO (gateways) (freddy) Have Internal.ServiceDump return ServiceDump instead. Need to add bexpr filtering for type.
|
||||
return summarizeServices(out.Nodes.ToServiceDump(), out.Gateways, s.agent.config), nil
|
||||
return summarizeServices(out.Nodes.ToServiceDump(), out.Gateways, s.agent.config, args.Datacenter), nil
|
||||
}
|
||||
|
||||
// UIGatewayServices is used to query all the nodes for services associated with a gateway along with their gateway config
|
||||
func (s *HTTPServer) UIGatewayServicesNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) UIGatewayServicesNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
// Parse arguments
|
||||
args := structs.ServiceSpecificRequest{}
|
||||
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
|
||||
|
@ -200,10 +200,11 @@ RPC:
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return summarizeServices(out.Dump, nil, s.agent.config), nil
|
||||
return summarizeServices(out.Dump, nil, s.agent.config, args.Datacenter), nil
|
||||
}
|
||||
|
||||
func summarizeServices(dump structs.ServiceDump, gateways structs.GatewayServices, cfg *config.RuntimeConfig) []*ServiceSummary {
|
||||
// TODO (freddy): Refactor to split up for the two use cases
|
||||
func summarizeServices(dump structs.ServiceDump, gateways structs.GatewayServices, cfg *config.RuntimeConfig, dc string) []*ServiceSummary {
|
||||
// Collect the summary information
|
||||
var services []structs.ServiceName
|
||||
summary := make(map[structs.ServiceName]*ServiceSummary)
|
||||
|
@ -240,7 +241,7 @@ func summarizeServices(dump structs.ServiceDump, gateways structs.GatewayService
|
|||
if csn.GatewayService != nil {
|
||||
gwsvc := csn.GatewayService
|
||||
sum := getService(gwsvc.Service)
|
||||
modifySummaryForGatewayService(cfg, sum, gwsvc)
|
||||
modifySummaryForGatewayService(cfg, dc, sum, gwsvc)
|
||||
}
|
||||
|
||||
// Will happen in cases where we only have the GatewayServices mapping
|
||||
|
@ -328,6 +329,7 @@ func summarizeServices(dump structs.ServiceDump, gateways structs.GatewayService
|
|||
|
||||
func modifySummaryForGatewayService(
|
||||
cfg *config.RuntimeConfig,
|
||||
datacenter string,
|
||||
sum *ServiceSummary,
|
||||
gwsvc *structs.GatewayService,
|
||||
) {
|
||||
|
@ -340,7 +342,7 @@ func modifySummaryForGatewayService(
|
|||
}
|
||||
dnsAddresses = append(dnsAddresses, serviceIngressDNSName(
|
||||
gwsvc.Service.Name,
|
||||
cfg.Datacenter,
|
||||
datacenter,
|
||||
domain,
|
||||
&gwsvc.Service.EnterpriseMeta,
|
||||
))
|
||||
|
@ -362,7 +364,7 @@ func modifySummaryForGatewayService(
|
|||
}
|
||||
|
||||
// GET /v1/internal/ui/gateway-intentions/:gateway
|
||||
func (s *HTTPServer) UIGatewayIntentions(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
func (s *HTTPHandlers) UIGatewayIntentions(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
var args structs.IntentionQueryRequest
|
||||
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
|
||||
return nil, nil
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/consul/agent/config"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
|
@ -865,3 +866,13 @@ func TestUIGatewayIntentions(t *testing.T) {
|
|||
}
|
||||
assert.ElementsMatch(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestUIEndpoint_modifySummaryForGatewayService_UseRequestedDCInsteadOfConfigured(t *testing.T) {
|
||||
dc := "dc2"
|
||||
cfg := config.RuntimeConfig{Datacenter: "dc1", DNSDomain: "consul"}
|
||||
sum := ServiceSummary{GatewayConfig: GatewayConfig{}}
|
||||
gwsvc := structs.GatewayService{Service: structs.ServiceName{Name: "test"}, Port: 42}
|
||||
modifySummaryForGatewayService(&cfg, dc, &sum, &gwsvc)
|
||||
expected := serviceCanonicalDNSName("test", "ingress", "dc2", "consul", nil) + ":42"
|
||||
require.Equal(t, expected, sum.GatewayConfig.Addresses[0])
|
||||
}
|
||||
|
|
|
@ -54,7 +54,7 @@ func TestShouldProcessUserEvent(t *testing.T) {
|
|||
srv1 := &structs.NodeService{
|
||||
ID: "mysql",
|
||||
Service: "mysql",
|
||||
Tags: []string{"test", "foo", "bar", "master"},
|
||||
Tags: []string{"test", "foo", "bar", "primary"},
|
||||
Port: 5000,
|
||||
}
|
||||
a.State.AddService(srv1, "")
|
||||
|
@ -99,7 +99,7 @@ func TestShouldProcessUserEvent(t *testing.T) {
|
|||
// Bad tag name
|
||||
p = &UserEvent{
|
||||
ServiceFilter: ".*sql",
|
||||
TagFilter: "slave",
|
||||
TagFilter: "replica",
|
||||
}
|
||||
if a.shouldProcessUserEvent(p) {
|
||||
t.Fatalf("bad")
|
||||
|
@ -108,7 +108,7 @@ func TestShouldProcessUserEvent(t *testing.T) {
|
|||
// Good service name
|
||||
p = &UserEvent{
|
||||
ServiceFilter: ".*sql",
|
||||
TagFilter: "master",
|
||||
TagFilter: "primary",
|
||||
}
|
||||
if !a.shouldProcessUserEvent(p) {
|
||||
t.Fatalf("bad")
|
||||
|
@ -154,7 +154,7 @@ func TestFireReceiveEvent(t *testing.T) {
|
|||
srv1 := &structs.NodeService{
|
||||
ID: "mysql",
|
||||
Service: "mysql",
|
||||
Tags: []string{"test", "foo", "bar", "master"},
|
||||
Tags: []string{"test", "foo", "bar", "primary"},
|
||||
Port: 5000,
|
||||
}
|
||||
a.State.AddService(srv1, "")
|
||||
|
|
|
@ -26,6 +26,8 @@ const (
|
|||
type ConfigEntry interface {
|
||||
GetKind() string
|
||||
GetName() string
|
||||
GetNamespace() string
|
||||
GetMeta() map[string]string
|
||||
GetCreateIndex() uint64
|
||||
GetModifyIndex() uint64
|
||||
}
|
||||
|
@ -108,6 +110,14 @@ func (s *ServiceConfigEntry) GetName() string {
|
|||
return s.Name
|
||||
}
|
||||
|
||||
func (s *ServiceConfigEntry) GetNamespace() string {
|
||||
return s.Namespace
|
||||
}
|
||||
|
||||
func (s *ServiceConfigEntry) GetMeta() map[string]string {
|
||||
return s.Meta
|
||||
}
|
||||
|
||||
func (s *ServiceConfigEntry) GetCreateIndex() uint64 {
|
||||
return s.CreateIndex
|
||||
}
|
||||
|
@ -136,6 +146,14 @@ func (p *ProxyConfigEntry) GetName() string {
|
|||
return p.Name
|
||||
}
|
||||
|
||||
func (p *ProxyConfigEntry) GetNamespace() string {
|
||||
return p.Namespace
|
||||
}
|
||||
|
||||
func (p *ProxyConfigEntry) GetMeta() map[string]string {
|
||||
return p.Meta
|
||||
}
|
||||
|
||||
func (p *ProxyConfigEntry) GetCreateIndex() uint64 {
|
||||
return p.CreateIndex
|
||||
}
|
||||
|
|
|
@ -17,10 +17,12 @@ type ServiceRouterConfigEntry struct {
|
|||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
func (e *ServiceRouterConfigEntry) GetKind() string { return e.Kind }
|
||||
func (e *ServiceRouterConfigEntry) GetName() string { return e.Name }
|
||||
func (e *ServiceRouterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
func (e *ServiceRouterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
|
||||
func (e *ServiceRouterConfigEntry) GetKind() string { return e.Kind }
|
||||
func (e *ServiceRouterConfigEntry) GetName() string { return e.Name }
|
||||
func (e *ServiceRouterConfigEntry) GetNamespace() string { return e.Namespace }
|
||||
func (e *ServiceRouterConfigEntry) GetMeta() map[string]string { return e.Meta }
|
||||
func (e *ServiceRouterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
func (e *ServiceRouterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
|
||||
|
||||
type ServiceRoute struct {
|
||||
Match *ServiceRouteMatch `json:",omitempty"`
|
||||
|
@ -117,10 +119,12 @@ type ServiceSplitterConfigEntry struct {
|
|||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
func (e *ServiceSplitterConfigEntry) GetKind() string { return e.Kind }
|
||||
func (e *ServiceSplitterConfigEntry) GetName() string { return e.Name }
|
||||
func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
|
||||
func (e *ServiceSplitterConfigEntry) GetKind() string { return e.Kind }
|
||||
func (e *ServiceSplitterConfigEntry) GetName() string { return e.Name }
|
||||
func (e *ServiceSplitterConfigEntry) GetNamespace() string { return e.Namespace }
|
||||
func (e *ServiceSplitterConfigEntry) GetMeta() map[string]string { return e.Meta }
|
||||
func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
|
||||
|
||||
type ServiceSplit struct {
|
||||
Weight float32
|
||||
|
@ -185,10 +189,12 @@ func (e *ServiceResolverConfigEntry) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (e *ServiceResolverConfigEntry) GetKind() string { return e.Kind }
|
||||
func (e *ServiceResolverConfigEntry) GetName() string { return e.Name }
|
||||
func (e *ServiceResolverConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
func (e *ServiceResolverConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
|
||||
func (e *ServiceResolverConfigEntry) GetKind() string { return e.Kind }
|
||||
func (e *ServiceResolverConfigEntry) GetName() string { return e.Name }
|
||||
func (e *ServiceResolverConfigEntry) GetNamespace() string { return e.Namespace }
|
||||
func (e *ServiceResolverConfigEntry) GetMeta() map[string]string { return e.Meta }
|
||||
func (e *ServiceResolverConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
func (e *ServiceResolverConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
|
||||
|
||||
type ServiceResolverSubset struct {
|
||||
Filter string `json:",omitempty"`
|
||||
|
|
|
@ -32,6 +32,9 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
|
|||
require.NotNil(t, qm)
|
||||
require.NotEqual(t, 0, qm.RequestTime)
|
||||
|
||||
// generic verification
|
||||
require.Equal(t, testEntry.Meta, entry.GetMeta())
|
||||
|
||||
// verify it
|
||||
readResolver, ok := entry.(*ServiceResolverConfigEntry)
|
||||
require.True(t, ok)
|
||||
|
@ -61,6 +64,9 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
|
|||
require.NotNil(t, qm)
|
||||
require.NotEqual(t, 0, qm.RequestTime)
|
||||
|
||||
// generic verification
|
||||
require.Equal(t, testEntry.Meta, entry.GetMeta())
|
||||
|
||||
// verify it
|
||||
readSplitter, ok := entry.(*ServiceSplitterConfigEntry)
|
||||
require.True(t, ok)
|
||||
|
@ -90,6 +96,9 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
|
|||
require.NotNil(t, qm)
|
||||
require.NotEqual(t, 0, qm.RequestTime)
|
||||
|
||||
// generic verification
|
||||
require.Equal(t, testEntry.Meta, entry.GetMeta())
|
||||
|
||||
// verify it
|
||||
readRouter, ok := entry.(*ServiceRouterConfigEntry)
|
||||
require.True(t, ok)
|
||||
|
@ -150,6 +159,10 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
|
|||
},
|
||||
},
|
||||
ConnectTimeout: 5 * time.Second,
|
||||
Meta: map[string]string{
|
||||
"foo": "bar",
|
||||
"gir": "zim",
|
||||
},
|
||||
},
|
||||
verify: verifyResolver,
|
||||
},
|
||||
|
@ -187,6 +200,10 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
|
|||
Namespace: defaultNamespace,
|
||||
},
|
||||
},
|
||||
Meta: map[string]string{
|
||||
"foo": "bar",
|
||||
"gir": "zim",
|
||||
},
|
||||
},
|
||||
verify: verifySplitter,
|
||||
},
|
||||
|
@ -221,6 +238,10 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
Meta: map[string]string{
|
||||
"foo": "bar",
|
||||
"gir": "zim",
|
||||
},
|
||||
},
|
||||
verify: verifyRouter,
|
||||
},
|
||||
|
|
|
@ -96,6 +96,14 @@ func (i *IngressGatewayConfigEntry) GetName() string {
|
|||
return i.Name
|
||||
}
|
||||
|
||||
func (i *IngressGatewayConfigEntry) GetNamespace() string {
|
||||
return i.Namespace
|
||||
}
|
||||
|
||||
func (i *IngressGatewayConfigEntry) GetMeta() map[string]string {
|
||||
return i.Meta
|
||||
}
|
||||
|
||||
func (i *IngressGatewayConfigEntry) GetCreateIndex() uint64 {
|
||||
return i.CreateIndex
|
||||
}
|
||||
|
@ -165,6 +173,14 @@ func (g *TerminatingGatewayConfigEntry) GetName() string {
|
|||
return g.Name
|
||||
}
|
||||
|
||||
func (g *TerminatingGatewayConfigEntry) GetNamespace() string {
|
||||
return g.Namespace
|
||||
}
|
||||
|
||||
func (g *TerminatingGatewayConfigEntry) GetMeta() map[string]string {
|
||||
return g.Meta
|
||||
}
|
||||
|
||||
func (g *TerminatingGatewayConfigEntry) GetCreateIndex() uint64 {
|
||||
return g.CreateIndex
|
||||
}
|
||||
|
|
|
@ -16,6 +16,10 @@ func TestAPI_ConfigEntries_IngressGateway(t *testing.T) {
|
|||
ingress1 := &IngressGatewayConfigEntry{
|
||||
Kind: IngressGateway,
|
||||
Name: "foo",
|
||||
Meta: map[string]string{
|
||||
"foo": "bar",
|
||||
"gir": "zim",
|
||||
},
|
||||
}
|
||||
|
||||
ingress2 := &IngressGatewayConfigEntry{
|
||||
|
@ -62,6 +66,8 @@ func TestAPI_ConfigEntries_IngressGateway(t *testing.T) {
|
|||
require.True(t, ok)
|
||||
require.Equal(t, ingress1.Kind, readIngress.Kind)
|
||||
require.Equal(t, ingress1.Name, readIngress.Name)
|
||||
require.Equal(t, ingress1.Meta, readIngress.Meta)
|
||||
require.Equal(t, ingress1.Meta, readIngress.GetMeta())
|
||||
|
||||
// update it
|
||||
ingress1.Listeners = []IngressListener{
|
||||
|
@ -164,6 +170,10 @@ func TestAPI_ConfigEntries_TerminatingGateway(t *testing.T) {
|
|||
terminating1 := &TerminatingGatewayConfigEntry{
|
||||
Kind: TerminatingGateway,
|
||||
Name: "foo",
|
||||
Meta: map[string]string{
|
||||
"foo": "bar",
|
||||
"gir": "zim",
|
||||
},
|
||||
}
|
||||
|
||||
terminating2 := &TerminatingGatewayConfigEntry{
|
||||
|
@ -194,6 +204,8 @@ func TestAPI_ConfigEntries_TerminatingGateway(t *testing.T) {
|
|||
require.True(t, ok)
|
||||
require.Equal(t, terminating1.Kind, readTerminating.Kind)
|
||||
require.Equal(t, terminating1.Name, readTerminating.Name)
|
||||
require.Equal(t, terminating1.Meta, readTerminating.Meta)
|
||||
require.Equal(t, terminating1.Meta, readTerminating.GetMeta())
|
||||
|
||||
// update it
|
||||
terminating1.Services = []LinkedService{
|
||||
|
|
|
@ -23,6 +23,10 @@ func TestAPI_ConfigEntries(t *testing.T) {
|
|||
"foo": "bar",
|
||||
"bar": 1.0,
|
||||
},
|
||||
Meta: map[string]string{
|
||||
"foo": "bar",
|
||||
"gir": "zim",
|
||||
},
|
||||
}
|
||||
|
||||
// set it
|
||||
|
@ -43,6 +47,8 @@ func TestAPI_ConfigEntries(t *testing.T) {
|
|||
require.Equal(t, global_proxy.Kind, readProxy.Kind)
|
||||
require.Equal(t, global_proxy.Name, readProxy.Name)
|
||||
require.Equal(t, global_proxy.Config, readProxy.Config)
|
||||
require.Equal(t, global_proxy.Meta, readProxy.Meta)
|
||||
require.Equal(t, global_proxy.Meta, readProxy.GetMeta())
|
||||
|
||||
global_proxy.Config["baz"] = true
|
||||
// CAS update fail
|
||||
|
@ -92,6 +98,10 @@ func TestAPI_ConfigEntries(t *testing.T) {
|
|||
Kind: ServiceDefaults,
|
||||
Name: "foo",
|
||||
Protocol: "udp",
|
||||
Meta: map[string]string{
|
||||
"foo": "bar",
|
||||
"gir": "zim",
|
||||
},
|
||||
}
|
||||
|
||||
service2 := &ServiceConfigEntry{
|
||||
|
@ -124,6 +134,8 @@ func TestAPI_ConfigEntries(t *testing.T) {
|
|||
require.Equal(t, service.Kind, readService.Kind)
|
||||
require.Equal(t, service.Name, readService.Name)
|
||||
require.Equal(t, service.Protocol, readService.Protocol)
|
||||
require.Equal(t, service.Meta, readService.Meta)
|
||||
require.Equal(t, service.Meta, readService.GetMeta())
|
||||
|
||||
// update it
|
||||
service.Protocol = "tcp"
|
||||
|
|
|
@ -17,15 +17,14 @@ cat <<-EOF
|
|||
Usage: ${SCRIPT_NAME} [<options ...>] <proto filepath>
|
||||
|
||||
Description:
|
||||
This script will build generate the Go files from protobuf files. In addition to
|
||||
just running the correct protoc generator it will also fixup build tags in the
|
||||
Generate the Go files from protobuf definitions. In addition to
|
||||
running the protoc generator it will also fixup build tags in the
|
||||
generated code.
|
||||
|
||||
Options:
|
||||
--import-replace Replace imports of google types with those from the gogo/protobuf repo.
|
||||
--grpc Enable the gRPC plugin
|
||||
|
||||
-h | --help Print this help text.
|
||||
-h | --help Print this help text.
|
||||
EOF
|
||||
}
|
||||
|
||||
|
@ -74,6 +73,7 @@ function main {
|
|||
local gogo_proto_imp_replace="Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types"
|
||||
gogo_proto_imp_replace="${gogo_proto_imp_replace},Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types"
|
||||
gogo_proto_imp_replace="${gogo_proto_imp_replace},Mgoogle/protobuf/empty.proto=github.com/gogo/protobuf/types"
|
||||
gogo_proto_imp_replace="${gogo_proto_imp_replace},Mgoogle/protobuf/struct.proto=github.com/gogo/protobuf/types"
|
||||
gogo_proto_imp_replace="${gogo_proto_imp_replace},Mgoogle/api/annotations.proto=github.com/gogo/googleapis/google/api"
|
||||
gogo_proto_imp_replace="${gogo_proto_imp_replace},Mgoogle/protobuf/field_mask.proto=github.com/gogo/protobuf/types"
|
||||
|
||||
|
|
|
@ -3,6 +3,7 @@ package keyring
|
|||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/consul/agent"
|
||||
consulapi "github.com/hashicorp/consul/api"
|
||||
|
@ -23,12 +24,13 @@ type cmd struct {
|
|||
help string
|
||||
|
||||
// flags
|
||||
installKey string
|
||||
useKey string
|
||||
removeKey string
|
||||
listKeys bool
|
||||
relay int
|
||||
local bool
|
||||
installKey string
|
||||
useKey string
|
||||
removeKey string
|
||||
listKeys bool
|
||||
listPrimaryKeys bool
|
||||
relay int
|
||||
local bool
|
||||
}
|
||||
|
||||
func (c *cmd) init() {
|
||||
|
@ -45,6 +47,8 @@ func (c *cmd) init() {
|
|||
"performed on keys which are not currently the primary key.")
|
||||
c.flags.BoolVar(&c.listKeys, "list", false,
|
||||
"List all keys currently in use within the cluster.")
|
||||
c.flags.BoolVar(&c.listPrimaryKeys, "list-primary", false,
|
||||
"List all primary keys currently in use within the cluster.")
|
||||
c.flags.IntVar(&c.relay, "relay-factor", 0,
|
||||
"Setting this to a non-zero value will cause nodes to relay their response "+
|
||||
"to the operation through this many randomly-chosen other nodes in the "+
|
||||
|
@ -58,6 +62,22 @@ func (c *cmd) init() {
|
|||
c.help = flags.Usage(help, c.flags)
|
||||
}
|
||||
|
||||
func numberActions(listKeys, listPrimaryKeys bool, installKey, useKey, removeKey string) int {
|
||||
count := 0
|
||||
if listKeys {
|
||||
count++
|
||||
}
|
||||
if listPrimaryKeys {
|
||||
count++
|
||||
}
|
||||
for _, arg := range []string{installKey, useKey, removeKey} {
|
||||
if len(arg) > 0 {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func (c *cmd) Run(args []string) int {
|
||||
if err := c.flags.Parse(args); err != nil {
|
||||
return 1
|
||||
|
@ -70,21 +90,15 @@ func (c *cmd) Run(args []string) int {
|
|||
Ui: c.UI,
|
||||
}
|
||||
|
||||
// Only accept a single argument
|
||||
found := c.listKeys
|
||||
for _, arg := range []string{c.installKey, c.useKey, c.removeKey} {
|
||||
if found && len(arg) > 0 {
|
||||
c.UI.Error("Only a single action is allowed")
|
||||
return 1
|
||||
}
|
||||
found = found || len(arg) > 0
|
||||
}
|
||||
|
||||
// Fail fast if no actionable args were passed
|
||||
if !found {
|
||||
num := numberActions(c.listKeys, c.listPrimaryKeys, c.installKey, c.useKey, c.removeKey)
|
||||
if num == 0 {
|
||||
c.UI.Error(c.Help())
|
||||
return 1
|
||||
}
|
||||
if num > 1 {
|
||||
c.UI.Error("Only a single action is allowed")
|
||||
return 1
|
||||
}
|
||||
|
||||
// Validate the relay factor
|
||||
relayFactor, err := agent.ParseRelayFactor(c.relay)
|
||||
|
@ -114,7 +128,22 @@ func (c *cmd) Run(args []string) int {
|
|||
c.UI.Error(fmt.Sprintf("error: %s", err))
|
||||
return 1
|
||||
}
|
||||
c.handleList(responses)
|
||||
for _, response := range responses {
|
||||
c.UI.Output(formatResponse(response, response.Keys))
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
if c.listPrimaryKeys {
|
||||
c.UI.Info("Gathering installed primary encryption keys...")
|
||||
responses, err := client.Operator().KeyringList(&consulapi.QueryOptions{RelayFactor: relayFactor, LocalOnly: c.local})
|
||||
if err != nil {
|
||||
c.UI.Error(fmt.Sprintf("error: %s", err))
|
||||
return 1
|
||||
}
|
||||
for _, response := range responses {
|
||||
c.UI.Output(formatResponse(response, response.PrimaryKeys))
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
|
@ -153,27 +182,40 @@ func (c *cmd) Run(args []string) int {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (c *cmd) handleList(responses []*consulapi.KeyringResponse) {
|
||||
for _, response := range responses {
|
||||
pool := response.Datacenter + " (LAN)"
|
||||
if response.Segment != "" {
|
||||
pool += fmt.Sprintf(" [%s]", response.Segment)
|
||||
}
|
||||
if response.WAN {
|
||||
pool = "WAN"
|
||||
}
|
||||
func formatResponse(response *consulapi.KeyringResponse, keys map[string]int) string {
|
||||
b := new(strings.Builder)
|
||||
b.WriteString("\n")
|
||||
b.WriteString(poolName(response.Datacenter, response.WAN, response.Segment))
|
||||
b.WriteString(formatMessages(response.Messages))
|
||||
b.WriteString(formatKeys(keys, response.NumNodes))
|
||||
return strings.TrimRight(b.String(), "\n")
|
||||
}
|
||||
|
||||
c.UI.Output("")
|
||||
c.UI.Output(pool + ":")
|
||||
|
||||
for from, msg := range response.Messages {
|
||||
c.UI.Output(fmt.Sprintf(" ===> %s: %s", from, msg))
|
||||
}
|
||||
|
||||
for key, num := range response.Keys {
|
||||
c.UI.Output(fmt.Sprintf(" %s [%d/%d]", key, num, response.NumNodes))
|
||||
}
|
||||
func poolName(dc string, wan bool, segment string) string {
|
||||
pool := fmt.Sprintf("%s (LAN)", dc)
|
||||
if wan {
|
||||
pool = "WAN"
|
||||
}
|
||||
if segment != "" {
|
||||
segment = fmt.Sprintf(" [%s]", segment)
|
||||
}
|
||||
return fmt.Sprintf("%s%s:\n", pool, segment)
|
||||
}
|
||||
|
||||
func formatMessages(messages map[string]string) string {
|
||||
b := new(strings.Builder)
|
||||
for from, msg := range messages {
|
||||
b.WriteString(fmt.Sprintf(" ===> %s: %s\n", from, msg))
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func formatKeys(keys map[string]int, total int) string {
|
||||
b := new(strings.Builder)
|
||||
for key, num := range keys {
|
||||
b.WriteString(fmt.Sprintf(" %s [%d/%d]\n", key, num, total))
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (c *cmd) Synopsis() string {
|
||||
|
|
|
@ -5,7 +5,9 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/hashicorp/consul/agent"
|
||||
consulapi "github.com/hashicorp/consul/api"
|
||||
"github.com/mitchellh/cli"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestKeyringCommand_noTabs(t *testing.T) {
|
||||
|
@ -51,6 +53,16 @@ func TestKeyringCommand(t *testing.T) {
|
|||
|
||||
// Rotate to key2, remove key1
|
||||
useKey(t, a1.HTTPAddr(), key2)
|
||||
|
||||
// New key should be present
|
||||
out = listPrimaryKeys(t, a1.HTTPAddr())
|
||||
if strings.Contains(out, key1) {
|
||||
t.Fatalf("bad: %#v", out)
|
||||
}
|
||||
if !strings.Contains(out, key2) {
|
||||
t.Fatalf("bad: %#v", out)
|
||||
}
|
||||
|
||||
removeKey(t, a1.HTTPAddr(), key1)
|
||||
|
||||
// Only key2 is present now
|
||||
|
@ -132,6 +144,19 @@ func listKeys(t *testing.T, addr string) string {
|
|||
return ui.OutputWriter.String()
|
||||
}
|
||||
|
||||
func listPrimaryKeys(t *testing.T, addr string) string {
|
||||
ui := cli.NewMockUi()
|
||||
c := New(ui)
|
||||
|
||||
args := []string{"-list-primary", "-http-addr=" + addr}
|
||||
code := c.Run(args)
|
||||
if code != 0 {
|
||||
t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
|
||||
}
|
||||
|
||||
return ui.OutputWriter.String()
|
||||
}
|
||||
|
||||
func installKey(t *testing.T, addr string, key string) {
|
||||
ui := cli.NewMockUi()
|
||||
c := New(ui)
|
||||
|
@ -164,3 +189,42 @@ func removeKey(t *testing.T, addr string, key string) {
|
|||
t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeyringCommand_poolName(t *testing.T) {
|
||||
require.Equal(t, "dc1 (LAN):\n", poolName("dc1", false, ""))
|
||||
require.Equal(t, "dc1 (LAN) [segment1]:\n", poolName("dc1", false, "segment1"))
|
||||
require.Equal(t, "WAN:\n", poolName("dc1", true, ""))
|
||||
}
|
||||
|
||||
func TestKeyringCommand_formatKeys(t *testing.T) {
|
||||
require.Equal(t, "", formatKeys(map[string]int{}, 0))
|
||||
keys := formatKeys(map[string]int{"key1": 1, "key2": 2}, 2)
|
||||
require.Contains(t, keys, " key1 [1/2]\n")
|
||||
require.Contains(t, keys, " key2 [2/2]\n")
|
||||
}
|
||||
|
||||
func TestKeyringCommand_formatMessages(t *testing.T) {
|
||||
require.Equal(t, "", formatMessages(map[string]string{}))
|
||||
messages := formatMessages(map[string]string{"n1": "hello", "n2": "world"})
|
||||
require.Contains(t, messages, " ===> n1: hello\n")
|
||||
require.Contains(t, messages, " ===> n2: world\n")
|
||||
}
|
||||
|
||||
func TestKeyringCommand_formatResponse(t *testing.T) {
|
||||
response := &consulapi.KeyringResponse{Datacenter: "dc1", NumNodes: 1}
|
||||
keys := map[string]int{"key1": 1}
|
||||
require.Equal(t, "\ndc1 (LAN):\n key1 [1/1]", formatResponse(response, keys))
|
||||
|
||||
response = &consulapi.KeyringResponse{WAN: true, Datacenter: "dc1", NumNodes: 1}
|
||||
keys = map[string]int{"key1": 1}
|
||||
require.Equal(t, "\nWAN:\n key1 [1/1]", formatResponse(response, keys))
|
||||
}
|
||||
|
||||
func TestKeyringCommand_numActions(t *testing.T) {
|
||||
require.Equal(t, 0, numberActions(false, false, "", "", ""))
|
||||
require.Equal(t, 1, numberActions(true, false, "", "", ""))
|
||||
require.Equal(t, 1, numberActions(false, true, "", "", ""))
|
||||
require.Equal(t, 1, numberActions(false, false, "1", "", ""))
|
||||
require.Equal(t, 2, numberActions(true, false, "1", "", ""))
|
||||
require.Equal(t, 2, numberActions(false, false, "1", "1", ""))
|
||||
}
|
||||
|
|
|
@ -72,13 +72,14 @@ func (c *cmd) Run(args []string) int {
|
|||
return 1
|
||||
}
|
||||
a := client.Agent()
|
||||
nodeName, err := a.NodeName()
|
||||
if err != nil {
|
||||
c.UI.Error(fmt.Sprintf("Error querying Consul agent: %s", err))
|
||||
return 1
|
||||
}
|
||||
|
||||
if !c.enable && !c.disable {
|
||||
nodeName, err := a.NodeName()
|
||||
if err != nil {
|
||||
c.UI.Error(fmt.Sprintf("Error querying Consul agent: %s", err))
|
||||
return 1
|
||||
}
|
||||
|
||||
// List mode - list nodes/services in maintenance mode
|
||||
checks, err := a.Checks()
|
||||
if err != nil {
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue