Merge pull request #3160 from hashicorp/f-acl
Initial ACL enforcement framework
This commit is contained in commit efa34cad2d.
acl/acl.go (new file, 224 lines)
@@ -0,0 +1,224 @@
package acl

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

// ManagementACL is a singleton used for management tokens
var ManagementACL *ACL

func init() {
	var err error
	ManagementACL, err = NewACL(true, nil)
	if err != nil {
		panic(fmt.Errorf("failed to setup management ACL: %v", err))
	}
}

// capabilitySet is a type wrapper to help manage a set of capabilities
type capabilitySet map[string]struct{}

func (c capabilitySet) Check(k string) bool {
	_, ok := c[k]
	return ok
}

func (c capabilitySet) Set(k string) {
	c[k] = struct{}{}
}

func (c capabilitySet) Clear() {
	for cap := range c {
		delete(c, cap)
	}
}

// ACL object is used to convert a set of policies into a structure that
// can be efficiently evaluated to determine if an action is allowed.
type ACL struct {
	// management tokens are allowed to do anything
	management bool

	// namespaces maps a namespace to a capabilitySet
	namespaces *iradix.Tree

	agent    string
	node     string
	operator string
}

// maxPrivilege returns the policy which grants the most privilege.
// This handles the case of Deny always taking maximum precedence.
func maxPrivilege(a, b string) string {
	switch {
	case a == PolicyDeny || b == PolicyDeny:
		return PolicyDeny
	case a == PolicyWrite || b == PolicyWrite:
		return PolicyWrite
	case a == PolicyRead || b == PolicyRead:
		return PolicyRead
	default:
		return ""
	}
}

// NewACL compiles a set of policies into an ACL object
func NewACL(management bool, policies []*Policy) (*ACL, error) {
	// Hot-path management tokens
	if management {
		return &ACL{management: true}, nil
	}

	// Create the ACL object
	acl := &ACL{}
	nsTxn := iradix.New().Txn()

	for _, policy := range policies {
	NAMESPACES:
		for _, ns := range policy.Namespaces {
			// Check for existing capabilities
			var capabilities capabilitySet
			raw, ok := nsTxn.Get([]byte(ns.Name))
			if ok {
				capabilities = raw.(capabilitySet)
			} else {
				capabilities = make(capabilitySet)
				nsTxn.Insert([]byte(ns.Name), capabilities)
			}

			// Deny always takes precedence
			if capabilities.Check(NamespaceCapabilityDeny) {
				continue NAMESPACES
			}

			// Add in all the capabilities
			for _, cap := range ns.Capabilities {
				if cap == NamespaceCapabilityDeny {
					// Overwrite any existing capabilities
					capabilities.Clear()
					capabilities.Set(NamespaceCapabilityDeny)
					continue NAMESPACES
				}
				capabilities.Set(cap)
			}
		}

		// Take the maximum privilege for agent, node, and operator
		if policy.Agent != nil {
			acl.agent = maxPrivilege(acl.agent, policy.Agent.Policy)
		}
		if policy.Node != nil {
			acl.node = maxPrivilege(acl.node, policy.Node.Policy)
		}
		if policy.Operator != nil {
			acl.operator = maxPrivilege(acl.operator, policy.Operator.Policy)
		}
	}

	// Finalize the namespaces
	acl.namespaces = nsTxn.Commit()
	return acl, nil
}

// AllowNamespaceOperation checks if a given operation is allowed for a namespace
func (a *ACL) AllowNamespaceOperation(ns string, op string) bool {
	// Hot path management tokens
	if a.management {
		return true
	}

	// Check for a matching capability set
	raw, ok := a.namespaces.Get([]byte(ns))
	if !ok {
		return false
	}

	// Check if the capability has been granted
	capabilities := raw.(capabilitySet)
	return capabilities.Check(op)
}

// AllowAgentRead checks if read operations are allowed for an agent
func (a *ACL) AllowAgentRead() bool {
	switch {
	case a.management:
		return true
	case a.agent == PolicyWrite:
		return true
	case a.agent == PolicyRead:
		return true
	default:
		return false
	}
}

// AllowAgentWrite checks if write operations are allowed for an agent
func (a *ACL) AllowAgentWrite() bool {
	switch {
	case a.management:
		return true
	case a.agent == PolicyWrite:
		return true
	default:
		return false
	}
}

// AllowNodeRead checks if read operations are allowed for a node
func (a *ACL) AllowNodeRead() bool {
	switch {
	case a.management:
		return true
	case a.node == PolicyWrite:
		return true
	case a.node == PolicyRead:
		return true
	default:
		return false
	}
}

// AllowNodeWrite checks if write operations are allowed for a node
func (a *ACL) AllowNodeWrite() bool {
	switch {
	case a.management:
		return true
	case a.node == PolicyWrite:
		return true
	default:
		return false
	}
}

// AllowOperatorRead checks if read operations are allowed for an operator
func (a *ACL) AllowOperatorRead() bool {
	switch {
	case a.management:
		return true
	case a.operator == PolicyWrite:
		return true
	case a.operator == PolicyRead:
		return true
	default:
		return false
	}
}

// AllowOperatorWrite checks if write operations are allowed for an operator
func (a *ACL) AllowOperatorWrite() bool {
	switch {
	case a.management:
		return true
	case a.operator == PolicyWrite:
		return true
	default:
		return false
	}
}

// IsManagement checks if this represents a management token
func (a *ACL) IsManagement() bool {
	return a.management
}
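
A quick usage sketch of the compiled object above (illustrative only, not part of the diff; the namespace name and capability choices are arbitrary): a Policy is built by hand, compiled with NewACL, and then queried with the Allow helpers.

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/nomad/acl"
    )

    func main() {
    	// A single policy granting only read-job on the "default" namespace.
    	p := &acl.Policy{
    		Namespaces: []*acl.NamespacePolicy{
    			{Name: "default", Capabilities: []string{acl.NamespaceCapabilityReadJob}},
    		},
    	}

    	// Compile into the radix-backed ACL object for fast checks.
    	aclObj, err := acl.NewACL(false, []*acl.Policy{p})
    	if err != nil {
    		panic(err)
    	}

    	fmt.Println(aclObj.AllowNamespaceOperation("default", acl.NamespaceCapabilityReadJob))   // true
    	fmt.Println(aclObj.AllowNamespaceOperation("default", acl.NamespaceCapabilitySubmitJob)) // false
    	fmt.Println(aclObj.AllowNodeWrite())                                                     // false: no node policy granted
    }
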
acl/acl_test.go (new file, 197 lines)
@@ -0,0 +1,197 @@
package acl

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestCapabilitySet(t *testing.T) {
	var cs capabilitySet = make(map[string]struct{})

	// Check no capabilities by default
	if cs.Check(PolicyDeny) {
		t.Fatalf("unexpected check")
	}

	// Do a set and check
	cs.Set(PolicyDeny)
	if !cs.Check(PolicyDeny) {
		t.Fatalf("missing check")
	}

	// Clear and check
	cs.Clear()
	if cs.Check(PolicyDeny) {
		t.Fatalf("unexpected check")
	}
}

func TestMaxPrivilege(t *testing.T) {
	type tcase struct {
		Privilege      string
		PrecedenceOver []string
	}
	tcases := []tcase{
		{
			PolicyDeny,
			[]string{PolicyDeny, PolicyWrite, PolicyRead, ""},
		},
		{
			PolicyWrite,
			[]string{PolicyWrite, PolicyRead, ""},
		},
		{
			PolicyRead,
			[]string{PolicyRead, ""},
		},
	}

	for idx1, tc := range tcases {
		for idx2, po := range tc.PrecedenceOver {
			if maxPrivilege(tc.Privilege, po) != tc.Privilege {
				t.Fatalf("failed %d %d", idx1, idx2)
			}
			if maxPrivilege(po, tc.Privilege) != tc.Privilege {
				t.Fatalf("failed %d %d", idx1, idx2)
			}
		}
	}
}

func TestACLManagement(t *testing.T) {
	// Create management ACL
	acl, err := NewACL(true, nil)
	assert.Nil(t, err)

	// Check default namespace rights
	assert.Equal(t, true, acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs))
	assert.Equal(t, true, acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob))

	// Check non-specified namespace
	assert.Equal(t, true, acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs))

	// Check the other simpler operations
	assert.Equal(t, true, acl.IsManagement())
	assert.Equal(t, true, acl.AllowAgentRead())
	assert.Equal(t, true, acl.AllowAgentWrite())
	assert.Equal(t, true, acl.AllowNodeRead())
	assert.Equal(t, true, acl.AllowNodeWrite())
	assert.Equal(t, true, acl.AllowOperatorRead())
	assert.Equal(t, true, acl.AllowOperatorWrite())
}

func TestACLMerge(t *testing.T) {
	// Merge read + write policy
	p1, err := Parse(readAll)
	assert.Nil(t, err)
	p2, err := Parse(writeAll)
	assert.Nil(t, err)
	acl, err := NewACL(false, []*Policy{p1, p2})
	assert.Nil(t, err)

	// Check default namespace rights
	assert.Equal(t, true, acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs))
	assert.Equal(t, true, acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob))

	// Check non-specified namespace
	assert.Equal(t, false, acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs))

	// Check the other simpler operations
	assert.Equal(t, false, acl.IsManagement())
	assert.Equal(t, true, acl.AllowAgentRead())
	assert.Equal(t, true, acl.AllowAgentWrite())
	assert.Equal(t, true, acl.AllowNodeRead())
	assert.Equal(t, true, acl.AllowNodeWrite())
	assert.Equal(t, true, acl.AllowOperatorRead())
	assert.Equal(t, true, acl.AllowOperatorWrite())

	// Merge read + blank
	p3, err := Parse("")
	assert.Nil(t, err)
	acl, err = NewACL(false, []*Policy{p1, p3})
	assert.Nil(t, err)

	// Check default namespace rights
	assert.Equal(t, true, acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs))
	assert.Equal(t, false, acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob))

	// Check non-specified namespace
	assert.Equal(t, false, acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs))

	// Check the other simpler operations
	assert.Equal(t, false, acl.IsManagement())
	assert.Equal(t, true, acl.AllowAgentRead())
	assert.Equal(t, false, acl.AllowAgentWrite())
	assert.Equal(t, true, acl.AllowNodeRead())
	assert.Equal(t, false, acl.AllowNodeWrite())
	assert.Equal(t, true, acl.AllowOperatorRead())
	assert.Equal(t, false, acl.AllowOperatorWrite())

	// Merge read + deny
	p4, err := Parse(denyAll)
	assert.Nil(t, err)
	acl, err = NewACL(false, []*Policy{p1, p4})
	assert.Nil(t, err)

	// Check default namespace rights
	assert.Equal(t, false, acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs))
	assert.Equal(t, false, acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob))

	// Check non-specified namespace
	assert.Equal(t, false, acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs))

	// Check the other simpler operations
	assert.Equal(t, false, acl.IsManagement())
	assert.Equal(t, false, acl.AllowAgentRead())
	assert.Equal(t, false, acl.AllowAgentWrite())
	assert.Equal(t, false, acl.AllowNodeRead())
	assert.Equal(t, false, acl.AllowNodeWrite())
	assert.Equal(t, false, acl.AllowOperatorRead())
	assert.Equal(t, false, acl.AllowOperatorWrite())
}

var readAll = `
namespace "default" {
	policy = "read"
}
agent {
	policy = "read"
}
node {
	policy = "read"
}
operator {
	policy = "read"
}
`

var writeAll = `
namespace "default" {
	policy = "write"
}
agent {
	policy = "write"
}
node {
	policy = "write"
}
operator {
	policy = "write"
}
`

var denyAll = `
namespace "default" {
	policy = "deny"
}
agent {
	policy = "deny"
}
node {
	policy = "deny"
}
operator {
	policy = "deny"
}
`
acl/policy.go (new file, 159 lines)
@@ -0,0 +1,159 @@
package acl

import (
	"fmt"
	"regexp"

	"github.com/hashicorp/hcl"
)

const (
	// The following levels are the only valid values for the `policy = "read"` stanza.
	// When policies are merged together, the most privilege is granted, except for deny
	// which always takes precedence and supersedes.
	PolicyDeny  = "deny"
	PolicyRead  = "read"
	PolicyWrite = "write"
)

const (
	// The following are the fine-grained capabilities that can be granted within a namespace.
	// The Policy stanza is a shorthand for granting several of these. When capabilities are
	// combined we take the union of all capabilities. If the deny capability is present, it
	// takes precedence and overwrites all other capabilities.
	NamespaceCapabilityDeny      = "deny"
	NamespaceCapabilityListJobs  = "list-jobs"
	NamespaceCapabilityReadJob   = "read-job"
	NamespaceCapabilitySubmitJob = "submit-job"
	NamespaceCapabilityReadLogs  = "read-logs"
	NamespaceCapabilityReadFS    = "read-fs"
)

var (
	validNamespace = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$")
)

// Policy represents a parsed HCL or JSON policy.
type Policy struct {
	Namespaces []*NamespacePolicy `hcl:"namespace,expand"`
	Agent      *AgentPolicy       `hcl:"agent"`
	Node       *NodePolicy        `hcl:"node"`
	Operator   *OperatorPolicy    `hcl:"operator"`
	Raw        string             `hcl:"-"`
}

// NamespacePolicy is the policy for a specific namespace
type NamespacePolicy struct {
	Name         string `hcl:",key"`
	Policy       string
	Capabilities []string
}

type AgentPolicy struct {
	Policy string
}

type NodePolicy struct {
	Policy string
}

type OperatorPolicy struct {
	Policy string
}

// isPolicyValid makes sure the given string matches one of the valid policies.
func isPolicyValid(policy string) bool {
	switch policy {
	case PolicyDeny, PolicyRead, PolicyWrite:
		return true
	default:
		return false
	}
}

// isNamespaceCapabilityValid ensures the given capability is valid for a namespace policy
func isNamespaceCapabilityValid(cap string) bool {
	switch cap {
	case NamespaceCapabilityDeny, NamespaceCapabilityListJobs, NamespaceCapabilityReadJob,
		NamespaceCapabilitySubmitJob, NamespaceCapabilityReadLogs, NamespaceCapabilityReadFS:
		return true
	default:
		return false
	}
}

// expandNamespacePolicy provides the equivalent set of capabilities for
// a namespace policy
func expandNamespacePolicy(policy string) []string {
	switch policy {
	case PolicyDeny:
		return []string{NamespaceCapabilityDeny}
	case PolicyRead:
		return []string{
			NamespaceCapabilityListJobs,
			NamespaceCapabilityReadJob,
		}
	case PolicyWrite:
		return []string{
			NamespaceCapabilityListJobs,
			NamespaceCapabilityReadJob,
			NamespaceCapabilitySubmitJob,
			NamespaceCapabilityReadLogs,
			NamespaceCapabilityReadFS,
		}
	default:
		return nil
	}
}

// Parse is used to parse the specified ACL rules into an
// intermediary set of policies, before being compiled into
// the ACL
func Parse(rules string) (*Policy, error) {
	// Decode the rules
	p := &Policy{Raw: rules}
	if rules == "" {
		// Hot path for empty rules
		return p, nil
	}

	// Attempt to parse
	if err := hcl.Decode(p, rules); err != nil {
		return nil, fmt.Errorf("Failed to parse ACL Policy: %v", err)
	}

	// Validate the policy
	for _, ns := range p.Namespaces {
		if !validNamespace.MatchString(ns.Name) {
			return nil, fmt.Errorf("Invalid namespace name: %#v", ns)
		}
		if ns.Policy != "" && !isPolicyValid(ns.Policy) {
			return nil, fmt.Errorf("Invalid namespace policy: %#v", ns)
		}
		for _, cap := range ns.Capabilities {
			if !isNamespaceCapabilityValid(cap) {
				return nil, fmt.Errorf("Invalid namespace capability '%s': %#v", cap, ns)
			}
		}

		// Expand the shorthand policy to the capabilities and
		// add to any existing capabilities
		if ns.Policy != "" {
			extraCap := expandNamespacePolicy(ns.Policy)
			ns.Capabilities = append(ns.Capabilities, extraCap...)
		}
	}

	if p.Agent != nil && !isPolicyValid(p.Agent.Policy) {
		return nil, fmt.Errorf("Invalid agent policy: %#v", p.Agent)
	}

	if p.Node != nil && !isPolicyValid(p.Node.Policy) {
		return nil, fmt.Errorf("Invalid node policy: %#v", p.Node)
	}

	if p.Operator != nil && !isPolicyValid(p.Operator.Policy) {
		return nil, fmt.Errorf("Invalid operator policy: %#v", p.Operator)
	}
	return p, nil
}
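
A small sketch of Parse in use (illustrative, not part of the diff): the `policy = "read"` shorthand is expanded into the corresponding fine-grained capabilities on the returned Policy, which can then be fed to NewACL.

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/nomad/acl"
    )

    func main() {
    	p, err := acl.Parse(`
    namespace "default" {
    	policy = "read"
    }
    `)
    	if err != nil {
    		panic(err)
    	}

    	// The "read" shorthand was expanded into list-jobs and read-job.
    	fmt.Println(p.Namespaces[0].Capabilities) // [list-jobs read-job]

    	aclObj, err := acl.NewACL(false, []*acl.Policy{p})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(aclObj.AllowNamespaceOperation("default", acl.NamespaceCapabilityReadJob)) // true
    }
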
acl/policy_test.go (new file, 175 lines)
@@ -0,0 +1,175 @@
package acl

import (
	"fmt"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestParse(t *testing.T) {
	type tcase struct {
		Raw    string
		ErrStr string
		Expect *Policy
	}
	tcases := []tcase{
		{
			`
			namespace "default" {
				policy = "read"
			}
			`,
			"",
			&Policy{
				Namespaces: []*NamespacePolicy{
					&NamespacePolicy{
						Name:   "default",
						Policy: PolicyRead,
						Capabilities: []string{
							NamespaceCapabilityListJobs,
							NamespaceCapabilityReadJob,
						},
					},
				},
			},
		},
		{
			`
			namespace "default" {
				policy = "read"
			}
			namespace "other" {
				policy = "write"
			}
			namespace "secret" {
				capabilities = ["deny", "read-logs"]
			}
			agent {
				policy = "read"
			}
			node {
				policy = "write"
			}
			operator {
				policy = "deny"
			}
			`,
			"",
			&Policy{
				Namespaces: []*NamespacePolicy{
					&NamespacePolicy{
						Name:   "default",
						Policy: PolicyRead,
						Capabilities: []string{
							NamespaceCapabilityListJobs,
							NamespaceCapabilityReadJob,
						},
					},
					&NamespacePolicy{
						Name:   "other",
						Policy: PolicyWrite,
						Capabilities: []string{
							NamespaceCapabilityListJobs,
							NamespaceCapabilityReadJob,
							NamespaceCapabilitySubmitJob,
							NamespaceCapabilityReadLogs,
							NamespaceCapabilityReadFS,
						},
					},
					&NamespacePolicy{
						Name: "secret",
						Capabilities: []string{
							NamespaceCapabilityDeny,
							NamespaceCapabilityReadLogs,
						},
					},
				},
				Agent: &AgentPolicy{
					Policy: PolicyRead,
				},
				Node: &NodePolicy{
					Policy: PolicyWrite,
				},
				Operator: &OperatorPolicy{
					Policy: PolicyDeny,
				},
			},
		},
		{
			`
			namespace "default" {
				policy = "foo"
			}
			`,
			"Invalid namespace policy",
			nil,
		},
		{
			`
			namespace "default" {
				capabilities = ["deny", "foo"]
			}
			`,
			"Invalid namespace capability",
			nil,
		},
		{
			`
			agent {
				policy = "foo"
			}
			`,
			"Invalid agent policy",
			nil,
		},
		{
			`
			node {
				policy = "foo"
			}
			`,
			"Invalid node policy",
			nil,
		},
		{
			`
			operator {
				policy = "foo"
			}
			`,
			"Invalid operator policy",
			nil,
		},
		{
			`
			namespace "has a space"{
				policy = "read"
			}
			`,
			"Invalid namespace name",
			nil,
		},
	}

	for idx, tc := range tcases {
		t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
			p, err := Parse(tc.Raw)
			if err != nil {
				if tc.ErrStr == "" {
					t.Fatalf("Unexpected err: %v", err)
				}
				if !strings.Contains(err.Error(), tc.ErrStr) {
					t.Fatalf("Unexpected err: %v", err)
				}
				return
			}
			if err == nil && tc.ErrStr != "" {
				t.Fatalf("Missing expected err")
			}
			tc.Expect.Raw = tc.Raw
			assert.EqualValues(t, tc.Expect, p)
		})
	}
}
api/acl.go (new file, 186 lines)
@@ -0,0 +1,186 @@
package api

import (
	"fmt"
	"time"
)

// ACLPolicies is used to query the ACL Policy endpoints.
type ACLPolicies struct {
	client *Client
}

// ACLPolicies returns a new handle on the ACL policies.
func (c *Client) ACLPolicies() *ACLPolicies {
	return &ACLPolicies{client: c}
}

// List is used to dump all of the policies.
func (a *ACLPolicies) List(q *QueryOptions) ([]*ACLPolicyListStub, *QueryMeta, error) {
	var resp []*ACLPolicyListStub
	qm, err := a.client.query("/v1/acl/policies", &resp, q)
	if err != nil {
		return nil, nil, err
	}
	return resp, qm, nil
}

// Upsert is used to create or update a policy
func (a *ACLPolicies) Upsert(policy *ACLPolicy, q *WriteOptions) (*WriteMeta, error) {
	if policy == nil || policy.Name == "" {
		return nil, fmt.Errorf("missing policy name")
	}
	wm, err := a.client.write("/v1/acl/policy/"+policy.Name, policy, nil, q)
	if err != nil {
		return nil, err
	}
	return wm, nil
}

// Delete is used to delete a policy
func (a *ACLPolicies) Delete(policyName string, q *WriteOptions) (*WriteMeta, error) {
	if policyName == "" {
		return nil, fmt.Errorf("missing policy name")
	}
	wm, err := a.client.delete("/v1/acl/policy/"+policyName, nil, q)
	if err != nil {
		return nil, err
	}
	return wm, nil
}

// Info is used to query a specific policy
func (a *ACLPolicies) Info(policyName string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) {
	if policyName == "" {
		return nil, nil, fmt.Errorf("missing policy name")
	}
	var resp ACLPolicy
	wm, err := a.client.query("/v1/acl/policy/"+policyName, &resp, q)
	if err != nil {
		return nil, nil, err
	}
	return &resp, wm, nil
}

// ACLTokens is used to query the ACL token endpoints.
type ACLTokens struct {
	client *Client
}

// ACLTokens returns a new handle on the ACL tokens.
func (c *Client) ACLTokens() *ACLTokens {
	return &ACLTokens{client: c}
}

// Bootstrap is used to get the initial bootstrap token
func (a *ACLTokens) Bootstrap(q *WriteOptions) (*ACLToken, *WriteMeta, error) {
	var resp ACLToken
	wm, err := a.client.write("/v1/acl/bootstrap", nil, &resp, q)
	if err != nil {
		return nil, nil, err
	}
	return &resp, wm, nil
}

// List is used to dump all of the tokens.
func (a *ACLTokens) List(q *QueryOptions) ([]*ACLTokenListStub, *QueryMeta, error) {
	var resp []*ACLTokenListStub
	qm, err := a.client.query("/v1/acl/tokens", &resp, q)
	if err != nil {
		return nil, nil, err
	}
	return resp, qm, nil
}

// Create is used to create a token
func (a *ACLTokens) Create(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
	if token.AccessorID != "" {
		return nil, nil, fmt.Errorf("cannot specify Accessor ID")
	}
	var resp ACLToken
	wm, err := a.client.write("/v1/acl/token", token, &resp, q)
	if err != nil {
		return nil, nil, err
	}
	return &resp, wm, nil
}

// Update is used to update an existing token
func (a *ACLTokens) Update(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
	if token.AccessorID == "" {
		return nil, nil, fmt.Errorf("missing accessor ID")
	}
	var resp ACLToken
	wm, err := a.client.write("/v1/acl/token/"+token.AccessorID,
		token, &resp, q)
	if err != nil {
		return nil, nil, err
	}
	return &resp, wm, nil
}

// Delete is used to delete a token
func (a *ACLTokens) Delete(accessorID string, q *WriteOptions) (*WriteMeta, error) {
	if accessorID == "" {
		return nil, fmt.Errorf("missing accessor ID")
	}
	wm, err := a.client.delete("/v1/acl/token/"+accessorID, nil, q)
	if err != nil {
		return nil, err
	}
	return wm, nil
}

// Info is used to query a token
func (a *ACLTokens) Info(accessorID string, q *QueryOptions) (*ACLToken, *QueryMeta, error) {
	if accessorID == "" {
		return nil, nil, fmt.Errorf("missing accessor ID")
	}
	var resp ACLToken
	wm, err := a.client.query("/v1/acl/token/"+accessorID, &resp, q)
	if err != nil {
		return nil, nil, err
	}
	return &resp, wm, nil
}

// ACLPolicyListStub is used for listing ACL policies
type ACLPolicyListStub struct {
	Name        string
	Description string
	CreateIndex uint64
	ModifyIndex uint64
}

// ACLPolicy is used to represent an ACL policy
type ACLPolicy struct {
	Name        string
	Description string
	Rules       string
	CreateIndex uint64
	ModifyIndex uint64
}

// ACLToken represents a client token which is used to authenticate
type ACLToken struct {
	AccessorID  string
	SecretID    string
	Name        string
	Type        string
	Policies    []string
	Global      bool
	CreateTime  time.Time
	CreateIndex uint64
	ModifyIndex uint64
}

type ACLTokenListStub struct {
	AccessorID  string
	Name        string
	Type        string
	Policies    []string
	Global      bool
	CreateTime  time.Time
	CreateIndex uint64
	ModifyIndex uint64
}
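
As a rough end-to-end sketch of the HTTP client surface above (illustrative only, not part of the diff; error handling is minimal and an ACL-enabled agent is assumed to be reachable at the default address): bootstrap the initial management token, attach its secret to the client, then register and read back a policy.

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/nomad/api"
    )

    func main() {
    	// DefaultConfig also picks up NOMAD_TOKEN when set (see api/api.go below).
    	client, err := api.NewClient(api.DefaultConfig())
    	if err != nil {
    		panic(err)
    	}

    	// One-time bootstrap returns the initial management token.
    	root, _, err := client.ACLTokens().Bootstrap(nil)
    	if err != nil {
    		panic(err)
    	}
    	client.SetSecretID(root.SecretID)

    	// Register a policy, then read it back.
    	policy := &api.ACLPolicy{
    		Name:  "readonly",
    		Rules: `namespace "default" { policy = "read" }`,
    	}
    	if _, err := client.ACLPolicies().Upsert(policy, nil); err != nil {
    		panic(err)
    	}
    	out, _, err := client.ACLPolicies().Info("readonly", nil)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Name)
    }
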
api/acl_test.go (new file, 207 lines)
@@ -0,0 +1,207 @@
package api

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestACLPolicies_ListUpsert(t *testing.T) {
	t.Parallel()
	c, s, _ := makeACLClient(t, nil, nil)
	defer s.Stop()
	ap := c.ACLPolicies()

	// Listing when nothing exists returns empty
	result, qm, err := ap.List(nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if qm.LastIndex != 1 {
		t.Fatalf("bad index: %d", qm.LastIndex)
	}
	if n := len(result); n != 0 {
		t.Fatalf("expected 0 policies, got: %d", n)
	}

	// Register a policy
	policy := &ACLPolicy{
		Name:        "test",
		Description: "test",
		Rules: `namespace "default" {
			policy = "read"
		}
		`,
	}
	wm, err := ap.Upsert(policy, nil)
	assert.Nil(t, err)
	assertWriteMeta(t, wm)

	// Check the list again
	result, qm, err = ap.List(nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertQueryMeta(t, qm)
	if len(result) != 1 {
		t.Fatalf("expected policy, got: %#v", result)
	}
}

func TestACLPolicies_Delete(t *testing.T) {
	t.Parallel()
	c, s, _ := makeACLClient(t, nil, nil)
	defer s.Stop()
	ap := c.ACLPolicies()

	// Register a policy
	policy := &ACLPolicy{
		Name:        "test",
		Description: "test",
		Rules: `namespace "default" {
			policy = "read"
		}
		`,
	}
	wm, err := ap.Upsert(policy, nil)
	assert.Nil(t, err)
	assertWriteMeta(t, wm)

	// Delete the policy
	wm, err = ap.Delete(policy.Name, nil)
	assert.Nil(t, err)
	assertWriteMeta(t, wm)

	// Check the list again
	result, qm, err := ap.List(nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertQueryMeta(t, qm)
	if len(result) != 0 {
		t.Fatalf("unexpected policy, got: %#v", result)
	}
}

func TestACLPolicies_Info(t *testing.T) {
	t.Parallel()
	c, s, _ := makeACLClient(t, nil, nil)
	defer s.Stop()
	ap := c.ACLPolicies()

	// Register a policy
	policy := &ACLPolicy{
		Name:        "test",
		Description: "test",
		Rules: `namespace "default" {
			policy = "read"
		}
		`,
	}
	wm, err := ap.Upsert(policy, nil)
	assert.Nil(t, err)
	assertWriteMeta(t, wm)

	// Query the policy
	out, qm, err := ap.Info(policy.Name, nil)
	assert.Nil(t, err)
	assertQueryMeta(t, qm)
	assert.Equal(t, policy.Name, out.Name)
}

func TestACLTokens_List(t *testing.T) {
	t.Parallel()
	c, s, _ := makeACLClient(t, nil, nil)
	defer s.Stop()
	at := c.ACLTokens()

	// Expect the bootstrap token
	result, qm, err := at.List(nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if qm.LastIndex == 0 {
		t.Fatalf("bad index: %d", qm.LastIndex)
	}
	if n := len(result); n != 1 {
		t.Fatalf("expected 1 token, got: %d", n)
	}
}

func TestACLTokens_CreateUpdate(t *testing.T) {
	t.Parallel()
	c, s, _ := makeACLClient(t, nil, nil)
	defer s.Stop()
	at := c.ACLTokens()

	token := &ACLToken{
		Name:     "foo",
		Type:     "client",
		Policies: []string{"foo1"},
	}

	// Create the token
	out, wm, err := at.Create(token, nil)
	assert.Nil(t, err)
	assertWriteMeta(t, wm)
	assert.NotNil(t, out)

	// Update the token
	out.Name = "other"
	out2, wm, err := at.Update(out, nil)
	assert.Nil(t, err)
	assertWriteMeta(t, wm)
	assert.NotNil(t, out2)

	// Verify the change took hold
	assert.Equal(t, out.Name, out2.Name)
}

func TestACLTokens_Info(t *testing.T) {
	t.Parallel()
	c, s, _ := makeACLClient(t, nil, nil)
	defer s.Stop()
	at := c.ACLTokens()

	token := &ACLToken{
		Name:     "foo",
		Type:     "client",
		Policies: []string{"foo1"},
	}

	// Create the token
	out, wm, err := at.Create(token, nil)
	assert.Nil(t, err)
	assertWriteMeta(t, wm)
	assert.NotNil(t, out)

	// Query the token
	out2, qm, err := at.Info(out.AccessorID, nil)
	assert.Nil(t, err)
	assertQueryMeta(t, qm)
	assert.Equal(t, out, out2)
}

func TestACLTokens_Delete(t *testing.T) {
	t.Parallel()
	c, s, _ := makeACLClient(t, nil, nil)
	defer s.Stop()
	at := c.ACLTokens()

	token := &ACLToken{
		Name:     "foo",
		Type:     "client",
		Policies: []string{"foo1"},
	}

	// Create the token
	out, wm, err := at.Create(token, nil)
	assert.Nil(t, err)
	assertWriteMeta(t, wm)
	assert.NotNil(t, out)

	// Delete the token
	wm, err = at.Delete(out.AccessorID, nil)
	assert.Nil(t, err)
	assertWriteMeta(t, wm)
}
api/api.go (33 changed lines)
@@ -41,6 +41,9 @@ type QueryOptions struct {
	// Set HTTP parameters on the query.
	Params map[string]string

	// SecretID is the secret ID of an ACL token
	SecretID string
}

// WriteOptions are used to parameterize a write
@@ -48,6 +51,9 @@ type WriteOptions struct {
	// Providing a datacenter overwrites the region provided
	// by the Config
	Region string

	// SecretID is the secret ID of an ACL token
	SecretID string
}

// QueryMeta is used to return meta data about a query
@@ -97,6 +103,9 @@ type Config struct {
	// httpClient is the client to use. Default will be used if not provided.
	httpClient *http.Client

	// SecretID to use. This can be overwritten per request.
	SecretID string

	// HttpAuth is the auth info to use for http access.
	HttpAuth *HttpBasicAuth

@@ -121,6 +130,7 @@ func (c *Config) ClientConfig(region, address string, tlsEnabled bool) *Config {
		Address:    fmt.Sprintf("%s://%s", scheme, address),
		Region:     region,
		httpClient: defaultConfig.httpClient,
		SecretID:   c.SecretID,
		HttpAuth:   c.HttpAuth,
		WaitTime:   c.WaitTime,
		TLSConfig:  c.TLSConfig.Copy(),

@@ -217,7 +227,9 @@ func DefaultConfig() *Config {
			config.TLSConfig.Insecure = insecure
		}
	}
	if v := os.Getenv("NOMAD_TOKEN"); v != "" {
		config.SecretID = v
	}
	return config
}

@@ -345,12 +357,18 @@ func (c *Client) getNodeClientImpl(nodeID string, q *QueryOptions, lookup nodeLo
	return NewClient(conf)
}

// SetSecretID sets the ACL token secret for API requests.
func (c *Client) SetSecretID(secretID string) {
	c.config.SecretID = secretID
}

// request is used to help build up a request
type request struct {
	config *Config
	method string
	url    *url.URL
	params url.Values
	token  string
	body   io.Reader
	obj    interface{}
}

@@ -364,6 +382,9 @@ func (r *request) setQueryOptions(q *QueryOptions) {
	if q.Region != "" {
		r.params.Set("region", q.Region)
	}
	if q.SecretID != "" {
		r.token = q.SecretID
	}
	if q.AllowStale {
		r.params.Set("stale", "")
	}

@@ -395,6 +416,9 @@ func (r *request) setWriteOptions(q *WriteOptions) {
	if q.Region != "" {
		r.params.Set("region", q.Region)
	}
	if q.SecretID != "" {
		r.token = q.SecretID
	}
}

// toHTTP converts the request to an HTTP request

@@ -427,6 +451,10 @@ func (r *request) toHTTP() (*http.Request, error) {
	}

	req.Header.Add("Accept-Encoding", "gzip")
	if r.token != "" {
		req.Header.Set("X-Nomad-Token", r.token)
	}

	req.URL.Host = r.url.Host
	req.URL.Scheme = r.url.Scheme
	req.Host = r.url.Host

@@ -457,6 +485,9 @@ func (c *Client) newRequest(method, path string) (*request, error) {
	if c.config.WaitTime != 0 {
		r.params.Set("wait", durToMsec(r.config.WaitTime))
	}
	if c.config.SecretID != "" {
		r.token = r.config.SecretID
	}

	// Add in the query parameters, if any
	for key, values := range u.Query() {
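
A per-request override sketch (illustrative, not part of the diff): a SecretID set on QueryOptions or WriteOptions takes effect for that one call and is sent as the X-Nomad-Token header, falling back to the client-wide Config.SecretID otherwise.

    package example

    import "github.com/hashicorp/nomad/api"

    // listWithToken shows a per-request token override: the SecretID on
    // QueryOptions is sent as the X-Nomad-Token header for this call only.
    func listWithToken(client *api.Client, secret string) ([]*api.ACLPolicyListStub, error) {
    	policies, _, err := client.ACLPolicies().List(&api.QueryOptions{SecretID: secret})
    	return policies, err
    }
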
api/api_test.go
@@ -24,6 +24,24 @@ func init() {
	seen = make(map[*testing.T]struct{})
}

func makeACLClient(t *testing.T, cb1 configCallback,
	cb2 testutil.ServerConfigCallback) (*Client, *testutil.TestServer, *ACLToken) {
	client, server := makeClient(t, cb1, func(c *testutil.TestServerConfig) {
		c.ACL.Enabled = true
		if cb2 != nil {
			cb2(c)
		}
	})

	// Get the root token
	root, _, err := client.ACLTokens().Bootstrap(nil)
	if err != nil {
		t.Fatalf("failed to bootstrap ACLs: %v", err)
	}
	client.SetSecretID(root.SecretID)
	return client, server, root
}

func makeClient(t *testing.T, cb1 configCallback,
	cb2 testutil.ServerConfigCallback) (*Client, *testutil.TestServer) {
	// Make client config

@@ -97,6 +115,7 @@ func TestDefaultConfig_env(t *testing.T) {
	t.Parallel()
	url := "http://1.2.3.4:5678"
	auth := []string{"nomaduser", "12345"}
	token := "foobar"

	os.Setenv("NOMAD_ADDR", url)
	defer os.Setenv("NOMAD_ADDR", "")

@@ -104,6 +123,9 @@
	os.Setenv("NOMAD_HTTP_AUTH", strings.Join(auth, ":"))
	defer os.Setenv("NOMAD_HTTP_AUTH", "")

	os.Setenv("NOMAD_TOKEN", token)
	defer os.Setenv("NOMAD_TOKEN", "")

	config := DefaultConfig()

	if config.Address != url {

@@ -117,6 +139,10 @@
	if config.HttpAuth.Password != auth[1] {
		t.Errorf("expected %q to be %q", config.HttpAuth.Password, auth[1])
	}

	if config.SecretID != token {
		t.Errorf("Expected %q to be %q", config.SecretID, token)
	}
}

func TestSetQueryOptions(t *testing.T) {

@@ -130,6 +156,7 @@ func TestSetQueryOptions(t *testing.T) {
		AllowStale: true,
		WaitIndex:  1000,
		WaitTime:   100 * time.Second,
		SecretID:   "foobar",
	}
	r.setQueryOptions(q)

@@ -145,6 +172,9 @@
	if r.params.Get("wait") != "100000ms" {
		t.Fatalf("bad: %v", r.params)
	}
	if r.token != "foobar" {
		t.Fatalf("bad: %v", r.token)
	}
}

func TestSetWriteOptions(t *testing.T) {

@@ -154,13 +184,17 @@ func TestSetWriteOptions(t *testing.T) {
	r, _ := c.newRequest("GET", "/v1/jobs")
	q := &WriteOptions{
		Region:   "foo",
		SecretID: "foobar",
	}
	r.setWriteOptions(q)

	if r.params.Get("region") != "foo" {
		t.Fatalf("bad: %v", r.params)
	}
	if r.token != "foobar" {
		t.Fatalf("bad: %v", r.token)
	}
}

func TestRequestToHTTP(t *testing.T) {

@@ -170,7 +204,8 @@ func TestRequestToHTTP(t *testing.T) {
	r, _ := c.newRequest("DELETE", "/v1/jobs/foo")
	q := &QueryOptions{
		Region:   "foo",
		SecretID: "foobar",
	}
	r.setQueryOptions(q)
	req, err := r.toHTTP()

@@ -184,6 +219,9 @@
	if req.URL.RequestURI() != "/v1/jobs/foo?region=foo" {
		t.Fatalf("bad: %v", req)
	}
	if req.Header.Get("X-Nomad-Token") != "foobar" {
		t.Fatalf("bad: %v", req)
	}
}

func TestParseQueryMeta(t *testing.T) {
api/jobs.go
@@ -729,6 +729,9 @@ func (j *Job) AddPeriodicConfig(cfg *PeriodicConfig) *Job {
type WriteRequest struct {
	// The target region for this write
	Region string

	// SecretID is the secret ID of an ACL token
	SecretID string
}

// JobValidateRequest is used to validate a job
client/acl.go (new file, 219 lines)
@@ -0,0 +1,219 @@
package client

import (
	"time"

	metrics "github.com/armon/go-metrics"
	lru "github.com/hashicorp/golang-lru"
	"github.com/hashicorp/nomad/acl"
	"github.com/hashicorp/nomad/nomad/structs"
)

const (
	// policyCacheSize is the number of ACL policies to keep cached. Policies have a fetching cost
	// so we keep the hot policies cached to reduce the ACL token resolution time.
	policyCacheSize = 64

	// aclCacheSize is the number of ACL objects to keep cached. ACLs have a parsing and
	// construction cost, so we keep the hot objects cached to reduce the ACL token resolution time.
	aclCacheSize = 64

	// tokenCacheSize is the number of ACL tokens to keep cached. Tokens have a fetching cost,
	// so we keep the hot tokens cached to reduce the lookups.
	tokenCacheSize = 64
)

// clientACLResolver holds the state required for client resolution
// of ACLs
type clientACLResolver struct {
	// aclCache is used to maintain the parsed ACL objects
	aclCache *lru.TwoQueueCache

	// policyCache is used to maintain the fetched policy objects
	policyCache *lru.TwoQueueCache

	// tokenCache is used to maintain the fetched token objects
	tokenCache *lru.TwoQueueCache
}

// init is used to setup the client resolver state
func (c *clientACLResolver) init() error {
	// Create the ACL object cache
	var err error
	c.aclCache, err = lru.New2Q(aclCacheSize)
	if err != nil {
		return err
	}
	c.policyCache, err = lru.New2Q(policyCacheSize)
	if err != nil {
		return err
	}
	c.tokenCache, err = lru.New2Q(tokenCacheSize)
	if err != nil {
		return err
	}
	return nil
}

// cachedACLValue is used to manage ACL Token or Policy TTLs
type cachedACLValue struct {
	Token     *structs.ACLToken
	Policy    *structs.ACLPolicy
	CacheTime time.Time
}

// Age is the time since the token was cached
func (c *cachedACLValue) Age() time.Duration {
	return time.Since(c.CacheTime)
}

// ResolveToken is used to translate an ACL Token Secret ID into
// an ACL object, nil if ACLs are disabled, or an error.
func (c *Client) ResolveToken(secretID string) (*acl.ACL, error) {
	// Fast-path if ACLs are disabled
	if !c.config.ACLEnabled {
		return nil, nil
	}
	defer metrics.MeasureSince([]string{"client", "acl", "resolve_token"}, time.Now())

	// Resolve the token value
	token, err := c.resolveTokenValue(secretID)
	if err != nil {
		return nil, err
	}
	if token == nil {
		return nil, structs.ErrTokenNotFound
	}

	// Check if this is a management token
	if token.Type == structs.ACLManagementToken {
		return acl.ManagementACL, nil
	}

	// Resolve the policies
	policies, err := c.resolvePolicies(token.SecretID, token.Policies)
	if err != nil {
		return nil, err
	}

	// Resolve the ACL object
	aclObj, err := structs.CompileACLObject(c.aclCache, policies)
	if err != nil {
		return nil, err
	}
	return aclObj, nil
}

// resolveTokenValue is used to translate a secret ID into an ACL token with caching.
// We use a local cache up to the TTL limit, and then resolve via a server. If we cannot
// reach a server, but have a cached value we extend the TTL to gracefully handle outages.
func (c *Client) resolveTokenValue(secretID string) (*structs.ACLToken, error) {
	// Hot-path the anonymous token
	if secretID == "" {
		return structs.AnonymousACLToken, nil
	}

	// Lookup the token in the cache
	raw, ok := c.tokenCache.Get(secretID)
	if ok {
		cached := raw.(*cachedACLValue)
		if cached.Age() <= c.config.ACLTokenTTL {
			return cached.Token, nil
		}
	}

	// Lookup the token
	req := structs.ResolveACLTokenRequest{
		SecretID: secretID,
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			AllowStale: true,
		},
	}
	var resp structs.ResolveACLTokenResponse
	if err := c.RPC("ACL.ResolveToken", &req, &resp); err != nil {
		// If we encounter an error but have a cached value, mask the error and extend the cache
		if ok {
			c.logger.Printf("[WARN] client: failed to resolve token, using expired cached value: %v", err)
			cached := raw.(*cachedACLValue)
			return cached.Token, nil
		}
		return nil, err
	}

	// Cache the response (positive or negative)
	c.tokenCache.Add(secretID, &cachedACLValue{
		Token:     resp.Token,
		CacheTime: time.Now(),
	})
	return resp.Token, nil
}

// resolvePolicies is used to translate a set of named ACL policies into the objects.
// We cache the policies locally, and fault them from a server as necessary. Policies
// are cached for a TTL, and then refreshed. If a server cannot be reached, the cache TTL
// will be ignored to gracefully handle outages.
func (c *Client) resolvePolicies(secretID string, policies []string) ([]*structs.ACLPolicy, error) {
	var out []*structs.ACLPolicy
	var expired []*structs.ACLPolicy
	var missing []string

	// Scan the cache for each policy
	for _, policyName := range policies {
		// Lookup the policy in the cache
		raw, ok := c.policyCache.Get(policyName)
		if !ok {
			missing = append(missing, policyName)
			continue
		}

		// Check if the cached value is valid or expired
		cached := raw.(*cachedACLValue)
		if cached.Age() <= c.config.ACLPolicyTTL {
			out = append(out, cached.Policy)
		} else {
			expired = append(expired, cached.Policy)
		}
	}

	// Hot-path if we have no missing or expired policies
	if len(missing)+len(expired) == 0 {
		return out, nil
	}

	// Lookup the missing and expired policies
	fetch := missing
	for _, p := range expired {
		fetch = append(fetch, p.Name)
	}
	req := structs.ACLPolicySetRequest{
		Names: fetch,
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			SecretID:   secretID,
			AllowStale: true,
		},
	}
	var resp structs.ACLPolicySetResponse
	if err := c.RPC("ACL.GetPolicies", &req, &resp); err != nil {
		// If we encounter an error but have cached policies, mask the error and extend the cache
		if len(missing) == 0 {
			c.logger.Printf("[WARN] client: failed to resolve policies, using expired cached value: %v", err)
			out = append(out, expired...)
			return out, nil
		}
		return nil, err
	}

	// Handle each output
	for _, policy := range resp.Policies {
		c.policyCache.Add(policy.Name, &cachedACLValue{
			Policy:    policy,
			CacheTime: time.Now(),
		})
		out = append(out, policy)
	}

	// Return the valid policies
	return out, nil
}
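
The resolver above layers a TTL check on top of an LRU cache; here is a minimal standalone sketch of that pattern (illustrative only, not part of the diff; the key, value, and TTL are arbitrary). It uses the same golang-lru 2Q cache as client/acl.go.

    package main

    import (
    	"fmt"
    	"time"

    	lru "github.com/hashicorp/golang-lru"
    )

    type entry struct {
    	value     string
    	cacheTime time.Time
    }

    func main() {
    	cache, err := lru.New2Q(64)
    	if err != nil {
    		panic(err)
    	}
    	ttl := 30 * time.Second

    	cache.Add("secret-id", &entry{value: "token", cacheTime: time.Now()})

    	// On lookup, a hit is only trusted while it is younger than the TTL;
    	// the client code falls back to an RPC, and may reuse a stale entry
    	// if the servers are unreachable.
    	if raw, ok := cache.Get("secret-id"); ok {
    		e := raw.(*entry)
    		if time.Since(e.cacheTime) <= ttl {
    			fmt.Println("fresh:", e.value)
    		} else {
    			fmt.Println("expired, refetch from a server")
    		}
    	}
    }
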
client/acl_test.go (new file, 166 lines)
@@ -0,0 +1,166 @@
package client

import (
	"testing"

	"github.com/hashicorp/nomad/acl"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/assert"
)

func TestClient_ACL_resolveTokenValue(t *testing.T) {
	s1, _, _ := testACLServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
		c.ACLEnabled = true
	})
	defer c1.Shutdown()

	// Create a policy / token
	policy := mock.ACLPolicy()
	policy2 := mock.ACLPolicy()
	token := mock.ACLToken()
	token.Policies = []string{policy.Name, policy2.Name}
	token2 := mock.ACLToken()
	token2.Type = structs.ACLManagementToken
	token2.Policies = nil
	err := s1.State().UpsertACLPolicies(100, []*structs.ACLPolicy{policy, policy2})
	assert.Nil(t, err)
	err = s1.State().UpsertACLTokens(110, []*structs.ACLToken{token, token2})
	assert.Nil(t, err)

	// Test the client resolution
	out0, err := c1.resolveTokenValue("")
	assert.Nil(t, err)
	assert.NotNil(t, out0)
	assert.Equal(t, structs.AnonymousACLToken, out0)

	// Test the client resolution
	out1, err := c1.resolveTokenValue(token.SecretID)
	assert.Nil(t, err)
	assert.NotNil(t, out1)
	assert.Equal(t, token, out1)

	out2, err := c1.resolveTokenValue(token2.SecretID)
	assert.Nil(t, err)
	assert.NotNil(t, out2)
	assert.Equal(t, token2, out2)

	out3, err := c1.resolveTokenValue(token.SecretID)
	assert.Nil(t, err)
	assert.NotNil(t, out3)
	if out1 != out3 {
		t.Fatalf("bad caching")
	}
}

func TestClient_ACL_resolvePolicies(t *testing.T) {
	s1, _, root := testACLServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
		c.ACLEnabled = true
	})
	defer c1.Shutdown()

	// Create a policy / token
	policy := mock.ACLPolicy()
	policy2 := mock.ACLPolicy()
	token := mock.ACLToken()
	token.Policies = []string{policy.Name, policy2.Name}
	token2 := mock.ACLToken()
	token2.Type = structs.ACLManagementToken
	token2.Policies = nil
	err := s1.State().UpsertACLPolicies(100, []*structs.ACLPolicy{policy, policy2})
	assert.Nil(t, err)
	err = s1.State().UpsertACLTokens(110, []*structs.ACLToken{token, token2})
	assert.Nil(t, err)

	// Test the client resolution
	out, err := c1.resolvePolicies(root.SecretID, []string{policy.Name, policy2.Name})
	assert.Nil(t, err)
	assert.Equal(t, 2, len(out))

	// Test caching
	out2, err := c1.resolvePolicies(root.SecretID, []string{policy.Name, policy2.Name})
	assert.Nil(t, err)
	assert.Equal(t, 2, len(out2))

	// Check we get the same objects back (ignore ordering)
	if out[0] != out2[0] && out[0] != out2[1] {
		t.Fatalf("bad caching")
	}
}

func TestClient_ACL_ResolveToken_Disabled(t *testing.T) {
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Should always get nil when disabled
	aclObj, err := c1.ResolveToken("blah")
	assert.Nil(t, err)
	assert.Nil(t, aclObj)
}

func TestClient_ACL_ResolveToken(t *testing.T) {
	s1, _, _ := testACLServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
		c.ACLEnabled = true
	})
	defer c1.Shutdown()

	// Create a policy / token
	policy := mock.ACLPolicy()
	policy2 := mock.ACLPolicy()
	token := mock.ACLToken()
	token.Policies = []string{policy.Name, policy2.Name}
	token2 := mock.ACLToken()
	token2.Type = structs.ACLManagementToken
	token2.Policies = nil
	err := s1.State().UpsertACLPolicies(100, []*structs.ACLPolicy{policy, policy2})
	assert.Nil(t, err)
	err = s1.State().UpsertACLTokens(110, []*structs.ACLToken{token, token2})
	assert.Nil(t, err)

	// Test the client resolution
	out, err := c1.ResolveToken(token.SecretID)
	assert.Nil(t, err)
	assert.NotNil(t, out)

	// Test caching
	out2, err := c1.ResolveToken(token.SecretID)
	assert.Nil(t, err)
	if out != out2 {
		t.Fatalf("should be cached")
	}

	// Test management token
	out3, err := c1.ResolveToken(token2.SecretID)
	assert.Nil(t, err)
	if acl.ManagementACL != out3 {
		t.Fatalf("should be management")
	}

	// Test bad token
	out4, err := c1.ResolveToken(structs.GenerateUUID())
	assert.Equal(t, structs.ErrTokenNotFound, err)
	assert.Nil(t, out4)
}
client/client.go
@@ -150,6 +150,9 @@ type Client struct {
	// garbageCollector is used to garbage collect terminal allocations present
	// in the node automatically
	garbageCollector *AllocGarbageCollector

	// clientACLResolver holds the ACL resolution state
	clientACLResolver
}

var (

@@ -192,6 +195,11 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic
		return nil, fmt.Errorf("failed to initialize client: %v", err)
	}

	// Initialize the ACL state
	if err := c.clientACLResolver.init(); err != nil {
		return nil, fmt.Errorf("failed to initialize ACL state: %v", err)
	}

	// Add the stats collector
	statsCollector := stats.NewHostStatsCollector(logger, c.config.AllocDir)
	c.hostStatsCollector = statsCollector

client/client_test.go
@@ -30,6 +30,21 @@ func getPort() int {
	return 1030 + int(rand.Int31n(6440))
}

func testACLServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string, *structs.ACLToken) {
	server, addr := testServer(t, func(c *nomad.Config) {
		c.ACLEnabled = true
		if cb != nil {
			cb(c)
		}
	})
	token := mock.ACLManagementToken()
	err := server.State().BootstrapACLTokens(1, token)
	if err != nil {
		t.Fatalf("failed to bootstrap ACL token: %v", err)
	}
	return server, addr, token
}

func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
	// Setup the default settings
	config := nomad.DefaultConfig()

client/config/config.go
@@ -179,6 +179,15 @@ type Config struct {
	// NoHostUUID disables using the host's UUID and will force generation of a
	// random UUID.
	NoHostUUID bool

	// ACLEnabled controls if ACL enforcement and management is enabled.
	ACLEnabled bool

	// ACLTokenTTL is how long we cache token values for
	ACLTokenTTL time.Duration

	// ACLPolicyTTL is how long we cache policy values for
	ACLPolicyTTL time.Duration
}

func (c *Config) Copy() *Config {

command/agent/acl_endpoint.go (new file, 250 lines)
@@ -0,0 +1,250 @@
|||
package agent

import (
	"net/http"
	"strings"

	"github.com/hashicorp/nomad/nomad/structs"
)

func (s *HTTPServer) ACLPoliciesRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	if req.Method != "GET" {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	args := structs.ACLPolicyListRequest{}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.ACLPolicyListResponse
	if err := s.agent.RPC("ACL.ListPolicies", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Policies == nil {
		out.Policies = make([]*structs.ACLPolicyListStub, 0)
	}
	return out.Policies, nil
}

func (s *HTTPServer) ACLPolicySpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	name := strings.TrimPrefix(req.URL.Path, "/v1/acl/policy/")
	if len(name) == 0 {
		return nil, CodedError(400, "Missing Policy Name")
	}
	switch req.Method {
	case "GET":
		return s.aclPolicyQuery(resp, req, name)
	case "PUT", "POST":
		return s.aclPolicyUpdate(resp, req, name)
	case "DELETE":
		return s.aclPolicyDelete(resp, req, name)
	default:
		return nil, CodedError(405, ErrInvalidMethod)
	}
}

func (s *HTTPServer) aclPolicyQuery(resp http.ResponseWriter, req *http.Request,
	policyName string) (interface{}, error) {
	args := structs.ACLPolicySpecificRequest{
		Name: policyName,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.SingleACLPolicyResponse
	if err := s.agent.RPC("ACL.GetPolicy", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Policy == nil {
		return nil, CodedError(404, "ACL policy not found")
	}
	return out.Policy, nil
}

func (s *HTTPServer) aclPolicyUpdate(resp http.ResponseWriter, req *http.Request,
	policyName string) (interface{}, error) {
	// Parse the policy
	var policy structs.ACLPolicy
	if err := decodeBody(req, &policy); err != nil {
		return nil, CodedError(500, err.Error())
	}

	// Ensure the policy name matches
	if policy.Name != policyName {
		return nil, CodedError(400, "ACL policy name does not match request path")
	}

	// Format the request
	args := structs.ACLPolicyUpsertRequest{
		Policies: []*structs.ACLPolicy{&policy},
	}
	s.parseWrite(req, &args.WriteRequest)

	var out structs.GenericResponse
	if err := s.agent.RPC("ACL.UpsertPolicies", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return nil, nil
}

func (s *HTTPServer) aclPolicyDelete(resp http.ResponseWriter, req *http.Request,
	policyName string) (interface{}, error) {

	args := structs.ACLPolicyDeleteRequest{
		Names: []string{policyName},
	}
	s.parseWrite(req, &args.WriteRequest)

	var out structs.GenericResponse
	if err := s.agent.RPC("ACL.DeletePolicies", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return nil, nil
}

func (s *HTTPServer) ACLTokensRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	if req.Method != "GET" {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	args := structs.ACLTokenListRequest{}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.ACLTokenListResponse
	if err := s.agent.RPC("ACL.ListTokens", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Tokens == nil {
		out.Tokens = make([]*structs.ACLTokenListStub, 0)
	}
	return out.Tokens, nil
}

func (s *HTTPServer) ACLTokenBootstrap(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Ensure this is a PUT or POST
	if !(req.Method == "PUT" || req.Method == "POST") {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	// Format the request
	args := structs.ACLTokenBootstrapRequest{}
	s.parseWrite(req, &args.WriteRequest)

	var out structs.ACLTokenUpsertResponse
	if err := s.agent.RPC("ACL.Bootstrap", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	if len(out.Tokens) > 0 {
		return out.Tokens[0], nil
	}
	return nil, nil
}

func (s *HTTPServer) ACLTokenSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	accessor := strings.TrimPrefix(req.URL.Path, "/v1/acl/token")

	// If there is no accessor, this must be a create
||||
if len(accessor) == 0 {
|
||||
if !(req.Method == "PUT" || req.Method == "POST") {
|
||||
return nil, CodedError(405, ErrInvalidMethod)
|
||||
}
|
||||
return s.aclTokenUpdate(resp, req, "")
|
||||
}
|
||||
|
||||
// Check if no accessor is given past the slash
|
||||
accessor = accessor[1:]
|
||||
if accessor == "" {
|
||||
return nil, CodedError(400, "Missing Token Accessor")
|
||||
}
|
||||
|
||||
switch req.Method {
|
||||
case "GET":
|
||||
return s.aclTokenQuery(resp, req, accessor)
|
||||
case "PUT", "POST":
|
||||
return s.aclTokenUpdate(resp, req, accessor)
|
||||
case "DELETE":
|
||||
return s.aclTokenDelete(resp, req, accessor)
|
||||
default:
|
||||
return nil, CodedError(405, ErrInvalidMethod)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *HTTPServer) aclTokenQuery(resp http.ResponseWriter, req *http.Request,
|
||||
tokenAccessor string) (interface{}, error) {
|
||||
args := structs.ACLTokenSpecificRequest{
|
||||
AccessorID: tokenAccessor,
|
||||
}
|
||||
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var out structs.SingleACLTokenResponse
|
||||
if err := s.agent.RPC("ACL.GetToken", &args, &out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
setMeta(resp, &out.QueryMeta)
|
||||
if out.Token == nil {
|
||||
return nil, CodedError(404, "ACL token not found")
|
||||
}
|
||||
return out.Token, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) aclTokenUpdate(resp http.ResponseWriter, req *http.Request,
|
||||
tokenAccessor string) (interface{}, error) {
|
||||
// Parse the token
|
||||
var token structs.ACLToken
|
||||
if err := decodeBody(req, &token); err != nil {
|
||||
return nil, CodedError(500, err.Error())
|
||||
}
|
||||
|
||||
// Ensure the token accessor matches
|
||||
if tokenAccessor != "" && (token.AccessorID != tokenAccessor) {
|
||||
return nil, CodedError(400, "ACL token accessor does not match request path")
|
||||
}
|
||||
|
||||
// Format the request
|
||||
args := structs.ACLTokenUpsertRequest{
|
||||
Tokens: []*structs.ACLToken{&token},
|
||||
}
|
||||
s.parseWrite(req, &args.WriteRequest)
|
||||
|
||||
var out structs.ACLTokenUpsertResponse
|
||||
if err := s.agent.RPC("ACL.UpsertTokens", &args, &out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setIndex(resp, out.Index)
|
||||
if len(out.Tokens) > 0 {
|
||||
return out.Tokens[0], nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *HTTPServer) aclTokenDelete(resp http.ResponseWriter, req *http.Request,
|
||||
tokenAccessor string) (interface{}, error) {
|
||||
|
||||
args := structs.ACLTokenDeleteRequest{
|
||||
AccessorIDs: []string{tokenAccessor},
|
||||
}
|
||||
s.parseWrite(req, &args.WriteRequest)
|
||||
|
||||
var out structs.GenericResponse
|
||||
if err := s.agent.RPC("ACL.DeleteTokens", &args, &out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setIndex(resp, out.Index)
|
||||
return nil, nil
|
||||
}
|
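For illustration only, a client-side sketch of calling one of the endpoints above; the address, port, and token value are placeholders, and the secret must belong to a management token for this route:
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// List ACL policies on a local agent, authenticating via X-Nomad-Token.
	req, err := http.NewRequest("GET", "http://127.0.0.1:4646/v1/acl/policies", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Nomad-Token", "<management-secret-id>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body))
}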
401
command/agent/acl_endpoint_test.go
Normal file
|
@ -0,0 +1,401 @@
|
|||
package agent
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestHTTP_ACLPolicyList(t *testing.T) {
|
||||
t.Parallel()
|
||||
httpACLTest(t, nil, func(s *TestAgent) {
|
||||
p1 := mock.ACLPolicy()
|
||||
p2 := mock.ACLPolicy()
|
||||
p3 := mock.ACLPolicy()
|
||||
args := structs.ACLPolicyUpsertRequest{
|
||||
Policies: []*structs.ACLPolicy{p1, p2, p3},
|
||||
WriteRequest: structs.WriteRequest{
|
||||
Region: "global",
|
||||
SecretID: s.Token.SecretID,
|
||||
},
|
||||
}
|
||||
var resp structs.GenericResponse
|
||||
if err := s.Agent.RPC("ACL.UpsertPolicies", &args, &resp); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("GET", "/v1/acl/policies", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
respW := httptest.NewRecorder()
|
||||
setToken(req, s.Token)
|
||||
|
||||
// Make the request
|
||||
obj, err := s.Server.ACLPoliciesRequest(respW, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Check for the index
|
||||
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
|
||||
t.Fatalf("missing index")
|
||||
}
|
||||
if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
|
||||
t.Fatalf("missing known leader")
|
||||
}
|
||||
if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
|
||||
t.Fatalf("missing last contact")
|
||||
}
|
||||
|
||||
// Check the output
|
||||
n := obj.([]*structs.ACLPolicyListStub)
|
||||
if len(n) != 3 {
|
||||
t.Fatalf("bad: %#v", n)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestHTTP_ACLPolicyQuery(t *testing.T) {
|
||||
t.Parallel()
|
||||
httpACLTest(t, nil, func(s *TestAgent) {
|
||||
p1 := mock.ACLPolicy()
|
||||
args := structs.ACLPolicyUpsertRequest{
|
||||
Policies: []*structs.ACLPolicy{p1},
|
||||
WriteRequest: structs.WriteRequest{
|
||||
Region: "global",
|
||||
SecretID: s.Token.SecretID,
|
||||
},
|
||||
}
|
||||
var resp structs.GenericResponse
|
||||
if err := s.Agent.RPC("ACL.UpsertPolicies", &args, &resp); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("GET", "/v1/acl/policy/"+p1.Name, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
respW := httptest.NewRecorder()
|
||||
setToken(req, s.Token)
|
||||
|
||||
// Make the request
|
||||
obj, err := s.Server.ACLPolicySpecificRequest(respW, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Check for the index
|
||||
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
|
||||
t.Fatalf("missing index")
|
||||
}
|
||||
if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
|
||||
t.Fatalf("missing known leader")
|
||||
}
|
||||
if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
|
||||
t.Fatalf("missing last contact")
|
||||
}
|
||||
|
||||
// Check the output
|
||||
n := obj.(*structs.ACLPolicy)
|
||||
if n.Name != p1.Name {
|
||||
t.Fatalf("bad: %#v", n)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestHTTP_ACLPolicyCreate(t *testing.T) {
|
||||
t.Parallel()
|
||||
httpACLTest(t, nil, func(s *TestAgent) {
|
||||
// Make the HTTP request
|
||||
p1 := mock.ACLPolicy()
|
||||
buf := encodeReq(p1)
|
||||
req, err := http.NewRequest("PUT", "/v1/acl/policy/"+p1.Name, buf)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
respW := httptest.NewRecorder()
|
||||
setToken(req, s.Token)
|
||||
|
||||
// Make the request
|
||||
obj, err := s.Server.ACLPolicySpecificRequest(respW, req)
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, obj)
|
||||
|
||||
// Check for the index
|
||||
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
|
||||
t.Fatalf("missing index")
|
||||
}
|
||||
|
||||
// Check policy was created
|
||||
state := s.Agent.server.State()
|
||||
out, err := state.ACLPolicyByName(nil, p1.Name)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, out)
|
||||
|
||||
p1.CreateIndex, p1.ModifyIndex = out.CreateIndex, out.ModifyIndex
|
||||
assert.Equal(t, p1.Name, out.Name)
|
||||
assert.Equal(t, p1, out)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHTTP_ACLPolicyDelete(t *testing.T) {
|
||||
t.Parallel()
|
||||
httpACLTest(t, nil, func(s *TestAgent) {
|
||||
p1 := mock.ACLPolicy()
|
||||
args := structs.ACLPolicyUpsertRequest{
|
||||
Policies: []*structs.ACLPolicy{p1},
|
||||
WriteRequest: structs.WriteRequest{
|
||||
Region: "global",
|
||||
SecretID: s.Token.SecretID,
|
||||
},
|
||||
}
|
||||
var resp structs.GenericResponse
|
||||
if err := s.Agent.RPC("ACL.UpsertPolicies", &args, &resp); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("DELETE", "/v1/acl/policy/"+p1.Name, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
respW := httptest.NewRecorder()
|
||||
setToken(req, s.Token)
|
||||
|
||||
// Make the request
|
||||
obj, err := s.Server.ACLPolicySpecificRequest(respW, req)
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, obj)
|
||||
|
||||
// Check for the index
|
||||
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
|
||||
t.Fatalf("missing index")
|
||||
}
|
||||
|
||||
// Check policy was deleted
|
||||
state := s.Agent.server.State()
|
||||
out, err := state.ACLPolicyByName(nil, p1.Name)
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, out)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHTTP_ACLTokenBootstrap(t *testing.T) {
|
||||
t.Parallel()
|
||||
conf := func(c *Config) {
|
||||
c.ACL.Enabled = true
|
||||
c.ACL.PolicyTTL = 0 // Special flag to disable auto-bootstrap
|
||||
}
|
||||
httpTest(t, conf, func(s *TestAgent) {
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("PUT", "/v1/acl/bootstrap", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
respW := httptest.NewRecorder()
|
||||
|
||||
// Make the request
|
||||
obj, err := s.Server.ACLTokenBootstrap(respW, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Check for the index
|
||||
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
|
||||
t.Fatalf("missing index")
|
||||
}
|
||||
|
||||
// Check the output
|
||||
n := obj.(*structs.ACLToken)
|
||||
assert.NotNil(t, n)
|
||||
assert.Equal(t, "Bootstrap Token", n.Name)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHTTP_ACLTokenList(t *testing.T) {
|
||||
t.Parallel()
|
||||
httpACLTest(t, nil, func(s *TestAgent) {
|
||||
p1 := mock.ACLToken()
|
||||
p1.AccessorID = ""
|
||||
p2 := mock.ACLToken()
|
||||
p2.AccessorID = ""
|
||||
p3 := mock.ACLToken()
|
||||
p3.AccessorID = ""
|
||||
args := structs.ACLTokenUpsertRequest{
|
||||
Tokens: []*structs.ACLToken{p1, p2, p3},
|
||||
WriteRequest: structs.WriteRequest{
|
||||
Region: "global",
|
||||
SecretID: s.Token.SecretID,
|
||||
},
|
||||
}
|
||||
var resp structs.ACLTokenUpsertResponse
|
||||
if err := s.Agent.RPC("ACL.UpsertTokens", &args, &resp); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("GET", "/v1/acl/tokens", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
respW := httptest.NewRecorder()
|
||||
setToken(req, s.Token)
|
||||
|
||||
// Make the request
|
||||
obj, err := s.Server.ACLTokensRequest(respW, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Check for the index
|
||||
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
|
||||
t.Fatalf("missing index")
|
||||
}
|
||||
if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
|
||||
t.Fatalf("missing known leader")
|
||||
}
|
||||
if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
|
||||
t.Fatalf("missing last contact")
|
||||
}
|
||||
|
||||
// Check the output (includes the bootstrap token)
|
||||
n := obj.([]*structs.ACLTokenListStub)
|
||||
if len(n) != 4 {
|
||||
t.Fatalf("bad: %#v", n)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestHTTP_ACLTokenQuery(t *testing.T) {
|
||||
t.Parallel()
|
||||
httpACLTest(t, nil, func(s *TestAgent) {
|
||||
p1 := mock.ACLToken()
|
||||
p1.AccessorID = ""
|
||||
args := structs.ACLTokenUpsertRequest{
|
||||
Tokens: []*structs.ACLToken{p1},
|
||||
WriteRequest: structs.WriteRequest{
|
||||
Region: "global",
|
||||
SecretID: s.Token.SecretID,
|
||||
},
|
||||
}
|
||||
var resp structs.ACLTokenUpsertResponse
|
||||
if err := s.Agent.RPC("ACL.UpsertTokens", &args, &resp); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
out := resp.Tokens[0]
|
||||
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("GET", "/v1/acl/token/"+out.AccessorID, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
respW := httptest.NewRecorder()
|
||||
setToken(req, s.Token)
|
||||
|
||||
// Make the request
|
||||
obj, err := s.Server.ACLTokenSpecificRequest(respW, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Check for the index
|
||||
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
|
||||
t.Fatalf("missing index")
|
||||
}
|
||||
if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
|
||||
t.Fatalf("missing known leader")
|
||||
}
|
||||
if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
|
||||
t.Fatalf("missing last contact")
|
||||
}
|
||||
|
||||
// Check the output
|
||||
n := obj.(*structs.ACLToken)
|
||||
assert.Equal(t, out, n)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHTTP_ACLTokenCreate(t *testing.T) {
|
||||
t.Parallel()
|
||||
httpACLTest(t, nil, func(s *TestAgent) {
|
||||
// Make the HTTP request
|
||||
p1 := mock.ACLToken()
|
||||
p1.AccessorID = ""
|
||||
buf := encodeReq(p1)
|
||||
req, err := http.NewRequest("PUT", "/v1/acl/token", buf)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
respW := httptest.NewRecorder()
|
||||
setToken(req, s.Token)
|
||||
|
||||
// Make the request
|
||||
obj, err := s.Server.ACLTokenSpecificRequest(respW, req)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, obj)
|
||||
outTK := obj.(*structs.ACLToken)
|
||||
|
||||
// Check for the index
|
||||
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
|
||||
t.Fatalf("missing index")
|
||||
}
|
||||
|
||||
// Check token was created
|
||||
state := s.Agent.server.State()
|
||||
out, err := state.ACLTokenByAccessorID(nil, outTK.AccessorID)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, out)
|
||||
assert.Equal(t, outTK, out)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHTTP_ACLTokenDelete(t *testing.T) {
|
||||
t.Parallel()
|
||||
httpACLTest(t, nil, func(s *TestAgent) {
|
||||
p1 := mock.ACLToken()
|
||||
p1.AccessorID = ""
|
||||
args := structs.ACLTokenUpsertRequest{
|
||||
Tokens: []*structs.ACLToken{p1},
|
||||
WriteRequest: structs.WriteRequest{
|
||||
Region: "global",
|
||||
SecretID: s.Token.SecretID,
|
||||
},
|
||||
}
|
||||
var resp structs.ACLTokenUpsertResponse
|
||||
if err := s.Agent.RPC("ACL.UpsertTokens", &args, &resp); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
ID := resp.Tokens[0].AccessorID
|
||||
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("DELETE", "/v1/acl/token/"+ID, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
respW := httptest.NewRecorder()
|
||||
setToken(req, s.Token)
|
||||
|
||||
// Make the request
|
||||
obj, err := s.Server.ACLTokenSpecificRequest(respW, req)
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, obj)
|
||||
|
||||
// Check for the index
|
||||
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
|
||||
t.Fatalf("missing index")
|
||||
}
|
||||
|
||||
// Check token was deleted
|
||||
state := s.Agent.server.State()
|
||||
out, err := state.ACLTokenByAccessorID(nil, ID)
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, out)
|
||||
})
|
||||
}
|
|
@ -106,6 +106,15 @@ func convertServerConfig(agentConfig *Config, logOutput io.Writer) (*nomad.Confi
|
|||
if agentConfig.Region != "" {
|
||||
conf.Region = agentConfig.Region
|
||||
}
|
||||
|
||||
// Set the Authoritative Region if set, otherwise default to
|
||||
// the same as the local region.
|
||||
if agentConfig.Server.AuthoritativeRegion != "" {
|
||||
conf.AuthoritativeRegion = agentConfig.Server.AuthoritativeRegion
|
||||
} else if agentConfig.Region != "" {
|
||||
conf.AuthoritativeRegion = agentConfig.Region
|
||||
}
|
||||
|
||||
if agentConfig.Datacenter != "" {
|
||||
conf.Datacenter = agentConfig.Datacenter
|
||||
}
|
||||
|
@ -134,6 +143,12 @@ func convertServerConfig(agentConfig *Config, logOutput io.Writer) (*nomad.Confi
|
|||
if len(agentConfig.Server.EnabledSchedulers) != 0 {
|
||||
conf.EnabledSchedulers = agentConfig.Server.EnabledSchedulers
|
||||
}
|
||||
if agentConfig.ACL.Enabled {
|
||||
conf.ACLEnabled = true
|
||||
}
|
||||
if agentConfig.ACL.ReplicationToken != "" {
|
||||
conf.ReplicationToken = agentConfig.ACL.ReplicationToken
|
||||
}
|
||||
|
||||
// Set up the bind addresses
|
||||
rpcAddr, err := net.ResolveTCPAddr("tcp", agentConfig.normalizedAddrs.RPC)
|
||||
|
@ -337,6 +352,11 @@ func (a *Agent) clientConfig() (*clientconfig.Config, error) {
|
|||
conf.NoHostUUID = true
|
||||
}
|
||||
|
||||
// Setup the ACLs
|
||||
conf.ACLEnabled = a.config.ACL.Enabled
|
||||
conf.ACLTokenTTL = a.config.ACL.TokenTTL
|
||||
conf.ACLPolicyTTL = a.config.ACL.PolicyTTL
|
||||
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -57,6 +57,7 @@ func TestAgent_ServerConfig(t *testing.T) {
|
|||
conf.AdvertiseAddrs.Serf = "127.0.0.1:4000"
|
||||
conf.AdvertiseAddrs.RPC = "127.0.0.1:4001"
|
||||
conf.AdvertiseAddrs.HTTP = "10.10.11.1:4005"
|
||||
conf.ACL.Enabled = true
|
||||
|
||||
// Parses the advertise addrs correctly
|
||||
if err := conf.normalizeAddrs(); err != nil {
|
||||
|
@ -74,6 +75,12 @@ func TestAgent_ServerConfig(t *testing.T) {
|
|||
if serfPort != 4000 {
|
||||
t.Fatalf("expected 4000, got: %d", serfPort)
|
||||
}
|
||||
if out.AuthoritativeRegion != "global" {
|
||||
t.Fatalf("bad: %#v", out.AuthoritativeRegion)
|
||||
}
|
||||
if !out.ACLEnabled {
|
||||
t.Fatalf("ACL not enabled")
|
||||
}
|
||||
|
||||
// Assert addresses weren't changed
|
||||
if addr := conf.AdvertiseAddrs.RPC; addr != "127.0.0.1:4001" {
|
||||
|
|
|
@ -63,6 +63,7 @@ client {
|
|||
}
|
||||
server {
|
||||
enabled = true
|
||||
authoritative_region = "foobar"
|
||||
bootstrap_expect = 5
|
||||
data_dir = "/tmp/data"
|
||||
protocol_version = 3
|
||||
|
@ -82,6 +83,12 @@ server {
|
|||
rejoin_after_leave = true
|
||||
encrypt = "abc"
|
||||
}
|
||||
acl {
|
||||
enabled = true
|
||||
token_ttl = "60s"
|
||||
policy_ttl = "60s"
|
||||
replication_token = "foobar"
|
||||
}
|
||||
telemetry {
|
||||
statsite_address = "127.0.0.1:1234"
|
||||
statsd_address = "127.0.0.1:2345"
|
||||
|
|
|
@ -67,6 +67,9 @@ type Config struct {
|
|||
// Server has our server related settings
|
||||
Server *ServerConfig `mapstructure:"server"`
|
||||
|
||||
// ACL has our acl related settings
|
||||
ACL *ACLConfig `mapstructure:"acl"`
|
||||
|
||||
// Telemetry is used to configure sending telemetry
|
||||
Telemetry *Telemetry `mapstructure:"telemetry"`
|
||||
|
||||
|
@ -228,11 +231,38 @@ type ClientConfig struct {
|
|||
NoHostUUID *bool `mapstructure:"no_host_uuid"`
|
||||
}
|
||||
|
||||
// ACLConfig is configuration specific to the ACL system
|
||||
type ACLConfig struct {
|
||||
// Enabled controls whether we enforce and manage ACLs
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
|
||||
// TokenTTL controls how long we cache ACL tokens. This controls
|
||||
// how stale they can be when we are enforcing policies. Defaults
|
||||
// to "30s". Reducing this impacts performance by forcing more
|
||||
// frequent resolution.
|
||||
TokenTTL time.Duration `mapstructure:"token_ttl"`
|
||||
|
||||
// PolicyTTL controls how long we cache ACL policies. This controls
|
||||
// how stale they can be when we are enforcing policies. Defaults
|
||||
// to "30s". Reducing this impacts performance by forcing more
|
||||
// frequent resolution.
|
||||
PolicyTTL time.Duration `mapstructure:"policy_ttl"`
|
||||
|
||||
// ReplicationToken is used by servers to replicate tokens and policies
|
||||
// from the authoritative region. This must be a valid management token
|
||||
// within the authoritative region.
|
||||
ReplicationToken string `mapstructure:"replication_token"`
|
||||
}
|
||||
|
||||
// ServerConfig is configuration specific to the server mode
|
||||
type ServerConfig struct {
|
||||
// Enabled controls if we are a server
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
|
||||
// AuthoritativeRegion is used to control which region is treated as
|
||||
// the source of truth for global tokens and ACL policies.
|
||||
AuthoritativeRegion string `mapstructure:"authoritative_region"`
|
||||
|
||||
// BootstrapExpect tries to automatically bootstrap the Nomad cluster,
|
||||
// by withholding peers until enough servers join.
|
||||
BootstrapExpect int `mapstructure:"bootstrap_expect"`
|
||||
|
@ -565,6 +595,11 @@ func DefaultConfig() *Config {
|
|||
RetryInterval: "30s",
|
||||
RetryMaxAttempts: 0,
|
||||
},
|
||||
ACL: &ACLConfig{
|
||||
Enabled: false,
|
||||
TokenTTL: 30 * time.Second,
|
||||
PolicyTTL: 30 * time.Second,
|
||||
},
|
||||
SyslogFacility: "LOCAL0",
|
||||
Telemetry: &Telemetry{
|
||||
CollectionInterval: "1s",
|
||||
|
@ -676,6 +711,14 @@ func (c *Config) Merge(b *Config) *Config {
|
|||
result.Server = result.Server.Merge(b.Server)
|
||||
}
|
||||
|
||||
// Apply the acl config
|
||||
if result.ACL == nil && b.ACL != nil {
|
||||
server := *b.ACL
|
||||
result.ACL = &server
|
||||
} else if b.ACL != nil {
|
||||
result.ACL = result.ACL.Merge(b.ACL)
|
||||
}
|
||||
|
||||
// Apply the ports config
|
||||
if result.Ports == nil && b.Ports != nil {
|
||||
ports := *b.Ports
|
||||
|
@ -902,6 +945,25 @@ func isTooManyColons(err error) bool {
|
|||
return err != nil && strings.Contains(err.Error(), tooManyColons)
|
||||
}
|
||||
|
||||
// Merge is used to merge two ACL configs together. The settings from the input always take precedence.
|
||||
func (a *ACLConfig) Merge(b *ACLConfig) *ACLConfig {
|
||||
result := *a
|
||||
|
||||
if b.Enabled {
|
||||
result.Enabled = true
|
||||
}
|
||||
if b.TokenTTL != 0 {
|
||||
result.TokenTTL = b.TokenTTL
|
||||
}
|
||||
if b.PolicyTTL != 0 {
|
||||
result.PolicyTTL = b.PolicyTTL
|
||||
}
|
||||
if b.ReplicationToken != "" {
|
||||
result.ReplicationToken = b.ReplicationToken
|
||||
}
|
||||
return &result
|
||||
}
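A small test-style sketch (not part of the diff) of the merge semantics above; it assumes it sits in the same agent package so ACLConfig is in scope:
func TestACLConfig_Merge_Sketch(t *testing.T) {
	base := &ACLConfig{TokenTTL: 30 * time.Second}
	other := &ACLConfig{Enabled: true, PolicyTTL: 60 * time.Second, ReplicationToken: "foobar"}

	out := base.Merge(other)

	// Zero values in the input do not clobber existing settings.
	if !out.Enabled || out.TokenTTL != 30*time.Second ||
		out.PolicyTTL != 60*time.Second || out.ReplicationToken != "foobar" {
		t.Fatalf("unexpected merge result: %#v", out)
	}
}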
|
||||
|
||||
// Merge is used to merge two server configs together
|
||||
func (a *ServerConfig) Merge(b *ServerConfig) *ServerConfig {
|
||||
result := *a
|
||||
|
@ -909,6 +971,9 @@ func (a *ServerConfig) Merge(b *ServerConfig) *ServerConfig {
|
|||
if b.Enabled {
|
||||
result.Enabled = true
|
||||
}
|
||||
if b.AuthoritativeRegion != "" {
|
||||
result.AuthoritativeRegion = b.AuthoritativeRegion
|
||||
}
|
||||
if b.BootstrapExpect > 0 {
|
||||
result.BootstrapExpect = b.BootstrapExpect
|
||||
}
|
||||
|
|
|
@ -96,6 +96,7 @@ func parseConfig(result *Config, list *ast.ObjectList) error {
|
|||
"vault",
|
||||
"tls",
|
||||
"http_api_response_headers",
|
||||
"acl",
|
||||
}
|
||||
if err := checkHCLKeys(list, valid); err != nil {
|
||||
return multierror.Prefix(err, "config:")
|
||||
|
@ -118,6 +119,7 @@ func parseConfig(result *Config, list *ast.ObjectList) error {
|
|||
delete(m, "vault")
|
||||
delete(m, "tls")
|
||||
delete(m, "http_api_response_headers")
|
||||
delete(m, "acl")
|
||||
|
||||
// Decode the rest
|
||||
if err := mapstructure.WeakDecode(m, result); err != nil {
|
||||
|
@ -159,6 +161,13 @@ func parseConfig(result *Config, list *ast.ObjectList) error {
|
|||
}
|
||||
}
|
||||
|
||||
// Parse ACL config
|
||||
if o := list.Filter("acl"); len(o.Items) > 0 {
|
||||
if err := parseACL(&result.ACL, o); err != nil {
|
||||
return multierror.Prefix(err, "acl ->")
|
||||
}
|
||||
}
|
||||
|
||||
// Parse telemetry config
|
||||
if o := list.Filter("telemetry"); len(o.Items) > 0 {
|
||||
if err := parseTelemetry(&result.Telemetry, o); err != nil {
|
||||
|
@ -514,6 +523,7 @@ func parseServer(result **ServerConfig, list *ast.ObjectList) error {
|
|||
"retry_interval",
|
||||
"rejoin_after_leave",
|
||||
"encrypt",
|
||||
"authoritative_region",
|
||||
}
|
||||
if err := checkHCLKeys(listVal, valid); err != nil {
|
||||
return err
|
||||
|
@ -541,6 +551,56 @@ func parseServer(result **ServerConfig, list *ast.ObjectList) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func parseACL(result **ACLConfig, list *ast.ObjectList) error {
|
||||
list = list.Elem()
|
||||
if len(list.Items) > 1 {
|
||||
return fmt.Errorf("only one 'acl' block allowed")
|
||||
}
|
||||
|
||||
// Get our ACL object
|
||||
obj := list.Items[0]
|
||||
|
||||
// Value should be an object
|
||||
var listVal *ast.ObjectList
|
||||
if ot, ok := obj.Val.(*ast.ObjectType); ok {
|
||||
listVal = ot.List
|
||||
} else {
|
||||
return fmt.Errorf("acl value: should be an object")
|
||||
}
|
||||
|
||||
// Check for invalid keys
|
||||
valid := []string{
|
||||
"enabled",
|
||||
"token_ttl",
|
||||
"policy_ttl",
|
||||
"replication_token",
|
||||
}
|
||||
if err := checkHCLKeys(listVal, valid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var m map[string]interface{}
|
||||
if err := hcl.DecodeObject(&m, listVal); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var config ACLConfig
|
||||
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
|
||||
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
|
||||
WeaklyTypedInput: true,
|
||||
Result: &config,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := dec.Decode(m); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*result = &config
|
||||
return nil
|
||||
}
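The decoder above relies on mapstructure's duration hook to turn strings such as "60s" into time.Duration values; a standalone sketch of just that mechanism (the struct and map here are illustrative, not from the change):
package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

type aclConfigSketch struct {
	TokenTTL  time.Duration `mapstructure:"token_ttl"`
	PolicyTTL time.Duration `mapstructure:"policy_ttl"`
}

func main() {
	m := map[string]interface{}{"token_ttl": "60s", "policy_ttl": "60s"}

	var out aclConfigSketch
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
		WeaklyTypedInput: true,
		Result:           &out,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(m); err != nil {
		panic(err)
	}
	fmt.Println(out.TokenTTL, out.PolicyTTL) // 1m0s 1m0s
}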
|
||||
|
||||
func parseTelemetry(result **Telemetry, list *ast.ObjectList) error {
|
||||
list = list.Elem()
|
||||
if len(list.Items) > 1 {
|
||||
|
|
|
@ -84,6 +84,7 @@ func TestConfig_Parse(t *testing.T) {
|
|||
},
|
||||
Server: &ServerConfig{
|
||||
Enabled: true,
|
||||
AuthoritativeRegion: "foobar",
|
||||
BootstrapExpect: 5,
|
||||
DataDir: "/tmp/data",
|
||||
ProtocolVersion: 3,
|
||||
|
@ -103,6 +104,12 @@ func TestConfig_Parse(t *testing.T) {
|
|||
RetryMaxAttempts: 3,
|
||||
EncryptKey: "abc",
|
||||
},
|
||||
ACL: &ACLConfig{
|
||||
Enabled: true,
|
||||
TokenTTL: 60 * time.Second,
|
||||
PolicyTTL: 60 * time.Second,
|
||||
ReplicationToken: "foobar",
|
||||
},
|
||||
Telemetry: &Telemetry{
|
||||
StatsiteAddr: "127.0.0.1:1234",
|
||||
StatsdAddr: "127.0.0.1:2345",
|
||||
|
|
|
@ -27,6 +27,7 @@ func TestConfig_Merge(t *testing.T) {
|
|||
Telemetry: &Telemetry{},
|
||||
Client: &ClientConfig{},
|
||||
Server: &ServerConfig{},
|
||||
ACL: &ACLConfig{},
|
||||
Ports: &Ports{},
|
||||
Addresses: &Addresses{},
|
||||
AdvertiseAddrs: &AdvertiseAddrs{},
|
||||
|
@ -91,6 +92,7 @@ func TestConfig_Merge(t *testing.T) {
|
|||
},
|
||||
Server: &ServerConfig{
|
||||
Enabled: false,
|
||||
AuthoritativeRegion: "global",
|
||||
BootstrapExpect: 1,
|
||||
DataDir: "/tmp/data1",
|
||||
ProtocolVersion: 1,
|
||||
|
@ -100,6 +102,12 @@ func TestConfig_Merge(t *testing.T) {
|
|||
MinHeartbeatTTL: 30 * time.Second,
|
||||
MaxHeartbeatsPerSecond: 30.0,
|
||||
},
|
||||
ACL: &ACLConfig{
|
||||
Enabled: true,
|
||||
TokenTTL: 60 * time.Second,
|
||||
PolicyTTL: 60 * time.Second,
|
||||
ReplicationToken: "foo",
|
||||
},
|
||||
Ports: &Ports{
|
||||
HTTP: 4646,
|
||||
RPC: 4647,
|
||||
|
@ -223,6 +231,7 @@ func TestConfig_Merge(t *testing.T) {
|
|||
},
|
||||
Server: &ServerConfig{
|
||||
Enabled: true,
|
||||
AuthoritativeRegion: "global2",
|
||||
BootstrapExpect: 2,
|
||||
DataDir: "/tmp/data2",
|
||||
ProtocolVersion: 2,
|
||||
|
@ -238,6 +247,12 @@ func TestConfig_Merge(t *testing.T) {
|
|||
RetryInterval: "10s",
|
||||
retryInterval: time.Second * 10,
|
||||
},
|
||||
ACL: &ACLConfig{
|
||||
Enabled: true,
|
||||
TokenTTL: 20 * time.Second,
|
||||
PolicyTTL: 20 * time.Second,
|
||||
ReplicationToken: "foobar",
|
||||
},
|
||||
Ports: &Ports{
|
||||
HTTP: 20000,
|
||||
RPC: 21000,
|
||||
|
|
|
@ -148,6 +148,14 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) {
|
|||
s.mux.HandleFunc("/v1/deployments", s.wrap(s.DeploymentsRequest))
|
||||
s.mux.HandleFunc("/v1/deployment/", s.wrap(s.DeploymentSpecificRequest))
|
||||
|
||||
s.mux.HandleFunc("/v1/acl/policies", s.wrap(s.ACLPoliciesRequest))
|
||||
s.mux.HandleFunc("/v1/acl/policy/", s.wrap(s.ACLPolicySpecificRequest))
|
||||
|
||||
s.mux.HandleFunc("/v1/acl/bootstrap", s.wrap(s.ACLTokenBootstrap))
|
||||
s.mux.HandleFunc("/v1/acl/tokens", s.wrap(s.ACLTokensRequest))
|
||||
s.mux.HandleFunc("/v1/acl/token", s.wrap(s.ACLTokenSpecificRequest))
|
||||
s.mux.HandleFunc("/v1/acl/token/", s.wrap(s.ACLTokenSpecificRequest))
|
||||
|
||||
s.mux.HandleFunc("/v1/client/fs/", s.wrap(s.FsRequest))
|
||||
s.mux.HandleFunc("/v1/client/stats", s.wrap(s.ClientStatsRequest))
|
||||
s.mux.HandleFunc("/v1/client/allocation/", s.wrap(s.ClientAllocRequest))
|
||||
|
@ -351,9 +359,24 @@ func (s *HTTPServer) parseRegion(req *http.Request, r *string) {
|
|||
}
|
||||
}
|
||||
|
||||
// parseToken is used to parse the X-Nomad-Token param
|
||||
func (s *HTTPServer) parseToken(req *http.Request, token *string) {
|
||||
if other := req.Header.Get("X-Nomad-Token"); other != "" {
|
||||
*token = other
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// parseWrite is a convenience method for endpoints that call write methods
|
||||
func (s *HTTPServer) parseWrite(req *http.Request, b *structs.WriteRequest) {
|
||||
s.parseRegion(req, &b.Region)
|
||||
s.parseToken(req, &b.SecretID)
|
||||
}
|
||||
|
||||
// parse is a convenience method for endpoints that need to parse multiple flags
|
||||
func (s *HTTPServer) parse(resp http.ResponseWriter, req *http.Request, r *string, b *structs.QueryOptions) bool {
|
||||
s.parseRegion(req, r)
|
||||
s.parseToken(req, &b.SecretID)
|
||||
parseConsistency(req, b)
|
||||
parsePrefix(req, b)
|
||||
return parseWait(resp, req, b)
|
||||
|
|
|
@ -338,6 +338,24 @@ func TestParseRegion(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestParseToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
s := makeHTTPServer(t, nil)
|
||||
defer s.Shutdown()
|
||||
|
||||
req, err := http.NewRequest("GET", "/v1/jobs", nil)
|
||||
req.Header.Add("X-Nomad-Token", "foobar")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
var token string
|
||||
s.Server.parseToken(req, &token)
|
||||
if token != "foobar" {
|
||||
t.Fatalf("bad %s", token)
|
||||
}
|
||||
}
|
||||
|
||||
// TestHTTP_VerifyHTTPSClient asserts that a client certificate signed by the
|
||||
// appropriate CA is required when VerifyHTTPSClient=true.
|
||||
func TestHTTP_VerifyHTTPSClient(t *testing.T) {
|
||||
|
@ -496,6 +514,22 @@ func httpTest(t testing.TB, cb func(c *Config), f func(srv *TestAgent)) {
|
|||
f(s)
|
||||
}
|
||||
|
||||
func httpACLTest(t testing.TB, cb func(c *Config), f func(srv *TestAgent)) {
|
||||
s := makeHTTPServer(t, func(c *Config) {
|
||||
c.ACL.Enabled = true
|
||||
if cb != nil {
|
||||
cb(c)
|
||||
}
|
||||
})
|
||||
defer s.Shutdown()
|
||||
testutil.WaitForLeader(t, s.Agent.RPC)
|
||||
f(s)
|
||||
}
|
||||
|
||||
func setToken(req *http.Request, token *structs.ACLToken) {
|
||||
req.Header.Set("X-Nomad-Token", token.SecretID)
|
||||
}
|
||||
|
||||
func encodeReq(obj interface{}) io.ReadCloser {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
enc := json.NewEncoder(buf)
|
||||
|
|
|
@ -349,6 +349,7 @@ func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
|
|||
return nil, CodedError(400, "Job ID does not match name")
|
||||
}
|
||||
s.parseRegion(req, &args.Region)
|
||||
s.parseToken(req, &args.SecretID)
|
||||
|
||||
sJob := ApiJobToStructJob(args.Job)
|
||||
|
||||
|
@ -357,7 +358,8 @@ func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
|
|||
EnforceIndex: args.EnforceIndex,
|
||||
JobModifyIndex: args.JobModifyIndex,
|
||||
WriteRequest: structs.WriteRequest{
|
||||
Region: args.WriteRequest.Region,
|
||||
Region: args.WriteRequest.Region,
|
||||
SecretID: args.WriteRequest.SecretID,
|
||||
},
|
||||
}
|
||||
var out structs.JobRegisterResponse
|
||||
|
|
|
@ -171,6 +171,37 @@ func TestHTTP_JobsRegister(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
// Test that ACL token is properly threaded through to the RPC endpoint
|
||||
func TestHTTP_JobsRegister_ACL(t *testing.T) {
|
||||
t.Parallel()
|
||||
httpACLTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
job := api.MockJob()
|
||||
args := api.JobRegisterRequest{
|
||||
Job: job,
|
||||
WriteRequest: api.WriteRequest{
|
||||
Region: "global",
|
||||
},
|
||||
}
|
||||
buf := encodeReq(args)
|
||||
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("PUT", "/v1/jobs", buf)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
respW := httptest.NewRecorder()
|
||||
setToken(req, s.Token)
|
||||
|
||||
// Make the request
|
||||
obj, err := s.Server.JobsRequest(respW, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
assert.NotNil(t, obj)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHTTP_JobsRegister_Defaulting(t *testing.T) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
"github.com/hashicorp/nomad/api"
|
||||
"github.com/hashicorp/nomad/client/fingerprint"
|
||||
"github.com/hashicorp/nomad/nomad"
|
||||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
sconfig "github.com/hashicorp/nomad/nomad/structs/config"
|
||||
"github.com/hashicorp/nomad/testutil"
|
||||
|
@ -66,6 +67,9 @@ type TestAgent struct {
|
|||
// Agent is the embedded Nomad agent.
|
||||
// It is valid after Start().
|
||||
*Agent
|
||||
|
||||
// Token is auto-bootstrapped if ACLs are enabled
|
||||
Token *structs.ACLToken
|
||||
}
|
||||
|
||||
// NewTestAgent returns a started agent with the given name and
|
||||
|
@ -164,6 +168,17 @@ func (a *TestAgent) Start() *TestAgent {
|
|||
panic(fmt.Sprintf("failed OK response: %v", err))
|
||||
})
|
||||
}
|
||||
|
||||
// Check if ACLs are enabled. Use the special value of PolicyTTL 0s
|
||||
// to bypass this step. This is so we can test bootstrap
|
||||
// without having to pass down a special flag.
|
||||
if a.Config.ACL.Enabled && a.Config.Server.Enabled && a.Config.ACL.PolicyTTL != 0 {
|
||||
a.Token = mock.ACLManagementToken()
|
||||
state := a.Agent.server.State()
|
||||
if err := state.BootstrapACLTokens(1, a.Token); err != nil {
|
||||
panic(fmt.Sprintf("token bootstrap failed: %v", err))
|
||||
}
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
|
|
80
nomad/acl.go
Normal file
|
@ -0,0 +1,80 @@
|
|||
package nomad
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
metrics "github.com/armon/go-metrics"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/hashicorp/nomad/acl"
|
||||
"github.com/hashicorp/nomad/nomad/state"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
)
|
||||
|
||||
// resolveToken is used to translate an ACL Token Secret ID into
|
||||
// an ACL object, nil if ACLs are disabled, or an error.
|
||||
func (s *Server) resolveToken(secretID string) (*acl.ACL, error) {
|
||||
// Fast-path if ACLs are disabled
|
||||
if !s.config.ACLEnabled {
|
||||
return nil, nil
|
||||
}
|
||||
defer metrics.MeasureSince([]string{"nomad", "acl", "resolveToken"}, time.Now())
|
||||
|
||||
// Snapshot the state
|
||||
snap, err := s.fsm.State().Snapshot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Resolve the ACL
|
||||
return resolveTokenFromSnapshotCache(snap, s.aclCache, secretID)
|
||||
}
|
||||
|
||||
// resolveTokenFromSnapshotCache is used to resolve an ACL object from a snapshot of state,
|
||||
// using a cache to avoid parsing and ACL construction when possible. It is split from resolveToken
|
||||
// to simplify testing.
|
||||
func resolveTokenFromSnapshotCache(snap *state.StateSnapshot, cache *lru.TwoQueueCache, secretID string) (*acl.ACL, error) {
|
||||
// Lookup the ACL Token
|
||||
var token *structs.ACLToken
|
||||
var err error
|
||||
|
||||
// Handle anonymous requests
|
||||
if secretID == "" {
|
||||
token = structs.AnonymousACLToken
|
||||
} else {
|
||||
token, err = snap.ACLTokenBySecretID(nil, secretID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if token == nil {
|
||||
return nil, structs.ErrTokenNotFound
|
||||
}
|
||||
}
|
||||
|
||||
// Check if this is a management token
|
||||
if token.Type == structs.ACLManagementToken {
|
||||
return acl.ManagementACL, nil
|
||||
}
|
||||
|
||||
// Get all associated policies
|
||||
policies := make([]*structs.ACLPolicy, 0, len(token.Policies))
|
||||
for _, policyName := range token.Policies {
|
||||
policy, err := snap.ACLPolicyByName(nil, policyName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if policy == nil {
|
||||
// Ignore policies that don't exist, since they don't grant any more privilege
|
||||
continue
|
||||
}
|
||||
|
||||
// Save the policy and update the cache key
|
||||
policies = append(policies, policy)
|
||||
}
|
||||
|
||||
// Compile and cache the ACL object
|
||||
aclObj, err := structs.CompileACLObject(cache, policies)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return aclObj, nil
|
||||
}
|
698
nomad/acl_endpoint.go
Normal file
|
@ -0,0 +1,698 @@
|
|||
package nomad
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
metrics "github.com/armon/go-metrics"
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/nomad/nomad/state"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
)
|
||||
|
||||
var (
|
||||
// aclDisabled is returned when an ACL endpoint is hit but ACLs are not enabled
|
||||
aclDisabled = fmt.Errorf("ACL support disabled")
|
||||
)
|
||||
|
||||
// ACL endpoint is used for manipulating ACL tokens and policies
|
||||
type ACL struct {
|
||||
srv *Server
|
||||
}
|
||||
|
||||
// UpsertPolicies is used to create or update a set of policies
|
||||
func (a *ACL) UpsertPolicies(args *structs.ACLPolicyUpsertRequest, reply *structs.GenericResponse) error {
|
||||
// Ensure ACLs are enabled, and always flow modification requests to the authoritative region
|
||||
if !a.srv.config.ACLEnabled {
|
||||
return aclDisabled
|
||||
}
|
||||
args.Region = a.srv.config.AuthoritativeRegion
|
||||
|
||||
if done, err := a.srv.forward("ACL.UpsertPolicies", args, args, reply); done {
|
||||
return err
|
||||
}
|
||||
defer metrics.MeasureSince([]string{"nomad", "acl", "upsert_policies"}, time.Now())
|
||||
|
||||
// Check management level permissions
|
||||
if acl, err := a.srv.resolveToken(args.SecretID); err != nil {
|
||||
return err
|
||||
} else if acl == nil || !acl.IsManagement() {
|
||||
return structs.ErrPermissionDenied
|
||||
}
|
||||
|
||||
// Validate non-zero set of policies
|
||||
if len(args.Policies) == 0 {
|
||||
return fmt.Errorf("must specify as least one policy")
|
||||
}
|
||||
|
||||
// Validate each policy, compute hash
|
||||
for idx, policy := range args.Policies {
|
||||
if err := policy.Validate(); err != nil {
|
||||
return fmt.Errorf("policy %d invalid: %v", idx, err)
|
||||
}
|
||||
policy.SetHash()
|
||||
}
|
||||
|
||||
// Update via Raft
|
||||
_, index, err := a.srv.raftApply(structs.ACLPolicyUpsertRequestType, args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update the index
|
||||
reply.Index = index
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeletePolicies is used to delete policies
|
||||
func (a *ACL) DeletePolicies(args *structs.ACLPolicyDeleteRequest, reply *structs.GenericResponse) error {
|
||||
// Ensure ACLs are enabled, and always flow modification requests to the authoritative region
|
||||
if !a.srv.config.ACLEnabled {
|
||||
return aclDisabled
|
||||
}
|
||||
args.Region = a.srv.config.AuthoritativeRegion
|
||||
|
||||
if done, err := a.srv.forward("ACL.DeletePolicies", args, args, reply); done {
|
||||
return err
|
||||
}
|
||||
defer metrics.MeasureSince([]string{"nomad", "acl", "delete_policies"}, time.Now())
|
||||
|
||||
// Check management level permissions
|
||||
if acl, err := a.srv.resolveToken(args.SecretID); err != nil {
|
||||
return err
|
||||
} else if acl == nil || !acl.IsManagement() {
|
||||
return structs.ErrPermissionDenied
|
||||
}
|
||||
|
||||
// Validate non-zero set of policies
|
||||
if len(args.Names) == 0 {
|
||||
return fmt.Errorf("must specify as least one policy")
|
||||
}
|
||||
|
||||
// Update via Raft
|
||||
_, index, err := a.srv.raftApply(structs.ACLPolicyDeleteRequestType, args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update the index
|
||||
reply.Index = index
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListPolicies is used to list the policies
|
||||
func (a *ACL) ListPolicies(args *structs.ACLPolicyListRequest, reply *structs.ACLPolicyListResponse) error {
|
||||
if !a.srv.config.ACLEnabled {
|
||||
return aclDisabled
|
||||
}
|
||||
if done, err := a.srv.forward("ACL.ListPolicies", args, args, reply); done {
|
||||
return err
|
||||
}
|
||||
defer metrics.MeasureSince([]string{"nomad", "acl", "list_policies"}, time.Now())
|
||||
|
||||
// Check management level permissions
|
||||
if acl, err := a.srv.resolveToken(args.SecretID); err != nil {
|
||||
return err
|
||||
} else if acl == nil || !acl.IsManagement() {
|
||||
return structs.ErrPermissionDenied
|
||||
}
|
||||
|
||||
// Setup the blocking query
|
||||
opts := blockingOptions{
|
||||
queryOpts: &args.QueryOptions,
|
||||
queryMeta: &reply.QueryMeta,
|
||||
run: func(ws memdb.WatchSet, state *state.StateStore) error {
|
||||
// Iterate over all the policies
|
||||
var err error
|
||||
var iter memdb.ResultIterator
|
||||
if prefix := args.QueryOptions.Prefix; prefix != "" {
|
||||
iter, err = state.ACLPolicyByNamePrefix(ws, prefix)
|
||||
} else {
|
||||
iter, err = state.ACLPolicies(ws)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Convert all the policies to a list stub
|
||||
reply.Policies = nil
|
||||
for {
|
||||
raw := iter.Next()
|
||||
if raw == nil {
|
||||
break
|
||||
}
|
||||
policy := raw.(*structs.ACLPolicy)
|
||||
reply.Policies = append(reply.Policies, policy.Stub())
|
||||
}
|
||||
|
||||
// Use the last index that affected the policy table
|
||||
index, err := state.Index("acl_policy")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Ensure we never set the index to zero, otherwise a blocking query cannot be used.
|
||||
// We floor the index at one, since realistically the first write must have a higher index.
|
||||
if index == 0 {
|
||||
index = 1
|
||||
}
|
||||
reply.Index = index
|
||||
return nil
|
||||
}}
|
||||
return a.srv.blockingRPC(&opts)
|
||||
}
|
||||
|
||||
// GetPolicy is used to get a specific policy
|
||||
func (a *ACL) GetPolicy(args *structs.ACLPolicySpecificRequest, reply *structs.SingleACLPolicyResponse) error {
|
||||
if !a.srv.config.ACLEnabled {
|
||||
return aclDisabled
|
||||
}
|
||||
if done, err := a.srv.forward("ACL.GetPolicy", args, args, reply); done {
|
||||
return err
|
||||
}
|
||||
defer metrics.MeasureSince([]string{"nomad", "acl", "get_policy"}, time.Now())
|
||||
|
||||
// Check management level permissions
|
||||
if acl, err := a.srv.resolveToken(args.SecretID); err != nil {
|
||||
return err
|
||||
} else if acl == nil || !acl.IsManagement() {
|
||||
return structs.ErrPermissionDenied
|
||||
}
|
||||
|
||||
// Setup the blocking query
|
||||
opts := blockingOptions{
|
||||
queryOpts: &args.QueryOptions,
|
||||
queryMeta: &reply.QueryMeta,
|
||||
run: func(ws memdb.WatchSet, state *state.StateStore) error {
|
||||
// Look for the policy
|
||||
out, err := state.ACLPolicyByName(ws, args.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Setup the output
|
||||
reply.Policy = out
|
||||
if out != nil {
|
||||
reply.Index = out.ModifyIndex
|
||||
} else {
|
||||
// Use the last index that affected the policy table
|
||||
index, err := state.Index("acl_policy")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reply.Index = index
|
||||
}
|
||||
return nil
|
||||
}}
|
||||
return a.srv.blockingRPC(&opts)
|
||||
}
|
||||
|
||||
// GetPolicies is used to get a set of policies
|
||||
func (a *ACL) GetPolicies(args *structs.ACLPolicySetRequest, reply *structs.ACLPolicySetResponse) error {
|
||||
if !a.srv.config.ACLEnabled {
|
||||
return aclDisabled
|
||||
}
|
||||
if done, err := a.srv.forward("ACL.GetPolicies", args, args, reply); done {
|
||||
return err
|
||||
}
|
||||
defer metrics.MeasureSince([]string{"nomad", "acl", "get_policies"}, time.Now())
|
||||
|
||||
// For client typed tokens, allow them to query any policies associated with that token.
|
||||
// This is used by clients which are resolving the policies to enforce. Any associated
|
||||
// policies need to be fetched so that the client can determine what to allow.
|
||||
token, err := a.srv.State().ACLTokenBySecretID(nil, args.SecretID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if token == nil {
|
||||
return structs.ErrTokenNotFound
|
||||
}
|
||||
if token.Type != structs.ACLManagementToken && !token.PolicySubset(args.Names) {
|
||||
return structs.ErrPermissionDenied
|
||||
}
|
||||
|
||||
// Setup the blocking query
|
||||
opts := blockingOptions{
|
||||
queryOpts: &args.QueryOptions,
|
||||
queryMeta: &reply.QueryMeta,
|
||||
run: func(ws memdb.WatchSet, state *state.StateStore) error {
|
||||
// Setup the output
|
||||
reply.Policies = make(map[string]*structs.ACLPolicy, len(args.Names))
|
||||
|
||||
// Look for the policy
|
||||
for _, policyName := range args.Names {
|
||||
out, err := state.ACLPolicyByName(ws, policyName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if out != nil {
|
||||
reply.Policies[policyName] = out
|
||||
}
|
||||
}
|
||||
|
||||
// Use the last index that affected the policy table
|
||||
index, err := state.Index("acl_policy")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reply.Index = index
|
||||
return nil
|
||||
}}
|
||||
return a.srv.blockingRPC(&opts)
|
||||
}
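To make the intended caller concrete, a hedged sketch of the request a client might build when resolving its own token's policies; rpcCaller is a placeholder for whatever RPC plumbing the caller uses and is not part of this change:
// rpcCaller stands in for the caller's RPC mechanism.
var rpcCaller func(method string, args interface{}, reply interface{}) error

func fetchTokenPolicies(token *structs.ACLToken) (map[string]*structs.ACLPolicy, error) {
	args := &structs.ACLPolicySetRequest{
		Names: token.Policies,
		QueryOptions: structs.QueryOptions{
			Region:   "global",
			SecretID: token.SecretID,
		},
	}
	var reply structs.ACLPolicySetResponse
	if err := rpcCaller("ACL.GetPolicies", args, &reply); err != nil {
		return nil, err
	}
	return reply.Policies, nil
}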
|
||||
|
||||
// Bootstrap is used to bootstrap the initial token
|
||||
func (a *ACL) Bootstrap(args *structs.ACLTokenBootstrapRequest, reply *structs.ACLTokenUpsertResponse) error {
|
||||
// Ensure ACLs are enabled, and always flow modification requests to the authoritative region
|
||||
if !a.srv.config.ACLEnabled {
|
||||
return aclDisabled
|
||||
}
|
||||
args.Region = a.srv.config.AuthoritativeRegion
|
||||
|
||||
if done, err := a.srv.forward("ACL.Bootstrap", args, args, reply); done {
|
||||
return err
|
||||
}
|
||||
defer metrics.MeasureSince([]string{"nomad", "acl", "bootstrap"}, time.Now())
|
||||
|
||||
// Snapshot the state
|
||||
state, err := a.srv.State().Snapshot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify bootstrap is possible. The state store method re-verifies this,
|
||||
// but we do an early check to avoid raft transactions when possible.
|
||||
ok, err := state.CanBootstrapACLToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
return fmt.Errorf("ACL bootstrap already done")
|
||||
}
|
||||
|
||||
// Create a new global management token, overriding any user-supplied fields
|
||||
args.Token = &structs.ACLToken{
|
||||
AccessorID: structs.GenerateUUID(),
|
||||
SecretID: structs.GenerateUUID(),
|
||||
Name: "Bootstrap Token",
|
||||
Type: structs.ACLManagementToken,
|
||||
Global: true,
|
||||
CreateTime: time.Now().UTC(),
|
||||
}
|
||||
args.Token.SetHash()
|
||||
|
||||
// Update via Raft
|
||||
_, index, err := a.srv.raftApply(structs.ACLTokenBootstrapRequestType, args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Populate the response. We do a lookup against the state to
|
||||
// pick up the proper create / modify times.
|
||||
state, err = a.srv.State().Snapshot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out, err := state.ACLTokenByAccessorID(nil, args.Token.AccessorID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("token lookup failed: %v", err)
|
||||
}
|
||||
reply.Tokens = append(reply.Tokens, out)
|
||||
|
||||
// Update the index
|
||||
reply.Index = index
|
||||
return nil
|
||||
}
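A test-style sketch (not part of the diff) of driving this endpoint through the agent's RPC interface, mirroring the HTTP bootstrap test earlier; it assumes the agent package's Config, httpTest, and TestAgent helpers shown above:
func TestACL_Bootstrap_RPC_Sketch(t *testing.T) {
	conf := func(c *Config) {
		c.ACL.Enabled = true
		c.ACL.PolicyTTL = 0 // skip the TestAgent auto-bootstrap
	}
	httpTest(t, conf, func(s *TestAgent) {
		args := structs.ACLTokenBootstrapRequest{
			WriteRequest: structs.WriteRequest{Region: "global"},
		}
		var resp structs.ACLTokenUpsertResponse
		if err := s.Agent.RPC("ACL.Bootstrap", &args, &resp); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(resp.Tokens) != 1 || resp.Tokens[0].Type != structs.ACLManagementToken {
			t.Fatalf("unexpected bootstrap token: %#v", resp.Tokens)
		}
	})
}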
|
||||
|
||||
// UpsertTokens is used to create or update a set of tokens
|
||||
func (a *ACL) UpsertTokens(args *structs.ACLTokenUpsertRequest, reply *structs.ACLTokenUpsertResponse) error {
|
||||
// Ensure ACLs are enabled, and always flow modification requests to the authoritative region
|
||||
if !a.srv.config.ACLEnabled {
|
||||
return aclDisabled
|
||||
}
|
||||
|
||||
// Validate non-zero set of tokens
|
||||
if len(args.Tokens) == 0 {
|
||||
return fmt.Errorf("must specify as least one token")
|
||||
}
|
||||
|
||||
// Force the request to the authoritative region if we are creating global tokens
|
||||
hasGlobal := false
|
||||
allGlobal := true
|
||||
for _, token := range args.Tokens {
|
||||
if token.Global {
|
||||
hasGlobal = true
|
||||
} else {
|
||||
allGlobal = false
|
||||
}
|
||||
}
|
||||
|
||||
// Disallow mixed requests with global and non-global tokens since we forward
|
||||
// the entire request as a single batch.
|
||||
if hasGlobal {
|
||||
if !allGlobal {
|
||||
return fmt.Errorf("cannot upsert mixed global and non-global tokens")
|
||||
}
|
||||
|
||||
// Force the request to the authoritative region if it has global
|
||||
args.Region = a.srv.config.AuthoritativeRegion
|
||||
}
|
||||
|
||||
if done, err := a.srv.forward("ACL.UpsertTokens", args, args, reply); done {
|
||||
return err
|
||||
}
|
||||
defer metrics.MeasureSince([]string{"nomad", "acl", "upsert_tokens"}, time.Now())
|
||||
|
||||
// Check management level permissions
|
||||
if acl, err := a.srv.resolveToken(args.SecretID); err != nil {
|
||||
return err
|
||||
} else if acl == nil || !acl.IsManagement() {
|
||||
return structs.ErrPermissionDenied
|
||||
}
|
||||
|
||||
// Snapshot the state
|
||||
state, err := a.srv.State().Snapshot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate each token
|
||||
for idx, token := range args.Tokens {
|
||||
if err := token.Validate(); err != nil {
|
||||
return fmt.Errorf("token %d invalid: %v", idx, err)
|
||||
}
|
||||
|
||||
// Generate an accessor and secret ID if new
|
||||
if token.AccessorID == "" {
|
||||
token.AccessorID = structs.GenerateUUID()
|
||||
token.SecretID = structs.GenerateUUID()
|
||||
token.CreateTime = time.Now().UTC()
|
||||
|
||||
} else {
|
||||
// Verify the token exists
|
||||
out, err := state.ACLTokenByAccessorID(nil, token.AccessorID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("token lookup failed: %v", err)
|
||||
}
|
||||
if out == nil {
|
||||
return fmt.Errorf("cannot find token %s", token.AccessorID)
|
||||
}
|
||||
|
||||
// Cannot toggle the "Global" mode
|
||||
if token.Global != out.Global {
|
||||
return fmt.Errorf("cannot toggle global mode of %s", token.AccessorID)
|
||||
}
|
||||
}
|
||||
|
||||
// Compute the token hash
|
||||
token.SetHash()
|
||||
}
|
||||
|
||||
// Update via Raft
|
||||
_, index, err := a.srv.raftApply(structs.ACLTokenUpsertRequestType, args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Populate the response. We do a lookup against the state to
|
||||
// pick up the proper create / modify times.
|
||||
state, err = a.srv.State().Snapshot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, token := range args.Tokens {
|
||||
out, err := state.ACLTokenByAccessorID(nil, token.AccessorID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("token lookup failed: %v", err)
|
||||
}
|
||||
reply.Tokens = append(reply.Tokens, out)
|
||||
}
|
||||
|
||||
// Update the index
|
||||
reply.Index = index
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteTokens is used to delete tokens
|
||||
func (a *ACL) DeleteTokens(args *structs.ACLTokenDeleteRequest, reply *structs.GenericResponse) error {
|
||||
// Ensure ACLs are enabled, and always flow modification requests to the authoritative region
|
||||
if !a.srv.config.ACLEnabled {
|
||||
return aclDisabled
|
||||
}
|
||||
|
||||
// Validate non-zero set of tokens
|
||||
if len(args.AccessorIDs) == 0 {
|
||||
return fmt.Errorf("must specify as least one token")
|
||||
}
|
||||
|
||||
if done, err := a.srv.forward("ACL.DeleteTokens", args, args, reply); done {
|
||||
return err
|
||||
}
|
||||
defer metrics.MeasureSince([]string{"nomad", "acl", "delete_tokens"}, time.Now())
|
||||
|
||||
// Check management level permissions
|
||||
if acl, err := a.srv.resolveToken(args.SecretID); err != nil {
|
||||
return err
|
||||
} else if acl == nil || !acl.IsManagement() {
|
||||
return structs.ErrPermissionDenied
|
||||
}
|
||||
|
||||
// Snapshot the state
|
||||
state, err := a.srv.State().Snapshot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Determine if we are deleting local or global tokens
|
||||
hasGlobal := false
|
||||
allGlobal := true
|
||||
for _, accessor := range args.AccessorIDs {
|
||||
token, err := state.ACLTokenByAccessorID(nil, accessor)
|
||||
if err != nil {
|
||||
return fmt.Errorf("token lookup failed: %v", err)
|
||||
}
|
||||
if token == nil {
|
||||
continue
|
||||
}
|
||||
if token.Global {
|
||||
hasGlobal = true
|
||||
} else {
|
||||
allGlobal = false
|
||||
}
|
||||
}
|
||||
|
||||
// Disallow mixed requests with global and non-global tokens since we forward
|
||||
// the entire request as a single batch.
|
||||
if hasGlobal {
|
||||
if !allGlobal {
|
||||
return fmt.Errorf("cannot delete mixed global and non-global tokens")
|
||||
}
|
||||
|
||||
// Force the request to the authoritative region if it has global
|
||||
if a.srv.config.Region != a.srv.config.AuthoritativeRegion {
|
||||
args.Region = a.srv.config.AuthoritativeRegion
|
||||
_, err := a.srv.forward("ACL.DeleteTokens", args, args, reply)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update via Raft
|
||||
_, index, err := a.srv.raftApply(structs.ACLTokenDeleteRequestType, args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update the index
|
||||
reply.Index = index
|
||||
return nil
|
||||
}
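To make the batching rule above concrete: a single DeleteTokens request may name only global tokens or only local tokens, because the whole batch is forwarded (or not) as one unit. A small sketch with stand-in accessor IDs:
localID, globalID := structs.GenerateUUID(), structs.GenerateUUID() // stand-ins for existing tokens
// Accepted: all-local batch, applied in this region.
okLocal := &structs.ACLTokenDeleteRequest{AccessorIDs: []string{localID}}
// Accepted: all-global batch, forwarded to the authoritative region.
okGlobal := &structs.ACLTokenDeleteRequest{AccessorIDs: []string{globalID}}
// Rejected by the check above: "cannot delete mixed global and non-global tokens".
bad := &structs.ACLTokenDeleteRequest{AccessorIDs: []string{localID, globalID}}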

// ListTokens is used to list the tokens
func (a *ACL) ListTokens(args *structs.ACLTokenListRequest, reply *structs.ACLTokenListResponse) error {
if !a.srv.config.ACLEnabled {
return aclDisabled
}
if done, err := a.srv.forward("ACL.ListTokens", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "acl", "list_tokens"}, time.Now())

// Check management level permissions
if acl, err := a.srv.resolveToken(args.SecretID); err != nil {
return err
} else if acl == nil || !acl.IsManagement() {
return structs.ErrPermissionDenied
}

// Setup the blocking query
opts := blockingOptions{
queryOpts: &args.QueryOptions,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Iterate over all the tokens
var err error
var iter memdb.ResultIterator
if prefix := args.QueryOptions.Prefix; prefix != "" {
iter, err = state.ACLTokenByAccessorIDPrefix(ws, prefix)
} else if args.GlobalOnly {
iter, err = state.ACLTokensByGlobal(ws, true)
} else {
iter, err = state.ACLTokens(ws)
}
if err != nil {
return err
}

// Convert all the tokens to a list stub
reply.Tokens = nil
for {
raw := iter.Next()
if raw == nil {
break
}
token := raw.(*structs.ACLToken)
reply.Tokens = append(reply.Tokens, token.Stub())
}

// Use the last index that affected the token table
index, err := state.Index("acl_token")
if err != nil {
return err
}
reply.Index = index
return nil
}}
return a.srv.blockingRPC(&opts)
}

// GetToken is used to get a specific token
func (a *ACL) GetToken(args *structs.ACLTokenSpecificRequest, reply *structs.SingleACLTokenResponse) error {
if !a.srv.config.ACLEnabled {
return aclDisabled
}
if done, err := a.srv.forward("ACL.GetToken", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "acl", "get_token"}, time.Now())

// Check management level permissions
if acl, err := a.srv.resolveToken(args.SecretID); err != nil {
return err
} else if acl == nil || !acl.IsManagement() {
return structs.ErrPermissionDenied
}

// Setup the blocking query
opts := blockingOptions{
queryOpts: &args.QueryOptions,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Look for the token
out, err := state.ACLTokenByAccessorID(ws, args.AccessorID)
if err != nil {
return err
}

// Setup the output
reply.Token = out
if out != nil {
reply.Index = out.ModifyIndex
} else {
// Use the last index that affected the token table
index, err := state.Index("acl_token")
if err != nil {
return err
}
reply.Index = index
}
return nil
}}
return a.srv.blockingRPC(&opts)
}

// GetTokens is used to get a set of tokens
func (a *ACL) GetTokens(args *structs.ACLTokenSetRequest, reply *structs.ACLTokenSetResponse) error {
if !a.srv.config.ACLEnabled {
return aclDisabled
}
if done, err := a.srv.forward("ACL.GetTokens", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "acl", "get_tokens"}, time.Now())

// Check management level permissions
if acl, err := a.srv.resolveToken(args.SecretID); err != nil {
return err
} else if acl == nil || !acl.IsManagement() {
return structs.ErrPermissionDenied
}

// Setup the blocking query
opts := blockingOptions{
queryOpts: &args.QueryOptions,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Setup the output
reply.Tokens = make(map[string]*structs.ACLToken, len(args.AccessorIDS))

// Look for the token
for _, accessor := range args.AccessorIDS {
out, err := state.ACLTokenByAccessorID(ws, accessor)
if err != nil {
return err
}
if out != nil {
reply.Tokens[out.AccessorID] = out
}
}

// Use the last index that affected the token table
index, err := state.Index("acl_token")
if err != nil {
return err
}
reply.Index = index
return nil
}}
return a.srv.blockingRPC(&opts)
}

// ResolveToken is used to lookup a specific token by a secret ID. This is used for enforcing ACLs by clients.
func (a *ACL) ResolveToken(args *structs.ResolveACLTokenRequest, reply *structs.ResolveACLTokenResponse) error {
if !a.srv.config.ACLEnabled {
return aclDisabled
}
if done, err := a.srv.forward("ACL.ResolveToken", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "acl", "resolve_token"}, time.Now())

// Setup the query meta
a.srv.setQueryMeta(&reply.QueryMeta)

// Snapshot the state
state, err := a.srv.State().Snapshot()
if err != nil {
return err
}

// Look for the token
out, err := state.ACLTokenBySecretID(nil, args.SecretID)
if err != nil {
return err
}

// Setup the output
reply.Token = out
if out != nil {
reply.Index = out.ModifyIndex
} else {
// Use the last index that affected the token table
index, err := state.Index("acl_token")
if err != nil {
return err
}
reply.Index = index
}
return nil
}
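For reference, the enforcement patterns in this diff boil down to two shapes: management-only RPCs (the ACL endpoints above) deny unless the resolved object reports IsManagement(), while namespace-scoped RPCs such as Job.Register further below allow a nil object (ACLs not enforced for the request) and otherwise require a specific capability. A condensed sketch, using only helpers that appear in this diff; `srv` stands in for the endpoint's server handle (a.srv / j.srv):
// Management-only endpoint (pattern used by the ACL RPCs above):
if aclObj, err := srv.resolveToken(args.SecretID); err != nil {
	return err
} else if aclObj == nil || !aclObj.IsManagement() {
	return structs.ErrPermissionDenied
}

// Namespace-scoped endpoint (pattern used by Job.Register below); a nil
// object means ACLs are not being enforced, so the request is allowed:
if aclObj, err := srv.resolveToken(args.SecretID); err != nil {
	return err
} else if aclObj != nil && !aclObj.AllowNamespaceOperation(structs.DefaultNamespace, acl.NamespaceCapabilitySubmitJob) {
	return structs.ErrPermissionDenied
}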
1001
nomad/acl_endpoint_test.go
Normal file
File diff suppressed because it is too large
91
nomad/acl_test.go
Normal file
@ -0,0 +1,91 @@
package nomad
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/hashicorp/nomad/acl"
|
||||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/hashicorp/nomad/nomad/state"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestResolveACLToken(t *testing.T) {
|
||||
// Create mock state store and cache
|
||||
state, err := state.NewStateStore(os.Stderr)
|
||||
assert.Nil(t, err)
|
||||
cache, err := lru.New2Q(16)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Create a policy / token
|
||||
policy := mock.ACLPolicy()
|
||||
policy2 := mock.ACLPolicy()
|
||||
token := mock.ACLToken()
|
||||
token.Policies = []string{policy.Name, policy2.Name}
|
||||
token2 := mock.ACLToken()
|
||||
token2.Type = structs.ACLManagementToken
|
||||
token2.Policies = nil
|
||||
err = state.UpsertACLPolicies(100, []*structs.ACLPolicy{policy, policy2})
|
||||
assert.Nil(t, err)
|
||||
err = state.UpsertACLTokens(110, []*structs.ACLToken{token, token2})
|
||||
assert.Nil(t, err)
|
||||
|
||||
snap, err := state.Snapshot()
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Attempt resolution of blank token. Should return anonymous policy
|
||||
aclObj, err := resolveTokenFromSnapshotCache(snap, cache, "")
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, aclObj)
|
||||
|
||||
// Attempt resolution of unknown token. Should fail.
|
||||
randID := structs.GenerateUUID()
|
||||
aclObj, err = resolveTokenFromSnapshotCache(snap, cache, randID)
|
||||
assert.Equal(t, structs.ErrTokenNotFound, err)
|
||||
assert.Nil(t, aclObj)
|
||||
|
||||
// Attempt resolution of management token. Should get singleton.
|
||||
aclObj, err = resolveTokenFromSnapshotCache(snap, cache, token2.SecretID)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, aclObj)
|
||||
assert.Equal(t, true, aclObj.IsManagement())
|
||||
if aclObj != acl.ManagementACL {
|
||||
t.Fatalf("expected singleton")
|
||||
}
|
||||
|
||||
// Attempt resolution of client token
|
||||
aclObj, err = resolveTokenFromSnapshotCache(snap, cache, token.SecretID)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, aclObj)
|
||||
|
||||
// Check that the ACL object is sane
|
||||
assert.Equal(t, false, aclObj.IsManagement())
|
||||
allowed := aclObj.AllowNamespaceOperation("default", acl.NamespaceCapabilityListJobs)
|
||||
assert.Equal(t, true, allowed)
|
||||
allowed = aclObj.AllowNamespaceOperation("other", acl.NamespaceCapabilityListJobs)
|
||||
assert.Equal(t, false, allowed)
|
||||
|
||||
// Resolve the same token again, should get cache value
|
||||
aclObj2, err := resolveTokenFromSnapshotCache(snap, cache, token.SecretID)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, aclObj2)
|
||||
if aclObj != aclObj2 {
|
||||
t.Fatalf("expected cached value")
|
||||
}
|
||||
|
||||
// Bust the cache by upserting the policy
|
||||
err = state.UpsertACLPolicies(120, []*structs.ACLPolicy{policy})
|
||||
assert.Nil(t, err)
|
||||
snap, err = state.Snapshot()
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Resolve the same token again, should get different value
|
||||
aclObj3, err := resolveTokenFromSnapshotCache(snap, cache, token.SecretID)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, aclObj3)
|
||||
if aclObj == aclObj3 {
|
||||
t.Fatalf("unexpected cached value")
|
||||
}
|
||||
}
|
|
@ -101,6 +101,10 @@ type Config struct {
|
|||
// Region is the region this Nomad server belongs to.
|
||||
Region string
|
||||
|
||||
// AuthoritativeRegion is the region which is treated as the authoritative source
|
||||
// for ACLs and Policies. This provides a single source of truth to resolve conflicts.
|
||||
AuthoritativeRegion string
|
||||
|
||||
// Datacenter is the datacenter this Nomad server belongs to.
|
||||
Datacenter string
|
||||
|
||||
|
@ -224,6 +228,17 @@ type Config struct {
|
|||
|
||||
// TLSConfig holds various TLS related configurations
|
||||
TLSConfig *config.TLSConfig
|
||||
|
||||
// ACLEnabled controls if ACL enforcement and management is enabled.
|
||||
ACLEnabled bool
|
||||
|
||||
// ReplicationBackoff is how much we backoff when replication errors.
|
||||
// This is a tunable knob for testing primarily.
|
||||
ReplicationBackoff time.Duration
|
||||
|
||||
// ReplicationToken is the ACL Token Secret ID used to fetch from
|
||||
// the Authoritative Region.
|
||||
ReplicationToken string
|
||||
}
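As a concrete sketch of how these knobs fit together, a server in a non-authoritative region that should replicate ACL state might be configured roughly as follows (values are illustrative; the same fields are exercised by the leader tests further below):
c := DefaultConfig()
c.Region = "region2"              // this server's region
c.AuthoritativeRegion = "region1" // where policies and global tokens are authored
c.ACLEnabled = true
c.ReplicationBackoff = 30 * time.Second  // pause between retries when replication fails
c.ReplicationToken = replicationSecretID // Secret ID of a token valid in the authoritative region (stand-in variable)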
|
||||
|
||||
// CheckVersion is used to check if the ProtocolVersion is valid
|
||||
|
@ -247,6 +262,7 @@ func DefaultConfig() *Config {
|
|||
|
||||
c := &Config{
|
||||
Region: DefaultRegion,
|
||||
AuthoritativeRegion: DefaultRegion,
|
||||
Datacenter: DefaultDC,
|
||||
NodeName: hostname,
|
||||
ProtocolVersion: ProtocolVersionMax,
|
||||
|
@ -279,6 +295,7 @@ func DefaultConfig() *Config {
|
|||
VaultConfig: config.DefaultVaultConfig(),
|
||||
RPCHoldTimeout: 5 * time.Second,
|
||||
TLSConfig: &config.TLSConfig{},
|
||||
ReplicationBackoff: 30 * time.Second,
|
||||
}
|
||||
|
||||
// Enable all known schedulers by default
|
||||
|
169
nomad/fsm.go
@ -41,6 +41,8 @@ const (
|
|||
VaultAccessorSnapshot
|
||||
JobVersionSnapshot
|
||||
DeploymentSnapshot
|
||||
ACLPolicySnapshot
|
||||
ACLTokenSnapshot
|
||||
)
|
||||
|
||||
// nomadFSM implements a finite state machine that is used
|
||||
|
@ -167,6 +169,16 @@ func (n *nomadFSM) Apply(log *raft.Log) interface{} {
|
|||
return n.applyDeploymentDelete(buf[1:], log.Index)
|
||||
case structs.JobStabilityRequestType:
|
||||
return n.applyJobStability(buf[1:], log.Index)
|
||||
case structs.ACLPolicyUpsertRequestType:
|
||||
return n.applyACLPolicyUpsert(buf[1:], log.Index)
|
||||
case structs.ACLPolicyDeleteRequestType:
|
||||
return n.applyACLPolicyDelete(buf[1:], log.Index)
|
||||
case structs.ACLTokenUpsertRequestType:
|
||||
return n.applyACLTokenUpsert(buf[1:], log.Index)
|
||||
case structs.ACLTokenDeleteRequestType:
|
||||
return n.applyACLTokenDelete(buf[1:], log.Index)
|
||||
case structs.ACLTokenBootstrapRequestType:
|
||||
return n.applyACLTokenBootstrap(buf[1:], log.Index)
|
||||
default:
|
||||
if ignoreUnknown {
|
||||
n.logger.Printf("[WARN] nomad.fsm: ignoring unknown message type (%d), upgrade to newer version", msgType)
|
||||
|
@ -669,6 +681,81 @@ func (n *nomadFSM) applyJobStability(buf []byte, index uint64) interface{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
// applyACLPolicyUpsert is used to upsert a set of policies
|
||||
func (n *nomadFSM) applyACLPolicyUpsert(buf []byte, index uint64) interface{} {
|
||||
defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_policy_upsert"}, time.Now())
|
||||
var req structs.ACLPolicyUpsertRequest
|
||||
if err := structs.Decode(buf, &req); err != nil {
|
||||
panic(fmt.Errorf("failed to decode request: %v", err))
|
||||
}
|
||||
|
||||
if err := n.state.UpsertACLPolicies(index, req.Policies); err != nil {
|
||||
n.logger.Printf("[ERR] nomad.fsm: UpsertACLPolicies failed: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyACLPolicyDelete is used to delete a set of policies
|
||||
func (n *nomadFSM) applyACLPolicyDelete(buf []byte, index uint64) interface{} {
|
||||
defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_policy_delete"}, time.Now())
|
||||
var req structs.ACLPolicyDeleteRequest
|
||||
if err := structs.Decode(buf, &req); err != nil {
|
||||
panic(fmt.Errorf("failed to decode request: %v", err))
|
||||
}
|
||||
|
||||
if err := n.state.DeleteACLPolicies(index, req.Names); err != nil {
|
||||
n.logger.Printf("[ERR] nomad.fsm: DeleteACLPolicies failed: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyACLTokenUpsert is used to upsert a set of policies
|
||||
func (n *nomadFSM) applyACLTokenUpsert(buf []byte, index uint64) interface{} {
|
||||
defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_token_upsert"}, time.Now())
|
||||
var req structs.ACLTokenUpsertRequest
|
||||
if err := structs.Decode(buf, &req); err != nil {
|
||||
panic(fmt.Errorf("failed to decode request: %v", err))
|
||||
}
|
||||
|
||||
if err := n.state.UpsertACLTokens(index, req.Tokens); err != nil {
|
||||
n.logger.Printf("[ERR] nomad.fsm: UpsertACLTokens failed: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyACLTokenDelete is used to delete a set of policies
|
||||
func (n *nomadFSM) applyACLTokenDelete(buf []byte, index uint64) interface{} {
|
||||
defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_token_delete"}, time.Now())
|
||||
var req structs.ACLTokenDeleteRequest
|
||||
if err := structs.Decode(buf, &req); err != nil {
|
||||
panic(fmt.Errorf("failed to decode request: %v", err))
|
||||
}
|
||||
|
||||
if err := n.state.DeleteACLTokens(index, req.AccessorIDs); err != nil {
|
||||
n.logger.Printf("[ERR] nomad.fsm: DeleteACLTokens failed: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyACLTokenBootstrap is used to bootstrap an ACL token
|
||||
func (n *nomadFSM) applyACLTokenBootstrap(buf []byte, index uint64) interface{} {
|
||||
defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_token_bootstrap"}, time.Now())
|
||||
var req structs.ACLTokenBootstrapRequest
|
||||
if err := structs.Decode(buf, &req); err != nil {
|
||||
panic(fmt.Errorf("failed to decode request: %v", err))
|
||||
}
|
||||
|
||||
if err := n.state.BootstrapACLTokens(index, req.Token); err != nil {
|
||||
n.logger.Printf("[ERR] nomad.fsm: BootstrapACLToken failed: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *nomadFSM) Snapshot() (raft.FSMSnapshot, error) {
|
||||
// Create a new snapshot
|
||||
snap, err := n.state.Snapshot()
|
||||
|
@ -826,6 +913,24 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
|
|||
return err
|
||||
}
|
||||
|
||||
case ACLPolicySnapshot:
|
||||
policy := new(structs.ACLPolicy)
|
||||
if err := dec.Decode(policy); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := restore.ACLPolicyRestore(policy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case ACLTokenSnapshot:
|
||||
token := new(structs.ACLToken)
|
||||
if err := dec.Decode(token); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := restore.ACLTokenRestore(token); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("Unrecognized snapshot type: %v", msgType)
|
||||
}
|
||||
|
@ -1032,6 +1137,14 @@ func (s *nomadSnapshot) Persist(sink raft.SnapshotSink) error {
|
|||
sink.Cancel()
|
||||
return err
|
||||
}
|
||||
if err := s.persistACLPolicies(sink, encoder); err != nil {
|
||||
sink.Cancel()
|
||||
return err
|
||||
}
|
||||
if err := s.persistACLTokens(sink, encoder); err != nil {
|
||||
sink.Cancel()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1308,6 +1421,62 @@ func (s *nomadSnapshot) persistDeployments(sink raft.SnapshotSink,
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *nomadSnapshot) persistACLPolicies(sink raft.SnapshotSink,
|
||||
encoder *codec.Encoder) error {
|
||||
// Get all the policies
|
||||
ws := memdb.NewWatchSet()
|
||||
policies, err := s.snap.ACLPolicies(ws)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
// Get the next item
|
||||
raw := policies.Next()
|
||||
if raw == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Prepare the request struct
|
||||
policy := raw.(*structs.ACLPolicy)
|
||||
|
||||
// Write out a policy registration
|
||||
sink.Write([]byte{byte(ACLPolicySnapshot)})
|
||||
if err := encoder.Encode(policy); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *nomadSnapshot) persistACLTokens(sink raft.SnapshotSink,
|
||||
encoder *codec.Encoder) error {
|
||||
// Get all the policies
|
||||
ws := memdb.NewWatchSet()
|
||||
tokens, err := s.snap.ACLTokens(ws)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
// Get the next item
|
||||
raw := tokens.Next()
|
||||
if raw == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Prepare the request struct
|
||||
token := raw.(*structs.ACLToken)
|
||||
|
||||
// Write out a token registration
|
||||
sink.Write([]byte{byte(ACLTokenSnapshot)})
|
||||
if err := encoder.Encode(token); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Release is a no-op, as we just need to GC the pointer
|
||||
// to the state store snapshot. There is nothing to explicitly
|
||||
// cleanup.
|
||||
|
|
|
@ -17,6 +17,7 @@ import (
|
|||
"github.com/hashicorp/nomad/testutil"
|
||||
"github.com/hashicorp/raft"
|
||||
"github.com/kr/pretty"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type MockSink struct {
|
||||
|
@ -1517,6 +1518,136 @@ func TestFSM_DeleteDeployment(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFSM_UpsertACLPolicies(t *testing.T) {
|
||||
t.Parallel()
|
||||
fsm := testFSM(t)
|
||||
|
||||
policy := mock.ACLPolicy()
|
||||
req := structs.ACLPolicyUpsertRequest{
|
||||
Policies: []*structs.ACLPolicy{policy},
|
||||
}
|
||||
buf, err := structs.Encode(structs.ACLPolicyUpsertRequestType, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
resp := fsm.Apply(makeLog(buf))
|
||||
if resp != nil {
|
||||
t.Fatalf("resp: %v", resp)
|
||||
}
|
||||
|
||||
// Verify we are registered
|
||||
ws := memdb.NewWatchSet()
|
||||
out, err := fsm.State().ACLPolicyByName(ws, policy.Name)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, out)
|
||||
}
|
||||
|
||||
func TestFSM_DeleteACLPolicies(t *testing.T) {
|
||||
t.Parallel()
|
||||
fsm := testFSM(t)
|
||||
|
||||
policy := mock.ACLPolicy()
|
||||
err := fsm.State().UpsertACLPolicies(1000, []*structs.ACLPolicy{policy})
|
||||
assert.Nil(t, err)
|
||||
|
||||
req := structs.ACLPolicyDeleteRequest{
|
||||
Names: []string{policy.Name},
|
||||
}
|
||||
buf, err := structs.Encode(structs.ACLPolicyDeleteRequestType, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
resp := fsm.Apply(makeLog(buf))
|
||||
if resp != nil {
|
||||
t.Fatalf("resp: %v", resp)
|
||||
}
|
||||
|
||||
// Verify we are NOT registered
|
||||
ws := memdb.NewWatchSet()
|
||||
out, err := fsm.State().ACLPolicyByName(ws, policy.Name)
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, out)
|
||||
}
|
||||
|
||||
func TestFSM_BootstrapACLTokens(t *testing.T) {
|
||||
t.Parallel()
|
||||
fsm := testFSM(t)
|
||||
|
||||
token := mock.ACLToken()
|
||||
req := structs.ACLTokenBootstrapRequest{
|
||||
Token: token,
|
||||
}
|
||||
buf, err := structs.Encode(structs.ACLTokenBootstrapRequestType, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
resp := fsm.Apply(makeLog(buf))
|
||||
if resp != nil {
|
||||
t.Fatalf("resp: %v", resp)
|
||||
}
|
||||
|
||||
// Verify we are registered
|
||||
out, err := fsm.State().ACLTokenByAccessorID(nil, token.AccessorID)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, out)
|
||||
}
|
||||
|
||||
func TestFSM_UpsertACLTokens(t *testing.T) {
|
||||
t.Parallel()
|
||||
fsm := testFSM(t)
|
||||
|
||||
token := mock.ACLToken()
|
||||
req := structs.ACLTokenUpsertRequest{
|
||||
Tokens: []*structs.ACLToken{token},
|
||||
}
|
||||
buf, err := structs.Encode(structs.ACLTokenUpsertRequestType, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
resp := fsm.Apply(makeLog(buf))
|
||||
if resp != nil {
|
||||
t.Fatalf("resp: %v", resp)
|
||||
}
|
||||
|
||||
// Verify we are registered
|
||||
ws := memdb.NewWatchSet()
|
||||
out, err := fsm.State().ACLTokenByAccessorID(ws, token.AccessorID)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, out)
|
||||
}
|
||||
|
||||
func TestFSM_DeleteACLTokens(t *testing.T) {
|
||||
t.Parallel()
|
||||
fsm := testFSM(t)
|
||||
|
||||
token := mock.ACLToken()
|
||||
err := fsm.State().UpsertACLTokens(1000, []*structs.ACLToken{token})
|
||||
assert.Nil(t, err)
|
||||
|
||||
req := structs.ACLTokenDeleteRequest{
|
||||
AccessorIDs: []string{token.AccessorID},
|
||||
}
|
||||
buf, err := structs.Encode(structs.ACLTokenDeleteRequestType, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
resp := fsm.Apply(makeLog(buf))
|
||||
if resp != nil {
|
||||
t.Fatalf("resp: %v", resp)
|
||||
}
|
||||
|
||||
// Verify we are NOT registered
|
||||
ws := memdb.NewWatchSet()
|
||||
out, err := fsm.State().ACLTokenByAccessorID(ws, token.AccessorID)
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, out)
|
||||
}
|
||||
|
||||
func testSnapshotRestore(t *testing.T, fsm *nomadFSM) *nomadFSM {
|
||||
// Snapshot
|
||||
snap, err := fsm.Snapshot()
|
||||
|
@ -1858,6 +1989,44 @@ func TestFSM_SnapshotRestore_Deployments(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFSM_SnapshotRestore_ACLPolicy(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Add some state
|
||||
fsm := testFSM(t)
|
||||
state := fsm.State()
|
||||
p1 := mock.ACLPolicy()
|
||||
p2 := mock.ACLPolicy()
|
||||
state.UpsertACLPolicies(1000, []*structs.ACLPolicy{p1, p2})
|
||||
|
||||
// Verify the contents
|
||||
fsm2 := testSnapshotRestore(t, fsm)
|
||||
state2 := fsm2.State()
|
||||
ws := memdb.NewWatchSet()
|
||||
out1, _ := state2.ACLPolicyByName(ws, p1.Name)
|
||||
out2, _ := state2.ACLPolicyByName(ws, p2.Name)
|
||||
assert.Equal(t, p1, out1)
|
||||
assert.Equal(t, p2, out2)
|
||||
}
|
||||
|
||||
func TestFSM_SnapshotRestore_ACLTokens(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Add some state
|
||||
fsm := testFSM(t)
|
||||
state := fsm.State()
|
||||
tk1 := mock.ACLToken()
|
||||
tk2 := mock.ACLToken()
|
||||
state.UpsertACLTokens(1000, []*structs.ACLToken{tk1, tk2})
|
||||
|
||||
// Verify the contents
|
||||
fsm2 := testSnapshotRestore(t, fsm)
|
||||
state2 := fsm2.State()
|
||||
ws := memdb.NewWatchSet()
|
||||
out1, _ := state2.ACLTokenByAccessorID(ws, tk1.AccessorID)
|
||||
out2, _ := state2.ACLTokenByAccessorID(ws, tk2.AccessorID)
|
||||
assert.Equal(t, tk1, out1)
|
||||
assert.Equal(t, tk2, out2)
|
||||
}
|
||||
|
||||
func TestFSM_SnapshotRestore_AddMissingSummary(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Add some state
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/nomad/acl"
|
||||
"github.com/hashicorp/nomad/client/driver"
|
||||
"github.com/hashicorp/nomad/helper"
|
||||
"github.com/hashicorp/nomad/nomad/state"
|
||||
|
@ -71,6 +72,13 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis
|
|||
// Set the warning message
|
||||
reply.Warnings = structs.MergeMultierrorWarnings(warnings, canonicalizeWarnings)
|
||||
|
||||
// Check job submission permissions
|
||||
if aclObj, err := j.srv.resolveToken(args.SecretID); err != nil {
|
||||
return err
|
||||
} else if aclObj != nil && !aclObj.AllowNamespaceOperation(structs.DefaultNamespace, acl.NamespaceCapabilitySubmitJob) {
|
||||
return structs.ErrPermissionDenied
|
||||
}
|
||||
|
||||
// Lookup the job
|
||||
snap, err := j.srv.fsm.State().Snapshot()
|
||||
if err != nil {
|
||||
|
|
|
@ -93,6 +93,49 @@ func TestJobEndpoint_Register(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestJobEndpoint_Register_ACL(t *testing.T) {
|
||||
t.Parallel()
|
||||
s1, root := testACLServer(t, func(c *Config) {
|
||||
c.NumSchedulers = 0 // Prevent automatic dequeue
|
||||
})
|
||||
defer s1.Shutdown()
|
||||
codec := rpcClient(t, s1)
|
||||
testutil.WaitForLeader(t, s1.RPC)
|
||||
|
||||
// Create the register request
|
||||
job := mock.Job()
|
||||
req := &structs.JobRegisterRequest{
|
||||
Job: job,
|
||||
WriteRequest: structs.WriteRequest{Region: "global"},
|
||||
}
|
||||
|
||||
// Try without a token, expect failure
|
||||
var resp structs.JobRegisterResponse
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err == nil {
|
||||
t.Fatalf("expected error")
|
||||
}
|
||||
|
||||
// Try with a token
|
||||
req.SecretID = root.SecretID
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if resp.Index == 0 {
|
||||
t.Fatalf("bad index: %d", resp.Index)
|
||||
}
|
||||
|
||||
// Check for the node in the FSM
|
||||
state := s1.fsm.State()
|
||||
ws := memdb.NewWatchSet()
|
||||
out, err := state.JobByID(ws, job.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if out == nil {
|
||||
t.Fatalf("expected job")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJobEndpoint_Register_InvalidDriverConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
s1 := testServer(t, func(c *Config) {
|
||||
|
|
298
nomad/leader.go
@ -1,6 +1,7 @@
|
|||
package nomad
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
@ -8,8 +9,11 @@ import (
|
|||
"net"
|
||||
"time"
|
||||
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/nomad/nomad/state"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
"github.com/hashicorp/raft"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
|
@ -20,6 +24,10 @@ const (
|
|||
// unblocked to re-enter the scheduler. A failed evaluation occurs under
|
||||
// high contention when the schedulers plan does not make progress.
|
||||
failedEvalUnblockInterval = 1 * time.Minute
|
||||
|
||||
// replicationRateLimit is used to rate limit how often data is replicated
|
||||
// between the authoritative region and the local region
|
||||
replicationRateLimit rate.Limit = 10.0
|
||||
)
|
||||
|
||||
// monitorLeadership is used to monitor if we acquire or lose our role
|
||||
|
@ -188,6 +196,13 @@ func (s *Server) establishLeadership(stopCh chan struct{}) error {
|
|||
if err := s.reconcileJobSummaries(); err != nil {
|
||||
return fmt.Errorf("unable to reconcile job summaries: %v", err)
|
||||
}
|
||||
|
||||
// Start replication of ACLs and Policies if they are enabled,
|
||||
// and we are not the authoritative region.
|
||||
if s.config.ACLEnabled && s.config.Region != s.config.AuthoritativeRegion {
|
||||
go s.replicateACLPolicies(stopCh)
|
||||
go s.replicateACLTokens(stopCh)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -654,3 +669,286 @@ REMOVE:
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// replicateACLPolicies is used to replicate ACL policies from
|
||||
// the authoritative region to this region.
|
||||
func (s *Server) replicateACLPolicies(stopCh chan struct{}) {
|
||||
req := structs.ACLPolicyListRequest{
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Region: s.config.AuthoritativeRegion,
|
||||
AllowStale: true,
|
||||
},
|
||||
}
|
||||
limiter := rate.NewLimiter(replicationRateLimit, int(replicationRateLimit))
|
||||
s.logger.Printf("[DEBUG] nomad: starting ACL policy replication from authoritative region %q", req.Region)
|
||||
|
||||
START:
|
||||
for {
|
||||
select {
|
||||
case <-stopCh:
|
||||
return
|
||||
default:
|
||||
// Rate limit how often we attempt replication
|
||||
limiter.Wait(context.Background())
|
||||
|
||||
// Fetch the list of policies
|
||||
var resp structs.ACLPolicyListResponse
|
||||
req.SecretID = s.ReplicationToken()
|
||||
err := s.forwardRegion(s.config.AuthoritativeRegion,
|
||||
"ACL.ListPolicies", &req, &resp)
|
||||
if err != nil {
|
||||
s.logger.Printf("[ERR] nomad: failed to fetch policies from authoritative region: %v", err)
|
||||
goto ERR_WAIT
|
||||
}
|
||||
|
||||
// Perform a two-way diff
|
||||
delete, update := diffACLPolicies(s.State(), req.MinQueryIndex, resp.Policies)
|
||||
|
||||
// Delete policies that should not exist
|
||||
if len(delete) > 0 {
|
||||
args := &structs.ACLPolicyDeleteRequest{
|
||||
Names: delete,
|
||||
}
|
||||
_, _, err := s.raftApply(structs.ACLPolicyDeleteRequestType, args)
|
||||
if err != nil {
|
||||
s.logger.Printf("[ERR] nomad: failed to delete policies: %v", err)
|
||||
goto ERR_WAIT
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch any outdated policies
|
||||
var fetched []*structs.ACLPolicy
|
||||
if len(update) > 0 {
|
||||
req := structs.ACLPolicySetRequest{
|
||||
Names: update,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Region: s.config.AuthoritativeRegion,
|
||||
SecretID: s.ReplicationToken(),
|
||||
AllowStale: true,
|
||||
MinQueryIndex: resp.Index - 1,
|
||||
},
|
||||
}
|
||||
var reply structs.ACLPolicySetResponse
|
||||
if err := s.forwardRegion(s.config.AuthoritativeRegion,
|
||||
"ACL.GetPolicies", &req, &reply); err != nil {
|
||||
s.logger.Printf("[ERR] nomad: failed to fetch policies from authoritative region: %v", err)
|
||||
goto ERR_WAIT
|
||||
}
|
||||
for _, policy := range reply.Policies {
|
||||
fetched = append(fetched, policy)
|
||||
}
|
||||
}
|
||||
|
||||
// Update local policies
|
||||
if len(fetched) > 0 {
|
||||
args := &structs.ACLPolicyUpsertRequest{
|
||||
Policies: fetched,
|
||||
}
|
||||
_, _, err := s.raftApply(structs.ACLPolicyUpsertRequestType, args)
|
||||
if err != nil {
|
||||
s.logger.Printf("[ERR] nomad: failed to update policies: %v", err)
|
||||
goto ERR_WAIT
|
||||
}
|
||||
}
|
||||
|
||||
// Update the minimum query index, blocks until there
|
||||
// is a change.
|
||||
req.MinQueryIndex = resp.Index
|
||||
}
|
||||
}
|
||||
|
||||
ERR_WAIT:
|
||||
select {
|
||||
case <-time.After(s.config.ReplicationBackoff):
|
||||
goto START
|
||||
case <-stopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// diffACLPolicies is used to perform a two-way diff between the local
|
||||
// policies and the remote policies to determine which policies need to
|
||||
// be deleted or updated.
|
||||
func diffACLPolicies(state *state.StateStore, minIndex uint64, remoteList []*structs.ACLPolicyListStub) (delete []string, update []string) {
|
||||
// Construct a set of the local and remote policies
|
||||
local := make(map[string][]byte)
|
||||
remote := make(map[string]struct{})
|
||||
|
||||
// Add all the local policies
|
||||
iter, err := state.ACLPolicies(nil)
|
||||
if err != nil {
|
||||
panic("failed to iterate local policies")
|
||||
}
|
||||
for {
|
||||
raw := iter.Next()
|
||||
if raw == nil {
|
||||
break
|
||||
}
|
||||
policy := raw.(*structs.ACLPolicy)
|
||||
local[policy.Name] = policy.Hash
|
||||
}
|
||||
|
||||
// Iterate over the remote policies
|
||||
for _, rp := range remoteList {
|
||||
remote[rp.Name] = struct{}{}
|
||||
|
||||
// Check if the policy is missing locally
|
||||
if localHash, ok := local[rp.Name]; !ok {
|
||||
update = append(update, rp.Name)
|
||||
|
||||
// Check if policy is newer remotely and there is a hash mis-match.
|
||||
} else if rp.ModifyIndex > minIndex && !bytes.Equal(localHash, rp.Hash) {
|
||||
update = append(update, rp.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if policy should be deleted
|
||||
for lp := range local {
|
||||
if _, ok := remote[lp]; !ok {
|
||||
delete = append(delete, lp)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
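A worked example of the diff semantics, mirroring TestLeader_DiffACLPolicies further below:
// Local state holds p1, p2, p3. The remote listing contains p2 (ModifyIndex at
// or below minIndex, hash unchanged), p3 (higher ModifyIndex and a different
// hash) and p4 (unknown locally). Then:
//
//   delete, update := diffACLPolicies(state, 50, remoteList)
//   // delete == []string{p1.Name}          (p1 no longer exists remotely)
//   // update == []string{p3.Name, p4.Name} (p3 changed upstream, p4 is new)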
|
||||
|
||||
// replicateACLTokens is used to replicate global ACL tokens from
|
||||
// the authoritative region to this region.
|
||||
func (s *Server) replicateACLTokens(stopCh chan struct{}) {
|
||||
req := structs.ACLTokenListRequest{
|
||||
GlobalOnly: true,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Region: s.config.AuthoritativeRegion,
|
||||
AllowStale: true,
|
||||
},
|
||||
}
|
||||
limiter := rate.NewLimiter(replicationRateLimit, int(replicationRateLimit))
|
||||
s.logger.Printf("[DEBUG] nomad: starting ACL token replication from authoritative region %q", req.Region)
|
||||
|
||||
START:
|
||||
for {
|
||||
select {
|
||||
case <-stopCh:
|
||||
return
|
||||
default:
|
||||
// Rate limit how often we attempt replication
|
||||
limiter.Wait(context.Background())
|
||||
|
||||
// Fetch the list of tokens
|
||||
var resp structs.ACLTokenListResponse
|
||||
req.SecretID = s.ReplicationToken()
|
||||
err := s.forwardRegion(s.config.AuthoritativeRegion,
|
||||
"ACL.ListTokens", &req, &resp)
|
||||
if err != nil {
|
||||
s.logger.Printf("[ERR] nomad: failed to fetch tokens from authoritative region: %v", err)
|
||||
goto ERR_WAIT
|
||||
}
|
||||
|
||||
// Perform a two-way diff
|
||||
delete, update := diffACLTokens(s.State(), req.MinQueryIndex, resp.Tokens)
|
||||
|
||||
// Delete tokens that should not exist
|
||||
if len(delete) > 0 {
|
||||
args := &structs.ACLTokenDeleteRequest{
|
||||
AccessorIDs: delete,
|
||||
}
|
||||
_, _, err := s.raftApply(structs.ACLTokenDeleteRequestType, args)
|
||||
if err != nil {
|
||||
s.logger.Printf("[ERR] nomad: failed to delete tokens: %v", err)
|
||||
goto ERR_WAIT
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch any outdated policies.
|
||||
var fetched []*structs.ACLToken
|
||||
if len(update) > 0 {
|
||||
req := structs.ACLTokenSetRequest{
|
||||
AccessorIDS: update,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Region: s.config.AuthoritativeRegion,
|
||||
SecretID: s.ReplicationToken(),
|
||||
AllowStale: true,
|
||||
MinQueryIndex: resp.Index - 1,
|
||||
},
|
||||
}
|
||||
var reply structs.ACLTokenSetResponse
|
||||
if err := s.forwardRegion(s.config.AuthoritativeRegion,
|
||||
"ACL.GetTokens", &req, &reply); err != nil {
|
||||
s.logger.Printf("[ERR] nomad: failed to fetch tokens from authoritative region: %v", err)
|
||||
goto ERR_WAIT
|
||||
}
|
||||
for _, token := range reply.Tokens {
|
||||
fetched = append(fetched, token)
|
||||
}
|
||||
}
|
||||
|
||||
// Update local tokens
|
||||
if len(fetched) > 0 {
|
||||
args := &structs.ACLTokenUpsertRequest{
|
||||
Tokens: fetched,
|
||||
}
|
||||
_, _, err := s.raftApply(structs.ACLTokenUpsertRequestType, args)
|
||||
if err != nil {
|
||||
s.logger.Printf("[ERR] nomad: failed to update tokens: %v", err)
|
||||
goto ERR_WAIT
|
||||
}
|
||||
}
|
||||
|
||||
// Update the minimum query index, blocks until there
|
||||
// is a change.
|
||||
req.MinQueryIndex = resp.Index
|
||||
}
|
||||
}
|
||||
|
||||
ERR_WAIT:
|
||||
select {
|
||||
case <-time.After(s.config.ReplicationBackoff):
|
||||
goto START
|
||||
case <-stopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// diffACLTokens is used to perform a two-way diff between the local
|
||||
// tokens and the remote tokens to determine which tokens need to
|
||||
// be deleted or updated.
|
||||
func diffACLTokens(state *state.StateStore, minIndex uint64, remoteList []*structs.ACLTokenListStub) (delete []string, update []string) {
|
||||
// Construct a set of the local and remote policies
|
||||
local := make(map[string][]byte)
|
||||
remote := make(map[string]struct{})
|
||||
|
||||
// Add all the local global tokens
|
||||
iter, err := state.ACLTokensByGlobal(nil, true)
|
||||
if err != nil {
|
||||
panic("failed to iterate local tokens")
|
||||
}
|
||||
for {
|
||||
raw := iter.Next()
|
||||
if raw == nil {
|
||||
break
|
||||
}
|
||||
token := raw.(*structs.ACLToken)
|
||||
local[token.AccessorID] = token.Hash
|
||||
}
|
||||
|
||||
// Iterate over the remote tokens
|
||||
for _, rp := range remoteList {
|
||||
remote[rp.AccessorID] = struct{}{}
|
||||
|
||||
// Check if the token is missing locally
|
||||
if localHash, ok := local[rp.AccessorID]; !ok {
|
||||
update = append(update, rp.AccessorID)
|
||||
|
||||
// Check if policy is newer remotely and there is a hash mis-match.
|
||||
} else if rp.ModifyIndex > minIndex && !bytes.Equal(localHash, rp.Hash) {
|
||||
update = append(update, rp.AccessorID)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if local token should be deleted
|
||||
for lp := range local {
|
||||
if _, ok := remote[lp]; !ok {
|
||||
delete = append(delete, lp)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
|
@ -3,13 +3,16 @@ package nomad
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/hashicorp/nomad/nomad/state"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
"github.com/hashicorp/nomad/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestLeader_LeftServer(t *testing.T) {
|
||||
|
@ -623,3 +626,154 @@ func TestLeader_RestoreVaultAccessors(t *testing.T) {
|
|||
t.Fatalf("Bad revoked accessors: %v", tvc.RevokedTokens)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeader_ReplicateACLPolicies(t *testing.T) {
|
||||
t.Parallel()
|
||||
s1, root := testACLServer(t, func(c *Config) {
|
||||
c.Region = "region1"
|
||||
c.AuthoritativeRegion = "region1"
|
||||
c.ACLEnabled = true
|
||||
})
|
||||
defer s1.Shutdown()
|
||||
s2, _ := testACLServer(t, func(c *Config) {
|
||||
c.Region = "region2"
|
||||
c.AuthoritativeRegion = "region1"
|
||||
c.ACLEnabled = true
|
||||
c.ReplicationBackoff = 20 * time.Millisecond
|
||||
c.ReplicationToken = root.SecretID
|
||||
})
|
||||
defer s2.Shutdown()
|
||||
testJoin(t, s1, s2)
|
||||
testutil.WaitForLeader(t, s1.RPC)
|
||||
testutil.WaitForLeader(t, s2.RPC)
|
||||
|
||||
// Write a policy to the authoritative region
|
||||
p1 := mock.ACLPolicy()
|
||||
if err := s1.State().UpsertACLPolicies(100, []*structs.ACLPolicy{p1}); err != nil {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
|
||||
// Wait for the policy to replicate
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
state := s2.State()
|
||||
out, err := state.ACLPolicyByName(nil, p1.Name)
|
||||
return out != nil, err
|
||||
}, func(err error) {
|
||||
t.Fatalf("should replicate policy")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLeader_DiffACLPolicies(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
state, err := state.NewStateStore(os.Stderr)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Populate the local state
|
||||
p1 := mock.ACLPolicy()
|
||||
p2 := mock.ACLPolicy()
|
||||
p3 := mock.ACLPolicy()
|
||||
err = state.UpsertACLPolicies(100, []*structs.ACLPolicy{p1, p2, p3})
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Simulate a remote list
|
||||
p2Stub := p2.Stub()
|
||||
p2Stub.ModifyIndex = 50 // Ignored, same index
|
||||
p3Stub := p3.Stub()
|
||||
p3Stub.ModifyIndex = 100 // Updated, higher index
|
||||
p3Stub.Hash = []byte{0, 1, 2, 3}
|
||||
p4 := mock.ACLPolicy()
|
||||
remoteList := []*structs.ACLPolicyListStub{
|
||||
p2Stub,
|
||||
p3Stub,
|
||||
p4.Stub(),
|
||||
}
|
||||
delete, update := diffACLPolicies(state, 50, remoteList)
|
||||
|
||||
// P1 does not exist on the remote side, should delete
|
||||
assert.Equal(t, []string{p1.Name}, delete)
|
||||
|
||||
// P2 is un-modified - ignore. P3 modified, P4 new.
|
||||
assert.Equal(t, []string{p3.Name, p4.Name}, update)
|
||||
}
|
||||
|
||||
func TestLeader_ReplicateACLTokens(t *testing.T) {
|
||||
t.Parallel()
|
||||
s1, root := testACLServer(t, func(c *Config) {
|
||||
c.Region = "region1"
|
||||
c.AuthoritativeRegion = "region1"
|
||||
c.ACLEnabled = true
|
||||
})
|
||||
defer s1.Shutdown()
|
||||
s2, _ := testACLServer(t, func(c *Config) {
|
||||
c.Region = "region2"
|
||||
c.AuthoritativeRegion = "region1"
|
||||
c.ACLEnabled = true
|
||||
c.ReplicationBackoff = 20 * time.Millisecond
|
||||
c.ReplicationToken = root.SecretID
|
||||
})
|
||||
defer s2.Shutdown()
|
||||
testJoin(t, s1, s2)
|
||||
testutil.WaitForLeader(t, s1.RPC)
|
||||
testutil.WaitForLeader(t, s2.RPC)
|
||||
|
||||
// Write a token to the authoritative region
|
||||
p1 := mock.ACLToken()
|
||||
p1.Global = true
|
||||
if err := s1.State().UpsertACLTokens(100, []*structs.ACLToken{p1}); err != nil {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
|
||||
// Wait for the token to replicate
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
state := s2.State()
|
||||
out, err := state.ACLTokenByAccessorID(nil, p1.AccessorID)
|
||||
return out != nil, err
|
||||
}, func(err error) {
|
||||
t.Fatalf("should replicate token")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLeader_DiffACLTokens(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
state, err := state.NewStateStore(os.Stderr)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Populate the local state
|
||||
p0 := mock.ACLToken()
|
||||
p1 := mock.ACLToken()
|
||||
p1.Global = true
|
||||
p2 := mock.ACLToken()
|
||||
p2.Global = true
|
||||
p3 := mock.ACLToken()
|
||||
p3.Global = true
|
||||
err = state.UpsertACLTokens(100, []*structs.ACLToken{p0, p1, p2, p3})
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Simulate a remote list
|
||||
p2Stub := p2.Stub()
|
||||
p2Stub.ModifyIndex = 50 // Ignored, same index
|
||||
p3Stub := p3.Stub()
|
||||
p3Stub.ModifyIndex = 100 // Updated, higher index
|
||||
p3Stub.Hash = []byte{0, 1, 2, 3}
|
||||
p4 := mock.ACLToken()
|
||||
p4.Global = true
|
||||
remoteList := []*structs.ACLTokenListStub{
|
||||
p2Stub,
|
||||
p3Stub,
|
||||
p4.Stub(),
|
||||
}
|
||||
delete, update := diffACLTokens(state, 50, remoteList)
|
||||
|
||||
// P0 is local and should be ignored
|
||||
// P1 does not exist on the remote side, should delete
|
||||
assert.Equal(t, []string{p1.AccessorID}, delete)
|
||||
|
||||
// P2 is un-modified - ignore. P3 modified, P4 new.
|
||||
assert.Equal(t, []string{p3.AccessorID, p4.AccessorID}, update)
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package mock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -336,3 +337,54 @@ func Plan() *structs.Plan {
|
|||
func PlanResult() *structs.PlanResult {
|
||||
return &structs.PlanResult{}
|
||||
}
|
||||
|
||||
func ACLPolicy() *structs.ACLPolicy {
|
||||
ap := &structs.ACLPolicy{
|
||||
Name: fmt.Sprintf("policy-%s", structs.GenerateUUID()),
|
||||
Description: "Super cool policy!",
|
||||
Rules: `
|
||||
namespace "default" {
|
||||
policy = "write"
|
||||
}
|
||||
node {
|
||||
policy = "read"
|
||||
}
|
||||
agent {
|
||||
policy = "read"
|
||||
}
|
||||
`,
|
||||
CreateIndex: 10,
|
||||
ModifyIndex: 20,
|
||||
}
|
||||
ap.SetHash()
|
||||
return ap
|
||||
}
|
||||
|
||||
func ACLToken() *structs.ACLToken {
|
||||
tk := &structs.ACLToken{
|
||||
AccessorID: structs.GenerateUUID(),
|
||||
SecretID: structs.GenerateUUID(),
|
||||
Name: "my cool token " + structs.GenerateUUID(),
|
||||
Type: "client",
|
||||
Policies: []string{"foo", "bar"},
|
||||
Global: false,
|
||||
CreateTime: time.Now().UTC(),
|
||||
CreateIndex: 10,
|
||||
ModifyIndex: 20,
|
||||
}
|
||||
tk.SetHash()
|
||||
return tk
|
||||
}
|
||||
|
||||
func ACLManagementToken() *structs.ACLToken {
|
||||
return &structs.ACLToken{
|
||||
AccessorID: structs.GenerateUUID(),
|
||||
SecretID: structs.GenerateUUID(),
|
||||
Name: "management " + structs.GenerateUUID(),
|
||||
Type: "management",
|
||||
Global: true,
|
||||
CreateTime: time.Now().UTC(),
|
||||
CreateIndex: 10,
|
||||
ModifyIndex: 20,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,6 +20,7 @@ import (
|
|||
consulapi "github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/hashicorp/nomad/command/agent/consul"
|
||||
"github.com/hashicorp/nomad/helper/tlsutil"
|
||||
"github.com/hashicorp/nomad/nomad/deploymentwatcher"
|
||||
|
@ -72,6 +73,10 @@ const (
|
|||
// defaultConsulDiscoveryIntervalRetry is how often to poll Consul for
|
||||
// new servers if there is no leader and the last Consul query failed.
|
||||
defaultConsulDiscoveryIntervalRetry time.Duration = 9 * time.Second
|
||||
|
||||
// aclCacheSize is the number of ACL objects to keep cached. ACLs have a parsing and
|
||||
// construction cost, so we keep the hot objects cached to reduce the ACL token resolution time.
|
||||
aclCacheSize = 512
|
||||
)
|
||||
|
||||
// Server is Nomad server which manages the job queues,
|
||||
|
@ -158,6 +163,9 @@ type Server struct {
|
|||
// Worker used for processing
|
||||
workers []*Worker
|
||||
|
||||
// aclCache is used to maintain the parsed ACL objects
|
||||
aclCache *lru.TwoQueueCache
|
||||
|
||||
left bool
|
||||
shutdown bool
|
||||
shutdownCh chan struct{}
|
||||
|
@ -178,6 +186,7 @@ type endpoints struct {
|
|||
Periodic *Periodic
|
||||
System *System
|
||||
Operator *Operator
|
||||
ACL *ACL
|
||||
}
|
||||
|
||||
// NewServer is used to construct a new Nomad server from the
|
||||
|
@ -225,6 +234,12 @@ func NewServer(config *Config, consulCatalog consul.CatalogAPI, logger *log.Logg
|
|||
incomingTLS = itls
|
||||
}
|
||||
|
||||
// Create the ACL object cache
|
||||
aclCache, err := lru.New2Q(aclCacheSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create the server
|
||||
s := &Server{
|
||||
config: config,
|
||||
|
@ -240,6 +255,7 @@ func NewServer(config *Config, consulCatalog consul.CatalogAPI, logger *log.Logg
|
|||
blockedEvals: blockedEvals,
|
||||
planQueue: planQueue,
|
||||
rpcTLS: incomingTLS,
|
||||
aclCache: aclCache,
|
||||
shutdownCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
|
@ -707,6 +723,7 @@ func (s *Server) setupVaultClient() error {
|
|||
// setupRPC is used to setup the RPC listener
|
||||
func (s *Server) setupRPC(tlsWrap tlsutil.RegionWrapper) error {
|
||||
// Create endpoints
|
||||
s.endpoints.ACL = &ACL{s}
|
||||
s.endpoints.Alloc = &Alloc{s}
|
||||
s.endpoints.Eval = &Eval{s}
|
||||
s.endpoints.Job = &Job{s}
|
||||
|
@ -721,6 +738,7 @@ func (s *Server) setupRPC(tlsWrap tlsutil.RegionWrapper) error {
|
|||
s.endpoints.Search = &Search{s}
|
||||
|
||||
// Register the handlers
|
||||
s.rpcServer.Register(s.endpoints.ACL)
|
||||
s.rpcServer.Register(s.endpoints.Alloc)
|
||||
s.rpcServer.Register(s.endpoints.Eval)
|
||||
s.rpcServer.Register(s.endpoints.Job)
|
||||
|
@ -1129,6 +1147,12 @@ func (s *Server) GetConfig() *Config {
|
|||
return s.config
|
||||
}
|
||||
|
||||
// ReplicationToken returns the token used for replication. We use a method to support
|
||||
// dynamic reloading of this value later.
|
||||
func (s *Server) ReplicationToken() string {
|
||||
return s.config.ReplicationToken
|
||||
}
|
||||
|
||||
// peersInfoContent is used to help operators understand what happened to the
|
||||
// peers.json file. This is written to a file called peers.info in the same
|
||||
// location.
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/command/agent/consul"
|
||||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
"github.com/hashicorp/nomad/nomad/structs/config"
|
||||
"github.com/hashicorp/nomad/testutil"
|
||||
|
@ -38,6 +39,21 @@ func tmpDir(t *testing.T) string {
|
|||
return dir
|
||||
}
|
||||
|
||||
func testACLServer(t *testing.T, cb func(*Config)) (*Server, *structs.ACLToken) {
|
||||
server := testServer(t, func(c *Config) {
|
||||
c.ACLEnabled = true
|
||||
if cb != nil {
|
||||
cb(c)
|
||||
}
|
||||
})
|
||||
token := mock.ACLManagementToken()
|
||||
err := server.State().BootstrapACLTokens(1, token)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to bootstrap ACL token: %v", err)
|
||||
}
|
||||
return server, token
|
||||
}
|
||||
|
||||
func testServer(t *testing.T, cb func(*Config)) *Server {
|
||||
// Setup the default settings
|
||||
config := DefaultConfig()
|
||||
|
|
|
@ -26,6 +26,8 @@ func stateStoreSchema() *memdb.DBSchema {
|
|||
evalTableSchema,
|
||||
allocTableSchema,
|
||||
vaultAccessorTableSchema,
|
||||
aclPolicyTableSchema,
|
||||
aclTokenTableSchema,
|
||||
}
|
||||
|
||||
// Add each of the tables
|
||||
|
@ -430,3 +432,55 @@ func vaultAccessorTableSchema() *memdb.TableSchema {
|
|||
},
|
||||
}
|
||||
}
|
||||
|
||||
// aclPolicyTableSchema returns the MemDB schema for the policy table.
|
||||
// This table is used to store the policies which are refrenced by tokens
|
||||
func aclPolicyTableSchema() *memdb.TableSchema {
|
||||
return &memdb.TableSchema{
|
||||
Name: "acl_policy",
|
||||
Indexes: map[string]*memdb.IndexSchema{
|
||||
"id": &memdb.IndexSchema{
|
||||
Name: "id",
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: &memdb.StringFieldIndex{
|
||||
Field: "Name",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// aclTokenTableSchema returns the MemDB schema for the tokens table.
|
||||
// This table is used to store the bearer tokens which are used to authenticate
|
||||
func aclTokenTableSchema() *memdb.TableSchema {
|
||||
return &memdb.TableSchema{
|
||||
Name: "acl_token",
|
||||
Indexes: map[string]*memdb.IndexSchema{
|
||||
"id": &memdb.IndexSchema{
|
||||
Name: "id",
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: &memdb.UUIDFieldIndex{
|
||||
Field: "AccessorID",
|
||||
},
|
||||
},
|
||||
"secret": &memdb.IndexSchema{
|
||||
Name: "secret",
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: &memdb.UUIDFieldIndex{
|
||||
Field: "SecretID",
|
||||
},
|
||||
},
|
||||
"global": &memdb.IndexSchema{
|
||||
Name: "global",
|
||||
AllowMissing: false,
|
||||
Unique: false,
|
||||
Indexer: &memdb.FieldSetIndex{
|
||||
Field: "Global",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2788,6 +2788,286 @@ func (s *StateStore) addEphemeralDiskToTaskGroups(job *structs.Job) {
|
|||
}
|
||||
}
|
||||
|
||||
// UpsertACLPolicies is used to create or update a set of ACL policies
|
||||
func (s *StateStore) UpsertACLPolicies(index uint64, policies []*structs.ACLPolicy) error {
|
||||
txn := s.db.Txn(true)
|
||||
defer txn.Abort()
|
||||
|
||||
for _, policy := range policies {
|
||||
// Ensure the policy hash is non-nil. This should be done outside the state store
|
||||
// for performance reasons, but we check here for defense in depth.
|
||||
if len(policy.Hash) == 0 {
|
||||
policy.SetHash()
|
||||
}
|
||||
|
||||
// Check if the policy already exists
|
||||
existing, err := txn.First("acl_policy", "id", policy.Name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("policy lookup failed: %v", err)
|
||||
}
|
||||
|
||||
// Update all the indexes
|
||||
if existing != nil {
|
||||
policy.CreateIndex = existing.(*structs.ACLPolicy).CreateIndex
|
||||
policy.ModifyIndex = index
|
||||
} else {
|
||||
policy.CreateIndex = index
|
||||
policy.ModifyIndex = index
|
||||
}
|
||||
|
||||
// Update the policy
|
||||
if err := txn.Insert("acl_policy", policy); err != nil {
|
||||
return fmt.Errorf("upserting policy failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Update the indexes tabl
|
||||
if err := txn.Insert("index", &IndexEntry{"acl_policy", index}); err != nil {
|
||||
return fmt.Errorf("index update failed: %v", err)
|
||||
}
|
||||
|
||||
txn.Commit()
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteACLPolicies deletes the policies with the given names
|
||||
func (s *StateStore) DeleteACLPolicies(index uint64, names []string) error {
|
||||
txn := s.db.Txn(true)
|
||||
defer txn.Abort()
|
||||
|
||||
// Delete the policy
|
||||
for _, name := range names {
|
||||
if _, err := txn.DeleteAll("acl_policy", "id", name); err != nil {
|
||||
return fmt.Errorf("deleting acl policy failed: %v", err)
|
||||
}
|
||||
}
|
||||
if err := txn.Insert("index", &IndexEntry{"acl_policy", index}); err != nil {
|
||||
return fmt.Errorf("index update failed: %v", err)
|
||||
}
|
||||
txn.Commit()
|
||||
return nil
|
||||
}
|
||||
|
||||
// ACLPolicyByName is used to lookup a policy by name
|
||||
func (s *StateStore) ACLPolicyByName(ws memdb.WatchSet, name string) (*structs.ACLPolicy, error) {
|
||||
txn := s.db.Txn(false)
|
||||
|
||||
watchCh, existing, err := txn.FirstWatch("acl_policy", "id", name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("acl policy lookup failed: %v", err)
|
||||
}
|
||||
ws.Add(watchCh)
|
||||
|
||||
if existing != nil {
|
||||
return existing.(*structs.ACLPolicy), nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ACLPolicyByNamePrefix is used to lookup policies by prefix
|
||||
func (s *StateStore) ACLPolicyByNamePrefix(ws memdb.WatchSet, prefix string) (memdb.ResultIterator, error) {
|
||||
txn := s.db.Txn(false)
|
||||
|
||||
iter, err := txn.Get("acl_policy", "id_prefix", prefix)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("acl policy lookup failed: %v", err)
|
||||
}
|
||||
ws.Add(iter.WatchCh())
|
||||
|
||||
return iter, nil
|
||||
}
|
||||
|
||||
// ACLPolicies returns an iterator over all the acl policies
|
||||
func (s *StateStore) ACLPolicies(ws memdb.WatchSet) (memdb.ResultIterator, error) {
|
||||
txn := s.db.Txn(false)
|
||||
|
||||
// Walk the entire table
|
||||
iter, err := txn.Get("acl_policy", "id")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ws.Add(iter.WatchCh())
|
||||
return iter, nil
|
||||
}
|
||||
|
||||
// UpsertACLTokens is used to create or update a set of ACL tokens
|
||||
func (s *StateStore) UpsertACLTokens(index uint64, tokens []*structs.ACLToken) error {
|
||||
txn := s.db.Txn(true)
|
||||
defer txn.Abort()
|
||||
|
||||
for _, token := range tokens {
|
||||
// Ensure the policy hash is non-nil. This should be done outside the state store
|
||||
// for performance reasons, but we check here for defense in depth.
|
||||
if len(token.Hash) == 0 {
|
||||
token.SetHash()
|
||||
}
|
||||
|
||||
// Check if the token already exists
|
||||
existing, err := txn.First("acl_token", "id", token.AccessorID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("token lookup failed: %v", err)
|
||||
}
|
||||
|
||||
// Update all the indexes
|
||||
if existing != nil {
|
||||
existTK := existing.(*structs.ACLToken)
|
||||
token.CreateIndex = existTK.CreateIndex
|
||||
token.ModifyIndex = index
|
||||
|
||||
// Do not allow SecretID or create time to change
|
||||
token.SecretID = existTK.SecretID
|
||||
token.CreateTime = existTK.CreateTime
|
||||
|
||||
} else {
|
||||
token.CreateIndex = index
|
||||
token.ModifyIndex = index
|
||||
}
|
||||
|
||||
// Update the token
|
||||
if err := txn.Insert("acl_token", token); err != nil {
|
||||
return fmt.Errorf("upserting token failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Update the indexes table
|
||||
if err := txn.Insert("index", &IndexEntry{"acl_token", index}); err != nil {
|
||||
return fmt.Errorf("index update failed: %v", err)
|
||||
}
|
||||
txn.Commit()
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteACLTokens deletes the tokens with the given accessor ids
|
||||
func (s *StateStore) DeleteACLTokens(index uint64, ids []string) error {
|
||||
txn := s.db.Txn(true)
|
||||
defer txn.Abort()
|
||||
|
||||
// Delete the tokens
|
||||
for _, id := range ids {
|
||||
if _, err := txn.DeleteAll("acl_token", "id", id); err != nil {
|
||||
return fmt.Errorf("deleting acl token failed: %v", err)
|
||||
}
|
||||
}
|
||||
if err := txn.Insert("index", &IndexEntry{"acl_token", index}); err != nil {
|
||||
return fmt.Errorf("index update failed: %v", err)
|
||||
}
|
||||
txn.Commit()
|
||||
return nil
|
||||
}
|
||||
|
||||
// ACLTokenByAccessorID is used to lookup a token by accessor ID
|
||||
func (s *StateStore) ACLTokenByAccessorID(ws memdb.WatchSet, id string) (*structs.ACLToken, error) {
|
||||
txn := s.db.Txn(false)
|
||||
|
||||
watchCh, existing, err := txn.FirstWatch("acl_token", "id", id)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("acl token lookup failed: %v", err)
|
||||
}
|
||||
ws.Add(watchCh)
|
||||
|
||||
if existing != nil {
|
||||
return existing.(*structs.ACLToken), nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ACLTokenBySecretID is used to lookup a token by secret ID
|
||||
func (s *StateStore) ACLTokenBySecretID(ws memdb.WatchSet, secretID string) (*structs.ACLToken, error) {
|
||||
txn := s.db.Txn(false)
|
||||
|
||||
watchCh, existing, err := txn.FirstWatch("acl_token", "secret", secretID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("acl token lookup failed: %v", err)
|
||||
}
|
||||
ws.Add(watchCh)
|
||||
|
||||
if existing != nil {
|
||||
return existing.(*structs.ACLToken), nil
|
||||
}
|
||||
return nil, nil
|
||||
}
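Illustrative only: a server-side request path would typically resolve the SecretID carried on QueryOptions or WriteRequest through the secret index above, falling back to the anonymous token for unauthenticated requests. The resolveRequestToken name is an assumption, not part of the patch.
func resolveRequestToken(s *StateStore, secretID string) (*structs.ACLToken, error) {
    // Empty SecretID means the request is anonymous.
    if secretID == "" {
        return structs.AnonymousACLToken, nil
    }
    token, err := s.ACLTokenBySecretID(nil, secretID)
    if err != nil {
        return nil, fmt.Errorf("token lookup failed: %v", err)
    }
    if token == nil {
        return nil, structs.ErrTokenNotFound
    }
    return token, nil
}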
|
||||
|
||||
// ACLTokenByAccessorIDPrefix is used to lookup tokens by prefix
|
||||
func (s *StateStore) ACLTokenByAccessorIDPrefix(ws memdb.WatchSet, prefix string) (memdb.ResultIterator, error) {
|
||||
txn := s.db.Txn(false)
|
||||
|
||||
iter, err := txn.Get("acl_token", "id_prefix", prefix)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("acl token lookup failed: %v", err)
|
||||
}
|
||||
ws.Add(iter.WatchCh())
|
||||
return iter, nil
|
||||
}
|
||||
|
||||
// ACLTokens returns an iterator over all the tokens
|
||||
func (s *StateStore) ACLTokens(ws memdb.WatchSet) (memdb.ResultIterator, error) {
|
||||
txn := s.db.Txn(false)
|
||||
|
||||
// Walk the entire table
|
||||
iter, err := txn.Get("acl_token", "id")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ws.Add(iter.WatchCh())
|
||||
return iter, nil
|
||||
}
|
||||
|
||||
// ACLTokensByGlobal returns an iterator over all the tokens filtered by global value
|
||||
func (s *StateStore) ACLTokensByGlobal(ws memdb.WatchSet, globalVal bool) (memdb.ResultIterator, error) {
|
||||
txn := s.db.Txn(false)
|
||||
|
||||
// Walk the entire table
|
||||
iter, err := txn.Get("acl_token", "global", globalVal)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ws.Add(iter.WatchCh())
|
||||
return iter, nil
|
||||
}
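As a usage sketch (helper name assumed, not from the patch), a caller such as a replication routine could drain this iterator to collect every globally-replicated token:
func globalTokens(s *StateStore) ([]*structs.ACLToken, error) {
    iter, err := s.ACLTokensByGlobal(nil, true)
    if err != nil {
        return nil, err
    }
    var out []*structs.ACLToken
    // Walk the iterator until it is exhausted.
    for raw := iter.Next(); raw != nil; raw = iter.Next() {
        out = append(out, raw.(*structs.ACLToken))
    }
    return out, nil
}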
|
||||
|
||||
// CanBootstrapACLToken checks if bootstrapping is possible
|
||||
func (s *StateStore) CanBootstrapACLToken() (bool, error) {
|
||||
txn := s.db.Txn(false)
|
||||
|
||||
// Lookup the bootstrap sentinel
|
||||
out, err := txn.First("index", "id", "acl_token_bootstrap")
|
||||
return out == nil, err
|
||||
}
|
||||
|
||||
// BootstrapACLTokens is used to create an initial ACL token
|
||||
func (s *StateStore) BootstrapACLTokens(index uint64, token *structs.ACLToken) error {
|
||||
txn := s.db.Txn(true)
|
||||
defer txn.Abort()
|
||||
|
||||
// Check if we have already done a bootstrap
|
||||
existing, err := txn.First("index", "id", "acl_token_bootstrap")
|
||||
if err != nil {
|
||||
return fmt.Errorf("bootstrap check failed: %v", err)
|
||||
}
|
||||
if existing != nil {
|
||||
return fmt.Errorf("ACL bootstrap already done")
|
||||
}
|
||||
|
||||
// Update the create/modify indexes
|
||||
token.CreateIndex = index
|
||||
token.ModifyIndex = index
|
||||
|
||||
// Insert the token
|
||||
if err := txn.Insert("acl_token", token); err != nil {
|
||||
return fmt.Errorf("upserting token failed: %v", err)
|
||||
}
|
||||
|
||||
// Update the indexes table; the bootstrap sentinel below prevents a future bootstrap
|
||||
if err := txn.Insert("index", &IndexEntry{"acl_token", index}); err != nil {
|
||||
return fmt.Errorf("index update failed: %v", err)
|
||||
}
|
||||
if err := txn.Insert("index", &IndexEntry{"acl_token_bootstrap", index}); err != nil {
|
||||
return fmt.Errorf("index update failed: %v", err)
|
||||
}
|
||||
txn.Commit()
|
||||
return nil
|
||||
}
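A rough sketch of the intended one-time bootstrap flow built on CanBootstrapACLToken and BootstrapACLTokens; the helper name and the management token fields shown are assumptions for illustration only.
func bootstrapOnce(s *StateStore, index uint64) (*structs.ACLToken, error) {
    ok, err := s.CanBootstrapACLToken()
    if err != nil {
        return nil, err
    }
    if !ok {
        return nil, fmt.Errorf("ACL bootstrap already done")
    }
    // Construct an initial management token (example values).
    token := &structs.ACLToken{
        AccessorID: structs.GenerateUUID(),
        SecretID:   structs.GenerateUUID(),
        Name:       "Bootstrap Token",
        Type:       structs.ACLManagementToken,
        Global:     true,
        CreateTime: time.Now().UTC(),
    }
    token.SetHash()
    if err := s.BootstrapACLTokens(index, token); err != nil {
        return nil, err
    }
    return token, nil
}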
|
||||
|
||||
// StateSnapshot is used to provide a point-in-time snapshot
|
||||
type StateSnapshot struct {
|
||||
StateStore
|
||||
|
@ -2907,6 +3187,22 @@ func (r *StateRestore) VaultAccessorRestore(accessor *structs.VaultAccessor) err
|
|||
return nil
|
||||
}
|
||||
|
||||
// ACLPolicyRestore is used to restore an ACL policy
|
||||
func (r *StateRestore) ACLPolicyRestore(policy *structs.ACLPolicy) error {
|
||||
if err := r.txn.Insert("acl_policy", policy); err != nil {
|
||||
return fmt.Errorf("inserting acl policy failed: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ACLTokenRestore is used to restore an ACL token
|
||||
func (r *StateRestore) ACLTokenRestore(token *structs.ACLToken) error {
|
||||
if err := r.txn.Insert("acl_token", token); err != nil {
|
||||
return fmt.Errorf("inserting acl token failed: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// addEphemeralDiskToTaskGroups adds missing EphemeralDisk objects to TaskGroups
|
||||
func (r *StateRestore) addEphemeralDiskToTaskGroups(job *structs.Job) {
|
||||
for _, tg := range job.TaskGroups {
|
||||
|
|
|
@ -5825,6 +5825,501 @@ func TestStateStore_RestoreVaultAccessor(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestStateStore_UpsertACLPolicy(t *testing.T) {
|
||||
state := testStateStore(t)
|
||||
policy := mock.ACLPolicy()
|
||||
policy2 := mock.ACLPolicy()
|
||||
|
||||
ws := memdb.NewWatchSet()
|
||||
if _, err := state.ACLPolicyByName(ws, policy.Name); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if _, err := state.ACLPolicyByName(ws, policy2.Name); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := state.UpsertACLPolicies(1000,
|
||||
[]*structs.ACLPolicy{policy, policy2}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
||||
ws = memdb.NewWatchSet()
|
||||
out, err := state.ACLPolicyByName(ws, policy.Name)
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, policy, out)
|
||||
|
||||
out, err = state.ACLPolicyByName(ws, policy2.Name)
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, policy2, out)
|
||||
|
||||
iter, err := state.ACLPolicies(ws)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Ensure we see both policies
|
||||
count := 0
|
||||
for {
|
||||
raw := iter.Next()
|
||||
if raw == nil {
|
||||
break
|
||||
}
|
||||
count++
|
||||
}
|
||||
if count != 2 {
|
||||
t.Fatalf("bad: %d", count)
|
||||
}
|
||||
|
||||
index, err := state.Index("acl_policy")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if index != 1000 {
|
||||
t.Fatalf("bad: %d", index)
|
||||
}
|
||||
|
||||
if watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateStore_DeleteACLPolicy(t *testing.T) {
|
||||
state := testStateStore(t)
|
||||
policy := mock.ACLPolicy()
|
||||
policy2 := mock.ACLPolicy()
|
||||
|
||||
// Create the policies
|
||||
if err := state.UpsertACLPolicies(1000,
|
||||
[]*structs.ACLPolicy{policy, policy2}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Create a watcher
|
||||
ws := memdb.NewWatchSet()
|
||||
if _, err := state.ACLPolicyByName(ws, policy.Name); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Delete the policies
|
||||
if err := state.DeleteACLPolicies(1001,
|
||||
[]string{policy.Name, policy2.Name}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Ensure watching triggered
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
||||
// Ensure we don't get the object back
|
||||
ws = memdb.NewWatchSet()
|
||||
out, err := state.ACLPolicyByName(ws, policy.Name)
|
||||
assert.Equal(t, nil, err)
|
||||
if out != nil {
|
||||
t.Fatalf("bad: %#v", out)
|
||||
}
|
||||
|
||||
iter, err := state.ACLPolicies(ws)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Ensure no policies remain
|
||||
count := 0
|
||||
for {
|
||||
raw := iter.Next()
|
||||
if raw == nil {
|
||||
break
|
||||
}
|
||||
count++
|
||||
}
|
||||
if count != 0 {
|
||||
t.Fatalf("bad: %d", count)
|
||||
}
|
||||
|
||||
index, err := state.Index("acl_policy")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if index != 1001 {
|
||||
t.Fatalf("bad: %d", index)
|
||||
}
|
||||
|
||||
if watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateStore_ACLPolicyByNamePrefix(t *testing.T) {
|
||||
state := testStateStore(t)
|
||||
names := []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"foobar",
|
||||
"foozip",
|
||||
"zip",
|
||||
}
|
||||
|
||||
// Create the policies
|
||||
var baseIndex uint64 = 1000
|
||||
for _, name := range names {
|
||||
p := mock.ACLPolicy()
|
||||
p.Name = name
|
||||
if err := state.UpsertACLPolicies(baseIndex, []*structs.ACLPolicy{p}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
baseIndex++
|
||||
}
|
||||
|
||||
// Scan by prefix
|
||||
iter, err := state.ACLPolicyByNamePrefix(nil, "foo")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Ensure we see the three matching policies
|
||||
count := 0
|
||||
out := []string{}
|
||||
for {
|
||||
raw := iter.Next()
|
||||
if raw == nil {
|
||||
break
|
||||
}
|
||||
count++
|
||||
out = append(out, raw.(*structs.ACLPolicy).Name)
|
||||
}
|
||||
if count != 3 {
|
||||
t.Fatalf("bad: %d %v", count, out)
|
||||
}
|
||||
sort.Strings(out)
|
||||
|
||||
expect := []string{"foo", "foobar", "foozip"}
|
||||
assert.Equal(t, expect, out)
|
||||
}
|
||||
|
||||
func TestStateStore_BootstrapACLTokens(t *testing.T) {
|
||||
state := testStateStore(t)
|
||||
tk1 := mock.ACLToken()
|
||||
tk2 := mock.ACLToken()
|
||||
|
||||
ok, err := state.CanBootstrapACLToken()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
if err := state.BootstrapACLTokens(1000, tk1); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
out, err := state.ACLTokenByAccessorID(nil, tk1.AccessorID)
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, tk1, out)
|
||||
|
||||
ok, err = state.CanBootstrapACLToken()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, false, ok)
|
||||
|
||||
if err := state.BootstrapACLTokens(1001, tk2); err == nil {
|
||||
t.Fatalf("expected error")
|
||||
}
|
||||
|
||||
iter, err := state.ACLTokens(nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Ensure we only see the bootstrap token
|
||||
count := 0
|
||||
for {
|
||||
raw := iter.Next()
|
||||
if raw == nil {
|
||||
break
|
||||
}
|
||||
count++
|
||||
}
|
||||
if count != 1 {
|
||||
t.Fatalf("bad: %d", count)
|
||||
}
|
||||
|
||||
index, err := state.Index("acl_token")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if index != 1000 {
|
||||
t.Fatalf("bad: %d", index)
|
||||
}
|
||||
index, err = state.Index("acl_token_bootstrap")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if index != 1000 {
|
||||
t.Fatalf("bad: %d", index)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateStore_UpsertACLTokens(t *testing.T) {
|
||||
state := testStateStore(t)
|
||||
tk1 := mock.ACLToken()
|
||||
tk2 := mock.ACLToken()
|
||||
|
||||
ws := memdb.NewWatchSet()
|
||||
if _, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if _, err := state.ACLTokenByAccessorID(ws, tk2.AccessorID); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := state.UpsertACLTokens(1000,
|
||||
[]*structs.ACLToken{tk1, tk2}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
||||
ws = memdb.NewWatchSet()
|
||||
out, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID)
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, tk1, out)
|
||||
|
||||
out, err = state.ACLTokenByAccessorID(ws, tk2.AccessorID)
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, tk2, out)
|
||||
|
||||
out, err = state.ACLTokenBySecretID(ws, tk1.SecretID)
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, tk1, out)
|
||||
|
||||
out, err = state.ACLTokenBySecretID(ws, tk2.SecretID)
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, tk2, out)
|
||||
|
||||
iter, err := state.ACLTokens(ws)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Ensure we see both tokens
|
||||
count := 0
|
||||
for {
|
||||
raw := iter.Next()
|
||||
if raw == nil {
|
||||
break
|
||||
}
|
||||
count++
|
||||
}
|
||||
if count != 2 {
|
||||
t.Fatalf("bad: %d", count)
|
||||
}
|
||||
|
||||
index, err := state.Index("acl_token")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if index != 1000 {
|
||||
t.Fatalf("bad: %d", index)
|
||||
}
|
||||
|
||||
if watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateStore_DeleteACLTokens(t *testing.T) {
|
||||
state := testStateStore(t)
|
||||
tk1 := mock.ACLToken()
|
||||
tk2 := mock.ACLToken()
|
||||
|
||||
// Create the tokens
|
||||
if err := state.UpsertACLTokens(1000,
|
||||
[]*structs.ACLToken{tk1, tk2}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Create a watcher
|
||||
ws := memdb.NewWatchSet()
|
||||
if _, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Delete the tokens
|
||||
if err := state.DeleteACLTokens(1001,
|
||||
[]string{tk1.AccessorID, tk2.AccessorID}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Ensure watching triggered
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
||||
// Ensure we don't get the object back
|
||||
ws = memdb.NewWatchSet()
|
||||
out, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID)
|
||||
assert.Equal(t, nil, err)
|
||||
if out != nil {
|
||||
t.Fatalf("bad: %#v", out)
|
||||
}
|
||||
|
||||
iter, err := state.ACLTokens(ws)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Ensure no tokens remain
|
||||
count := 0
|
||||
for {
|
||||
raw := iter.Next()
|
||||
if raw == nil {
|
||||
break
|
||||
}
|
||||
count++
|
||||
}
|
||||
if count != 0 {
|
||||
t.Fatalf("bad: %d", count)
|
||||
}
|
||||
|
||||
index, err := state.Index("acl_token")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if index != 1001 {
|
||||
t.Fatalf("bad: %d", index)
|
||||
}
|
||||
|
||||
if watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateStore_ACLTokenByAccessorIDPrefix(t *testing.T) {
|
||||
state := testStateStore(t)
|
||||
prefixes := []string{
|
||||
"aaaa",
|
||||
"aabb",
|
||||
"bbbb",
|
||||
"bbcc",
|
||||
"ffff",
|
||||
}
|
||||
|
||||
// Create the tokens
|
||||
var baseIndex uint64 = 1000
|
||||
for _, prefix := range prefixes {
|
||||
tk := mock.ACLToken()
|
||||
tk.AccessorID = prefix + tk.AccessorID[4:]
|
||||
if err := state.UpsertACLTokens(baseIndex, []*structs.ACLToken{tk}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
baseIndex++
|
||||
}
|
||||
|
||||
// Scan by prefix
|
||||
iter, err := state.ACLTokenByAccessorIDPrefix(nil, "aa")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Ensure we see both tokens
|
||||
count := 0
|
||||
out := []string{}
|
||||
for {
|
||||
raw := iter.Next()
|
||||
if raw == nil {
|
||||
break
|
||||
}
|
||||
count++
|
||||
out = append(out, raw.(*structs.ACLToken).AccessorID[:4])
|
||||
}
|
||||
if count != 2 {
|
||||
t.Fatalf("bad: %d %v", count, out)
|
||||
}
|
||||
sort.Strings(out)
|
||||
|
||||
expect := []string{"aaaa", "aabb"}
|
||||
assert.Equal(t, expect, out)
|
||||
}
|
||||
|
||||
func TestStateStore_RestoreACLPolicy(t *testing.T) {
|
||||
state := testStateStore(t)
|
||||
policy := mock.ACLPolicy()
|
||||
|
||||
restore, err := state.Restore()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
err = restore.ACLPolicyRestore(policy)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
restore.Commit()
|
||||
|
||||
ws := memdb.NewWatchSet()
|
||||
out, err := state.ACLPolicyByName(ws, policy.Name)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
assert.Equal(t, policy, out)
|
||||
}
|
||||
|
||||
func TestStateStore_ACLTokensByGlobal(t *testing.T) {
|
||||
state := testStateStore(t)
|
||||
tk1 := mock.ACLToken()
|
||||
tk2 := mock.ACLToken()
|
||||
tk3 := mock.ACLToken()
|
||||
tk4 := mock.ACLToken()
|
||||
tk3.Global = true
|
||||
|
||||
if err := state.UpsertACLTokens(1000,
|
||||
[]*structs.ACLToken{tk1, tk2, tk3, tk4}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
iter, err := state.ACLTokensByGlobal(nil, true)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Ensure we see the one global token
|
||||
count := 0
|
||||
for {
|
||||
raw := iter.Next()
|
||||
if raw == nil {
|
||||
break
|
||||
}
|
||||
count++
|
||||
}
|
||||
if count != 1 {
|
||||
t.Fatalf("bad: %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateStore_RestoreACLToken(t *testing.T) {
|
||||
state := testStateStore(t)
|
||||
token := mock.ACLToken()
|
||||
|
||||
restore, err := state.Restore()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
err = restore.ACLTokenRestore(token)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
restore.Commit()
|
||||
|
||||
ws := memdb.NewWatchSet()
|
||||
out, err := state.ACLTokenByAccessorID(ws, token.AccessorID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
assert.Equal(t, token, out)
|
||||
}
|
||||
|
||||
func TestStateStore_Abandon(t *testing.T) {
|
||||
s := testStateStore(t)
|
||||
abandonCh := s.AbandonCh()
|
||||
|
|
|
@ -2,11 +2,17 @@ package structs
|
|||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/blake2b"
|
||||
|
||||
multierror "github.com/hashicorp/go-multierror"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/hashicorp/nomad/acl"
|
||||
)
|
||||
|
||||
// MergeMultierrorWarnings takes job warnings and canonicalize warnings and
|
||||
|
@ -255,3 +261,52 @@ func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) {
|
|||
func AllocName(job, group string, idx uint) string {
|
||||
return fmt.Sprintf("%s.%s[%d]", job, group, idx)
|
||||
}
|
||||
|
||||
// ACLPolicyListHash returns a consistent hash for a set of policies.
|
||||
func ACLPolicyListHash(policies []*ACLPolicy) string {
|
||||
cacheKeyHash, err := blake2b.New256(nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for _, policy := range policies {
|
||||
cacheKeyHash.Write([]byte(policy.Name))
|
||||
binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex)
|
||||
}
|
||||
cacheKey := string(cacheKeyHash.Sum(nil))
|
||||
return cacheKey
|
||||
}
|
||||
|
||||
// CompileACLObject compiles a set of ACL policies into an ACL object with a cache
|
||||
func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) {
|
||||
// Sort the policies to ensure consistent ordering
|
||||
sort.Slice(policies, func(i, j int) bool {
|
||||
return policies[i].Name < policies[j].Name
|
||||
})
|
||||
|
||||
// Determine the cache key
|
||||
cacheKey := ACLPolicyListHash(policies)
|
||||
aclRaw, ok := cache.Get(cacheKey)
|
||||
if ok {
|
||||
return aclRaw.(*acl.ACL), nil
|
||||
}
|
||||
|
||||
// Parse the policies
|
||||
parsed := make([]*acl.Policy, 0, len(policies))
|
||||
for _, policy := range policies {
|
||||
p, err := acl.Parse(policy.Rules)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err)
|
||||
}
|
||||
parsed = append(parsed, p)
|
||||
}
|
||||
|
||||
// Create the ACL object
|
||||
aclObj, err := acl.NewACL(false, parsed)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to construct ACL: %v", err)
|
||||
}
|
||||
|
||||
// Update the cache
|
||||
cache.Add(cacheKey, aclObj)
|
||||
return aclObj, nil
|
||||
}
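A brief usage sketch of the cache behavior (the cache size and function name are arbitrary examples): compiling the same policy set twice returns the cached *acl.ACL, while any change to the set or to a policy's ModifyIndex yields a different cache key and a fresh object.
func exampleCompile(policies []*ACLPolicy) (*acl.ACL, error) {
    cache, err := lru.New2Q(128)
    if err != nil {
        return nil, err
    }
    aclObj, err := CompileACLObject(cache, policies)
    if err != nil {
        return nil, err
    }
    // A second call with the same policies is served from the cache and
    // returns the identical object.
    cached, err := CompileACLObject(cache, policies)
    if err != nil {
        return nil, err
    }
    _ = cached // same pointer as aclObj
    return aclObj, nil
}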
|
||||
|
|
|
@ -4,6 +4,9 @@ import (
|
|||
"fmt"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestRemoveAllocs(t *testing.T) {
|
||||
|
@ -269,3 +272,106 @@ func TestGenerateUUID(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestACLPolicyListHash(t *testing.T) {
|
||||
h1 := ACLPolicyListHash(nil)
|
||||
assert.NotEqual(t, "", h1)
|
||||
|
||||
p1 := &ACLPolicy{
|
||||
Name: fmt.Sprintf("policy-%s", GenerateUUID()),
|
||||
Description: "Super cool policy!",
|
||||
Rules: `
|
||||
namespace "default" {
|
||||
policy = "write"
|
||||
}
|
||||
node {
|
||||
policy = "read"
|
||||
}
|
||||
agent {
|
||||
policy = "read"
|
||||
}
|
||||
`,
|
||||
CreateIndex: 10,
|
||||
ModifyIndex: 20,
|
||||
}
|
||||
|
||||
h2 := ACLPolicyListHash([]*ACLPolicy{p1})
|
||||
assert.NotEqual(t, "", h2)
|
||||
assert.NotEqual(t, h1, h2)
|
||||
|
||||
// Create P2 as copy of P1 with new name
|
||||
p2 := &ACLPolicy{}
|
||||
*p2 = *p1
|
||||
p2.Name = fmt.Sprintf("policy-%s", GenerateUUID())
|
||||
|
||||
h3 := ACLPolicyListHash([]*ACLPolicy{p1, p2})
|
||||
assert.NotEqual(t, "", h3)
|
||||
assert.NotEqual(t, h2, h3)
|
||||
|
||||
h4 := ACLPolicyListHash([]*ACLPolicy{p2})
|
||||
assert.NotEqual(t, "", h4)
|
||||
assert.NotEqual(t, h3, h4)
|
||||
|
||||
// ModifyIndex should change the hash
|
||||
p2.ModifyIndex++
|
||||
h5 := ACLPolicyListHash([]*ACLPolicy{p2})
|
||||
assert.NotEqual(t, "", h5)
|
||||
assert.NotEqual(t, h4, h5)
|
||||
}
|
||||
|
||||
func TestCompileACLObject(t *testing.T) {
|
||||
p1 := &ACLPolicy{
|
||||
Name: fmt.Sprintf("policy-%s", GenerateUUID()),
|
||||
Description: "Super cool policy!",
|
||||
Rules: `
|
||||
namespace "default" {
|
||||
policy = "write"
|
||||
}
|
||||
node {
|
||||
policy = "read"
|
||||
}
|
||||
agent {
|
||||
policy = "read"
|
||||
}
|
||||
`,
|
||||
CreateIndex: 10,
|
||||
ModifyIndex: 20,
|
||||
}
|
||||
|
||||
// Create P2 as copy of P1 with new name
|
||||
p2 := &ACLPolicy{}
|
||||
*p2 = *p1
|
||||
p2.Name = fmt.Sprintf("policy-%s", GenerateUUID())
|
||||
|
||||
// Create a small cache
|
||||
cache, err := lru.New2Q(16)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Test compilation
|
||||
aclObj, err := CompileACLObject(cache, []*ACLPolicy{p1})
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, aclObj)
|
||||
|
||||
// Should get the same object
|
||||
aclObj2, err := CompileACLObject(cache, []*ACLPolicy{p1})
|
||||
assert.Nil(t, err)
|
||||
if aclObj != aclObj2 {
|
||||
t.Fatalf("expected the same object")
|
||||
}
|
||||
|
||||
// Should get another object
|
||||
aclObj3, err := CompileACLObject(cache, []*ACLPolicy{p1, p2})
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, aclObj3)
|
||||
if aclObj == aclObj3 {
|
||||
t.Fatalf("unexpected same object")
|
||||
}
|
||||
|
||||
// Should be order independent
|
||||
aclObj4, err := CompileACLObject(cache, []*ACLPolicy{p2, p1})
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, aclObj4)
|
||||
if aclObj3 != aclObj4 {
|
||||
t.Fatalf("expected same object")
|
||||
}
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -20,10 +20,13 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/blake2b"
|
||||
|
||||
"github.com/gorhill/cronexpr"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
version "github.com/hashicorp/go-version"
|
||||
"github.com/hashicorp/go-version"
|
||||
"github.com/hashicorp/nomad/acl"
|
||||
"github.com/hashicorp/nomad/helper"
|
||||
"github.com/hashicorp/nomad/helper/args"
|
||||
"github.com/mitchellh/copystructure"
|
||||
|
@ -33,8 +36,13 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
ErrNoLeader = fmt.Errorf("No cluster leader")
|
||||
ErrNoRegionPath = fmt.Errorf("No path to region")
|
||||
ErrNoLeader = fmt.Errorf("No cluster leader")
|
||||
ErrNoRegionPath = fmt.Errorf("No path to region")
|
||||
ErrTokenNotFound = errors.New("ACL token not found")
|
||||
ErrPermissionDenied = errors.New("Permission denied")
|
||||
|
||||
// validPolicyName is used to validate a policy name
|
||||
validPolicyName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$")
|
||||
)
|
||||
|
||||
type MessageType uint8
|
||||
|
@ -59,6 +67,11 @@ const (
|
|||
DeploymentAllocHealthRequestType
|
||||
DeploymentDeleteRequestType
|
||||
JobStabilityRequestType
|
||||
ACLPolicyUpsertRequestType
|
||||
ACLPolicyDeleteRequestType
|
||||
ACLTokenUpsertRequestType
|
||||
ACLTokenDeleteRequestType
|
||||
ACLTokenBootstrapRequestType
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -87,6 +100,19 @@ const (
|
|||
GetterModeAny = "any"
|
||||
GetterModeFile = "file"
|
||||
GetterModeDir = "dir"
|
||||
|
||||
// maxPolicyDescriptionLength limits a policy description length
|
||||
maxPolicyDescriptionLength = 256
|
||||
|
||||
// maxTokenNameLength limits an ACL token name length
|
||||
maxTokenNameLength = 64
|
||||
|
||||
// ACLClientToken and ACLManagementToken are the only types of tokens
|
||||
ACLClientToken = "client"
|
||||
ACLManagementToken = "management"
|
||||
|
||||
// DefaultNamespace is the default namespace.
|
||||
DefaultNamespace = "default"
|
||||
)
|
||||
|
||||
// Context defines the scope in which a search for Nomad object operates, and
|
||||
|
@ -127,6 +153,9 @@ type QueryOptions struct {
|
|||
|
||||
// If set, used as prefix for resource list searches
|
||||
Prefix string
|
||||
|
||||
// SecretID is secret portion of the ACL token used for the request
|
||||
SecretID string
|
||||
}
|
||||
|
||||
func (q QueryOptions) RequestRegion() string {
|
||||
|
@ -145,6 +174,9 @@ func (q QueryOptions) AllowStaleRead() bool {
|
|||
type WriteRequest struct {
|
||||
// The target region for this write
|
||||
Region string
|
||||
|
||||
// SecretID is secret portion of the ACL token used for the request
|
||||
SecretID string
|
||||
}
|
||||
|
||||
func (w WriteRequest) RequestRegion() string {
|
||||
|
@ -5324,3 +5356,308 @@ func IsRecoverable(e error) bool {
|
|||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ACLPolicy is used to represent an ACL policy
|
||||
type ACLPolicy struct {
|
||||
Name string // Unique name
|
||||
Description string // Human readable
|
||||
Rules string // HCL or JSON format
|
||||
Hash []byte
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
// SetHash is used to compute and set the hash of the ACL policy
|
||||
func (c *ACLPolicy) SetHash() []byte {
|
||||
// Initialize a 256bit Blake2 hash (32 bytes)
|
||||
hash, err := blake2b.New256(nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Write all the user set fields
|
||||
hash.Write([]byte(c.Name))
|
||||
hash.Write([]byte(c.Description))
|
||||
hash.Write([]byte(c.Rules))
|
||||
|
||||
// Finalize the hash
|
||||
hashVal := hash.Sum(nil)
|
||||
|
||||
// Set and return the hash
|
||||
c.Hash = hashVal
|
||||
return hashVal
|
||||
}
|
||||
|
||||
func (a *ACLPolicy) Stub() *ACLPolicyListStub {
|
||||
return &ACLPolicyListStub{
|
||||
Name: a.Name,
|
||||
Description: a.Description,
|
||||
Hash: a.Hash,
|
||||
CreateIndex: a.CreateIndex,
|
||||
ModifyIndex: a.ModifyIndex,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ACLPolicy) Validate() error {
|
||||
var mErr multierror.Error
|
||||
if !validPolicyName.MatchString(a.Name) {
|
||||
err := fmt.Errorf("invalid name '%s'", a.Name)
|
||||
mErr.Errors = append(mErr.Errors, err)
|
||||
}
|
||||
if _, err := acl.Parse(a.Rules); err != nil {
|
||||
err = fmt.Errorf("failed to parse rules: %v", err)
|
||||
mErr.Errors = append(mErr.Errors, err)
|
||||
}
|
||||
if len(a.Description) > maxPolicyDescriptionLength {
|
||||
err := fmt.Errorf("description longer than %d", maxPolicyDescriptionLength)
|
||||
mErr.Errors = append(mErr.Errors, err)
|
||||
}
|
||||
return mErr.ErrorOrNil()
|
||||
}
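Illustrative only: an upsert path would typically validate and hash each policy before it reaches the state store; prepareACLPolicies is an assumed helper name, not part of the patch.
func prepareACLPolicies(policies []*ACLPolicy) error {
    for _, policy := range policies {
        // Reject malformed policies before they are written.
        if err := policy.Validate(); err != nil {
            return fmt.Errorf("policy %q invalid: %v", policy.Name, err)
        }
        // Compute the hash used for change detection.
        policy.SetHash()
    }
    return nil
}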
|
||||
|
||||
// ACLPolicyListStub is used for listing ACL policies
|
||||
type ACLPolicyListStub struct {
|
||||
Name string
|
||||
Description string
|
||||
Hash []byte
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
// ACLPolicyListRequest is used to request a list of policies
|
||||
type ACLPolicyListRequest struct {
|
||||
QueryOptions
|
||||
}
|
||||
|
||||
// ACLPolicySpecificRequest is used to query a specific policy
|
||||
type ACLPolicySpecificRequest struct {
|
||||
Name string
|
||||
QueryOptions
|
||||
}
|
||||
|
||||
// ACLPolicySetRequest is used to query a set of policies
|
||||
type ACLPolicySetRequest struct {
|
||||
Names []string
|
||||
QueryOptions
|
||||
}
|
||||
|
||||
// ACLPolicyListResponse is used for a list request
|
||||
type ACLPolicyListResponse struct {
|
||||
Policies []*ACLPolicyListStub
|
||||
QueryMeta
|
||||
}
|
||||
|
||||
// SingleACLPolicyResponse is used to return a single policy
|
||||
type SingleACLPolicyResponse struct {
|
||||
Policy *ACLPolicy
|
||||
QueryMeta
|
||||
}
|
||||
|
||||
// ACLPolicySetResponse is used to return a set of policies
|
||||
type ACLPolicySetResponse struct {
|
||||
Policies map[string]*ACLPolicy
|
||||
QueryMeta
|
||||
}
|
||||
|
||||
// ACLPolicyDeleteRequest is used to delete a set of policies
|
||||
type ACLPolicyDeleteRequest struct {
|
||||
Names []string
|
||||
WriteRequest
|
||||
}
|
||||
|
||||
// ACLPolicyUpsertRequest is used to upsert a set of policies
|
||||
type ACLPolicyUpsertRequest struct {
|
||||
Policies []*ACLPolicy
|
||||
WriteRequest
|
||||
}
|
||||
|
||||
// ACLToken represents a client token which is used to authenticate
|
||||
type ACLToken struct {
|
||||
AccessorID string // Public Accessor ID (UUID)
|
||||
SecretID string // Secret ID, private (UUID)
|
||||
Name string // Human friendly name
|
||||
Type string // Client or Management
|
||||
Policies []string // Policies this token ties to
|
||||
Global bool // Global or Region local
|
||||
Hash []byte
|
||||
CreateTime time.Time // Time of creation
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
var (
|
||||
// AnonymousACLToken is used when no SecretID is provided, and the
|
||||
// request is made anonymously.
|
||||
AnonymousACLToken = &ACLToken{
|
||||
AccessorID: "anonymous",
|
||||
Name: "Anonymous Token",
|
||||
Type: ACLClientToken,
|
||||
Policies: []string{"anonymous"},
|
||||
Global: false,
|
||||
}
|
||||
)
|
||||
|
||||
type ACLTokenListStub struct {
|
||||
AccessorID string
|
||||
Name string
|
||||
Type string
|
||||
Policies []string
|
||||
Global bool
|
||||
Hash []byte
|
||||
CreateTime time.Time
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
// SetHash is used to compute and set the hash of the ACL token
|
||||
func (a *ACLToken) SetHash() []byte {
|
||||
// Initialize a 256bit Blake2 hash (32 bytes)
|
||||
hash, err := blake2b.New256(nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Write all the user set fields
|
||||
hash.Write([]byte(a.Name))
|
||||
hash.Write([]byte(a.Type))
|
||||
for _, policyName := range a.Policies {
|
||||
hash.Write([]byte(policyName))
|
||||
}
|
||||
if a.Global {
|
||||
hash.Write([]byte("global"))
|
||||
} else {
|
||||
hash.Write([]byte("local"))
|
||||
}
|
||||
|
||||
// Finalize the hash
|
||||
hashVal := hash.Sum(nil)
|
||||
|
||||
// Set and return the hash
|
||||
a.Hash = hashVal
|
||||
return hashVal
|
||||
}
|
||||
|
||||
func (a *ACLToken) Stub() *ACLTokenListStub {
|
||||
return &ACLTokenListStub{
|
||||
AccessorID: a.AccessorID,
|
||||
Name: a.Name,
|
||||
Type: a.Type,
|
||||
Policies: a.Policies,
|
||||
Global: a.Global,
|
||||
Hash: a.Hash,
|
||||
CreateTime: a.CreateTime,
|
||||
CreateIndex: a.CreateIndex,
|
||||
ModifyIndex: a.ModifyIndex,
|
||||
}
|
||||
}
|
||||
|
||||
// Validate is used to sanity check a token
|
||||
func (a *ACLToken) Validate() error {
|
||||
var mErr multierror.Error
|
||||
if len(a.Name) > maxTokenNameLength {
|
||||
mErr.Errors = append(mErr.Errors, fmt.Errorf("token name too long"))
|
||||
}
|
||||
switch a.Type {
|
||||
case ACLClientToken:
|
||||
if len(a.Policies) == 0 {
|
||||
mErr.Errors = append(mErr.Errors, fmt.Errorf("client token missing policies"))
|
||||
}
|
||||
case ACLManagementToken:
|
||||
if len(a.Policies) != 0 {
|
||||
mErr.Errors = append(mErr.Errors, fmt.Errorf("management token cannot be associated with policies"))
|
||||
}
|
||||
default:
|
||||
mErr.Errors = append(mErr.Errors, fmt.Errorf("token type must be client or management"))
|
||||
}
|
||||
return mErr.ErrorOrNil()
|
||||
}
|
||||
|
||||
// PolicySubset checks if a given set of policies is a subset of the token's policies
|
||||
func (a *ACLToken) PolicySubset(policies []string) bool {
|
||||
// Hot-path the management tokens, superset of all policies.
|
||||
if a.Type == ACLManagementToken {
|
||||
return true
|
||||
}
|
||||
associatedPolicies := make(map[string]struct{}, len(a.Policies))
|
||||
for _, policy := range a.Policies {
|
||||
associatedPolicies[policy] = struct{}{}
|
||||
}
|
||||
for _, policy := range policies {
|
||||
if _, ok := associatedPolicies[policy]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
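As an illustrative sketch (helper name assumed, and the imports are those already used elsewhere in this package), a token resolver can combine PolicySubset with CompileACLObject: management tokens map straight to the management ACL, while client tokens must be associated with every policy they try to use.
func aclForToken(cache *lru.TwoQueueCache, token *ACLToken, policies []*ACLPolicy) (*acl.ACL, error) {
    // Management tokens are allowed to do anything.
    if token.Type == ACLManagementToken {
        return acl.ManagementACL, nil
    }
    // The token may only use policies it is associated with.
    names := make([]string, 0, len(policies))
    for _, p := range policies {
        names = append(names, p.Name)
    }
    if !token.PolicySubset(names) {
        return nil, ErrPermissionDenied
    }
    return CompileACLObject(cache, policies)
}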
|
||||
|
||||
// ACLTokenListRequest is used to request a list of tokens
|
||||
type ACLTokenListRequest struct {
|
||||
GlobalOnly bool
|
||||
QueryOptions
|
||||
}
|
||||
|
||||
// ACLTokenSpecificRequest is used to query a specific token
|
||||
type ACLTokenSpecificRequest struct {
|
||||
AccessorID string
|
||||
QueryOptions
|
||||
}
|
||||
|
||||
// ACLTokenSetRequest is used to query a set of tokens
|
||||
type ACLTokenSetRequest struct {
|
||||
AccessorIDS []string
|
||||
QueryOptions
|
||||
}
|
||||
|
||||
// ACLTokenListResponse is used for a list request
|
||||
type ACLTokenListResponse struct {
|
||||
Tokens []*ACLTokenListStub
|
||||
QueryMeta
|
||||
}
|
||||
|
||||
// SingleACLTokenResponse is used to return a single token
|
||||
type SingleACLTokenResponse struct {
|
||||
Token *ACLToken
|
||||
QueryMeta
|
||||
}
|
||||
|
||||
// ACLTokenSetResponse is used to return a set of tokens
|
||||
type ACLTokenSetResponse struct {
|
||||
Tokens map[string]*ACLToken // Keyed by Accessor ID
|
||||
QueryMeta
|
||||
}
|
||||
|
||||
// ResolveACLTokenRequest is used to resolve a specific token
|
||||
type ResolveACLTokenRequest struct {
|
||||
SecretID string
|
||||
QueryOptions
|
||||
}
|
||||
|
||||
// ResolveACLTokenResponse is used to resolve a single token
|
||||
type ResolveACLTokenResponse struct {
|
||||
Token *ACLToken
|
||||
QueryMeta
|
||||
}
|
||||
|
||||
// ACLTokenDeleteRequest is used to delete a set of tokens
|
||||
type ACLTokenDeleteRequest struct {
|
||||
AccessorIDs []string
|
||||
WriteRequest
|
||||
}
|
||||
|
||||
// ACLTokenBootstrapRequest is used to bootstrap ACLs
|
||||
type ACLTokenBootstrapRequest struct {
|
||||
Token *ACLToken // Not client specifiable
|
||||
WriteRequest
|
||||
}
|
||||
|
||||
// ACLTokenUpsertRequest is used to upsert a set of tokens
|
||||
type ACLTokenUpsertRequest struct {
|
||||
Tokens []*ACLToken
|
||||
WriteRequest
|
||||
}
|
||||
|
||||
// ACLTokenUpsertResponse is used to return from an ACLTokenUpsertRequest
|
||||
type ACLTokenUpsertResponse struct {
|
||||
Tokens []*ACLToken
|
||||
WriteMeta
|
||||
}
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/kr/pretty"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestJob_Validate(t *testing.T) {
|
||||
|
@ -2264,3 +2265,109 @@ func TestIsRecoverable(t *testing.T) {
|
|||
t.Errorf("Explicitly recoverable errors *should* be recoverable")
|
||||
}
|
||||
}
|
||||
|
||||
func TestACLTokenValidate(t *testing.T) {
|
||||
tk := &ACLToken{}
|
||||
|
||||
// Missing a type
|
||||
err := tk.Validate()
|
||||
assert.NotNil(t, err)
|
||||
if !strings.Contains(err.Error(), "client or management") {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
|
||||
// Missing policies
|
||||
tk.Type = ACLClientToken
|
||||
err = tk.Validate()
|
||||
assert.NotNil(t, err)
|
||||
if !strings.Contains(err.Error(), "missing policies") {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
|
||||
// Invalid policies
|
||||
tk.Type = ACLManagementToken
|
||||
tk.Policies = []string{"foo"}
|
||||
err = tk.Validate()
|
||||
assert.NotNil(t, err)
|
||||
if !strings.Contains(err.Error(), "associated with policies") {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
|
||||
// Name too long
|
||||
tk.Name = GenerateUUID() + GenerateUUID()
|
||||
tk.Policies = nil
|
||||
err = tk.Validate()
|
||||
assert.NotNil(t, err)
|
||||
if !strings.Contains(err.Error(), "too long") {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
|
||||
// Make it valid
|
||||
tk.Name = "foo"
|
||||
err = tk.Validate()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestACLTokenPolicySubset(t *testing.T) {
|
||||
tk := &ACLToken{
|
||||
Type: ACLClientToken,
|
||||
Policies: []string{"foo", "bar", "baz"},
|
||||
}
|
||||
|
||||
assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
|
||||
assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
|
||||
assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
|
||||
assert.Equal(t, true, tk.PolicySubset([]string{}))
|
||||
assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"}))
|
||||
assert.Equal(t, false, tk.PolicySubset([]string{"new"}))
|
||||
|
||||
tk = &ACLToken{
|
||||
Type: ACLManagementToken,
|
||||
}
|
||||
|
||||
assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
|
||||
assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
|
||||
assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
|
||||
assert.Equal(t, true, tk.PolicySubset([]string{}))
|
||||
assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"}))
|
||||
assert.Equal(t, true, tk.PolicySubset([]string{"new"}))
|
||||
}
|
||||
|
||||
func TestACLTokenSetHash(t *testing.T) {
|
||||
tk := &ACLToken{
|
||||
Name: "foo",
|
||||
Type: ACLClientToken,
|
||||
Policies: []string{"foo", "bar"},
|
||||
Global: false,
|
||||
}
|
||||
out1 := tk.SetHash()
|
||||
assert.NotNil(t, out1)
|
||||
assert.NotNil(t, tk.Hash)
|
||||
assert.Equal(t, out1, tk.Hash)
|
||||
|
||||
tk.Policies = []string{"foo"}
|
||||
out2 := tk.SetHash()
|
||||
assert.NotNil(t, out2)
|
||||
assert.NotNil(t, tk.Hash)
|
||||
assert.Equal(t, out2, tk.Hash)
|
||||
assert.NotEqual(t, out1, out2)
|
||||
}
|
||||
|
||||
func TestACLPolicySetHash(t *testing.T) {
|
||||
ap := &ACLPolicy{
|
||||
Name: "foo",
|
||||
Description: "great policy",
|
||||
Rules: "node { policy = \"read\" }",
|
||||
}
|
||||
out1 := ap.SetHash()
|
||||
assert.NotNil(t, out1)
|
||||
assert.NotNil(t, ap.Hash)
|
||||
assert.Equal(t, out1, ap.Hash)
|
||||
|
||||
ap.Rules = "node { policy = \"write\" }"
|
||||
out2 := ap.SetHash()
|
||||
assert.NotNil(t, out2)
|
||||
assert.NotNil(t, ap.Hash)
|
||||
assert.Equal(t, out2, ap.Hash)
|
||||
assert.NotEqual(t, out1, out2)
|
||||
}
|
||||
|
|
|
@ -42,6 +42,7 @@ type TestServerConfig struct {
|
|||
Server *ServerConfig `json:"server,omitempty"`
|
||||
Client *ClientConfig `json:"client,omitempty"`
|
||||
Vault *VaultConfig `json:"vault,omitempty"`
|
||||
ACL *ACLConfig `json:"acl,omitempty"`
|
||||
DevMode bool `json:"-"`
|
||||
Stdout, Stderr io.Writer `json:"-"`
|
||||
}
|
||||
|
@ -76,6 +77,11 @@ type VaultConfig struct {
|
|||
Enabled bool `json:"enabled"`
|
||||
}
|
||||
|
||||
// ACLConfig is used to configure ACLs
|
||||
type ACLConfig struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
}
|
||||
|
||||
// ServerConfigCallback is a function interface which can be
|
||||
// passed to NewTestServerConfig to modify the server config.
|
||||
type ServerConfigCallback func(c *TestServerConfig)
|
||||
|
@ -110,6 +116,9 @@ func defaultServerConfig() *TestServerConfig {
|
|||
Vault: &VaultConfig{
|
||||
Enabled: false,
|
||||
},
|
||||
ACL: &ACLConfig{
|
||||
Enabled: false,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
|
207
vendor/golang.org/x/crypto/blake2b/blake2b.go
generated
vendored
Normal file
|
@ -0,0 +1,207 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693
|
||||
// and the extendable output function (XOF) BLAKE2Xb.
|
||||
//
|
||||
// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf
|
||||
// and for BLAKE2Xb see https://blake2.net/blake2x.pdf
|
||||
//
|
||||
// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512).
|
||||
// If you need a secret-key MAC (message authentication code), use the New512
|
||||
// function with a non-nil key.
|
||||
//
|
||||
// BLAKE2X is a construction to compute hash values larger than 64 bytes. It
|
||||
// can produce hash values between 0 and 4 GiB.
|
||||
package blake2b
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"hash"
|
||||
)
|
||||
|
||||
const (
|
||||
// The blocksize of BLAKE2b in bytes.
|
||||
BlockSize = 128
|
||||
// The hash size of BLAKE2b-512 in bytes.
|
||||
Size = 64
|
||||
// The hash size of BLAKE2b-384 in bytes.
|
||||
Size384 = 48
|
||||
// The hash size of BLAKE2b-256 in bytes.
|
||||
Size256 = 32
|
||||
)
|
||||
|
||||
var (
|
||||
useAVX2 bool
|
||||
useAVX bool
|
||||
useSSE4 bool
|
||||
)
|
||||
|
||||
var errKeySize = errors.New("blake2b: invalid key size")
|
||||
|
||||
var iv = [8]uint64{
|
||||
0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
|
||||
0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
|
||||
}
|
||||
|
||||
// Sum512 returns the BLAKE2b-512 checksum of the data.
|
||||
func Sum512(data []byte) [Size]byte {
|
||||
var sum [Size]byte
|
||||
checkSum(&sum, Size, data)
|
||||
return sum
|
||||
}
|
||||
|
||||
// Sum384 returns the BLAKE2b-384 checksum of the data.
|
||||
func Sum384(data []byte) [Size384]byte {
|
||||
var sum [Size]byte
|
||||
var sum384 [Size384]byte
|
||||
checkSum(&sum, Size384, data)
|
||||
copy(sum384[:], sum[:Size384])
|
||||
return sum384
|
||||
}
|
||||
|
||||
// Sum256 returns the BLAKE2b-256 checksum of the data.
|
||||
func Sum256(data []byte) [Size256]byte {
|
||||
var sum [Size]byte
|
||||
var sum256 [Size256]byte
|
||||
checkSum(&sum, Size256, data)
|
||||
copy(sum256[:], sum[:Size256])
|
||||
return sum256
|
||||
}
|
||||
|
||||
// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil
|
||||
// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
|
||||
func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) }
|
||||
|
||||
// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil
|
||||
// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
|
||||
func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) }
|
||||
|
||||
// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil
|
||||
// key turns the hash into a MAC. The key must between zero and 64 bytes long.
|
||||
func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) }
|
||||
|
||||
func newDigest(hashSize int, key []byte) (*digest, error) {
|
||||
if len(key) > Size {
|
||||
return nil, errKeySize
|
||||
}
|
||||
d := &digest{
|
||||
size: hashSize,
|
||||
keyLen: len(key),
|
||||
}
|
||||
copy(d.key[:], key)
|
||||
d.Reset()
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func checkSum(sum *[Size]byte, hashSize int, data []byte) {
|
||||
h := iv
|
||||
h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24)
|
||||
var c [2]uint64
|
||||
|
||||
if length := len(data); length > BlockSize {
|
||||
n := length &^ (BlockSize - 1)
|
||||
if length == n {
|
||||
n -= BlockSize
|
||||
}
|
||||
hashBlocks(&h, &c, 0, data[:n])
|
||||
data = data[n:]
|
||||
}
|
||||
|
||||
var block [BlockSize]byte
|
||||
offset := copy(block[:], data)
|
||||
remaining := uint64(BlockSize - offset)
|
||||
if c[0] < remaining {
|
||||
c[1]--
|
||||
}
|
||||
c[0] -= remaining
|
||||
|
||||
hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])
|
||||
|
||||
for i, v := range h[:(hashSize+7)/8] {
|
||||
binary.LittleEndian.PutUint64(sum[8*i:], v)
|
||||
}
|
||||
}
|
||||
|
||||
type digest struct {
|
||||
h [8]uint64
|
||||
c [2]uint64
|
||||
size int
|
||||
block [BlockSize]byte
|
||||
offset int
|
||||
|
||||
key [BlockSize]byte
|
||||
keyLen int
|
||||
}
|
||||
|
||||
func (d *digest) BlockSize() int { return BlockSize }
|
||||
|
||||
func (d *digest) Size() int { return d.size }
|
||||
|
||||
func (d *digest) Reset() {
|
||||
d.h = iv
|
||||
d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24)
|
||||
d.offset, d.c[0], d.c[1] = 0, 0, 0
|
||||
if d.keyLen > 0 {
|
||||
d.block = d.key
|
||||
d.offset = BlockSize
|
||||
}
|
||||
}
|
||||
|
||||
func (d *digest) Write(p []byte) (n int, err error) {
|
||||
n = len(p)
|
||||
|
||||
if d.offset > 0 {
|
||||
remaining := BlockSize - d.offset
|
||||
if n <= remaining {
|
||||
d.offset += copy(d.block[d.offset:], p)
|
||||
return
|
||||
}
|
||||
copy(d.block[d.offset:], p[:remaining])
|
||||
hashBlocks(&d.h, &d.c, 0, d.block[:])
|
||||
d.offset = 0
|
||||
p = p[remaining:]
|
||||
}
|
||||
|
||||
if length := len(p); length > BlockSize {
|
||||
nn := length &^ (BlockSize - 1)
|
||||
if length == nn {
|
||||
nn -= BlockSize
|
||||
}
|
||||
hashBlocks(&d.h, &d.c, 0, p[:nn])
|
||||
p = p[nn:]
|
||||
}
|
||||
|
||||
if len(p) > 0 {
|
||||
d.offset += copy(d.block[:], p)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (d *digest) Sum(sum []byte) []byte {
|
||||
var hash [Size]byte
|
||||
d.finalize(&hash)
|
||||
return append(sum, hash[:d.size]...)
|
||||
}
|
||||
|
||||
func (d *digest) finalize(hash *[Size]byte) {
|
||||
var block [BlockSize]byte
|
||||
copy(block[:], d.block[:d.offset])
|
||||
remaining := uint64(BlockSize - d.offset)
|
||||
|
||||
c := d.c
|
||||
if c[0] < remaining {
|
||||
c[1]--
|
||||
}
|
||||
c[0] -= remaining
|
||||
|
||||
h := d.h
|
||||
hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])
|
||||
|
||||
for i, v := range h {
|
||||
binary.LittleEndian.PutUint64(hash[8*i:], v)
|
||||
}
|
||||
}
|
43
vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
generated
vendored
Normal file
|
@ -0,0 +1,43 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.7,amd64,!gccgo,!appengine
|
||||
|
||||
package blake2b
|
||||
|
||||
func init() {
|
||||
useAVX2 = supportsAVX2()
|
||||
useAVX = supportsAVX()
|
||||
useSSE4 = supportsSSE4()
|
||||
}
|
||||
|
||||
//go:noescape
|
||||
func supportsSSE4() bool
|
||||
|
||||
//go:noescape
|
||||
func supportsAVX() bool
|
||||
|
||||
//go:noescape
|
||||
func supportsAVX2() bool
|
||||
|
||||
//go:noescape
|
||||
func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
|
||||
|
||||
//go:noescape
|
||||
func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
|
||||
|
||||
//go:noescape
|
||||
func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
|
||||
|
||||
func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
|
||||
if useAVX2 {
|
||||
hashBlocksAVX2(h, c, flag, blocks)
|
||||
} else if useAVX {
|
||||
hashBlocksAVX(h, c, flag, blocks)
|
||||
} else if useSSE4 {
|
||||
hashBlocksSSE4(h, c, flag, blocks)
|
||||
} else {
|
||||
hashBlocksGeneric(h, c, flag, blocks)
|
||||
}
|
||||
}
|
762
vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
generated
vendored
Normal file
|
@ -0,0 +1,762 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.7,amd64,!gccgo,!appengine
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
|
||||
DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
|
||||
DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
|
||||
DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
|
||||
GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32
|
||||
|
||||
DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
|
||||
DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
|
||||
DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
|
||||
DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
|
||||
GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32
|
||||
|
||||
DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
|
||||
DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
|
||||
DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
|
||||
DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
|
||||
GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32
|
||||
|
||||
DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
|
||||
DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
|
||||
DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
|
||||
DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
|
||||
GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32
|
||||
|
||||
DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
|
||||
DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
|
||||
GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
|
||||
DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
|
||||
GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
|
||||
DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
|
||||
GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
|
||||
DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
|
||||
GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
|
||||
DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
|
||||
GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
|
||||
DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
|
||||
GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
|
||||
#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
|
||||
#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
|
||||
#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
|
||||
#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39
|
||||
|
||||
#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
|
||||
VPADDQ m0, Y0, Y0; \
|
||||
VPADDQ Y1, Y0, Y0; \
|
||||
VPXOR Y0, Y3, Y3; \
|
||||
VPSHUFD $-79, Y3, Y3; \
|
||||
VPADDQ Y3, Y2, Y2; \
|
||||
VPXOR Y2, Y1, Y1; \
|
||||
VPSHUFB c40, Y1, Y1; \
|
||||
VPADDQ m1, Y0, Y0; \
|
||||
VPADDQ Y1, Y0, Y0; \
|
||||
VPXOR Y0, Y3, Y3; \
|
||||
VPSHUFB c48, Y3, Y3; \
|
||||
VPADDQ Y3, Y2, Y2; \
|
||||
VPXOR Y2, Y1, Y1; \
|
||||
VPADDQ Y1, Y1, t; \
|
||||
VPSRLQ $63, Y1, Y1; \
|
||||
VPXOR t, Y1, Y1; \
|
||||
VPERMQ_0x39_Y1_Y1; \
|
||||
VPERMQ_0x4E_Y2_Y2; \
|
||||
VPERMQ_0x93_Y3_Y3; \
|
||||
VPADDQ m2, Y0, Y0; \
|
||||
VPADDQ Y1, Y0, Y0; \
|
||||
VPXOR Y0, Y3, Y3; \
|
||||
VPSHUFD $-79, Y3, Y3; \
|
||||
VPADDQ Y3, Y2, Y2; \
|
||||
VPXOR Y2, Y1, Y1; \
|
||||
VPSHUFB c40, Y1, Y1; \
|
||||
VPADDQ m3, Y0, Y0; \
|
||||
VPADDQ Y1, Y0, Y0; \
|
||||
VPXOR Y0, Y3, Y3; \
|
||||
VPSHUFB c48, Y3, Y3; \
|
||||
VPADDQ Y3, Y2, Y2; \
|
||||
VPXOR Y2, Y1, Y1; \
|
||||
VPADDQ Y1, Y1, t; \
|
||||
VPSRLQ $63, Y1, Y1; \
|
||||
VPXOR t, Y1, Y1; \
|
||||
VPERMQ_0x39_Y3_Y3; \
|
||||
VPERMQ_0x4E_Y2_Y2; \
|
||||
VPERMQ_0x93_Y1_Y1
|
||||
|
||||
#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
|
||||
#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
|
||||
#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
|
||||
#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
|
||||
#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E
|
||||
|
||||
#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
|
||||
#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
|
||||
#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
|
||||
#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
|
||||
#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n
|
||||
|
||||
#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
|
||||
#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
|
||||
#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
|
||||
#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
|
||||
#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01
|
||||
|
||||
#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
|
||||
#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
|
||||
#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
|
||||
#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
|
||||
#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01
|
||||
|
||||
#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
|
||||
#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01
|
||||
|
||||
// load msg: Y12 = (i0, i1, i2, i3)
|
||||
// i0, i1, i2, i3 must not be 0
|
||||
#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
|
||||
VMOVQ_SI_X12(i0*8); \
|
||||
VMOVQ_SI_X11(i2*8); \
|
||||
VPINSRQ_1_SI_X12(i1*8); \
|
||||
VPINSRQ_1_SI_X11(i3*8); \
|
||||
VINSERTI128 $1, X11, Y12, Y12
|
||||
|
||||
// load msg: Y13 = (i0, i1, i2, i3)
|
||||
// i0, i1, i2, i3 must not be 0
|
||||
#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
|
||||
VMOVQ_SI_X13(i0*8); \
|
||||
VMOVQ_SI_X11(i2*8); \
|
||||
VPINSRQ_1_SI_X13(i1*8); \
|
||||
VPINSRQ_1_SI_X11(i3*8); \
|
||||
VINSERTI128 $1, X11, Y13, Y13
|
||||
|
||||
// load msg: Y14 = (i0, i1, i2, i3)
|
||||
// i0, i1, i2, i3 must not be 0
|
||||
#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
|
||||
VMOVQ_SI_X14(i0*8); \
|
||||
VMOVQ_SI_X11(i2*8); \
|
||||
VPINSRQ_1_SI_X14(i1*8); \
|
||||
VPINSRQ_1_SI_X11(i3*8); \
|
||||
VINSERTI128 $1, X11, Y14, Y14
|
||||
|
||||
// load msg: Y15 = (i0, i1, i2, i3)
|
||||
// i0, i1, i2, i3 must not be 0
|
||||
#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
|
||||
VMOVQ_SI_X15(i0*8); \
|
||||
VMOVQ_SI_X11(i2*8); \
|
||||
VPINSRQ_1_SI_X15(i1*8); \
|
||||
VPINSRQ_1_SI_X11(i3*8); \
|
||||
VINSERTI128 $1, X11, Y15, Y15
|
||||
|
||||
#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
|
||||
VMOVQ_SI_X12_0; \
|
||||
VMOVQ_SI_X11(4*8); \
|
||||
VPINSRQ_1_SI_X12(2*8); \
|
||||
VPINSRQ_1_SI_X11(6*8); \
|
||||
VINSERTI128 $1, X11, Y12, Y12; \
|
||||
LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \
|
||||
LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
|
||||
LOAD_MSG_AVX2_Y15(9, 11, 13, 15)
|
||||
|
||||
#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
|
||||
LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
|
||||
LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
|
||||
VMOVQ_SI_X11(11*8); \
|
||||
VPSHUFD $0x4E, 0*8(SI), X14; \
|
||||
VPINSRQ_1_SI_X11(5*8); \
|
||||
VINSERTI128 $1, X11, Y14, Y14; \
|
||||
LOAD_MSG_AVX2_Y15(12, 2, 7, 3)
|
||||
|
||||
#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
|
||||
VMOVQ_SI_X11(5*8); \
|
||||
VMOVDQU 11*8(SI), X12; \
|
||||
VPINSRQ_1_SI_X11(15*8); \
|
||||
VINSERTI128 $1, X11, Y12, Y12; \
|
||||
VMOVQ_SI_X13(8*8); \
|
||||
VMOVQ_SI_X11(2*8); \
|
||||
VPINSRQ_1_SI_X13_0; \
|
||||
VPINSRQ_1_SI_X11(13*8); \
|
||||
VINSERTI128 $1, X11, Y13, Y13; \
|
||||
LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
|
||||
LOAD_MSG_AVX2_Y15(14, 6, 1, 4)
|
||||
|
||||
#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
|
||||
LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
|
||||
LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
|
||||
LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \
|
||||
VMOVQ_SI_X15(6*8); \
|
||||
VMOVQ_SI_X11_0; \
|
||||
VPINSRQ_1_SI_X15(10*8); \
|
||||
VPINSRQ_1_SI_X11(8*8); \
|
||||
VINSERTI128 $1, X11, Y15, Y15
|
||||
|
||||
#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
|
||||
LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \
|
||||
VMOVQ_SI_X13_0; \
|
||||
VMOVQ_SI_X11(4*8); \
|
||||
VPINSRQ_1_SI_X13(7*8); \
|
||||
VPINSRQ_1_SI_X11(15*8); \
|
||||
VINSERTI128 $1, X11, Y13, Y13; \
|
||||
LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
|
||||
LOAD_MSG_AVX2_Y15(1, 12, 8, 13)
|
||||
|
||||
#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
|
||||
VMOVQ_SI_X12(2*8); \
|
||||
VMOVQ_SI_X11_0; \
|
||||
VPINSRQ_1_SI_X12(6*8); \
|
||||
VPINSRQ_1_SI_X11(8*8); \
|
||||
VINSERTI128 $1, X11, Y12, Y12; \
|
||||
LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
|
||||
LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \
|
||||
LOAD_MSG_AVX2_Y15(13, 5, 14, 9)
|
||||
|
||||
#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
|
||||
LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \
|
||||
LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
|
||||
VMOVQ_SI_X14_0; \
|
||||
VPSHUFD $0x4E, 8*8(SI), X11; \
|
||||
VPINSRQ_1_SI_X14(6*8); \
|
||||
VINSERTI128 $1, X11, Y14, Y14; \
|
||||
LOAD_MSG_AVX2_Y15(7, 3, 2, 11)
|
||||
|
||||
#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
|
||||
LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
|
||||
LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
|
||||
LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \
|
||||
VMOVQ_SI_X15_0; \
|
||||
VMOVQ_SI_X11(6*8); \
|
||||
VPINSRQ_1_SI_X15(4*8); \
|
||||
VPINSRQ_1_SI_X11(10*8); \
|
||||
VINSERTI128 $1, X11, Y15, Y15
|
||||
|
||||
#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
|
||||
VMOVQ_SI_X12(6*8); \
|
||||
VMOVQ_SI_X11(11*8); \
|
||||
VPINSRQ_1_SI_X12(14*8); \
|
||||
VPINSRQ_1_SI_X11_0; \
|
||||
VINSERTI128 $1, X11, Y12, Y12; \
|
||||
LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
|
||||
VMOVQ_SI_X11(1*8); \
|
||||
VMOVDQU 12*8(SI), X14; \
|
||||
VPINSRQ_1_SI_X11(10*8); \
|
||||
VINSERTI128 $1, X11, Y14, Y14; \
|
||||
VMOVQ_SI_X15(2*8); \
|
||||
VMOVDQU 4*8(SI), X11; \
|
||||
VPINSRQ_1_SI_X15(7*8); \
|
||||
VINSERTI128 $1, X11, Y15, Y15
|
||||
|
||||
#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
|
||||
LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \
|
||||
VMOVQ_SI_X13(2*8); \
|
||||
VPSHUFD $0x4E, 5*8(SI), X11; \
|
||||
VPINSRQ_1_SI_X13(4*8); \
|
||||
VINSERTI128 $1, X11, Y13, Y13; \
|
||||
LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
|
||||
VMOVQ_SI_X15(11*8); \
|
||||
VMOVQ_SI_X11(12*8); \
|
||||
VPINSRQ_1_SI_X15(14*8); \
|
||||
VPINSRQ_1_SI_X11_0; \
|
||||
VINSERTI128 $1, X11, Y15, Y15
|
||||
|
||||
// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
|
||||
TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment
|
||||
MOVQ h+0(FP), AX
|
||||
MOVQ c+8(FP), BX
|
||||
MOVQ flag+16(FP), CX
|
||||
MOVQ blocks_base+24(FP), SI
|
||||
MOVQ blocks_len+32(FP), DI
|
||||
|
||||
MOVQ SP, DX
|
||||
MOVQ SP, R9
|
||||
ADDQ $31, R9
|
||||
ANDQ $~31, R9
|
||||
MOVQ R9, SP
|
||||
|
||||
MOVQ CX, 16(SP)
|
||||
XORQ CX, CX
|
||||
MOVQ CX, 24(SP)
|
||||
|
||||
VMOVDQU ·AVX2_c40<>(SB), Y4
|
||||
VMOVDQU ·AVX2_c48<>(SB), Y5
|
||||
|
||||
VMOVDQU 0(AX), Y8
|
||||
VMOVDQU 32(AX), Y9
|
||||
VMOVDQU ·AVX2_iv0<>(SB), Y6
|
||||
VMOVDQU ·AVX2_iv1<>(SB), Y7
|
||||
|
||||
MOVQ 0(BX), R8
|
||||
MOVQ 8(BX), R9
|
||||
MOVQ R9, 8(SP)
|
||||
|
||||
loop:
|
||||
ADDQ $128, R8
|
||||
MOVQ R8, 0(SP)
|
||||
CMPQ R8, $128
|
||||
JGE noinc
|
||||
INCQ R9
|
||||
MOVQ R9, 8(SP)
|
||||
|
||||
noinc:
|
||||
VMOVDQA Y8, Y0
|
||||
VMOVDQA Y9, Y1
|
||||
VMOVDQA Y6, Y2
|
||||
VPXOR 0(SP), Y7, Y3
|
||||
|
||||
LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
|
||||
VMOVDQA Y12, 32(SP)
|
||||
VMOVDQA Y13, 64(SP)
|
||||
VMOVDQA Y14, 96(SP)
|
||||
VMOVDQA Y15, 128(SP)
|
||||
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
|
||||
LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
|
||||
VMOVDQA Y12, 160(SP)
|
||||
VMOVDQA Y13, 192(SP)
|
||||
VMOVDQA Y14, 224(SP)
|
||||
VMOVDQA Y15, 256(SP)
|
||||
|
||||
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
|
||||
LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
|
||||
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
|
||||
LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
|
||||
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
|
||||
LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
|
||||
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
|
||||
LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
|
||||
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
|
||||
LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
|
||||
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
|
||||
LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
|
||||
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
|
||||
LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
|
||||
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
|
||||
LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
|
||||
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
|
||||
|
||||
ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5)
|
||||
ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5)
|
||||
|
||||
VPXOR Y0, Y8, Y8
|
||||
VPXOR Y1, Y9, Y9
|
||||
VPXOR Y2, Y8, Y8
|
||||
VPXOR Y3, Y9, Y9
|
||||
|
||||
LEAQ 128(SI), SI
|
||||
SUBQ $128, DI
|
||||
JNE loop
|
||||
|
||||
MOVQ R8, 0(BX)
|
||||
MOVQ R9, 8(BX)
|
||||
|
||||
VMOVDQU Y8, 0(AX)
|
||||
VMOVDQU Y9, 32(AX)
|
||||
VZEROUPPER
|
||||
|
||||
MOVQ DX, SP
|
||||
RET
|
||||
|
||||
#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
|
||||
#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
|
||||
#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
|
||||
#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
|
||||
#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE
|
||||
|
||||
#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
|
||||
#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
|
||||
#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
|
||||
#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
|
||||
#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
|
||||
#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
|
||||
#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
|
||||
#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF
|
||||
|
||||
#define SHUFFLE_AVX() \
|
||||
VMOVDQA X6, X13; \
|
||||
VMOVDQA X2, X14; \
|
||||
VMOVDQA X4, X6; \
|
||||
VPUNPCKLQDQ_X13_X13_X15; \
|
||||
VMOVDQA X5, X4; \
|
||||
VMOVDQA X6, X5; \
|
||||
VPUNPCKHQDQ_X15_X7_X6; \
|
||||
VPUNPCKLQDQ_X7_X7_X15; \
|
||||
VPUNPCKHQDQ_X15_X13_X7; \
|
||||
VPUNPCKLQDQ_X3_X3_X15; \
|
||||
VPUNPCKHQDQ_X15_X2_X2; \
|
||||
VPUNPCKLQDQ_X14_X14_X15; \
|
||||
VPUNPCKHQDQ_X15_X3_X3; \
|
||||
|
||||
#define SHUFFLE_AVX_INV() \
|
||||
VMOVDQA X2, X13; \
|
||||
VMOVDQA X4, X14; \
|
||||
VPUNPCKLQDQ_X2_X2_X15; \
|
||||
VMOVDQA X5, X4; \
|
||||
VPUNPCKHQDQ_X15_X3_X2; \
|
||||
VMOVDQA X14, X5; \
|
||||
VPUNPCKLQDQ_X3_X3_X15; \
|
||||
VMOVDQA X6, X14; \
|
||||
VPUNPCKHQDQ_X15_X13_X3; \
|
||||
VPUNPCKLQDQ_X7_X7_X15; \
|
||||
VPUNPCKHQDQ_X15_X6_X6; \
|
||||
VPUNPCKLQDQ_X14_X14_X15; \
|
||||
VPUNPCKHQDQ_X15_X7_X7; \
|
||||
|
||||
#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
|
||||
VPADDQ m0, v0, v0; \
|
||||
VPADDQ v2, v0, v0; \
|
||||
VPADDQ m1, v1, v1; \
|
||||
VPADDQ v3, v1, v1; \
|
||||
VPXOR v0, v6, v6; \
|
||||
VPXOR v1, v7, v7; \
|
||||
VPSHUFD $-79, v6, v6; \
|
||||
VPSHUFD $-79, v7, v7; \
|
||||
VPADDQ v6, v4, v4; \
|
||||
VPADDQ v7, v5, v5; \
|
||||
VPXOR v4, v2, v2; \
|
||||
VPXOR v5, v3, v3; \
|
||||
VPSHUFB c40, v2, v2; \
|
||||
VPSHUFB c40, v3, v3; \
|
||||
VPADDQ m2, v0, v0; \
|
||||
VPADDQ v2, v0, v0; \
|
||||
VPADDQ m3, v1, v1; \
|
||||
VPADDQ v3, v1, v1; \
|
||||
VPXOR v0, v6, v6; \
|
||||
VPXOR v1, v7, v7; \
|
||||
VPSHUFB c48, v6, v6; \
|
||||
VPSHUFB c48, v7, v7; \
|
||||
VPADDQ v6, v4, v4; \
|
||||
VPADDQ v7, v5, v5; \
|
||||
VPXOR v4, v2, v2; \
|
||||
VPXOR v5, v3, v3; \
|
||||
VPADDQ v2, v2, t0; \
|
||||
VPSRLQ $63, v2, v2; \
|
||||
VPXOR t0, v2, v2; \
|
||||
VPADDQ v3, v3, t0; \
|
||||
VPSRLQ $63, v3, v3; \
|
||||
VPXOR t0, v3, v3
|
||||
|
||||
// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
|
||||
// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
|
||||
#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
|
||||
VMOVQ_SI_X12(i0*8); \
|
||||
VMOVQ_SI_X13(i2*8); \
|
||||
VMOVQ_SI_X14(i4*8); \
|
||||
VMOVQ_SI_X15(i6*8); \
|
||||
VPINSRQ_1_SI_X12(i1*8); \
|
||||
VPINSRQ_1_SI_X13(i3*8); \
|
||||
VPINSRQ_1_SI_X14(i5*8); \
|
||||
VPINSRQ_1_SI_X15(i7*8)
|
||||
|
||||
// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
|
||||
#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
|
||||
VMOVQ_SI_X12_0; \
|
||||
VMOVQ_SI_X13(4*8); \
|
||||
VMOVQ_SI_X14(1*8); \
|
||||
VMOVQ_SI_X15(5*8); \
|
||||
VPINSRQ_1_SI_X12(2*8); \
|
||||
VPINSRQ_1_SI_X13(6*8); \
|
||||
VPINSRQ_1_SI_X14(3*8); \
|
||||
VPINSRQ_1_SI_X15(7*8)
|
||||
|
||||
// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
|
||||
#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
|
||||
VPSHUFD $0x4E, 0*8(SI), X12; \
|
||||
VMOVQ_SI_X13(11*8); \
|
||||
VMOVQ_SI_X14(12*8); \
|
||||
VMOVQ_SI_X15(7*8); \
|
||||
VPINSRQ_1_SI_X13(5*8); \
|
||||
VPINSRQ_1_SI_X14(2*8); \
|
||||
VPINSRQ_1_SI_X15(3*8)
|
||||
|
||||
// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
|
||||
#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
|
||||
VMOVDQU 11*8(SI), X12; \
|
||||
VMOVQ_SI_X13(5*8); \
|
||||
VMOVQ_SI_X14(8*8); \
|
||||
VMOVQ_SI_X15(2*8); \
|
||||
VPINSRQ_1_SI_X13(15*8); \
|
||||
VPINSRQ_1_SI_X14_0; \
|
||||
VPINSRQ_1_SI_X15(13*8)
|
||||
|
||||
// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
|
||||
#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
|
||||
VMOVQ_SI_X12(2*8); \
|
||||
VMOVQ_SI_X13(4*8); \
|
||||
VMOVQ_SI_X14(6*8); \
|
||||
VMOVQ_SI_X15_0; \
|
||||
VPINSRQ_1_SI_X12(5*8); \
|
||||
VPINSRQ_1_SI_X13(15*8); \
|
||||
VPINSRQ_1_SI_X14(10*8); \
|
||||
VPINSRQ_1_SI_X15(8*8)
|
||||
|
||||
// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
|
||||
#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
|
||||
VMOVQ_SI_X12(9*8); \
|
||||
VMOVQ_SI_X13(2*8); \
|
||||
VMOVQ_SI_X14_0; \
|
||||
VMOVQ_SI_X15(4*8); \
|
||||
VPINSRQ_1_SI_X12(5*8); \
|
||||
VPINSRQ_1_SI_X13(10*8); \
|
||||
VPINSRQ_1_SI_X14(7*8); \
|
||||
VPINSRQ_1_SI_X15(15*8)
|
||||
|
||||
// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
|
||||
#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
|
||||
VMOVQ_SI_X12(2*8); \
|
||||
VMOVQ_SI_X13_0; \
|
||||
VMOVQ_SI_X14(12*8); \
|
||||
VMOVQ_SI_X15(11*8); \
|
||||
VPINSRQ_1_SI_X12(6*8); \
|
||||
VPINSRQ_1_SI_X13(8*8); \
|
||||
VPINSRQ_1_SI_X14(10*8); \
|
||||
VPINSRQ_1_SI_X15(3*8)
|
||||
|
||||
// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
|
||||
#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
|
||||
MOVQ 0*8(SI), X12; \
|
||||
VPSHUFD $0x4E, 8*8(SI), X13; \
|
||||
MOVQ 7*8(SI), X14; \
|
||||
MOVQ 2*8(SI), X15; \
|
||||
VPINSRQ_1_SI_X12(6*8); \
|
||||
VPINSRQ_1_SI_X14(3*8); \
|
||||
VPINSRQ_1_SI_X15(11*8)
|
||||
|
||||
// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
|
||||
#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
|
||||
MOVQ 6*8(SI), X12; \
|
||||
MOVQ 11*8(SI), X13; \
|
||||
MOVQ 15*8(SI), X14; \
|
||||
MOVQ 3*8(SI), X15; \
|
||||
VPINSRQ_1_SI_X12(14*8); \
|
||||
VPINSRQ_1_SI_X13_0; \
|
||||
VPINSRQ_1_SI_X14(9*8); \
|
||||
VPINSRQ_1_SI_X15(8*8)
|
||||
|
||||
// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
|
||||
#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
|
||||
MOVQ 5*8(SI), X12; \
|
||||
MOVQ 8*8(SI), X13; \
|
||||
MOVQ 0*8(SI), X14; \
|
||||
MOVQ 6*8(SI), X15; \
|
||||
VPINSRQ_1_SI_X12(15*8); \
|
||||
VPINSRQ_1_SI_X13(2*8); \
|
||||
VPINSRQ_1_SI_X14(4*8); \
|
||||
VPINSRQ_1_SI_X15(10*8)
|
||||
|
||||
// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
|
||||
#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
|
||||
VMOVDQU 12*8(SI), X12; \
|
||||
MOVQ 1*8(SI), X13; \
|
||||
MOVQ 2*8(SI), X14; \
|
||||
VPINSRQ_1_SI_X13(10*8); \
|
||||
VPINSRQ_1_SI_X14(7*8); \
|
||||
VMOVDQU 4*8(SI), X15
|
||||
|
||||
// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
|
||||
#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
|
||||
MOVQ 15*8(SI), X12; \
|
||||
MOVQ 3*8(SI), X13; \
|
||||
MOVQ 11*8(SI), X14; \
|
||||
MOVQ 12*8(SI), X15; \
|
||||
VPINSRQ_1_SI_X12(9*8); \
|
||||
VPINSRQ_1_SI_X13(13*8); \
|
||||
VPINSRQ_1_SI_X14(14*8); \
|
||||
VPINSRQ_1_SI_X15_0
|
||||
|
||||
// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
|
||||
TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
|
||||
MOVQ h+0(FP), AX
|
||||
MOVQ c+8(FP), BX
|
||||
MOVQ flag+16(FP), CX
|
||||
MOVQ blocks_base+24(FP), SI
|
||||
MOVQ blocks_len+32(FP), DI
|
||||
|
||||
MOVQ SP, BP
|
||||
MOVQ SP, R9
|
||||
ADDQ $15, R9
|
||||
ANDQ $~15, R9
|
||||
MOVQ R9, SP
|
||||
|
||||
VMOVDQU ·AVX_c40<>(SB), X0
|
||||
VMOVDQU ·AVX_c48<>(SB), X1
|
||||
VMOVDQA X0, X8
|
||||
VMOVDQA X1, X9
|
||||
|
||||
VMOVDQU ·AVX_iv3<>(SB), X0
|
||||
VMOVDQA X0, 0(SP)
|
||||
XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0)
|
||||
|
||||
VMOVDQU 0(AX), X10
|
||||
VMOVDQU 16(AX), X11
|
||||
VMOVDQU 32(AX), X2
|
||||
VMOVDQU 48(AX), X3
|
||||
|
||||
MOVQ 0(BX), R8
|
||||
MOVQ 8(BX), R9
|
||||
|
||||
loop:
|
||||
ADDQ $128, R8
|
||||
CMPQ R8, $128
|
||||
JGE noinc
|
||||
INCQ R9
|
||||
|
||||
noinc:
|
||||
VMOVQ_R8_X15
|
||||
VPINSRQ_1_R9_X15
|
||||
|
||||
VMOVDQA X10, X0
|
||||
VMOVDQA X11, X1
|
||||
VMOVDQU ·AVX_iv0<>(SB), X4
|
||||
VMOVDQU ·AVX_iv1<>(SB), X5
|
||||
VMOVDQU ·AVX_iv2<>(SB), X6
|
||||
|
||||
VPXOR X15, X6, X6
|
||||
VMOVDQA 0(SP), X7
|
||||
|
||||
LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
|
||||
VMOVDQA X12, 16(SP)
|
||||
VMOVDQA X13, 32(SP)
|
||||
VMOVDQA X14, 48(SP)
|
||||
VMOVDQA X15, 64(SP)
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX()
|
||||
LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
|
||||
VMOVDQA X12, 80(SP)
|
||||
VMOVDQA X13, 96(SP)
|
||||
VMOVDQA X14, 112(SP)
|
||||
VMOVDQA X15, 128(SP)
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX_INV()
|
||||
|
||||
LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
|
||||
VMOVDQA X12, 144(SP)
|
||||
VMOVDQA X13, 160(SP)
|
||||
VMOVDQA X14, 176(SP)
|
||||
VMOVDQA X15, 192(SP)
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX()
|
||||
LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
|
||||
VMOVDQA X12, 208(SP)
|
||||
VMOVDQA X13, 224(SP)
|
||||
VMOVDQA X14, 240(SP)
|
||||
VMOVDQA X15, 256(SP)
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX_INV()
|
||||
|
||||
LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX()
|
||||
LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX_INV()
|
||||
|
||||
LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX()
|
||||
LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX_INV()
|
||||
|
||||
LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX()
|
||||
LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX_INV()
|
||||
|
||||
LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX()
|
||||
LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX_INV()
|
||||
|
||||
LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX()
|
||||
LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX_INV()
|
||||
|
||||
LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX()
|
||||
LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX_INV()
|
||||
|
||||
LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX()
|
||||
LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX_INV()
|
||||
|
||||
LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX()
|
||||
LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
|
||||
SHUFFLE_AVX_INV()
|
||||
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9)
|
||||
SHUFFLE_AVX()
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9)
|
||||
SHUFFLE_AVX_INV()
|
||||
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9)
|
||||
SHUFFLE_AVX()
|
||||
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9)
|
||||
SHUFFLE_AVX_INV()
|
||||
|
||||
VMOVDQU 32(AX), X14
|
||||
VMOVDQU 48(AX), X15
|
||||
VPXOR X0, X10, X10
|
||||
VPXOR X1, X11, X11
|
||||
VPXOR X2, X14, X14
|
||||
VPXOR X3, X15, X15
|
||||
VPXOR X4, X10, X10
|
||||
VPXOR X5, X11, X11
|
||||
VPXOR X6, X14, X2
|
||||
VPXOR X7, X15, X3
|
||||
VMOVDQU X2, 32(AX)
|
||||
VMOVDQU X3, 48(AX)
|
||||
|
||||
LEAQ 128(SI), SI
|
||||
SUBQ $128, DI
|
||||
JNE loop
|
||||
|
||||
VMOVDQU X10, 0(AX)
|
||||
VMOVDQU X11, 16(AX)
|
||||
|
||||
MOVQ R8, 0(BX)
|
||||
MOVQ R9, 8(BX)
|
||||
VZEROUPPER
|
||||
|
||||
MOVQ BP, SP
|
||||
RET
|
||||
|
||||
// func supportsAVX2() bool
|
||||
TEXT ·supportsAVX2(SB), 4, $0-1
|
||||
MOVQ runtime·support_avx2(SB), AX
|
||||
MOVB AX, ret+0(FP)
|
||||
RET
|
||||
|
||||
// func supportsAVX() bool
|
||||
TEXT ·supportsAVX(SB), 4, $0-1
|
||||
MOVQ runtime·support_avx(SB), AX
|
||||
MOVB AX, ret+0(FP)
|
||||
RET
|
25
vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go
generated
vendored
Normal file
|
@ -0,0 +1,25 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.7,amd64,!gccgo,!appengine
|
||||
|
||||
package blake2b
|
||||
|
||||
func init() {
|
||||
useSSE4 = supportsSSE4()
|
||||
}
|
||||
|
||||
//go:noescape
|
||||
func supportsSSE4() bool
|
||||
|
||||
//go:noescape
|
||||
func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
|
||||
|
||||
func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
|
||||
if useSSE4 {
|
||||
hashBlocksSSE4(h, c, flag, blocks)
|
||||
} else {
|
||||
hashBlocksGeneric(h, c, flag, blocks)
|
||||
}
|
||||
}
|
290
vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
generated
vendored
Normal file
|
@ -0,0 +1,290 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
|
||||
DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
|
||||
GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
|
||||
DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
|
||||
GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
|
||||
DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
|
||||
GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
|
||||
DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
|
||||
GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
|
||||
DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
|
||||
GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
|
||||
DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
|
||||
GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
|
||||
MOVO v4, t1; \
|
||||
MOVO v5, v4; \
|
||||
MOVO t1, v5; \
|
||||
MOVO v6, t1; \
|
||||
PUNPCKLQDQ v6, t2; \
|
||||
PUNPCKHQDQ v7, v6; \
|
||||
PUNPCKHQDQ t2, v6; \
|
||||
PUNPCKLQDQ v7, t2; \
|
||||
MOVO t1, v7; \
|
||||
MOVO v2, t1; \
|
||||
PUNPCKHQDQ t2, v7; \
|
||||
PUNPCKLQDQ v3, t2; \
|
||||
PUNPCKHQDQ t2, v2; \
|
||||
PUNPCKLQDQ t1, t2; \
|
||||
PUNPCKHQDQ t2, v3
|
||||
|
||||
#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
|
||||
MOVO v4, t1; \
|
||||
MOVO v5, v4; \
|
||||
MOVO t1, v5; \
|
||||
MOVO v2, t1; \
|
||||
PUNPCKLQDQ v2, t2; \
|
||||
PUNPCKHQDQ v3, v2; \
|
||||
PUNPCKHQDQ t2, v2; \
|
||||
PUNPCKLQDQ v3, t2; \
|
||||
MOVO t1, v3; \
|
||||
MOVO v6, t1; \
|
||||
PUNPCKHQDQ t2, v3; \
|
||||
PUNPCKLQDQ v7, t2; \
|
||||
PUNPCKHQDQ t2, v6; \
|
||||
PUNPCKLQDQ t1, t2; \
|
||||
PUNPCKHQDQ t2, v7
|
||||
|
||||
#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
|
||||
PADDQ m0, v0; \
|
||||
PADDQ m1, v1; \
|
||||
PADDQ v2, v0; \
|
||||
PADDQ v3, v1; \
|
||||
PXOR v0, v6; \
|
||||
PXOR v1, v7; \
|
||||
PSHUFD $0xB1, v6, v6; \
|
||||
PSHUFD $0xB1, v7, v7; \
|
||||
PADDQ v6, v4; \
|
||||
PADDQ v7, v5; \
|
||||
PXOR v4, v2; \
|
||||
PXOR v5, v3; \
|
||||
PSHUFB c40, v2; \
|
||||
PSHUFB c40, v3; \
|
||||
PADDQ m2, v0; \
|
||||
PADDQ m3, v1; \
|
||||
PADDQ v2, v0; \
|
||||
PADDQ v3, v1; \
|
||||
PXOR v0, v6; \
|
||||
PXOR v1, v7; \
|
||||
PSHUFB c48, v6; \
|
||||
PSHUFB c48, v7; \
|
||||
PADDQ v6, v4; \
|
||||
PADDQ v7, v5; \
|
||||
PXOR v4, v2; \
|
||||
PXOR v5, v3; \
|
||||
MOVOU v2, t0; \
|
||||
PADDQ v2, t0; \
|
||||
PSRLQ $63, v2; \
|
||||
PXOR t0, v2; \
|
||||
MOVOU v3, t0; \
|
||||
PADDQ v3, t0; \
|
||||
PSRLQ $63, v3; \
|
||||
PXOR t0, v3
|
||||
|
||||
#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \
|
||||
MOVQ i0*8(src), m0; \
|
||||
PINSRQ $1, i1*8(src), m0; \
|
||||
MOVQ i2*8(src), m1; \
|
||||
PINSRQ $1, i3*8(src), m1; \
|
||||
MOVQ i4*8(src), m2; \
|
||||
PINSRQ $1, i5*8(src), m2; \
|
||||
MOVQ i6*8(src), m3; \
|
||||
PINSRQ $1, i7*8(src), m3
|
||||
|
||||
// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
|
||||
TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
|
||||
MOVQ h+0(FP), AX
|
||||
MOVQ c+8(FP), BX
|
||||
MOVQ flag+16(FP), CX
|
||||
MOVQ blocks_base+24(FP), SI
|
||||
MOVQ blocks_len+32(FP), DI
|
||||
|
||||
MOVQ SP, BP
|
||||
MOVQ SP, R9
|
||||
ADDQ $15, R9
|
||||
ANDQ $~15, R9
|
||||
MOVQ R9, SP
|
||||
|
||||
MOVOU ·iv3<>(SB), X0
|
||||
MOVO X0, 0(SP)
|
||||
XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0)
|
||||
|
||||
MOVOU ·c40<>(SB), X13
|
||||
MOVOU ·c48<>(SB), X14
|
||||
|
||||
MOVOU 0(AX), X12
|
||||
MOVOU 16(AX), X15
|
||||
|
||||
MOVQ 0(BX), R8
|
||||
MOVQ 8(BX), R9
|
||||
|
||||
loop:
|
||||
ADDQ $128, R8
|
||||
CMPQ R8, $128
|
||||
JGE noinc
|
||||
INCQ R9
|
||||
|
||||
noinc:
|
||||
MOVQ R8, X8
|
||||
PINSRQ $1, R9, X8
|
||||
|
||||
MOVO X12, X0
|
||||
MOVO X15, X1
|
||||
MOVOU 32(AX), X2
|
||||
MOVOU 48(AX), X3
|
||||
MOVOU ·iv0<>(SB), X4
|
||||
MOVOU ·iv1<>(SB), X5
|
||||
MOVOU ·iv2<>(SB), X6
|
||||
|
||||
PXOR X8, X6
|
||||
MOVO 0(SP), X7
|
||||
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7)
|
||||
MOVO X8, 16(SP)
|
||||
MOVO X9, 32(SP)
|
||||
MOVO X10, 48(SP)
|
||||
MOVO X11, 64(SP)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15)
|
||||
MOVO X8, 80(SP)
|
||||
MOVO X9, 96(SP)
|
||||
MOVO X10, 112(SP)
|
||||
MOVO X11, 128(SP)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6)
|
||||
MOVO X8, 144(SP)
|
||||
MOVO X9, 160(SP)
|
||||
MOVO X10, 176(SP)
|
||||
MOVO X11, 192(SP)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3)
|
||||
MOVO X8, 208(SP)
|
||||
MOVO X9, 224(SP)
|
||||
MOVO X10, 240(SP)
|
||||
MOVO X11, 256(SP)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14)
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14)
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14)
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14)
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
|
||||
|
||||
MOVOU 32(AX), X10
|
||||
MOVOU 48(AX), X11
|
||||
PXOR X0, X12
|
||||
PXOR X1, X15
|
||||
PXOR X2, X10
|
||||
PXOR X3, X11
|
||||
PXOR X4, X12
|
||||
PXOR X5, X15
|
||||
PXOR X6, X10
|
||||
PXOR X7, X11
|
||||
MOVOU X10, 32(AX)
|
||||
MOVOU X11, 48(AX)
|
||||
|
||||
LEAQ 128(SI), SI
|
||||
SUBQ $128, DI
|
||||
JNE loop
|
||||
|
||||
MOVOU X12, 0(AX)
|
||||
MOVOU X15, 16(AX)
|
||||
|
||||
MOVQ R8, 0(BX)
|
||||
MOVQ R9, 8(BX)
|
||||
|
||||
MOVQ BP, SP
|
||||
RET
|
||||
|
||||
// func supportsSSE4() bool
|
||||
TEXT ·supportsSSE4(SB), 4, $0-1
|
||||
MOVL $1, AX
|
||||
CPUID
|
||||
SHRL $19, CX // Bit 19 indicates SSE4 support
|
||||
ANDL $1, CX // CX != 0 if SSE4 is supported
|
||||
MOVB CX, ret+0(FP)
|
||||
RET
|
179
vendor/golang.org/x/crypto/blake2b/blake2b_generic.go
generated
vendored
Normal file
|
@ -0,0 +1,179 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blake2b
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// the precomputed values for BLAKE2b
|
||||
// there are 12 16-byte arrays - one for each round
|
||||
// the entries are calculated from the sigma constants.
|
||||
var precomputed = [12][16]byte{
|
||||
{0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15},
|
||||
{14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3},
|
||||
{11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4},
|
||||
{7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8},
|
||||
{9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13},
|
||||
{2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9},
|
||||
{12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11},
|
||||
{13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10},
|
||||
{6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5},
|
||||
{10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0},
|
||||
{0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first
|
||||
{14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second
|
||||
}
|
||||
|
||||
func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
|
||||
var m [16]uint64
|
||||
c0, c1 := c[0], c[1]
|
||||
|
||||
for i := 0; i < len(blocks); {
|
||||
c0 += BlockSize
|
||||
if c0 < BlockSize {
|
||||
c1++
|
||||
}
|
||||
|
||||
v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7]
|
||||
v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7]
|
||||
v12 ^= c0
|
||||
v13 ^= c1
|
||||
v14 ^= flag
|
||||
|
||||
for j := range m {
|
||||
m[j] = binary.LittleEndian.Uint64(blocks[i:])
|
||||
i += 8
|
||||
}
|
||||
|
||||
for j := range precomputed {
|
||||
s := &(precomputed[j])
|
||||
|
||||
v0 += m[s[0]]
|
||||
v0 += v4
|
||||
v12 ^= v0
|
||||
v12 = v12<<(64-32) | v12>>32
|
||||
v8 += v12
|
||||
v4 ^= v8
|
||||
v4 = v4<<(64-24) | v4>>24
|
||||
v1 += m[s[1]]
|
||||
v1 += v5
|
||||
v13 ^= v1
|
||||
v13 = v13<<(64-32) | v13>>32
|
||||
v9 += v13
|
||||
v5 ^= v9
|
||||
v5 = v5<<(64-24) | v5>>24
|
||||
v2 += m[s[2]]
|
||||
v2 += v6
|
||||
v14 ^= v2
|
||||
v14 = v14<<(64-32) | v14>>32
|
||||
v10 += v14
|
||||
v6 ^= v10
|
||||
v6 = v6<<(64-24) | v6>>24
|
||||
v3 += m[s[3]]
|
||||
v3 += v7
|
||||
v15 ^= v3
|
||||
v15 = v15<<(64-32) | v15>>32
|
||||
v11 += v15
|
||||
v7 ^= v11
|
||||
v7 = v7<<(64-24) | v7>>24
|
||||
|
||||
v0 += m[s[4]]
|
||||
v0 += v4
|
||||
v12 ^= v0
|
||||
v12 = v12<<(64-16) | v12>>16
|
||||
v8 += v12
|
||||
v4 ^= v8
|
||||
v4 = v4<<(64-63) | v4>>63
|
||||
v1 += m[s[5]]
|
||||
v1 += v5
|
||||
v13 ^= v1
|
||||
v13 = v13<<(64-16) | v13>>16
|
||||
v9 += v13
|
||||
v5 ^= v9
|
||||
v5 = v5<<(64-63) | v5>>63
|
||||
v2 += m[s[6]]
|
||||
v2 += v6
|
||||
v14 ^= v2
|
||||
v14 = v14<<(64-16) | v14>>16
|
||||
v10 += v14
|
||||
v6 ^= v10
|
||||
v6 = v6<<(64-63) | v6>>63
|
||||
v3 += m[s[7]]
|
||||
v3 += v7
|
||||
v15 ^= v3
|
||||
v15 = v15<<(64-16) | v15>>16
|
||||
v11 += v15
|
||||
v7 ^= v11
|
||||
v7 = v7<<(64-63) | v7>>63
|
||||
|
||||
v0 += m[s[8]]
|
||||
v0 += v5
|
||||
v15 ^= v0
|
||||
v15 = v15<<(64-32) | v15>>32
|
||||
v10 += v15
|
||||
v5 ^= v10
|
||||
v5 = v5<<(64-24) | v5>>24
|
||||
v1 += m[s[9]]
|
||||
v1 += v6
|
||||
v12 ^= v1
|
||||
v12 = v12<<(64-32) | v12>>32
|
||||
v11 += v12
|
||||
v6 ^= v11
|
||||
v6 = v6<<(64-24) | v6>>24
|
||||
v2 += m[s[10]]
|
||||
v2 += v7
|
||||
v13 ^= v2
|
||||
v13 = v13<<(64-32) | v13>>32
|
||||
v8 += v13
|
||||
v7 ^= v8
|
||||
v7 = v7<<(64-24) | v7>>24
|
||||
v3 += m[s[11]]
|
||||
v3 += v4
|
||||
v14 ^= v3
|
||||
v14 = v14<<(64-32) | v14>>32
|
||||
v9 += v14
|
||||
v4 ^= v9
|
||||
v4 = v4<<(64-24) | v4>>24
|
||||
|
||||
v0 += m[s[12]]
|
||||
v0 += v5
|
||||
v15 ^= v0
|
||||
v15 = v15<<(64-16) | v15>>16
|
||||
v10 += v15
|
||||
v5 ^= v10
|
||||
v5 = v5<<(64-63) | v5>>63
|
||||
v1 += m[s[13]]
|
||||
v1 += v6
|
||||
v12 ^= v1
|
||||
v12 = v12<<(64-16) | v12>>16
|
||||
v11 += v12
|
||||
v6 ^= v11
|
||||
v6 = v6<<(64-63) | v6>>63
|
||||
v2 += m[s[14]]
|
||||
v2 += v7
|
||||
v13 ^= v2
|
||||
v13 = v13<<(64-16) | v13>>16
|
||||
v8 += v13
|
||||
v7 ^= v8
|
||||
v7 = v7<<(64-63) | v7>>63
|
||||
v3 += m[s[15]]
|
||||
v3 += v4
|
||||
v14 ^= v3
|
||||
v14 = v14<<(64-16) | v14>>16
|
||||
v9 += v14
|
||||
v4 ^= v9
|
||||
v4 = v4<<(64-63) | v4>>63
|
||||
|
||||
}
|
||||
|
||||
h[0] ^= v0 ^ v8
|
||||
h[1] ^= v1 ^ v9
|
||||
h[2] ^= v2 ^ v10
|
||||
h[3] ^= v3 ^ v11
|
||||
h[4] ^= v4 ^ v12
|
||||
h[5] ^= v5 ^ v13
|
||||
h[6] ^= v6 ^ v14
|
||||
h[7] ^= v7 ^ v15
|
||||
}
|
||||
c[0], c[1] = c0, c1
|
||||
}
|
177
vendor/golang.org/x/crypto/blake2b/blake2x.go
generated
vendored
Normal file
|
@ -0,0 +1,177 @@
|
|||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blake2b
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// XOF defines the interface to hash functions that
|
||||
// support arbitrary-length output.
|
||||
type XOF interface {
|
||||
// Write absorbs more data into the hash's state. It panics if called
|
||||
// after Read.
|
||||
io.Writer
|
||||
|
||||
// Read reads more output from the hash. It returns io.EOF if the limit
|
||||
// has been reached.
|
||||
io.Reader
|
||||
|
||||
// Clone returns a copy of the XOF in its current state.
|
||||
Clone() XOF
|
||||
|
||||
// Reset resets the XOF to its initial state.
|
||||
Reset()
|
||||
}
|
||||
|
||||
// OutputLengthUnknown can be used as the size argument to NewXOF to indicate
|
||||
// that the length of the output is not known in advance.
|
||||
const OutputLengthUnknown = 0
|
||||
|
||||
// magicUnknownOutputLength is a magic value for the output size that indicates
|
||||
// an unknown number of output bytes.
|
||||
const magicUnknownOutputLength = (1 << 32) - 1
|
||||
|
||||
// maxOutputLength is the absolute maximum number of bytes to produce when the
|
||||
// number of output bytes is unknown.
|
||||
const maxOutputLength = (1 << 32) * 64
|
||||
|
||||
// NewXOF creates a new variable-output-length hash. The hash either produces a
|
||||
// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes
|
||||
// (size == OutputLengthUnknown). In the latter case, an absolute limit of
|
||||
// 256GiB applies.
|
||||
//
|
||||
// A non-nil key turns the hash into a MAC. The key must be between
|
||||
// zero and 32 bytes long.
|
||||
func NewXOF(size uint32, key []byte) (XOF, error) {
|
||||
if len(key) > Size {
|
||||
return nil, errKeySize
|
||||
}
|
||||
if size == magicUnknownOutputLength {
|
||||
// 2^32-1 indicates an unknown number of bytes and thus isn't a
|
||||
// valid length.
|
||||
return nil, errors.New("blake2b: XOF length too large")
|
||||
}
|
||||
if size == OutputLengthUnknown {
|
||||
size = magicUnknownOutputLength
|
||||
}
|
||||
x := &xof{
|
||||
d: digest{
|
||||
size: Size,
|
||||
keyLen: len(key),
|
||||
},
|
||||
length: size,
|
||||
}
|
||||
copy(x.d.key[:], key)
|
||||
x.Reset()
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type xof struct {
|
||||
d digest
|
||||
length uint32
|
||||
remaining uint64
|
||||
cfg, root, block [Size]byte
|
||||
offset int
|
||||
nodeOffset uint32
|
||||
readMode bool
|
||||
}
|
||||
|
||||
func (x *xof) Write(p []byte) (n int, err error) {
|
||||
if x.readMode {
|
||||
panic("blake2b: write to XOF after read")
|
||||
}
|
||||
return x.d.Write(p)
|
||||
}
|
||||
|
||||
func (x *xof) Clone() XOF {
|
||||
clone := *x
|
||||
return &clone
|
||||
}
|
||||
|
||||
func (x *xof) Reset() {
|
||||
x.cfg[0] = byte(Size)
|
||||
binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length
|
||||
binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length
|
||||
x.cfg[17] = byte(Size) // inner hash size
|
||||
|
||||
x.d.Reset()
|
||||
x.d.h[1] ^= uint64(x.length) << 32
|
||||
|
||||
x.remaining = uint64(x.length)
|
||||
if x.remaining == magicUnknownOutputLength {
|
||||
x.remaining = maxOutputLength
|
||||
}
|
||||
x.offset, x.nodeOffset = 0, 0
|
||||
x.readMode = false
|
||||
}
|
||||
|
||||
func (x *xof) Read(p []byte) (n int, err error) {
|
||||
if !x.readMode {
|
||||
x.d.finalize(&x.root)
|
||||
x.readMode = true
|
||||
}
|
||||
|
||||
if x.remaining == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
n = len(p)
|
||||
if uint64(n) > x.remaining {
|
||||
n = int(x.remaining)
|
||||
p = p[:n]
|
||||
}
|
||||
|
||||
if x.offset > 0 {
|
||||
blockRemaining := Size - x.offset
|
||||
if n < blockRemaining {
|
||||
x.offset += copy(p, x.block[x.offset:])
|
||||
x.remaining -= uint64(n)
|
||||
return
|
||||
}
|
||||
copy(p, x.block[x.offset:])
|
||||
p = p[blockRemaining:]
|
||||
x.offset = 0
|
||||
x.remaining -= uint64(blockRemaining)
|
||||
}
|
||||
|
||||
for len(p) >= Size {
|
||||
binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
|
||||
x.nodeOffset++
|
||||
|
||||
x.d.initConfig(&x.cfg)
|
||||
x.d.Write(x.root[:])
|
||||
x.d.finalize(&x.block)
|
||||
|
||||
copy(p, x.block[:])
|
||||
p = p[Size:]
|
||||
x.remaining -= uint64(Size)
|
||||
}
|
||||
|
||||
if todo := len(p); todo > 0 {
|
||||
if x.remaining < uint64(Size) {
|
||||
x.cfg[0] = byte(x.remaining)
|
||||
}
|
||||
binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
|
||||
x.nodeOffset++
|
||||
|
||||
x.d.initConfig(&x.cfg)
|
||||
x.d.Write(x.root[:])
|
||||
x.d.finalize(&x.block)
|
||||
|
||||
x.offset = copy(p, x.block[:todo])
|
||||
x.remaining -= uint64(todo)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *digest) initConfig(cfg *[Size]byte) {
|
||||
d.offset, d.c[0], d.c[1] = 0, 0, 0
|
||||
for i := range d.h {
|
||||
d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:])
|
||||
}
|
||||
}
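The vendored blake2x.go above defines the XOF interface and the NewXOF constructor. For orientation, here is an editor's sketch of typical usage, not part of the vendored package or of this change; the message and the 64-byte output length are arbitrary placeholders:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/blake2b"
)

func main() {
	// Ask for 64 bytes of output; a nil key means plain hashing rather than a MAC.
	xof, err := blake2b.NewXOF(64, nil)
	if err != nil {
		panic(err)
	}

	// Absorb the message, then read the requested output stream.
	xof.Write([]byte("hello world"))
	out := make([]byte, 64)
	if _, err := xof.Read(out); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out)
}
```

Reads past the requested length return io.EOF, and writing after the first Read panics, per the interface contract documented above.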
|
32
vendor/golang.org/x/crypto/blake2b/register.go
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
|||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.9
|
||||
|
||||
package blake2b
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"hash"
|
||||
)
|
||||
|
||||
func init() {
|
||||
newHash256 := func() hash.Hash {
|
||||
h, _ := New256(nil)
|
||||
return h
|
||||
}
|
||||
newHash384 := func() hash.Hash {
|
||||
h, _ := New384(nil)
|
||||
return h
|
||||
}
|
||||
|
||||
newHash512 := func() hash.Hash {
|
||||
h, _ := New512(nil)
|
||||
return h
|
||||
}
|
||||
|
||||
crypto.RegisterHash(crypto.BLAKE2b_256, newHash256)
|
||||
crypto.RegisterHash(crypto.BLAKE2b_384, newHash384)
|
||||
crypto.RegisterHash(crypto.BLAKE2b_512, newHash512)
|
||||
}
|
6
vendor/vendor.json
vendored
|
@ -1290,6 +1290,12 @@
|
|||
"path": "golang.org/x/crypto/ssh/terminal",
|
||||
"revision": "eb71ad9bd329b5ac0fd0148dd99bd62e8be8e035",
|
||||
"revisionTime": "2017-08-07T10:11:13Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "pE9lQ5mMiW10+m6CS9XQDhSACNU=",
|
||||
"path": "golang.org/x/crypto/blake2b",
|
||||
"revision": "eb71ad9bd329b5ac0fd0148dd99bd62e8be8e035",
|
||||
"revisionTime": "2017-08-07T10:11:13Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "9jjO5GjLa0XF/nfWihF02RoH4qc=",
|
||||
|
|
162
website/source/api/acl-policies.html.md
Normal file
|
@ -0,0 +1,162 @@
|
|||
---
|
||||
layout: api
|
||||
page_title: ACL Policies - HTTP API
|
||||
sidebar_current: api-acl-policies
|
||||
description: |-
|
||||
The /acl/policy endpoints are used to configure and manage ACL policies.
|
||||
---
|
||||
|
||||
# ACL Policies HTTP API
|
||||
|
||||
The `/acl/policies` and `/acl/policy/` endpoints are used to manage ACL policies.
|
||||
For more details about ACLs, please see the [ACL Guide](/guides/acl.html).
|
||||
|
||||
## List Policies
|
||||
|
||||
This endpoint lists all ACL policies. This lists the policies that have been replicated
|
||||
to the region, and may lag behind the authoritative region.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/acl/policies` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries), [consistency modes](/api/index.html#consistency-modes) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | Consistency Modes | ACL Required |
|
||||
| ---------------- | ----------------- | ------------ |
|
||||
| `YES` | `all` | `management` |
|
||||
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/acl/policies
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"Name": "foo",
|
||||
"Description": "",
|
||||
"CreateIndex": 12,
|
||||
"ModifyIndex": 13,
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Create or Update Policy
|
||||
|
||||
This endpoint creates or updates an ACL Policy. This request is always forwarded to the
|
||||
authoritative region.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `POST` | `/acl/policy/:policy_name` | `(empty body)` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------------ |
|
||||
| `NO` | `management` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `Name` `(string: <required>)` - Specifies the name of the policy.
|
||||
Creates the policy if the name does not exist, otherwise updates the existing policy.
|
||||
|
||||
- `Description` `(string: <optional>)` - Specifies a human readable description.
|
||||
|
||||
- `Rules` `(string: <required>)` - Specifies the Policy rules in HCL or JSON format.
|
||||
|
||||
### Sample Payload
|
||||
|
||||
```json
|
||||
{
|
||||
"Name": "my-policy",
|
||||
"Description": "This is a great policy",
|
||||
"Rules": ""
|
||||
}
|
||||
```
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
--request POST \
|
||||
--data @payload.json \
|
||||
https://nomad.rocks/v1/acl/policy/my-policy
|
||||
```
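The same request can be made from Go using only the standard library. This is an illustrative sketch: it reuses the placeholder address and payload from the samples above, and the token value is a stand-in for the `SecretID` of a management token, which this endpoint requires per the ACL table:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Same payload as payload.json in the curl sample above.
	payload := []byte(`{"Name":"my-policy","Description":"This is a great policy","Rules":""}`)

	req, err := http.NewRequest("POST", "https://nomad.rocks/v1/acl/policy/my-policy", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	// Replace the placeholder with a real management token SecretID.
	req.Header.Set("X-Nomad-Token", "REPLACE-WITH-MANAGEMENT-TOKEN-SECRET-ID")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```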
|
||||
|
||||
## Read Policy
|
||||
|
||||
This endpoint reads an ACL policy with the given name. This queries the policy that has been
|
||||
replicated to the region, and may lag behind the authoritative region.
|
||||
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/acl/policy/:policy_name` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries), [consistency modes](/api/index.html#consistency-modes) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | Consistency Modes | ACL Required |
|
||||
| ---------------- | ----------------- | ------------ |
|
||||
| `YES` | `all` | `management` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/acl/policy/foo
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"Name": "foo",
|
||||
"Rules": "",
|
||||
"Description": "",
|
||||
"CreateIndex": 12,
|
||||
"ModifyIndex": 13
|
||||
}
|
||||
```
|
||||
|
||||
## Delete Policy
|
||||
|
||||
This endpoint deletes the named ACL policy. This request is always forwarded to the
|
||||
authoritative region.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| -------- | ---------------------------- | -------------------------- |
|
||||
| `DELETE` | `/acl/policy/:policy_name` | `(empty body)` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------- |
|
||||
| `NO` | `management` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `policy_name` `(string: <required>)` - Specifies the policy name to delete.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
--request DELETE \
|
||||
https://nomad.rocks/v1/acl/policy/foo
|
||||
```
|
||||
|
298
website/source/api/acl-tokens.html.md
Normal file
|
@ -0,0 +1,298 @@
|
|||
---
|
||||
layout: api
|
||||
page_title: ACL Tokens - HTTP API
|
||||
sidebar_current: api-acl-tokens
|
||||
description: |-
|
||||
The /acl/token/ endpoints are used to configure and manage ACL tokens.
|
||||
---
|
||||
|
||||
# ACL Tokens HTTP API
|
||||
|
||||
The `/acl/bootstrap`, `/acl/tokens`, and `/acl/token/` endpoints are used to manage ACL tokens.
|
||||
For more details about ACLs, please see the [ACL Guide](/guides/acl.html).
|
||||
|
||||
## Bootstrap Token
|
||||
|
||||
This endpoint is used to bootstrap the ACL system and provide the initial management token.
|
||||
This request is always forwarded to the authoritative region. It can only be invoked once.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `POST` | `/acl/bootstrap` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
--request POST \
|
||||
https://nomad.rocks/v1/acl/bootstrap
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"AccessorID":"b780e702-98ce-521f-2e5f-c6b87de05b24",
|
||||
"SecretID":"3f4a0fcd-7c42-773c-25db-2d31ba0c05fe",
|
||||
"Name":"Bootstrap Token",
|
||||
"Type":"management",
|
||||
"Policies":null,
|
||||
"Global":true,
|
||||
"CreateTime":"2017-08-23T22:47:14.695408057Z",
|
||||
"CreateIndex":7,
|
||||
"ModifyIndex":7
|
||||
}
|
||||
```
|
||||
|
||||
## List Tokens
|
||||
|
||||
This endpoint lists all ACL tokens. This lists the local tokens and the global
|
||||
tokens which have been replicated to the region, and may lag behind the authoritative region.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/acl/tokens` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries), [consistency modes](/api/index.html#consistency-modes) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | Consistency Modes | ACL Required |
|
||||
| ---------------- | ----------------- | ------------ |
|
||||
| `YES` | `all` | `management` |
|
||||
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/acl/tokens
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"AccessorID": "b780e702-98ce-521f-2e5f-c6b87de05b24",
|
||||
"Name": "Bootstrap Token",
|
||||
"Type": "management",
|
||||
"Policies": null,
|
||||
"Global": true,
|
||||
"CreateTime": "2017-08-23T22:47:14.695408057Z",
|
||||
"CreateIndex": 7,
|
||||
"ModifyIndex": 7
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Create Token
|
||||
|
||||
This endpoint creates an ACL Token. If the token is a global token, the request
|
||||
is forwarded to the authoritative region.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `POST` | `/acl/token` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------------ |
|
||||
| `NO` | `management` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `Name` `(string: <optional>)` - Specifies the human readable name of the token.
|
||||
|
||||
- `Type` `(string: <required>)` - Specifies the type of token. Must be either `client` or `management`.
|
||||
|
||||
- `Policies` `(array<string>: <required>)` - Must be null or blank for `management` type tokens, otherwise must specify at least one policy for `client` type tokens.
|
||||
|
||||
- `Global` `(bool: <optional>)` - If true, indicates this token should be replicated globally to all regions. Otherwise, this token is created local to the target region.
|
||||
|
||||
### Sample Payload
|
||||
|
||||
```json
|
||||
{
|
||||
"Name": "Readonly token",
|
||||
"Type": "client",
|
||||
"Policies": ["readonly"],
|
||||
"Global": false
|
||||
}
|
||||
```
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
--request POST \
|
||||
--data @payload.json \
|
||||
https://nomad.rocks/v1/acl/token
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"AccessorID": "aa534e09-6a07-0a45-2295-a7f77063d429",
|
||||
"SecretID": "8176afd3-772d-0b71-8f85-7fa5d903e9d4",
|
||||
"Name": "Readonly token",
|
||||
"Type": "client",
|
||||
"Policies": [
|
||||
"readonly"
|
||||
],
|
||||
"Global": false,
|
||||
"CreateTime": "2017-08-23T23:25:41.429154233Z",
|
||||
"CreateIndex": 52,
|
||||
"ModifyIndex": 52
|
||||
}
|
||||
```
|
||||
|
||||
## Update Token
|
||||
|
||||
This endpoint updates an existing ACL Token. If the token is a global token, the request
|
||||
is forwarded to the authoritative region. Note that a token cannot be switched from global
|
||||
to local or vice versa.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `POST` | `/acl/token/:accessor_id` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------------ |
|
||||
| `NO` | `management` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `AccessorID` `(string: <required>)` - Specifies the token (by accessor) that is being updated. Must match payload body and request path.
|
||||
|
||||
- `Name` `(string: <optional>)` - Specifies the human readable name of the token.
|
||||
|
||||
- `Type` `(string: <required>)` - Specifies the type of token. Must be either `client` or `management`.
|
||||
|
||||
- `Policies` `(array<string>: <required>)` - Must be null or blank for `management` type tokens, otherwise must specify at least one policy for `client` type tokens.
|
||||
|
||||
### Sample Payload
|
||||
|
||||
```json
|
||||
{
|
||||
"AccessorID": "aa534e09-6a07-0a45-2295-a7f77063d429",
|
||||
"Name": "Read-write token",
|
||||
"Type": "client",
|
||||
"Policies": ["readwrite"],
|
||||
}
|
||||
```
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
--request POST \
|
||||
--data @payload.json \
|
||||
https://nomad.rocks/v1/acl/token/aa534e09-6a07-0a45-2295-a7f77063d429
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"AccessorID": "aa534e09-6a07-0a45-2295-a7f77063d429",
|
||||
"SecretID": "8176afd3-772d-0b71-8f85-7fa5d903e9d4",
|
||||
"Name": "Read-write token",
|
||||
"Type": "client",
|
||||
"Policies": [
|
||||
"readwrite"
|
||||
],
|
||||
"Global": false,
|
||||
"CreateTime": "2017-08-23T23:25:41.429154233Z",
|
||||
"CreateIndex": 52,
|
||||
"ModifyIndex": 64
|
||||
}
|
||||
```
|
||||
|
||||
## Read Token
|
||||
|
||||
This endpoint reads an ACL token with the given accessor. If the token is a global token
|
||||
which has been replicated to the region, it may lag behind the authoritative region.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/acl/token/:accessor_id` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries), [consistency modes](/api/index.html#consistency-modes) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | Consistency Modes | ACL Required |
|
||||
| ---------------- | ----------------- | ------------ |
|
||||
| `YES` | `all` | `management` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/acl/token/aa534e09-6a07-0a45-2295-a7f77063d429
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"AccessorID": "aa534e09-6a07-0a45-2295-a7f77063d429",
|
||||
"SecretID": "8176afd3-772d-0b71-8f85-7fa5d903e9d4",
|
||||
"Name": "Read-write token",
|
||||
"Type": "client",
|
||||
"Policies": [
|
||||
"readwrite"
|
||||
],
|
||||
"Global": false,
|
||||
"CreateTime": "2017-08-23T23:25:41.429154233Z",
|
||||
"CreateIndex": 52,
|
||||
"ModifyIndex": 64
|
||||
}
|
||||
```
|
||||
|
||||
## Delete Token

This endpoint deletes the ACL token by accessor. This request is forwarded to the
authoritative region for global tokens.

| Method   | Path                         | Produces                   |
| -------- | ---------------------------- | -------------------------- |
| `DELETE` | `/acl/token/:accessor_id`    | `(empty body)`             |

The table below shows this endpoint's support for
[blocking queries](/api/index.html#blocking-queries) and
[required ACLs](/api/index.html#acls).

| Blocking Queries | ACL Required  |
| ---------------- | ------------- |
| `NO`             | `management`  |

### Parameters

- `accessor_id` `(string: <required>)` - Specifies the ACL token accessor ID.

### Sample Request

```text
$ curl \
    --request DELETE \
    https://nomad.rocks/v1/acl/token/aa534e09-6a07-0a45-2295-a7f77063d429
```

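Since this endpoint requires a `management` ACL, the request must also carry the caller's Secret ID in the `X-Nomad-Token` header when ACLs are enforced. A minimal sketch, reusing the sample Secret ID from the responses above as a placeholder:

```text
$ curl \
    --request DELETE \
    --header "X-Nomad-Token: 8176afd3-772d-0b71-8f85-7fa5d903e9d4" \
    https://nomad.rocks/v1/acl/token/aa534e09-6a07-0a45-2295-a7f77063d429
```
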
@ -73,11 +73,21 @@ administration.

## ACLs

The Nomad API does not support ACLs at this time.
Several endpoints in Nomad use or require ACL tokens to operate. The tokens are used to authenticate the request and determine if the request is allowed based on the associated authorizations. Tokens are specified per-request by using the `X-Nomad-Token` request header set to the `SecretID` of an ACL token.

For more details about ACLs, please see the [ACL Guide](/guides/acl.html).

## Authentication

The Nomad API does not support authentication at this time.
When ACLs are enabled, a Nomad token should be provided to API requests using the `X-Nomad-Token` header. When using authentication, clients should communicate via TLS.

Here is an example using curl:

```text
$ curl \
    --header "X-Nomad-Token: aa534e09-6a07-0a45-2295-a7f77063d429" \
    https://nomad.rocks/v1/jobs
```

## Blocking Queries

BIN
website/source/assets/images/acl.jpg
(Stored with Git LFS)
Normal file
BIN
website/source/assets/images/acl.jpg
(Stored with Git LFS)
Normal file
Binary file not shown.
50
website/source/docs/agent/configuration/acl.html.md
Normal file
50
website/source/docs/agent/configuration/acl.html.md
Normal file
@ -0,0 +1,50 @@

---
layout: "docs"
page_title: "acl Stanza - Agent Configuration"
sidebar_current: "docs-agent-configuration-acl"
description: |-
  The "acl" stanza configures the Nomad agent to enable ACLs and tune various parameters.
---

# `acl` Stanza

<table class="table table-bordered table-striped">
  <tr>
    <th width="120">Placement</th>
    <td>
      <code>**acl**</code>
    </td>
  </tr>
</table>

The `acl` stanza configures the Nomad agent to enable ACLs and tunes various ACL parameters.

```hcl
acl {
  enabled    = true
  token_ttl  = "30s"
  policy_ttl = "60s"
}
```

## `acl` Parameters

- `enabled` `(bool: false)` - Specifies if ACL enforcement is enabled. All other
  ACL configuration options depend on this value.

- `token_ttl` `(string: "30s")` - Specifies the maximum time-to-live (TTL) for
  cached ACL tokens. This does not affect servers, since they do not cache tokens.
  Setting this value lower reduces how stale a token can be, but increases
  the request load against servers. If a client cannot reach a server, for example
  because of an outage, the TTL will be ignored and the cached value used.

- `policy_ttl` `(string: "30s")` - Specifies the maximum time-to-live (TTL) for
  cached ACL policies. This does not affect servers, since they do not cache policies.
  Setting this value lower reduces how stale a policy can be, but increases
  the request load against servers. If a client cannot reach a server, for example
  because of an outage, the TTL will be ignored and the cached value used.

- `replication_token` `(string: "")` - Specifies the Secret ID of the ACL token
  to use for replicating policies and tokens. This is used by servers in
  non-authoritative regions to mirror the policies and tokens into the local
  region. An example configuration is shown below.
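
A minimal sketch for a server in a non-authoritative region; the Secret ID below is a placeholder for a `management` or Global token created in the authoritative region:

```hcl
acl {
  enabled = true

  # Placeholder Secret ID; use a management token from the authoritative
  # region or a Global token.
  replication_token = "8176afd3-772d-0b71-8f85-7fa5d903e9d4"
}
```
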
@ -74,6 +74,8 @@ testing.

## General Parameters

- `acl` <code>([ACL][acl]: nil)</code> - Specifies configuration which is specific to ACLs.

- `addresses` `(Addresses: see below)` - Specifies the bind address for
  individual network services. Any values configured in this stanza take
  precedence over the default [bind_addr](#bind_addr).

@ -230,3 +232,4 @@ http_api_response_headers {

[tls]: /docs/agent/configuration/tls.html "Nomad Agent tls Configuration"
[client]: /docs/agent/configuration/client.html "Nomad Agent client Configuration"
[server]: /docs/agent/configuration/server.html "Nomad Agent server Configuration"
[acl]: /docs/agent/configuration/acl.html "Nomad Agent ACL Configuration"

@ -34,6 +34,11 @@ server {

## `server` Parameters

- `authoritative_region` `(string: "")` - Specifies the authoritative region, which
  provides a single source of truth for global configurations such as ACL policies and
  global ACL tokens. Non-authoritative regions will replicate from the authoritative
  region to act as a mirror. By default, the local region is assumed to be authoritative.

- `bootstrap_expect` `(int: required)` - Specifies the number of server nodes to
  wait for before bootstrapping. It is most common to use the odd-numbered
  integers `3` or `5` for this value, depending on the cluster size. A value of

312
website/source/guides/acl.html.markdown
Normal file
312
website/source/guides/acl.html.markdown
Normal file
@ -0,0 +1,312 @@

---
layout: "guides"
page_title: "ACLs"
sidebar_current: "guides-acl"
description: |-
  Nomad provides an optional Access Control List (ACL) system which can be
  used to control access to data and APIs. This guide covers configuring,
  bootstrapping, and writing policies for the ACL system.
---

# ACL System

Nomad provides an optional Access Control List (ACL) system which can be used to control access to data and APIs. The ACL is [Capability-based](https://en.wikipedia.org/wiki/Capability-based_security), relying on tokens which are associated with policies to determine which fine-grained rules can be applied. Nomad's capability-based ACL system is very similar to the design of [AWS IAM](https://aws.amazon.com/iam/).

# ACL System Overview

The ACL system is designed to be easy to use and fast to enforce while providing administrative insight. At the highest level, there are three major components to the ACL system:

![ACL Overview](/assets/images/acl.jpg)

* **ACL Policies**. No permissions are granted by default, making Nomad a default-deny or whitelist system. Policies allow a set of capabilities or actions to be granted or whitelisted. For example, a "readonly" policy might only grant the ability to list and inspect running jobs, but not to submit new ones.

* **ACL Tokens**. Requests to Nomad are authenticated by using a bearer token. Each ACL token has a public Accessor ID which is used to name a token, and a Secret ID which is used to make requests to Nomad. The Secret ID is provided using a request header (`X-Nomad-Token`) and is used to authenticate the caller. Tokens are either `management` or `client` types. The `management` tokens are effectively "root" in the system, and can perform any operation. The `client` tokens are associated with one or more ACL policies which grant specific capabilities.

* **Capabilities**. Capabilities are the set of actions that can be performed. This includes listing jobs, submitting jobs, querying nodes, etc. A `management` token is granted all capabilities, while `client` tokens are granted specific capabilities via ACL policies. The full set of capabilities is discussed below in the rule specifications.

### ACL Policies

An ACL policy is a named set of rules. Each policy must have a unique name, an optional description, and a rule set.
A client ACL token can be associated with multiple policies, and a request is allowed if _any_ of the associated policies grant the capability.
Management tokens cannot be associated with policies because they are granted all capabilities.

The special `anonymous` policy can be defined to grant capabilities to requests which are made anonymously. An anonymous request is a request made to Nomad without the `X-Nomad-Token` header specified. This can be used to allow anonymous users to list jobs and view their status, while requiring authenticated requests to submit new jobs or modify existing jobs. By default, there is no `anonymous` policy set, meaning all anonymous requests are denied.

### ACL Tokens

ACL tokens are used to authenticate requests and determine if the caller is authorized to perform an action. Each ACL token has a public Accessor ID which is used to identify the token, a Secret ID which is used to make requests to Nomad, and an optional human readable name. All `client` type tokens are associated with one or more policies, and can perform an action if any associated policy allows it. Tokens can be associated with policies that do not exist, which is equivalent to granting no capabilities. The `management` type tokens cannot be associated with policies, but can perform any action.

When ACL tokens are created, they can be optionally marked as `Global`. This causes them to be created in the authoritative region and replicated to all other regions. Otherwise, tokens are created locally in the region where the request was made and are not replicated. Local tokens cannot be used for cross-region requests since they are not replicated between regions.

### Capabilities and Scope

The following table summarizes the ACL rules that are available for constructing policy rules:

| Policy | Scope |
| ---------- | -------------------------------------------- |
| [namespace](#namespace-rules) | Job-related operations by namespace |
| [agent](#agent-rules) | Utility operations in the Agent API |
| [node](#node-rules) | Node-level catalog operations |
| [operator](#operator-rules) | Cluster-level operations in the Operator API |

Constructing rules from these policies is covered in detail in the Rule Specification section below.

### Multi-Region Configuration

Nomad supports multi-datacenter and multi-region configurations. A single region is able to service multiple datacenters, and all servers in a region replicate their state between each other. In a multi-region configuration, there is a set of servers per region. Each region operates independently and is loosely coupled to allow jobs to be scheduled in any region and requests to flow transparently to the correct region.

When ACLs are enabled, Nomad depends on an "authoritative region" to act as a single source of truth for ACL policies and global ACL tokens. The authoritative region is configured in the [`server` stanza](/docs/agent/configuration/server.html) of agents, and all regions must share a single authoritative source. Any ACL policies or global ACL tokens are created in the authoritative region first. All other regions replicate ACL policies and global ACL tokens to act as local mirrors. This allows policies to be administered centrally, and for enforcement to be local to each region for low latency.

Global ACL tokens are used to allow cross-region requests. Standard ACL tokens are created in a single target region and not replicated. This means if a request takes place between regions, global tokens must be used so that both regions will have the token registered.

# Configuring ACLs

ACLs are not enabled by default and must be explicitly enabled. Clients and servers need to set `enabled = true` in the [`acl` stanza](/docs/agent/configuration/acl.html). This enables the [ACL Policy](/api/acl-policies.html) and [ACL Token](/api/acl-tokens.html) APIs, as well as endpoint enforcement.

For multi-region configurations, all servers must be configured to use a single [authoritative region](/docs/agent/configuration/server.html#authoritative_region). The authoritative region is responsible for managing ACL policies and global tokens. Servers in other regions will replicate policies and global tokens to act as a mirror, and must have their [`replication_token`](/docs/agent/configuration/acl.html#replication_token) configured.

# Bootstrapping ACLs

Bootstrapping ACLs on a new cluster requires a few steps, outlined below:

### Enable ACLs on Nomad Servers

The APIs needed to manage policies and tokens are not enabled until ACLs are enabled. To begin, we need to enable ACLs on the servers. If a multi-region setup is used, the authoritative region should be enabled first. For each server:

1. Set `enabled = true` in the [`acl` stanza](/docs/agent/configuration/acl.html#enabled).
1. Set `authoritative_region` in the [`server` stanza](/docs/agent/configuration/server.html#authoritative_region).
1. For servers outside the authoritative region, set `replication_token` in the [`acl` stanza](/docs/agent/configuration/acl.html#replication_token). Replication tokens should be `management` type tokens which are either created in the authoritative region, or created as Global tokens.
1. Restart the Nomad server to pick up the new configuration.

Please take care to restart the servers one at a time, and ensure each server has joined and is operating correctly before restarting another. A combined configuration sketch for these steps is shown below.

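The following is a minimal sketch of the server configuration these steps produce; the region name and the Secret ID are placeholders, and `replication_token` is only set on servers outside the authoritative region:

```hcl
server {
  enabled              = true
  authoritative_region = "us-east-1"
}

acl {
  enabled = true

  # Only on servers outside the authoritative region; the Secret ID is a
  # placeholder for a management or Global token.
  replication_token = "8176afd3-772d-0b71-8f85-7fa5d903e9d4"
}
```
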
### Generate the initial token

Once the ACL system is enabled, we need to generate our initial token. This first token is used to bootstrap the system, and care should be taken not to lose it. To generate it, we use the [Bootstrap API](/api/acl-tokens.html#bootstrap-token):

```text
$ curl \
    --request POST \
    https://nomad.rocks/v1/acl/bootstrap?pretty=true
```
```json
{
  "AccessorID": "b780e702-98ce-521f-2e5f-c6b87de05b24",
  "SecretID": "3f4a0fcd-7c42-773c-25db-2d31ba0c05fe",
  "Name": "Bootstrap Token",
  "Type": "management",
  "Policies": null,
  "Global": true,
  "CreateTime": "2017-08-23T22:47:14.695408057Z",
  "CreateIndex": 7,
  "ModifyIndex": 7
}
```

Once the initial bootstrap is performed, it _cannot be performed again_. Make sure to save this AccessorID and SecretID.
The bootstrap token is a `management` type token, meaning it can perform any operation. It should be used to set up the ACL policies and create additional ACL tokens. The bootstrap token can be deleted like any other token, so care should be taken not to revoke all management tokens.

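As a quick sanity check, the Secret ID from the bootstrap response can be supplied on any request via the `X-Nomad-Token` header; this sketch reuses the sample Secret ID shown above:

```text
# Store the bootstrap Secret ID and make an authenticated request
$ export NOMAD_TOKEN="3f4a0fcd-7c42-773c-25db-2d31ba0c05fe"

$ curl \
    --header "X-Nomad-Token: $NOMAD_TOKEN" \
    https://nomad.rocks/v1/jobs
```
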
### Enable ACLs on Nomad Clients

To enforce client endpoints, we need to enable ACLs on clients as well. This is simpler than servers, and we just need to set `enabled = true` in the [`acl` stanza](/docs/agent/configuration/acl.html). Once configured, we need to restart the client for the change to take effect.

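A minimal sketch of the client-side configuration change:

```hcl
# On each Nomad client
acl {
  enabled = true
}
```
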
### Set an Anonymous Policy (Optional)

The ACL system uses a whitelist or default-deny model. This means by default no permissions are granted.
For clients making requests without ACL tokens, we may want to grant some basic level of access. This is done by setting rules
on the special "anonymous" policy. This policy is applied to any requests made without a token.

To permit anonymous users to read, we can set up the following policy:

```text
# Store our token secret ID
$ export NOMAD_TOKEN="BOOTSTRAP_SECRET_ID"

# Write out the payload. Note that the Rules field must be a single JSON
# string, so the HCL rules are given on one line.
$ cat > payload.json <<EOF
{
  "Name": "anonymous",
  "Description": "Allow read-only access for anonymous requests",
  "Rules": "namespace \"default\" { policy = \"read\" } agent { policy = \"read\" } node { policy = \"read\" }"
}
EOF

# Install the policy
$ curl --request POST \
    --data @payload.json \
    -H "X-Nomad-Token: $NOMAD_TOKEN" \
    https://nomad.rocks/v1/acl/policy/anonymous

# Verify anonymous request works
$ curl https://nomad.rocks/v1/jobs
```

# Rule Specification

A core part of the ACL system is the rule language which is used to describe the policy that must be enforced.
We make use of the [HashiCorp Configuration Language (HCL)](https://github.com/hashicorp/hcl/) to specify rules.
This language is human readable and interoperable with JSON, making it easy to machine-generate. Policies can contain any number of rules.

Policies typically have several dispositions:

* `read`: allow the resource to be read but not modified
* `write`: allow the resource to be read and modified
* `deny`: do not allow the resource to be read or modified. Deny takes precedence when multiple policies are associated with a token.

Specification in the HCL format looks like:

```text
# Allow read only access to the default namespace
namespace "default" {
  policy = "read"
}

# Allow writing to the `foo` namespace
namespace "foo" {
  policy = "write"
}

agent {
  policy = "read"
}

node {
  policy = "read"
}
```

This is equivalent to the following JSON input:

```json
{
  "namespace": {
    "default": {
      "policy": "read"
    },
    "foo": {
      "policy": "write"
    }
  },
  "agent": {
    "policy": "read"
  },
  "node": {
    "policy": "read"
  }
}
```

The [ACL Policy](/api/acl-policies.html) API allows either HCL or JSON to be used to define the content of the rules section.

### Namespace Rules

The `namespace` policy controls access to a namespace, including the [Jobs API](/api/jobs.html), [Deployments API](/api/deployments.html), [Allocations API](/api/allocations.html), and [Evaluations API](/api/evaluations.html).

```
namespace "default" {
  policy = "write"
}

namespace "sensitive" {
  policy = "read"
}
```

Namespace rules are keyed by the namespace name they apply to. When no namespace is specified, the "default" namespace is used. For example, the above policy grants write access to the default namespace and read access to the sensitive namespace. In addition to the coarse-grained `policy` specification, the `namespace` stanza allows setting a more fine-grained list of `capabilities`. This includes:

* `deny` - When multiple policies are associated with a token, deny will take precedence and prevent any capabilities.
* `list-jobs` - Allows listing the jobs and seeing coarse grain status.
* `read-job` - Allows inspecting a job and seeing fine grain status.
* `submit-job` - Allows jobs to be submitted or modified.
* `read-logs` - Allows the logs associated with a job to be viewed.
* `read-fs` - Allows the filesystem of associated allocations to be viewed.

The coarse-grained policy dispositions are shorthand for the fine-grained capabilities:

* `deny` policy - ["deny"]
* `read` policy - ["list-jobs", "read-job"]
* `write` policy - ["list-jobs", "read-job", "submit-job", "read-logs", "read-fs"]

When both the policy shorthand and a capabilities list are provided, the capabilities are merged:

```
# Allow reading jobs and submitting jobs, without allowing access
# to view log output or inspect the filesystem
namespace "default" {
  policy = "read"
  capabilities = ["submit-job"]
}
```

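Applying the shorthand expansions listed above, the merged policy is equivalent to listing the effective capabilities explicitly; a sketch of the expanded form:

```
# Equivalent to policy = "read" plus capabilities = ["submit-job"]
namespace "default" {
  capabilities = ["list-jobs", "read-job", "submit-job"]
}
```
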
### Node Rules

The `node` policy controls access to the [Node API](/api/nodes.html) such as listing nodes or triggering a node drain.
Node rules are specified for all nodes using the `node` key:

```
node {
  policy = "read"
}
```

There's only one node policy allowed per rule set, and its value is set to one of the policy dispositions.

### Agent Rules

The `agent` policy controls access to the utility operations in the [Agent API](/api/agent.html), such as join and leave.
Agent rules are specified for all agents using the `agent` key:

```
agent {
  policy = "write"
}
```

There's only one agent policy allowed per rule set, and its value is set to one of the policy dispositions.

### Operator Rules

The `operator` policy controls access to the [Operator API](/api/operator.html). Operator rules look like:

```
operator {
  policy = "read"
}
```

There's only one operator policy allowed per rule set, and its value is set to one of the policy dispositions. In the example above, the token could be used to query the operator endpoints for diagnostic purposes but not make any changes.

# Advanced Topics

### Outages and Multi-Region Replication

The ACL system takes some steps to ensure operation during outages. Client nodes maintain a limited
cache of ACL tokens and ACL policies that have recently or frequently been used, associated with a time-to-live (TTL).

When the region's servers are unavailable, clients will automatically ignore the cache TTL
and extend the cache until the outage has recovered. Any policies or tokens that are not cached
will be treated as missing, and access will be denied until the outage has been resolved.

Nomad servers have all the policies and tokens locally and can continue serving requests even if
quorum is lost. The tokens and policies may become stale during this period as data is not actively
replicating, but will be automatically repaired when the outage has been resolved.

In a multi-region setup, there is a single authoritative region which is the source of truth for
ACL policies and global ACL tokens. All other regions asynchronously replicate from the authoritative
region. When replication is interrupted, the existing data is used for request processing and may
become stale. When the authoritative region is reachable again, replication will resume and repair any
inconsistency.

@ -15,6 +15,14 @@

<hr>

<li<%= sidebar_current("api-acl-policies") %>>
  <a href="/api/acl-policies.html">ACL Policies</a>
</li>

<li<%= sidebar_current("api-acl-tokens") %>>
  <a href="/api/acl-tokens.html">ACL Tokens</a>
</li>

<li<%= sidebar_current("api-agent") %>>
  <a href="/api/agent.html">Agent</a>
</li>

@ -300,6 +300,9 @@

<li<%= sidebar_current("docs-agent-configuration") %>>
  <a href="/docs/agent/configuration/index.html">Configuration</a>
  <ul class="nav">
    <li <%= sidebar_current("docs-agent-configuration-acl") %>>
      <a href="/docs/agent/configuration/acl.html">acl</a>
    </li>
    <li <%= sidebar_current("docs-agent-configuration-client") %>>
      <a href="/docs/agent/configuration/client.html">client</a>
    </li>

@ -1,6 +1,10 @@

<% wrap_layout :inner do %>
  <% content_for :sidebar do %>
    <ul class="nav docs-sidenav">
    <ul class="nav docs-sidenav">

      <li<%= sidebar_current("guides-acl") %>>
        <a href="/guides/acl.html">ACLs</a>
      </li>

      <li<%= sidebar_current("guides-spark") %>>
        <a href="/guides/spark/spark.html">Apache Spark Integration</a>