Merge branch 'main' into docs-k8s-cli-alpha

commit c4033b3d83

@ -0,0 +1,4 @@
```release-note:improvement
checks: add failures_before_warning setting for interval checks.
```
@ -536,9 +536,6 @@ func (p *policyAuthorizer) IntentionRead(prefix string, _ *AuthorizerContext) En
// IntentionWrite checks if writing (creating, updating, or deleting) of an
// intention is allowed.
func (p *policyAuthorizer) IntentionWrite(prefix string, _ *AuthorizerContext) EnforcementDecision {
	if prefix == "" {
		return Deny
	}
	if prefix == "*" {
		return p.allAllowed(p.intentionRules, AccessWrite)
	}
@ -236,7 +236,7 @@ func (p *policyRulesMergeContext) merge(policy *PolicyRules) {
	}
}

func (p *policyRulesMergeContext) update(merged *PolicyRules) {
func (p *policyRulesMergeContext) fill(merged *PolicyRules) {
	merged.ACL = p.aclRule
	merged.Keyring = p.keyringRule
	merged.Operator = p.operatorRule

@ -354,8 +354,8 @@ func (m *PolicyMerger) Policy() *Policy {
		ID: fmt.Sprintf("%x", m.idHasher.Sum(nil)),
	}

	m.policyRulesMergeContext.update(&merged.PolicyRules)
	m.enterprisePolicyRulesMergeContext.update(&merged.EnterprisePolicyRules)
	m.policyRulesMergeContext.fill(&merged.PolicyRules)
	m.enterprisePolicyRulesMergeContext.fill(&merged.EnterprisePolicyRules)

	return merged
}

@ -12,6 +12,6 @@ func (ctx *enterprisePolicyRulesMergeContext) merge(*EnterprisePolicyRules) {
	// do nothing
}

func (ctx *enterprisePolicyRulesMergeContext) update(*EnterprisePolicyRules) {
func (ctx *enterprisePolicyRulesMergeContext) fill(*EnterprisePolicyRules) {
	// do nothing
}
@ -2459,6 +2459,11 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
		maxOutputSize = chkType.OutputMaxSize
	}

	// FailuresBeforeWarning has to default to the same value as FailuresBeforeCritical
	if chkType.FailuresBeforeWarning == 0 {
		chkType.FailuresBeforeWarning = chkType.FailuresBeforeCritical
	}

	// Get the address of the proxy for this service if it exists
	// Need its config to know whether we should reroute checks to it
	var proxy *structs.NodeService

@ -2473,7 +2478,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
		}
	}

	statusHandler := checks.NewStatusHandler(a.State, a.logger, chkType.SuccessBeforePassing, chkType.FailuresBeforeCritical)
	statusHandler := checks.NewStatusHandler(a.State, a.logger, chkType.SuccessBeforePassing, chkType.FailuresBeforeWarning, chkType.FailuresBeforeCritical)
	sid := check.CompoundServiceID()

	cid := check.CompoundCheckID()
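The hunk above keeps old configurations behaving as before: when failures_before_warning is left unset (zero), it inherits failures_before_critical, so a check with only a critical threshold goes straight from passing to critical and the new warning state is skipped. A minimal sketch of that defaulting rule, with a trimmed stand-in for structs.CheckType and illustrative threshold values:

```go
package main

import "fmt"

// checkType is a trimmed stand-in for structs.CheckType carrying only the
// two thresholds this change touches.
type checkType struct {
	FailuresBeforeWarning  int
	FailuresBeforeCritical int
}

// applyWarningDefault mirrors the defaulting done in Agent.addCheck: an
// unset (zero) warning threshold falls back to the critical threshold.
func applyWarningDefault(c *checkType) {
	if c.FailuresBeforeWarning == 0 {
		c.FailuresBeforeWarning = c.FailuresBeforeCritical
	}
}

func main() {
	legacy := &checkType{FailuresBeforeCritical: 3} // warning not configured
	applyWarningDefault(legacy)
	fmt.Println(legacy.FailuresBeforeWarning) // 3: same behavior as before the change

	tuned := &checkType{FailuresBeforeWarning: 1, FailuresBeforeCritical: 3}
	applyWarningDefault(tuned)
	fmt.Println(tuned.FailuresBeforeWarning) // 1: warns earlier than it goes critical
}
```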
@ -4,7 +4,6 @@ import (
	"context"
	"crypto/tls"
	"fmt"
	http2 "golang.org/x/net/http2"
	"io"
	"io/ioutil"
	"net"

@ -16,6 +15,8 @@ import (
	"syscall"
	"time"

	http2 "golang.org/x/net/http2"

	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/go-hclog"
@ -907,17 +908,19 @@ type StatusHandler struct {
	logger                 hclog.Logger
	successBeforePassing   int
	successCounter         int
	failuresBeforeWarning  int
	failuresBeforeCritical int
	failuresCounter        int
}

// NewStatusHandler sets counter values to their thresholds in order to immediately update the status after the first check.
func NewStatusHandler(inner CheckNotifier, logger hclog.Logger, successBeforePassing, failuresBeforeCritical int) *StatusHandler {
func NewStatusHandler(inner CheckNotifier, logger hclog.Logger, successBeforePassing, failuresBeforeWarning, failuresBeforeCritical int) *StatusHandler {
	return &StatusHandler{
		logger:                 logger,
		inner:                  inner,
		successBeforePassing:   successBeforePassing,
		successCounter:         successBeforePassing,
		failuresBeforeWarning:  failuresBeforeWarning,
		failuresBeforeCritical: failuresBeforeCritical,
		failuresCounter:        failuresBeforeCritical,
	}
@ -950,10 +953,17 @@ func (s *StatusHandler) updateCheck(checkID structs.CheckID, status, output stri
		s.inner.UpdateCheck(checkID, status, output)
		return
	}
	s.logger.Warn("Check failed but has not reached failure threshold",
	// Defaults to the same value as failuresBeforeCritical if not set.
	if s.failuresCounter >= s.failuresBeforeWarning {
		s.logger.Warn("Check is now warning", "check", checkID.String())
		s.inner.UpdateCheck(checkID, api.HealthWarning, output)
		return
	}
	s.logger.Warn("Check failed but has not reached warning/failure threshold",
		"check", checkID.String(),
		"status", status,
		"failure_count", s.failuresCounter,
		"warning_threshold", s.failuresBeforeWarning,
		"failure_threshold", s.failuresBeforeCritical,
	)
}
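A self-contained sketch of the threshold logic introduced above: consecutive failures cross the warning threshold first and the critical threshold later, and a success resets the failure streak. The real StatusHandler also counts consecutive successes and reports through a CheckNotifier; this model keeps only the failure side, and the threshold values are illustrative:

```go
package main

import "fmt"

// thresholds models the failure-side counters of StatusHandler.
type thresholds struct {
	failuresBeforeWarning  int
	failuresBeforeCritical int
	failuresCounter        int
}

// observeFailure records one failed run and returns the status that would be
// reported: critical once the critical threshold is reached, warning once the
// warning threshold is reached, otherwise the previous status stands.
func (t *thresholds) observeFailure(current string) string {
	t.failuresCounter++
	if t.failuresCounter >= t.failuresBeforeCritical {
		return "critical"
	}
	if t.failuresCounter >= t.failuresBeforeWarning {
		return "warning"
	}
	return current // not enough consecutive failures yet
}

// observeSuccess resets the failure streak; the real handler additionally
// requires successBeforePassing consecutive successes before reporting passing.
func (t *thresholds) observeSuccess() {
	t.failuresCounter = 0
}

func main() {
	// failures_before_warning = 2, failures_before_critical = 4 (illustrative values)
	t := &thresholds{failuresBeforeWarning: 2, failuresBeforeCritical: 4}
	status := "passing"
	for i := 1; i <= 4; i++ {
		status = t.observeFailure(status)
		fmt.Printf("after %d consecutive failures: %s\n", i, status)
	}
	// Prints: passing, warning, warning, critical.
	t.observeSuccess() // a single success resets the streak
}
```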
@ -49,7 +49,7 @@ func TestCheckMonitor_Script(t *testing.T) {
|
|||
t.Run(tt.status, func(t *testing.T) {
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
check := &CheckMonitor{
|
||||
|
@ -94,7 +94,7 @@ func TestCheckMonitor_Args(t *testing.T) {
|
|||
t.Run(tt.status, func(t *testing.T) {
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckMonitor{
|
||||
|
@ -128,7 +128,7 @@ func TestCheckMonitor_Timeout(t *testing.T) {
|
|||
// t.Parallel() // timing test. no parallel
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
check := &CheckMonitor{
|
||||
|
@ -163,7 +163,7 @@ func TestCheckMonitor_RandomStagger(t *testing.T) {
|
|||
// t.Parallel() // timing test. no parallel
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
|
@ -195,7 +195,7 @@ func TestCheckMonitor_LimitOutput(t *testing.T) {
|
|||
t.Parallel()
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckMonitor{
|
||||
|
@ -354,7 +354,7 @@ func TestCheckHTTP(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
|
@ -397,7 +397,7 @@ func TestCheckHTTP_Proxied(t *testing.T) {
|
|||
notif := mock.NewNotify()
|
||||
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckHTTP{
|
||||
|
@ -433,7 +433,7 @@ func TestCheckHTTP_NotProxied(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckHTTP{
|
||||
|
@ -558,7 +558,7 @@ func TestCheckMaxOutputSize(t *testing.T) {
|
|||
Interval: 2 * time.Millisecond,
|
||||
Logger: logger,
|
||||
OutputMaxSize: maxOutputSize,
|
||||
StatusHandler: NewStatusHandler(notif, logger, 0, 0),
|
||||
StatusHandler: NewStatusHandler(notif, logger, 0, 0, 0),
|
||||
}
|
||||
|
||||
check.Start()
|
||||
|
@ -586,7 +586,7 @@ func TestCheckHTTPTimeout(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
|
||||
cid := structs.NewCheckID("bar", nil)
|
||||
|
||||
|
@ -659,7 +659,7 @@ func TestCheckHTTPBody(t *testing.T) {
|
|||
Timeout: timeout,
|
||||
Interval: 2 * time.Millisecond,
|
||||
Logger: logger,
|
||||
StatusHandler: NewStatusHandler(notif, logger, 0, 0),
|
||||
StatusHandler: NewStatusHandler(notif, logger, 0, 0, 0),
|
||||
}
|
||||
check.Start()
|
||||
defer check.Stop()
|
||||
|
@ -690,7 +690,7 @@ func TestCheckHTTP_disablesKeepAlives(t *testing.T) {
|
|||
HTTP: "http://foo.bar/baz",
|
||||
Interval: 10 * time.Second,
|
||||
Logger: logger,
|
||||
StatusHandler: NewStatusHandler(notif, logger, 0, 0),
|
||||
StatusHandler: NewStatusHandler(notif, logger, 0, 0, 0),
|
||||
}
|
||||
|
||||
check.Start()
|
||||
|
@ -725,7 +725,7 @@ func TestCheckHTTP_TLS_SkipVerify(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
|
||||
cid := structs.NewCheckID("skipverify_true", nil)
|
||||
check := &CheckHTTP{
|
||||
|
@ -767,7 +767,7 @@ func TestCheckHTTP_TLS_BadVerify(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("skipverify_false", nil)
|
||||
|
||||
check := &CheckHTTP{
|
||||
|
@ -819,7 +819,7 @@ func mockTCPServer(network string) net.Listener {
|
|||
func expectTCPStatus(t *testing.T, tcp string, status string) {
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckTCP{
|
||||
|
@ -846,13 +846,12 @@ func TestStatusHandlerUpdateStatusAfterConsecutiveChecksThresholdIsReached(t *te
|
|||
cid := structs.NewCheckID("foo", nil)
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 2, 3)
|
||||
statusHandler := NewStatusHandler(notif, logger, 2, 2, 3)
|
||||
|
||||
// Set the initial status to passing after a single success
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
// Status should become critical after 3 failed checks only
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
// Status should still be passing after 1 failed check only
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
|
@ -860,10 +859,19 @@ func TestStatusHandlerUpdateStatusAfterConsecutiveChecksThresholdIsReached(t *te
|
|||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
|
||||
// Status should become warning after 2 failed checks only
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 2, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
// Status should become critical after 4 failed checks only
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthCritical, notif.State(cid))
|
||||
})
|
||||
|
||||
|
@ -871,14 +879,14 @@ func TestStatusHandlerUpdateStatusAfterConsecutiveChecksThresholdIsReached(t *te
|
|||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 2, notif.Updates(cid))
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthCritical, notif.State(cid))
|
||||
})
|
||||
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, 4, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
}
|
||||
|
@ -888,17 +896,18 @@ func TestStatusHandlerResetCountersOnNonIdenticalsConsecutiveChecks(t *testing.T
|
|||
cid := structs.NewCheckID("foo", nil)
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 2, 3)
|
||||
statusHandler := NewStatusHandler(notif, logger, 2, 2, 3)
|
||||
|
||||
// Set the initial status to passing after a single success
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
// Status should remain passing after FAIL PASS FAIL FAIL sequence
|
||||
// Status should remain passing after FAIL PASS FAIL PASS FAIL sequence
|
||||
// Although we have 3 FAILS, they are not consecutive
|
||||
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
|
@ -906,11 +915,19 @@ func TestStatusHandlerResetCountersOnNonIdenticalsConsecutiveChecks(t *testing.T
|
|||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
|
||||
// Critical after a 3rd consecutive FAIL
|
||||
// Warning after a 2nd consecutive FAIL
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 2, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
// Critical after a 3rd consecutive FAIL
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthCritical, notif.State(cid))
|
||||
})
|
||||
|
||||
|
@ -920,19 +937,137 @@ func TestStatusHandlerResetCountersOnNonIdenticalsConsecutiveChecks(t *testing.T
|
|||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 2, notif.Updates(cid))
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthCritical, notif.State(cid))
|
||||
})
|
||||
|
||||
// Passing after a 2nd consecutive PASS
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 4, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
}
|
||||
|
||||
func TestStatusHandlerWarningAndCriticalThresholdsTheSameSetsCritical(t *testing.T) {
|
||||
t.Parallel()
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 2, 3, 3)
|
||||
|
||||
// Set the initial status to passing after a single success
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
// Status should remain passing after FAIL FAIL sequence
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 1, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
|
||||
// Critical and not Warning after a 3rd consecutive FAIL
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 2, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthCritical, notif.State(cid))
|
||||
})
|
||||
|
||||
// Passing after consecutive PASS PASS sequence
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
}
|
||||
|
||||
func TestStatusHandlerMaintainWarningStatusWhenCheckIsFlapping(t *testing.T) {
|
||||
t.Parallel()
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 3, 3, 5)
|
||||
|
||||
// Set the initial status to passing after a single success.
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
// Status should remain passing after a FAIL FAIL sequence.
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 1, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
|
||||
// Warning after a 3rd consecutive FAIL.
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 2, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
// Status should remain passing after PASS FAIL FAIL FAIL PASS FAIL FAIL FAIL PASS sequence.
|
||||
// Although we have 6 FAILS, they are not consecutive.
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
// The status gets updated due to failuresCounter being reset
|
||||
// but the status itself remains as Warning.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
// Status doesn't change, but the state update is triggered.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 4, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
// Status should change only after 5 consecutive FAIL updates.
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
// The status doesn't change, but a status update is triggered.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 5, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
// The status doesn't change, but a status update is triggered.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 6, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
// The FailuresBeforeCritical threshold is finally breached.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 7, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthCritical, notif.State(cid))
|
||||
})
|
||||
}
|
||||
|
||||
func TestCheckTCPCritical(t *testing.T) {
|
||||
t.Parallel()
|
||||
var (
|
||||
|
@ -992,7 +1127,7 @@ func TestCheckH2PING(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
tlsCfg := &api.TLSConfig{
|
||||
InsecureSkipVerify: true,
|
||||
|
@ -1044,7 +1179,7 @@ func TestCheckH2PING_TLS_BadVerify(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
tlsCfg := &api.TLSConfig{}
|
||||
tlsClientCfg, err := api.SetupTLSConfig(tlsCfg)
|
||||
|
@ -1085,7 +1220,7 @@ func TestCheckH2PINGInvalidListener(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
tlsCfg := &api.TLSConfig{
|
||||
InsecureSkipVerify: true,
|
||||
|
@ -1388,7 +1523,7 @@ func TestCheck_Docker(t *testing.T) {
|
|||
|
||||
notif, upd := mock.NewNotifyChan()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
id := structs.NewCheckID("chk", nil)
|
||||
|
||||
check := &CheckDocker{
|
||||
|
|
|
@ -113,7 +113,7 @@ func TestGRPC_Proxied(t *testing.T) {
|
|||
Output: ioutil.Discard,
|
||||
})
|
||||
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckGRPC{
|
||||
|
@ -147,7 +147,7 @@ func TestGRPC_NotProxied(t *testing.T) {
|
|||
Output: ioutil.Discard,
|
||||
})
|
||||
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckGRPC{
|
||||
|
|
|
@ -787,7 +787,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
			return RuntimeConfig{}, fmt.Errorf("config_entries.bootstrap[%d]: %s", i, err)
		}
		if err := entry.Validate(); err != nil {
			return RuntimeConfig{}, fmt.Errorf("config_entries.bootstrap[%d]: %s", i, err)
			return RuntimeConfig{}, fmt.Errorf("config_entries.bootstrap[%d]: %w", i, err)
		}
		configEntries = append(configEntries, entry)
	}

@ -1415,6 +1415,12 @@ func (b *builder) validate(rt RuntimeConfig) error {
			return fmt.Errorf("service %q: %s", s.Name, err)
		}
	}
	// Check for errors in the node check definitions
	for _, c := range rt.Checks {
		if err := c.CheckType().Validate(); err != nil {
			return fmt.Errorf("check %q: %w", c.Name, err)
		}
	}

	// Validate the given Connect CA provider config
	validCAProviders := map[string]bool{

@ -1584,6 +1590,7 @@ func (b *builder) checkVal(v *CheckDefinition) *structs.CheckDefinition {
		TTL:                            b.durationVal(fmt.Sprintf("check[%s].ttl", id), v.TTL),
		SuccessBeforePassing:           intVal(v.SuccessBeforePassing),
		FailuresBeforeCritical:         intVal(v.FailuresBeforeCritical),
		FailuresBeforeWarning:          intValWithDefault(v.FailuresBeforeWarning, intVal(v.FailuresBeforeCritical)),
		H2PING:                         stringVal(v.H2PING),
		DeregisterCriticalServiceAfter: b.durationVal(fmt.Sprintf("check[%s].deregister_critical_service_after", id), v.DeregisterCriticalServiceAfter),
		OutputMaxSize:                  intValWithDefault(v.OutputMaxSize, checks.DefaultBufSize),
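intValWithDefault, used above for the new field, resolves an optional pointer-valued config field to a concrete int with a fallback when the field is unset; the sketch below shows that pattern and why existing configs keep their old behavior (the helpers' exact definitions in Consul may differ slightly):

```go
package main

import "fmt"

// intVal returns the value of an optional int field, or 0 when it is unset.
func intVal(v *int) int {
	if v == nil {
		return 0
	}
	return *v
}

// intValWithDefault returns the value of an optional int field, or the
// supplied default when the field is unset.
func intValWithDefault(v *int, defaultVal int) int {
	if v == nil {
		return defaultVal
	}
	return *v
}

func main() {
	critical := 3
	var warning *int // failures_before_warning not present in the config

	// The builder falls back to failures_before_critical, so a config that
	// never mentions the new key behaves exactly as it did before.
	fmt.Println(intValWithDefault(warning, intVal(&critical))) // 3
}
```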
@ -424,6 +424,7 @@ type CheckDefinition struct {
	TTL                            *string `mapstructure:"ttl"`
	H2PING                         *string `mapstructure:"h2ping"`
	SuccessBeforePassing           *int    `mapstructure:"success_before_passing"`
	FailuresBeforeWarning          *int    `mapstructure:"failures_before_warning"`
	FailuresBeforeCritical         *int    `mapstructure:"failures_before_critical"`
	DeregisterCriticalServiceAfter *string `mapstructure:"deregister_critical_service_after" alias:"deregistercriticalserviceafter"`
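The new field is decoded through a mapstructure tag like its neighbours. A small sketch of how the failures_before_warning key would land in the pointer field, using the mapstructure library directly with a trimmed stand-in for CheckDefinition (Consul's real config loader goes through its HCL/JSON pipeline first):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// checkDef is a trimmed stand-in for CheckDefinition with only the two
// threshold fields; the mapstructure tags match the ones in the diff.
type checkDef struct {
	FailuresBeforeWarning  *int `mapstructure:"failures_before_warning"`
	FailuresBeforeCritical *int `mapstructure:"failures_before_critical"`
}

func main() {
	// What a decoded check stanza might look like after JSON/HCL parsing.
	raw := map[string]interface{}{
		"failures_before_warning":  2,
		"failures_before_critical": 5,
	}

	var def checkDef
	if err := mapstructure.Decode(raw, &def); err != nil {
		panic(err)
	}
	fmt.Println(*def.FailuresBeforeWarning, *def.FailuresBeforeCritical) // 2 5
}
```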
@ -434,6 +434,9 @@ type RuntimeConfig struct {
	//     tls_skip_verify = (true|false)
	//     timeout = "duration"
	//     ttl = "duration"
	//     success_before_passing = int
	//     failures_before_warning = int
	//     failures_before_critical = int
	//     deregister_critical_service_after = "duration"
	//   },
	//   ...
@ -2330,17 +2330,17 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
`-data-dir=` + dataDir,
|
||||
},
|
||||
json: []string{
|
||||
`{ "check": { "name": "a", "args": ["/bin/true"] } }`,
|
||||
`{ "check": { "name": "b", "args": ["/bin/false"] } }`,
|
||||
`{ "check": { "name": "a", "args": ["/bin/true"], "interval": "1s" } }`,
|
||||
`{ "check": { "name": "b", "args": ["/bin/false"], "interval": "1s" } }`,
|
||||
},
|
||||
hcl: []string{
|
||||
`check = { name = "a" args = ["/bin/true"] }`,
|
||||
`check = { name = "b" args = ["/bin/false"] }`,
|
||||
`check = { name = "a" args = ["/bin/true"] interval = "1s"}`,
|
||||
`check = { name = "b" args = ["/bin/false"] interval = "1s" }`,
|
||||
},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.Checks = []*structs.CheckDefinition{
|
||||
{Name: "a", ScriptArgs: []string{"/bin/true"}, OutputMaxSize: checks.DefaultBufSize},
|
||||
{Name: "b", ScriptArgs: []string{"/bin/false"}, OutputMaxSize: checks.DefaultBufSize},
|
||||
{Name: "a", ScriptArgs: []string{"/bin/true"}, OutputMaxSize: checks.DefaultBufSize, Interval: time.Second},
|
||||
{Name: "b", ScriptArgs: []string{"/bin/false"}, OutputMaxSize: checks.DefaultBufSize, Interval: time.Second},
|
||||
}
|
||||
rt.DataDir = dataDir
|
||||
},
|
||||
|
@ -2351,14 +2351,14 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
`-data-dir=` + dataDir,
|
||||
},
|
||||
json: []string{
|
||||
`{ "check": { "name": "a", "grpc": "localhost:12345/foo", "grpc_use_tls": true } }`,
|
||||
`{ "check": { "name": "a", "grpc": "localhost:12345/foo", "grpc_use_tls": true, "interval": "1s" } }`,
|
||||
},
|
||||
hcl: []string{
|
||||
`check = { name = "a" grpc = "localhost:12345/foo", grpc_use_tls = true }`,
|
||||
`check = { name = "a" grpc = "localhost:12345/foo", grpc_use_tls = true interval = "1s" }`,
|
||||
},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.Checks = []*structs.CheckDefinition{
|
||||
{Name: "a", GRPC: "localhost:12345/foo", GRPCUseTLS: true, OutputMaxSize: checks.DefaultBufSize},
|
||||
{Name: "a", GRPC: "localhost:12345/foo", GRPCUseTLS: true, OutputMaxSize: checks.DefaultBufSize, Interval: time.Second},
|
||||
}
|
||||
rt.DataDir = dataDir
|
||||
},
|
||||
|
@ -2478,7 +2478,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
"name": "y",
|
||||
"DockerContainerID": "z",
|
||||
"DeregisterCriticalServiceAfter": "10s",
|
||||
"ScriptArgs": ["a", "b"]
|
||||
"ScriptArgs": ["a", "b"],
|
||||
"Interval": "2s"
|
||||
}
|
||||
}
|
||||
}`,
|
||||
|
@ -2500,6 +2501,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
DockerContainerID = "z"
|
||||
DeregisterCriticalServiceAfter = "10s"
|
||||
ScriptArgs = ["a", "b"]
|
||||
Interval = "2s"
|
||||
}
|
||||
}`,
|
||||
},
|
||||
|
@ -2517,12 +2519,13 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
EnableTagOverride: true,
|
||||
Checks: []*structs.CheckType{
|
||||
{
|
||||
CheckID: types.CheckID("x"),
|
||||
CheckID: "x",
|
||||
Name: "y",
|
||||
DockerContainerID: "z",
|
||||
DeregisterCriticalServiceAfter: 10 * time.Second,
|
||||
ScriptArgs: []string{"a", "b"},
|
||||
OutputMaxSize: checks.DefaultBufSize,
|
||||
Interval: 2 * time.Second,
|
||||
},
|
||||
},
|
||||
Weights: &structs.Weights{
|
||||
|
@ -5299,7 +5302,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "bdeb5f6a",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 1813 * time.Second,
|
||||
TTL: 21743 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 14232 * time.Second,
|
||||
},
|
||||
{
|
||||
|
@ -5326,7 +5328,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "6adc3bfb",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 18506 * time.Second,
|
||||
TTL: 31006 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 2366 * time.Second,
|
||||
},
|
||||
{
|
||||
|
@ -5353,7 +5354,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "7BdnzBYk",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 5954 * time.Second,
|
||||
TTL: 30044 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 13209 * time.Second,
|
||||
},
|
||||
},
|
||||
|
@ -5559,7 +5559,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "4f191d4F",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 38333 * time.Second,
|
||||
TTL: 57201 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 44214 * time.Second,
|
||||
},
|
||||
},
|
||||
|
@ -5611,30 +5610,14 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "f43ouY7a",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 34738 * time.Second,
|
||||
TTL: 22773 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 84282 * time.Second,
|
||||
},
|
||||
&structs.CheckType{
|
||||
CheckID: "UHsDeLxG",
|
||||
Name: "PQSaPWlT",
|
||||
Notes: "jKChDOdl",
|
||||
Status: "5qFz6OZn",
|
||||
ScriptArgs: []string{"NMtYWlT9", "vj74JXsm"},
|
||||
HTTP: "1LBDJhw4",
|
||||
Header: map[string][]string{
|
||||
"cXPmnv1M": {"imDqfaBx", "NFxZ1bQe"},
|
||||
"vr7wY7CS": {"EtCoNPPL", "9vAarJ5s"},
|
||||
},
|
||||
Method: "wzByP903",
|
||||
Body: "4I8ucZgZ",
|
||||
CheckID: "UHsDeLxG",
|
||||
Name: "PQSaPWlT",
|
||||
Notes: "jKChDOdl",
|
||||
Status: "5qFz6OZn",
|
||||
OutputMaxSize: checks.DefaultBufSize,
|
||||
TCP: "2exjZIGE",
|
||||
H2PING: "jTDuR1DC",
|
||||
Interval: 5656 * time.Second,
|
||||
DockerContainerID: "5tDBWpfA",
|
||||
Shell: "rlTpLM8s",
|
||||
TLSServerName: "sOv5WTtp",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 4868 * time.Second,
|
||||
TTL: 11222 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 68482 * time.Second,
|
||||
|
@ -5770,7 +5753,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "axw5QPL5",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 18913 * time.Second,
|
||||
TTL: 44743 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 8482 * time.Second,
|
||||
},
|
||||
&structs.CheckType{
|
||||
|
@ -5795,7 +5777,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "7uwWOnUS",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 38282 * time.Second,
|
||||
TTL: 1181 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 4992 * time.Second,
|
||||
},
|
||||
&structs.CheckType{
|
||||
|
@ -5820,7 +5801,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "ECSHk8WF",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 38483 * time.Second,
|
||||
TTL: 10943 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 68787 * time.Second,
|
||||
},
|
||||
},
|
||||
|
|
|
@ -94,6 +94,7 @@
|
|||
"DeregisterCriticalServiceAfter": "0s",
|
||||
"DockerContainerID": "",
|
||||
"EnterpriseMeta": {},
|
||||
"FailuresBeforeWarning": 0,
|
||||
"FailuresBeforeCritical": 0,
|
||||
"GRPC": "",
|
||||
"GRPCUseTLS": false,
|
||||
|
@ -295,6 +296,7 @@
|
|||
"CheckID": "",
|
||||
"DeregisterCriticalServiceAfter": "0s",
|
||||
"DockerContainerID": "",
|
||||
"FailuresBeforeWarning": 0,
|
||||
"FailuresBeforeCritical": 0,
|
||||
"GRPC": "",
|
||||
"GRPCUseTLS": false,
|
||||
|
@ -416,4 +418,4 @@
|
|||
"Watches": [],
|
||||
"XDSAddrs": [],
|
||||
"XDSPort": 0
|
||||
}
|
||||
}
|
||||
|
|
|
@ -117,7 +117,6 @@ check = {
|
|||
tls_server_name = "7BdnzBYk"
|
||||
tls_skip_verify = true
|
||||
timeout = "5954s"
|
||||
ttl = "30044s"
|
||||
deregister_critical_service_after = "13209s"
|
||||
},
|
||||
checks = [
|
||||
|
@ -145,7 +144,6 @@ checks = [
|
|||
tls_server_name = "bdeb5f6a"
|
||||
tls_skip_verify = true
|
||||
timeout = "1813s"
|
||||
ttl = "21743s"
|
||||
deregister_critical_service_after = "14232s"
|
||||
},
|
||||
{
|
||||
|
@ -172,7 +170,6 @@ checks = [
|
|||
tls_server_name = "6adc3bfb"
|
||||
tls_skip_verify = true
|
||||
timeout = "18506s"
|
||||
ttl = "31006s"
|
||||
deregister_critical_service_after = "2366s"
|
||||
}
|
||||
]
|
||||
|
@ -389,7 +386,6 @@ service = {
|
|||
tls_server_name = "ECSHk8WF"
|
||||
tls_skip_verify = true
|
||||
timeout = "38483s"
|
||||
ttl = "10943s"
|
||||
deregister_critical_service_after = "68787s"
|
||||
}
|
||||
checks = [
|
||||
|
@ -415,7 +411,6 @@ service = {
|
|||
tls_server_name = "axw5QPL5"
|
||||
tls_skip_verify = true
|
||||
timeout = "18913s"
|
||||
ttl = "44743s"
|
||||
deregister_critical_service_after = "8482s"
|
||||
},
|
||||
{
|
||||
|
@ -440,7 +435,6 @@ service = {
|
|||
tls_server_name = "7uwWOnUS"
|
||||
tls_skip_verify = true
|
||||
timeout = "38282s"
|
||||
ttl = "1181s"
|
||||
deregister_critical_service_after = "4992s"
|
||||
}
|
||||
]
|
||||
|
@ -479,7 +473,6 @@ services = [
|
|||
tls_server_name = "4f191d4F"
|
||||
tls_skip_verify = true
|
||||
timeout = "38333s"
|
||||
ttl = "57201s"
|
||||
deregister_critical_service_after = "44214s"
|
||||
}
|
||||
connect {
|
||||
|
@ -521,7 +514,6 @@ services = [
|
|||
tls_server_name = "f43ouY7a"
|
||||
tls_skip_verify = true
|
||||
timeout = "34738s"
|
||||
ttl = "22773s"
|
||||
deregister_critical_service_after = "84282s"
|
||||
},
|
||||
{
|
||||
|
@ -529,22 +521,7 @@ services = [
|
|||
name = "PQSaPWlT"
|
||||
notes = "jKChDOdl"
|
||||
status = "5qFz6OZn"
|
||||
args = ["NMtYWlT9", "vj74JXsm"]
|
||||
http = "1LBDJhw4"
|
||||
header = {
|
||||
"cXPmnv1M" = [ "imDqfaBx", "NFxZ1bQe" ],
|
||||
"vr7wY7CS" = [ "EtCoNPPL", "9vAarJ5s" ]
|
||||
}
|
||||
method = "wzByP903"
|
||||
body = "4I8ucZgZ"
|
||||
tcp = "2exjZIGE"
|
||||
h2ping = "jTDuR1DC"
|
||||
interval = "5656s"
|
||||
output_max_size = 4096
|
||||
docker_container_id = "5tDBWpfA"
|
||||
shell = "rlTpLM8s"
|
||||
tls_server_name = "sOv5WTtp"
|
||||
tls_skip_verify = true
|
||||
timeout = "4868s"
|
||||
ttl = "11222s"
|
||||
deregister_critical_service_after = "68482s"
|
||||
|
|
|
@ -118,7 +118,6 @@
|
|||
"tls_server_name": "7BdnzBYk",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "5954s",
|
||||
"ttl": "30044s",
|
||||
"deregister_critical_service_after": "13209s"
|
||||
},
|
||||
"checks": [
|
||||
|
@ -146,7 +145,6 @@
|
|||
"tls_server_name": "bdeb5f6a",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "1813s",
|
||||
"ttl": "21743s",
|
||||
"deregister_critical_service_after": "14232s"
|
||||
},
|
||||
{
|
||||
|
@ -173,7 +171,6 @@
|
|||
"tls_server_name": "6adc3bfb",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "18506s",
|
||||
"ttl": "31006s",
|
||||
"deregister_critical_service_after": "2366s"
|
||||
}
|
||||
],
|
||||
|
@ -386,7 +383,6 @@
|
|||
"tls_server_name": "ECSHk8WF",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "38483s",
|
||||
"ttl": "10943s",
|
||||
"deregister_critical_service_after": "68787s"
|
||||
},
|
||||
"checks": [
|
||||
|
@ -412,7 +408,6 @@
|
|||
"tls_server_name": "axw5QPL5",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "18913s",
|
||||
"ttl": "44743s",
|
||||
"deregister_critical_service_after": "8482s"
|
||||
},
|
||||
{
|
||||
|
@ -437,7 +432,6 @@
|
|||
"tls_server_name": "7uwWOnUS",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "38282s",
|
||||
"ttl": "1181s",
|
||||
"deregister_critical_service_after": "4992s"
|
||||
}
|
||||
],
|
||||
|
@ -476,7 +470,6 @@
|
|||
"tls_server_name": "4f191d4F",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "38333s",
|
||||
"ttl": "57201s",
|
||||
"deregister_critical_service_after": "44214s"
|
||||
},
|
||||
"connect": {
|
||||
|
@ -518,7 +511,6 @@
|
|||
"tls_server_name": "f43ouY7a",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "34738s",
|
||||
"ttl": "22773s",
|
||||
"deregister_critical_service_after": "84282s"
|
||||
},
|
||||
{
|
||||
|
@ -526,22 +518,7 @@
|
|||
"name": "PQSaPWlT",
|
||||
"notes": "jKChDOdl",
|
||||
"status": "5qFz6OZn",
|
||||
"args": ["NMtYWlT9", "vj74JXsm"],
|
||||
"http": "1LBDJhw4",
|
||||
"header": {
|
||||
"cXPmnv1M": [ "imDqfaBx", "NFxZ1bQe" ],
|
||||
"vr7wY7CS": [ "EtCoNPPL", "9vAarJ5s" ]
|
||||
},
|
||||
"method": "wzByP903",
|
||||
"body": "4I8ucZgZ",
|
||||
"tcp": "2exjZIGE",
|
||||
"h2ping": "jTDuR1DC",
|
||||
"interval": "5656s",
|
||||
"output_max_size": 4096,
|
||||
"docker_container_id": "5tDBWpfA",
|
||||
"shell": "rlTpLM8s",
|
||||
"tls_server_name": "sOv5WTtp",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "4868s",
|
||||
"ttl": "11222s",
|
||||
"deregister_critical_service_after": "68482s"
|
||||
|
|
|
@ -757,10 +757,12 @@ func (r *ACLResolver) filterPoliciesByScope(policies structs.ACLPolicies) struct
|
|||
}
|
||||
|
||||
func (r *ACLResolver) resolvePoliciesForIdentity(identity structs.ACLIdentity) (structs.ACLPolicies, error) {
|
||||
policyIDs := identity.PolicyIDs()
|
||||
roleIDs := identity.RoleIDs()
|
||||
serviceIdentities := identity.ServiceIdentityList()
|
||||
nodeIdentities := identity.NodeIdentityList()
|
||||
var (
|
||||
policyIDs = identity.PolicyIDs()
|
||||
roleIDs = identity.RoleIDs()
|
||||
serviceIdentities = identity.ServiceIdentityList()
|
||||
nodeIdentities = identity.NodeIdentityList()
|
||||
)
|
||||
|
||||
if len(policyIDs) == 0 && len(serviceIdentities) == 0 && len(roleIDs) == 0 && len(nodeIdentities) == 0 {
|
||||
policy := identity.EmbeddedPolicy()
|
||||
|
@ -794,7 +796,7 @@ func (r *ACLResolver) resolvePoliciesForIdentity(identity structs.ACLIdentity) (
|
|||
|
||||
// Generate synthetic policies for all service identities in effect.
|
||||
syntheticPolicies := r.synthesizePoliciesForServiceIdentities(serviceIdentities, identity.EnterpriseMetadata())
|
||||
syntheticPolicies = append(syntheticPolicies, r.synthesizePoliciesForNodeIdentities(nodeIdentities)...)
|
||||
syntheticPolicies = append(syntheticPolicies, r.synthesizePoliciesForNodeIdentities(nodeIdentities, identity.EnterpriseMetadata())...)
|
||||
|
||||
// For the new ACLs policy replication is mandatory for correct operation on servers. Therefore
|
||||
// we only attempt to resolve policies locally
|
||||
|
@ -805,6 +807,7 @@ func (r *ACLResolver) resolvePoliciesForIdentity(identity structs.ACLIdentity) (
|
|||
|
||||
policies = append(policies, syntheticPolicies...)
|
||||
filtered := r.filterPoliciesByScope(policies)
|
||||
// TODO(partitions,acls): filter these by the partition/namespace of the token trying to use them?
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
|
@ -821,14 +824,14 @@ func (r *ACLResolver) synthesizePoliciesForServiceIdentities(serviceIdentities [
|
|||
return syntheticPolicies
|
||||
}
|
||||
|
||||
func (r *ACLResolver) synthesizePoliciesForNodeIdentities(nodeIdentities []*structs.ACLNodeIdentity) []*structs.ACLPolicy {
|
||||
func (r *ACLResolver) synthesizePoliciesForNodeIdentities(nodeIdentities []*structs.ACLNodeIdentity, entMeta *structs.EnterpriseMeta) []*structs.ACLPolicy {
|
||||
if len(nodeIdentities) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
syntheticPolicies := make([]*structs.ACLPolicy, 0, len(nodeIdentities))
|
||||
for _, n := range nodeIdentities {
|
||||
syntheticPolicies = append(syntheticPolicies, n.SyntheticPolicy())
|
||||
syntheticPolicies = append(syntheticPolicies, n.SyntheticPolicy(entMeta))
|
||||
}
|
||||
|
||||
return syntheticPolicies
|
||||
|
@ -1242,6 +1245,7 @@ func (r *ACLResolver) ResolveTokenToIdentityAndAuthorizer(token string) (structs
|
|||
}
|
||||
|
||||
if r.delegate.UseLegacyACLs() {
|
||||
// TODO(partitions,acls): do we have to care about legacy acls?
|
||||
identity, authorizer, err := r.resolveTokenLegacy(token)
|
||||
r.handleACLDisabledError(err)
|
||||
return identity, authorizer, err
|
||||
|
|
|
@ -3,6 +3,8 @@
|
|||
package consul
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
@ -26,3 +28,10 @@ type EnterpriseACLResolverTestDelegate struct{}
|
|||
func (d *EnterpriseACLResolverTestDelegate) RPC(string, interface{}, interface{}) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (d *EnterpriseACLResolverTestDelegate) UseTestLocalData(data []interface{}) {
|
||||
if len(data) > 0 {
|
||||
panic(fmt.Sprintf("unexpected data type: %T", data[0]))
|
||||
}
|
||||
}
|
||||
func (d *EnterpriseACLResolverTestDelegate) UseDefaultData() {}
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -81,19 +82,6 @@ func resolveToken(t *testing.T, r *ACLResolver, token string) acl.Authorizer {
|
|||
|
||||
func testIdentityForToken(token string) (bool, structs.ACLIdentity, error) {
|
||||
switch token {
|
||||
case "missing-policy":
|
||||
return true, &structs.ACLToken{
|
||||
AccessorID: "435a75af-1763-4980-89f4-f0951dda53b4",
|
||||
SecretID: "b1b6be70-ed2e-4c80-8495-bdb3db110b1e",
|
||||
Policies: []structs.ACLTokenPolicyLink{
|
||||
{
|
||||
ID: "not-found",
|
||||
},
|
||||
{
|
||||
ID: "acl-ro",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
case "missing-role":
|
||||
return true, &structs.ACLToken{
|
||||
AccessorID: "435a75af-1763-4980-89f4-f0951dda53b4",
|
||||
|
@ -107,29 +95,6 @@ func testIdentityForToken(token string) (bool, structs.ACLIdentity, error) {
|
|||
},
|
||||
},
|
||||
}, nil
|
||||
case "missing-policy-on-role":
|
||||
return true, &structs.ACLToken{
|
||||
AccessorID: "435a75af-1763-4980-89f4-f0951dda53b4",
|
||||
SecretID: "b1b6be70-ed2e-4c80-8495-bdb3db110b1e",
|
||||
Roles: []structs.ACLTokenRoleLink{
|
||||
{
|
||||
ID: "missing-policy",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
case "legacy-management":
|
||||
return true, &structs.ACLToken{
|
||||
AccessorID: "d109a033-99d1-47e2-a711-d6593373a973",
|
||||
SecretID: "415cd1e1-1493-4fb4-827d-d762ed9cfe7c",
|
||||
Type: structs.ACLTokenTypeManagement,
|
||||
}, nil
|
||||
case "legacy-client":
|
||||
return true, &structs.ACLToken{
|
||||
AccessorID: "b7375838-b104-4a25-b457-329d939bf257",
|
||||
SecretID: "03f49328-c23c-4b26-92a2-3b898332400d",
|
||||
Type: structs.ACLTokenTypeClient,
|
||||
Rules: `service "" { policy = "read" }`,
|
||||
}, nil
|
||||
case "found":
|
||||
return true, &structs.ACLToken{
|
||||
AccessorID: "5f57c1f6-6a89-4186-9445-531b316e01df",
|
||||
|
@ -173,58 +138,6 @@ func testIdentityForToken(token string) (bool, structs.ACLIdentity, error) {
|
|||
},
|
||||
},
|
||||
}, nil
|
||||
case "found-synthetic-policy-1":
|
||||
return true, &structs.ACLToken{
|
||||
AccessorID: "f6c5a5fb-4da4-422b-9abf-2c942813fc71",
|
||||
SecretID: "55cb7d69-2bea-42c3-a68f-2a1443d2abbc",
|
||||
ServiceIdentities: []*structs.ACLServiceIdentity{
|
||||
{
|
||||
ServiceName: "service1",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
case "found-synthetic-policy-2":
|
||||
return true, &structs.ACLToken{
|
||||
AccessorID: "7c87dfad-be37-446e-8305-299585677cb5",
|
||||
SecretID: "dfca9676-ac80-453a-837b-4c0cf923473c",
|
||||
ServiceIdentities: []*structs.ACLServiceIdentity{
|
||||
{
|
||||
ServiceName: "service2",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
case "found-synthetic-policy-3":
|
||||
return true, &structs.ACLToken{
|
||||
AccessorID: "bebccc92-3987-489d-84c2-ffd00d93ef93",
|
||||
SecretID: "de70f2e2-69d9-4e88-9815-f91c03c6bcb1",
|
||||
NodeIdentities: []*structs.ACLNodeIdentity{
|
||||
{
|
||||
NodeName: "test-node1",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
// as the resolver is in dc1 this identity should be ignored
|
||||
{
|
||||
NodeName: "test-node-dc2",
|
||||
Datacenter: "dc2",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
case "found-synthetic-policy-4":
|
||||
return true, &structs.ACLToken{
|
||||
AccessorID: "359b9927-25fd-46b9-bd14-3470f848ec65",
|
||||
SecretID: "83c4d500-847d-49f7-8c08-0483f6b4156e",
|
||||
NodeIdentities: []*structs.ACLNodeIdentity{
|
||||
{
|
||||
NodeName: "test-node2",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
// as the resolver is in dc1 this identity should be ignored
|
||||
{
|
||||
NodeName: "test-node-dc2",
|
||||
Datacenter: "dc2",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
case "found-role-node-identity":
|
||||
return true, &structs.ACLToken{
|
||||
AccessorID: "f3f47a09-de29-4c57-8f54-b65a9be79641",
|
||||
|
@ -291,18 +204,8 @@ func testIdentityForToken(token string) (bool, structs.ACLIdentity, error) {
|
|||
},
|
||||
},
|
||||
}, nil
|
||||
case anonymousToken:
|
||||
return true, &structs.ACLToken{
|
||||
AccessorID: "00000000-0000-0000-0000-000000000002",
|
||||
SecretID: anonymousToken,
|
||||
Policies: []structs.ACLTokenPolicyLink{
|
||||
{
|
||||
ID: "node-wr",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
default:
|
||||
return testIdentityForTokenEnterprise(token)
|
||||
return true, nil, acl.ErrNotFound
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -377,7 +280,7 @@ func testPolicyForID(policyID string) (bool, *structs.ACLPolicy, error) {
|
|||
p.SetHash(false)
|
||||
return true, p, nil
|
||||
default:
|
||||
return testPolicyForIDEnterprise(policyID)
|
||||
return true, nil, acl.ErrNotFound
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -407,21 +310,6 @@ func testRoleForID(roleID string) (bool, *structs.ACLRole, error) {
|
|||
},
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
}, nil
|
||||
case "missing-policy":
|
||||
return true, &structs.ACLRole{
|
||||
ID: "missing-policy",
|
||||
Name: "missing-policy",
|
||||
Description: "missing-policy",
|
||||
Policies: []structs.ACLRolePolicyLink{
|
||||
{
|
||||
ID: "not-found",
|
||||
},
|
||||
{
|
||||
ID: "acl-ro",
|
||||
},
|
||||
},
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
}, nil
|
||||
case "found":
|
||||
return true, &structs.ACLRole{
|
||||
ID: "found",
|
||||
|
@ -528,7 +416,7 @@ func testRoleForID(roleID string) (bool, *structs.ACLRole, error) {
|
|||
},
|
||||
}, nil
|
||||
default:
|
||||
return testRoleForIDEnterprise(roleID)
|
||||
return true, nil, acl.ErrNotFound
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -549,6 +437,13 @@ type ACLResolverTestDelegate struct {
|
|||
policyResolveFn func(*structs.ACLPolicyBatchGetRequest, *structs.ACLPolicyBatchResponse) error
|
||||
roleResolveFn func(*structs.ACLRoleBatchGetRequest, *structs.ACLRoleBatchResponse) error
|
||||
|
||||
// testTokens is used by plainTokenReadFn if not nil
|
||||
testTokens map[string]*structs.ACLToken
|
||||
// testPolicies is used by plainPolicyResolveFn if not nil
|
||||
testPolicies map[string]*structs.ACLPolicy
|
||||
// testRoles is used by plainRoleResolveFn if not nil
|
||||
testRoles map[string]*structs.ACLRole
|
||||
|
||||
localTokenResolutions int32
|
||||
remoteTokenResolutions int32
|
||||
localPolicyResolutions int32
|
||||
|
@ -567,6 +462,51 @@ type ACLResolverTestDelegate struct {
|
|||
EnterpriseACLResolverTestDelegate
|
||||
}
|
||||
|
||||
// UseTestLocalData will force delegate-local maps to be used in lieu of the
|
||||
// global factory functions.
|
||||
func (d *ACLResolverTestDelegate) UseTestLocalData(data []interface{}) {
|
||||
d.testTokens = make(map[string]*structs.ACLToken)
|
||||
d.testPolicies = make(map[string]*structs.ACLPolicy)
|
||||
d.testRoles = make(map[string]*structs.ACLRole)
|
||||
|
||||
var rest []interface{}
|
||||
for _, item := range data {
|
||||
switch x := item.(type) {
|
||||
case *structs.ACLToken:
|
||||
d.testTokens[x.SecretID] = x
|
||||
case *structs.ACLPolicy:
|
||||
d.testPolicies[x.ID] = x
|
||||
case *structs.ACLRole:
|
||||
d.testRoles[x.ID] = x
|
||||
case string:
|
||||
parts := strings.SplitN(x, ":", 2)
|
||||
switch parts[0] {
|
||||
case "token-not-found":
|
||||
d.testTokens[parts[1]] = nil
|
||||
case "policy-not-found":
|
||||
d.testPolicies[parts[1]] = nil
|
||||
case "role-not-found":
|
||||
d.testRoles[parts[1]] = nil
|
||||
default:
|
||||
rest = append(rest, item)
|
||||
}
|
||||
default:
|
||||
rest = append(rest, item)
|
||||
}
|
||||
}
|
||||
|
||||
d.EnterpriseACLResolverTestDelegate.UseTestLocalData(rest)
|
||||
}
|
||||
|
||||
// UseDefaultData will force the global factory functions to be used instead of
|
||||
// delegate-local maps.
|
||||
func (d *ACLResolverTestDelegate) UseDefaultData() {
|
||||
d.testTokens = nil
|
||||
d.testPolicies = nil
|
||||
d.testRoles = nil
|
||||
d.EnterpriseACLResolverTestDelegate.UseDefaultData()
|
||||
}
|
||||
|
||||
func (d *ACLResolverTestDelegate) Reset() {
|
||||
d.tokenCached = false
|
||||
d.policyCached = false
|
||||
|
@ -587,6 +527,17 @@ func (d *ACLResolverTestDelegate) defaultTokenReadFn(errAfterCached error) func(
|
|||
}
|
||||
|
||||
func (d *ACLResolverTestDelegate) plainTokenReadFn(args *structs.ACLTokenGetRequest, reply *structs.ACLTokenResponse) error {
|
||||
if d.testTokens != nil {
|
||||
token, ok := d.testTokens[args.TokenID]
|
||||
if ok {
|
||||
if token == nil {
|
||||
return acl.ErrNotFound
|
||||
}
|
||||
reply.Token = token
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
_, token, err := testIdentityForToken(args.TokenID)
|
||||
if token != nil {
|
||||
reply.Token = token.(*structs.ACLToken)
|
||||
|
@ -611,9 +562,15 @@ func (d *ACLResolverTestDelegate) plainPolicyResolveFn(args *structs.ACLPolicyBa
|
|||
// TODO: and possibly return a not-found or permission-denied here
|
||||
|
||||
for _, policyID := range args.PolicyIDs {
|
||||
_, policy, _ := testPolicyForID(policyID)
|
||||
if policy != nil {
|
||||
reply.Policies = append(reply.Policies, policy)
|
||||
if d.testPolicies != nil {
|
||||
if policy := d.testPolicies[policyID]; policy != nil {
|
||||
reply.Policies = append(reply.Policies, policy)
|
||||
}
|
||||
} else {
|
||||
_, policy, _ := testPolicyForID(policyID)
|
||||
if policy != nil {
|
||||
reply.Policies = append(reply.Policies, policy)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -639,9 +596,15 @@ func (d *ACLResolverTestDelegate) plainRoleResolveFn(args *structs.ACLRoleBatchG
|
|||
// TODO: and possibly return a not-found or permission-denied here
|
||||
|
||||
for _, roleID := range args.RoleIDs {
|
||||
_, role, _ := testRoleForID(roleID)
|
||||
if role != nil {
|
||||
reply.Roles = append(reply.Roles, role)
|
||||
if d.testRoles != nil {
|
||||
if role := d.testRoles[roleID]; role != nil {
|
||||
reply.Roles = append(reply.Roles, role)
|
||||
}
|
||||
} else {
|
||||
_, role, _ := testRoleForID(roleID)
|
||||
if role != nil {
|
||||
reply.Roles = append(reply.Roles, role)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -662,6 +625,14 @@ func (d *ACLResolverTestDelegate) ResolveIdentityFromToken(token string) (bool,
|
|||
}
|
||||
|
||||
atomic.AddInt32(&d.localTokenResolutions, 1)
|
||||
if d.testTokens != nil {
|
||||
if token, ok := d.testTokens[token]; ok {
|
||||
if token != nil {
|
||||
return true, token, nil
|
||||
}
|
||||
}
|
||||
return true, nil, acl.ErrNotFound
|
||||
}
|
||||
return testIdentityForToken(token)
|
||||
}
|
||||
|
||||
|
@ -671,6 +642,14 @@ func (d *ACLResolverTestDelegate) ResolvePolicyFromID(policyID string) (bool, *s
|
|||
}
|
||||
|
||||
atomic.AddInt32(&d.localPolicyResolutions, 1)
|
||||
if d.testPolicies != nil {
|
||||
if policy, ok := d.testPolicies[policyID]; ok {
|
||||
if policy != nil {
|
||||
return true, policy, nil
|
||||
}
|
||||
}
|
||||
return true, nil, acl.ErrNotFound
|
||||
}
|
||||
return testPolicyForID(policyID)
|
||||
}
|
||||
|
||||
|
@ -680,6 +659,14 @@ func (d *ACLResolverTestDelegate) ResolveRoleFromID(roleID string) (bool, *struc
|
|||
}
|
||||
|
||||
atomic.AddInt32(&d.localRoleResolutions, 1)
|
||||
if d.testRoles != nil {
|
||||
if role, ok := d.testRoles[roleID]; ok {
|
||||
if role != nil {
|
||||
return true, role, nil
|
||||
}
|
||||
}
|
||||
return true, nil, acl.ErrNotFound
|
||||
}
|
||||
return testRoleForID(roleID)
|
||||
}
|
||||
|
||||
|
@ -1762,6 +1749,7 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
}
|
||||
|
||||
runTwiceAndReset("Missing Identity", func(t *testing.T) {
|
||||
delegate.UseTestLocalData(nil)
|
||||
authz, err := r.ResolveToken("doesn't exist")
|
||||
require.Nil(t, authz)
|
||||
require.Error(t, err)
|
||||
|
@ -1769,6 +1757,25 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
})
|
||||
|
||||
runTwiceAndReset("Missing Policy", func(t *testing.T) {
|
||||
delegate.UseTestLocalData([]interface{}{
|
||||
&structs.ACLToken{
|
||||
AccessorID: "435a75af-1763-4980-89f4-f0951dda53b4",
|
||||
SecretID: "missing-policy",
|
||||
Policies: []structs.ACLTokenPolicyLink{
|
||||
{ID: "not-found"},
|
||||
{ID: "acl-ro"},
|
||||
},
|
||||
},
|
||||
"policy-not-found:not-found",
|
||||
&structs.ACLPolicy{
|
||||
ID: "acl-ro",
|
||||
Name: "acl-ro",
|
||||
Description: "acl-ro",
|
||||
Rules: `acl = "read"`,
|
||||
Syntax: acl.SyntaxCurrent,
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
})
|
||||
authz := resolveToken(t, r, "missing-policy")
|
||||
require.NotNil(t, authz)
|
||||
require.Equal(t, acl.Allow, authz.ACLRead(nil))
|
||||
|
@ -1776,6 +1783,33 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
})
|
||||
|
||||
runTwiceAndReset("Missing Role", func(t *testing.T) {
|
||||
delegate.UseTestLocalData([]interface{}{
|
||||
&structs.ACLToken{
|
||||
AccessorID: "435a75af-1763-4980-89f4-f0951dda53b4",
|
||||
SecretID: "missing-role",
|
||||
Roles: []structs.ACLTokenRoleLink{
|
||||
{ID: "not-found"},
|
||||
{ID: "acl-ro"},
|
||||
},
|
||||
},
|
||||
"role-not-found:not-found",
|
||||
&structs.ACLRole{
|
||||
ID: "acl-ro",
|
||||
Name: "acl-ro",
|
||||
Description: "acl-ro",
|
||||
Policies: []structs.ACLRolePolicyLink{
|
||||
{ID: "acl-ro"},
|
||||
},
|
||||
},
|
||||
&structs.ACLPolicy{
|
||||
ID: "acl-ro",
|
||||
Name: "acl-ro",
|
||||
Description: "acl-ro",
|
||||
Rules: `acl = "read"`,
|
||||
Syntax: acl.SyntaxCurrent,
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
})
|
||||
authz := resolveToken(t, r, "missing-role")
|
||||
require.NotNil(t, authz)
|
||||
require.Equal(t, acl.Allow, authz.ACLRead(nil))
|
||||
|
@ -1783,6 +1817,34 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
})
|
||||
|
||||
runTwiceAndReset("Missing Policy on Role", func(t *testing.T) {
|
||||
delegate.UseTestLocalData([]interface{}{
|
||||
&structs.ACLToken{
|
||||
AccessorID: "435a75af-1763-4980-89f4-f0951dda53b4",
|
||||
SecretID: "missing-policy-on-role",
|
||||
Roles: []structs.ACLTokenRoleLink{
|
||||
{ID: "missing-policy"},
|
||||
},
|
||||
},
|
||||
&structs.ACLRole{
|
||||
ID: "missing-policy",
|
||||
Name: "missing-policy",
|
||||
Description: "missing-policy",
|
||||
Policies: []structs.ACLRolePolicyLink{
|
||||
{ID: "not-found"},
|
||||
{ID: "acl-ro"},
|
||||
},
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
"policy-not-found:not-found",
|
||||
&structs.ACLPolicy{
|
||||
ID: "acl-ro",
|
||||
Name: "acl-ro",
|
||||
Description: "acl-ro",
|
||||
Rules: `acl = "read"`,
|
||||
Syntax: acl.SyntaxCurrent,
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
})
|
||||
authz := resolveToken(t, r, "missing-policy-on-role")
|
||||
require.NotNil(t, authz)
|
||||
require.Equal(t, acl.Allow, authz.ACLRead(nil))
|
||||
|
@ -1790,6 +1852,34 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
})
|
||||
|
||||
runTwiceAndReset("Normal with Policy", func(t *testing.T) {
|
||||
delegate.UseTestLocalData([]interface{}{
|
||||
&structs.ACLToken{
|
||||
AccessorID: "5f57c1f6-6a89-4186-9445-531b316e01df",
|
||||
SecretID: "found",
|
||||
Policies: []structs.ACLTokenPolicyLink{
|
||||
{ID: "node-wr"},
|
||||
{ID: "dc2-key-wr"},
|
||||
},
|
||||
},
|
||||
&structs.ACLPolicy{
|
||||
ID: "node-wr",
|
||||
Name: "node-wr",
|
||||
Description: "node-wr",
|
||||
Rules: `node_prefix "" { policy = "write"}`,
|
||||
Syntax: acl.SyntaxCurrent,
|
||||
Datacenters: []string{"dc1"},
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
&structs.ACLPolicy{
|
||||
ID: "dc2-key-wr",
|
||||
Name: "dc2-key-wr",
|
||||
Description: "dc2-key-wr",
|
||||
Rules: `key_prefix "" { policy = "write"}`,
|
||||
Syntax: acl.SyntaxCurrent,
|
||||
Datacenters: []string{"dc2"},
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
})
|
||||
authz := resolveToken(t, r, "found")
|
||||
require.NotNil(t, authz)
|
||||
require.Equal(t, acl.Deny, authz.ACLRead(nil))
|
||||
|
@ -1797,6 +1887,42 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
})
|
||||
|
||||
runTwiceAndReset("Normal with Role", func(t *testing.T) {
|
||||
delegate.UseTestLocalData([]interface{}{
|
||||
&structs.ACLToken{
|
||||
AccessorID: "5f57c1f6-6a89-4186-9445-531b316e01df",
|
||||
SecretID: "found-role",
|
||||
Roles: []structs.ACLTokenRoleLink{
|
||||
{ID: "found"},
|
||||
},
|
||||
},
|
||||
&structs.ACLRole{
|
||||
ID: "found",
|
||||
Name: "found",
|
||||
Description: "found",
|
||||
Policies: []structs.ACLRolePolicyLink{
|
||||
{ID: "node-wr"},
|
||||
{ID: "dc2-key-wr"},
|
||||
},
|
||||
},
|
||||
&structs.ACLPolicy{
|
||||
ID: "node-wr",
|
||||
Name: "node-wr",
|
||||
Description: "node-wr",
|
||||
Rules: `node_prefix "" { policy = "write"}`,
|
||||
Syntax: acl.SyntaxCurrent,
|
||||
Datacenters: []string{"dc1"},
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
&structs.ACLPolicy{
|
||||
ID: "dc2-key-wr",
|
||||
Name: "dc2-key-wr",
|
||||
Description: "dc2-key-wr",
|
||||
Rules: `key_prefix "" { policy = "write"}`,
|
||||
Syntax: acl.SyntaxCurrent,
|
||||
Datacenters: []string{"dc2"},
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
})
|
||||
authz := resolveToken(t, r, "found-role")
|
||||
require.NotNil(t, authz)
|
||||
require.Equal(t, acl.Deny, authz.ACLRead(nil))
|
||||
|
@ -1804,6 +1930,54 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
})
|
||||
|
||||
runTwiceAndReset("Normal with Policy and Role", func(t *testing.T) {
|
||||
delegate.UseTestLocalData([]interface{}{
|
||||
&structs.ACLToken{
|
||||
AccessorID: "5f57c1f6-6a89-4186-9445-531b316e01df",
|
||||
SecretID: "found-policy-and-role",
|
||||
Policies: []structs.ACLTokenPolicyLink{
|
||||
{ID: "node-wr"},
|
||||
{ID: "dc2-key-wr"},
|
||||
},
|
||||
Roles: []structs.ACLTokenRoleLink{
|
||||
{ID: "service-ro"},
|
||||
},
|
||||
},
|
||||
&structs.ACLPolicy{
|
||||
ID: "node-wr",
|
||||
Name: "node-wr",
|
||||
Description: "node-wr",
|
||||
Rules: `node_prefix "" { policy = "write"}`,
|
||||
Syntax: acl.SyntaxCurrent,
|
||||
Datacenters: []string{"dc1"},
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
&structs.ACLPolicy{
|
||||
ID: "dc2-key-wr",
|
||||
Name: "dc2-key-wr",
|
||||
Description: "dc2-key-wr",
|
||||
Rules: `key_prefix "" { policy = "write"}`,
|
||||
Syntax: acl.SyntaxCurrent,
|
||||
Datacenters: []string{"dc2"},
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
&structs.ACLRole{
|
||||
ID: "service-ro",
|
||||
Name: "service-ro",
|
||||
Description: "service-ro",
|
||||
Policies: []structs.ACLRolePolicyLink{
|
||||
{ID: "service-ro"},
|
||||
},
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
&structs.ACLPolicy{
|
||||
ID: "service-ro",
|
||||
Name: "service-ro",
|
||||
Description: "service-ro",
|
||||
Rules: `service_prefix "" { policy = "read" }`,
|
||||
Syntax: acl.SyntaxCurrent,
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
})
|
||||
authz := resolveToken(t, r, "found-policy-and-role")
|
||||
require.NotNil(t, authz)
|
||||
require.Equal(t, acl.Deny, authz.ACLRead(nil))
|
||||
|
@ -1812,6 +1986,30 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
})
|
||||
|
||||
runTwiceAndReset("Role With Node Identity", func(t *testing.T) {
|
||||
delegate.UseTestLocalData([]interface{}{
|
||||
&structs.ACLToken{
|
||||
AccessorID: "f3f47a09-de29-4c57-8f54-b65a9be79641",
|
||||
SecretID: "found-role-node-identity",
|
||||
Roles: []structs.ACLTokenRoleLink{
|
||||
{ID: "node-identity"},
|
||||
},
|
||||
},
|
||||
&structs.ACLRole{
|
||||
ID: "node-identity",
|
||||
Name: "node-identity",
|
||||
Description: "node-identity",
|
||||
NodeIdentities: []*structs.ACLNodeIdentity{
|
||||
{
|
||||
NodeName: "test-node",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
{
|
||||
NodeName: "test-node-dc2",
|
||||
Datacenter: "dc2",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
authz := resolveToken(t, r, "found-role-node-identity")
|
||||
require.NotNil(t, authz)
|
||||
require.Equal(t, acl.Allow, authz.NodeWrite("test-node", nil))
|
||||
|
@ -1821,10 +2019,57 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
})
|
||||
|
||||
runTwiceAndReset("Synthetic Policies Independently Cache", func(t *testing.T) {
|
||||
delegate.UseTestLocalData([]interface{}{
|
||||
&structs.ACLToken{
|
||||
AccessorID: "f6c5a5fb-4da4-422b-9abf-2c942813fc71",
|
||||
SecretID: "found-synthetic-policy-1",
|
||||
ServiceIdentities: []*structs.ACLServiceIdentity{
|
||||
{ServiceName: "service1"},
|
||||
},
|
||||
},
|
||||
&structs.ACLToken{
|
||||
AccessorID: "7c87dfad-be37-446e-8305-299585677cb5",
|
||||
SecretID: "found-synthetic-policy-2",
|
||||
ServiceIdentities: []*structs.ACLServiceIdentity{
|
||||
{ServiceName: "service2"},
|
||||
},
|
||||
},
|
||||
&structs.ACLToken{
|
||||
AccessorID: "bebccc92-3987-489d-84c2-ffd00d93ef93",
|
||||
SecretID: "found-synthetic-policy-3",
|
||||
NodeIdentities: []*structs.ACLNodeIdentity{
|
||||
{
|
||||
NodeName: "test-node1",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
// as the resolver is in dc1 this identity should be ignored
|
||||
{
|
||||
NodeName: "test-node-dc2",
|
||||
Datacenter: "dc2",
|
||||
},
|
||||
},
|
||||
},
|
||||
&structs.ACLToken{
|
||||
AccessorID: "359b9927-25fd-46b9-bd14-3470f848ec65",
|
||||
SecretID: "found-synthetic-policy-4",
|
||||
NodeIdentities: []*structs.ACLNodeIdentity{
|
||||
{
|
||||
NodeName: "test-node2",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
// as the resolver is in dc1 this identity should be ignored
|
||||
{
|
||||
NodeName: "test-node-dc2",
|
||||
Datacenter: "dc2",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// We resolve these tokens in the same cache session
|
||||
// to verify that the keys used to cache synthetic policies don't bleed
|
||||
// into each other.
|
||||
{
|
||||
t.Run("synthetic-policy-1", func(t *testing.T) { // service identity
|
||||
authz, err := r.ResolveToken("found-synthetic-policy-1")
|
||||
require.NotNil(t, authz)
|
||||
require.NoError(t, err)
|
||||
|
@ -1837,8 +2082,8 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
require.Equal(t, acl.Allow, authz.ServiceWrite("service1", nil))
|
||||
require.Equal(t, acl.Allow, authz.ServiceRead("literally-anything", nil))
|
||||
require.Equal(t, acl.Allow, authz.NodeRead("any-node", nil))
|
||||
}
|
||||
{
|
||||
})
|
||||
t.Run("synthetic-policy-2", func(t *testing.T) { // service identity
|
||||
authz, err := r.ResolveToken("found-synthetic-policy-2")
|
||||
require.NotNil(t, authz)
|
||||
require.NoError(t, err)
|
||||
|
@ -1851,8 +2096,8 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
require.Equal(t, acl.Allow, authz.ServiceWrite("service2", nil))
|
||||
require.Equal(t, acl.Allow, authz.ServiceRead("literally-anything", nil))
|
||||
require.Equal(t, acl.Allow, authz.NodeRead("any-node", nil))
|
||||
}
|
||||
{
|
||||
})
|
||||
t.Run("synthetic-policy-3", func(t *testing.T) { // node identity
|
||||
authz, err := r.ResolveToken("found-synthetic-policy-3")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, authz)
|
||||
|
@ -1867,8 +2112,8 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
require.Equal(t, acl.Allow, authz.NodeWrite("test-node1", nil))
|
||||
// ensure node identity for other DC is ignored
|
||||
require.Equal(t, acl.Deny, authz.NodeWrite("test-node-dc2", nil))
|
||||
}
|
||||
{
|
||||
})
|
||||
t.Run("synthetic-policy-4", func(t *testing.T) { // node identity
|
||||
authz, err := r.ResolveToken("found-synthetic-policy-4")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, authz)
|
||||
|
@ -1883,10 +2128,28 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
require.Equal(t, acl.Allow, authz.NodeWrite("test-node2", nil))
|
||||
// ensure node identity for other DC is ignored
|
||||
require.Equal(t, acl.Deny, authz.NodeWrite("test-node-dc2", nil))
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
runTwiceAndReset("Anonymous", func(t *testing.T) {
|
||||
delegate.UseTestLocalData([]interface{}{
|
||||
&structs.ACLToken{
|
||||
AccessorID: "00000000-0000-0000-0000-000000000002",
|
||||
SecretID: anonymousToken,
|
||||
Policies: []structs.ACLTokenPolicyLink{
|
||||
{ID: "node-wr"},
|
||||
},
|
||||
},
|
||||
&structs.ACLPolicy{
|
||||
ID: "node-wr",
|
||||
Name: "node-wr",
|
||||
Description: "node-wr",
|
||||
Rules: `node_prefix "" { policy = "write"}`,
|
||||
Syntax: acl.SyntaxCurrent,
|
||||
Datacenters: []string{"dc1"},
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
})
|
||||
authz, err := r.ResolveToken("")
|
||||
require.NotNil(t, authz)
|
||||
require.NoError(t, err)
|
||||
|
@ -1895,6 +2158,13 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
})
|
||||
|
||||
runTwiceAndReset("legacy-management", func(t *testing.T) {
|
||||
delegate.UseTestLocalData([]interface{}{
|
||||
&structs.ACLToken{
|
||||
AccessorID: "d109a033-99d1-47e2-a711-d6593373a973",
|
||||
SecretID: "legacy-management",
|
||||
Type: structs.ACLTokenTypeManagement,
|
||||
},
|
||||
})
|
||||
authz, err := r.ResolveToken("legacy-management")
|
||||
require.NotNil(t, authz)
|
||||
require.NoError(t, err)
|
||||
|
@ -1903,6 +2173,14 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
})
|
||||
|
||||
runTwiceAndReset("legacy-client", func(t *testing.T) {
|
||||
delegate.UseTestLocalData([]interface{}{
|
||||
&structs.ACLToken{
|
||||
AccessorID: "b7375838-b104-4a25-b457-329d939bf257",
|
||||
SecretID: "legacy-client",
|
||||
Type: structs.ACLTokenTypeClient,
|
||||
Rules: `service "" { policy = "read" }`,
|
||||
},
|
||||
})
|
||||
authz, err := r.ResolveToken("legacy-client")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, authz)
|
||||
|
@ -1910,6 +2188,42 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega
|
|||
require.Equal(t, acl.Deny, authz.OperatorRead(nil))
|
||||
require.Equal(t, acl.Allow, authz.ServiceRead("foo", nil))
|
||||
})
|
||||
|
||||
runTwiceAndReset("service and intention wildcard write", func(t *testing.T) {
|
||||
delegate.UseTestLocalData([]interface{}{
|
||||
&structs.ACLToken{
|
||||
AccessorID: "5f57c1f6-6a89-4186-9445-531b316e01df",
|
||||
SecretID: "with-intentions",
|
||||
Policies: []structs.ACLTokenPolicyLink{
|
||||
{ID: "ixn-write"},
|
||||
},
|
||||
},
|
||||
&structs.ACLPolicy{
|
||||
ID: "ixn-write",
|
||||
Name: "ixn-write",
|
||||
Description: "ixn-write",
|
||||
Rules: `service_prefix "" { policy = "write" intentions = "write" }`,
|
||||
Syntax: acl.SyntaxCurrent,
|
||||
RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 2},
|
||||
},
|
||||
})
|
||||
authz, err := r.ResolveToken("with-intentions")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, authz)
|
||||
require.Equal(t, acl.Allow, authz.ServiceRead("", nil))
|
||||
require.Equal(t, acl.Allow, authz.ServiceRead("foo", nil))
|
||||
require.Equal(t, acl.Allow, authz.ServiceRead("bar", nil))
|
||||
require.Equal(t, acl.Allow, authz.ServiceWrite("", nil))
|
||||
require.Equal(t, acl.Allow, authz.ServiceWrite("foo", nil))
|
||||
require.Equal(t, acl.Allow, authz.ServiceWrite("bar", nil))
|
||||
require.Equal(t, acl.Allow, authz.IntentionRead("", nil))
|
||||
require.Equal(t, acl.Allow, authz.IntentionRead("foo", nil))
|
||||
require.Equal(t, acl.Allow, authz.IntentionRead("bar", nil))
|
||||
require.Equal(t, acl.Allow, authz.IntentionWrite("", nil))
|
||||
require.Equal(t, acl.Allow, authz.IntentionWrite("foo", nil))
|
||||
require.Equal(t, acl.Allow, authz.IntentionWrite("bar", nil))
|
||||
require.Equal(t, acl.Deny, authz.NodeRead("server", nil))
|
||||
})
|
||||
}
|
||||
|
||||
func TestACLResolver_Legacy(t *testing.T) {
|
||||
|
|
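The token cases above that rely on ServiceIdentities and NodeIdentities get their permissions from synthetic policies generated per identity. A minimal sketch of creating such a token through the api client package (client setup, names, and values are illustrative assumptions, not part of this change):

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Permissions come entirely from the identities below; the servers expand
	// each one into a synthetic policy (node identities are scoped to their
	// datacenter, which is what the dc2 entries in the tests above exercise).
	token, _, err := client.ACL().TokenCreate(&api.ACLToken{
		Description:       "identity-only token",
		ServiceIdentities: []*api.ACLServiceIdentity{{ServiceName: "service1"}},
		NodeIdentities:    []*api.ACLNodeIdentity{{NodeName: "test-node1", Datacenter: "dc1"}},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created token", token.AccessorID)
}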
|
@ -585,7 +585,7 @@ func (s *Intention) Match(args *structs.IntentionQueryRequest, reply *structs.In
|
|||
return err
|
||||
}
|
||||
|
||||
// Finish defaulting the namespace fields.
|
||||
// Finish defaulting the namespace and partition fields.
|
||||
for i := range args.Match.Entries {
|
||||
if args.Match.Entries[i].Namespace == "" {
|
||||
args.Match.Entries[i].Namespace = entMeta.NamespaceOrDefault()
|
||||
|
@ -594,6 +594,14 @@ func (s *Intention) Match(args *structs.IntentionQueryRequest, reply *structs.In
|
|||
return fmt.Errorf("Invalid match entry namespace %q: %v",
|
||||
args.Match.Entries[i].Namespace, err)
|
||||
}
|
||||
|
||||
if args.Match.Entries[i].Partition == "" {
|
||||
args.Match.Entries[i].Partition = entMeta.PartitionOrDefault()
|
||||
}
|
||||
if err := s.srv.validateEnterpriseIntentionPartition(args.Match.Entries[i].Partition); err != nil {
|
||||
return fmt.Errorf("Invalid match entry partition %q: %v",
|
||||
args.Match.Entries[i].Partition, err)
|
||||
}
|
||||
}
|
||||
|
||||
var authzContext acl.AuthorizerContext
|
||||
|
|
|
@ -175,8 +175,9 @@ func TestIntentionApply_createWithID(t *testing.T) {
|
|||
Datacenter: "dc1",
|
||||
Op: structs.IntentionOpCreate,
|
||||
Intention: &structs.Intention{
|
||||
ID: generateUUID(),
|
||||
SourceName: "test",
|
||||
ID: generateUUID(),
|
||||
SourceName: "test",
|
||||
DestinationName: "test2",
|
||||
},
|
||||
}
|
||||
var reply string
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package state
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
|
@ -11,57 +10,9 @@ import (
|
|||
pbacl "github.com/hashicorp/consul/proto/pbacl"
|
||||
)
|
||||
|
||||
type TokenExpirationIndex struct {
|
||||
LocalFilter bool
|
||||
}
|
||||
|
||||
func (s *TokenExpirationIndex) encodeTime(t time.Time) []byte {
|
||||
val := t.Unix()
|
||||
buf := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(buf, uint64(val))
|
||||
return buf
|
||||
}
|
||||
|
||||
func (s *TokenExpirationIndex) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
token, ok := obj.(*structs.ACLToken)
|
||||
if !ok {
|
||||
return false, nil, fmt.Errorf("object is not an ACLToken")
|
||||
}
|
||||
if s.LocalFilter != token.Local {
|
||||
return false, nil, nil
|
||||
}
|
||||
if !token.HasExpirationTime() {
|
||||
return false, nil, nil
|
||||
}
|
||||
if token.ExpirationTime.Unix() < 0 {
|
||||
return false, nil, fmt.Errorf("token expiration time cannot be before the unix epoch: %s", token.ExpirationTime)
|
||||
}
|
||||
|
||||
buf := s.encodeTime(*token.ExpirationTime)
|
||||
|
||||
return true, buf, nil
|
||||
}
|
||||
|
||||
func (s *TokenExpirationIndex) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf("must provide only a single argument")
|
||||
}
|
||||
arg, ok := args[0].(time.Time)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("argument must be a time.Time: %#v", args[0])
|
||||
}
|
||||
if arg.Unix() < 0 {
|
||||
return nil, fmt.Errorf("argument must be a time.Time after the unix epoch: %s", args[0])
|
||||
}
|
||||
|
||||
buf := s.encodeTime(arg)
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// ACLTokens is used when saving a snapshot
|
||||
func (s *Snapshot) ACLTokens() (memdb.ResultIterator, error) {
|
||||
iter, err := s.tx.Get(tableACLTokens, "id")
|
||||
iter, err := s.tx.Get(tableACLTokens, indexID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -97,7 +48,7 @@ func (s *Restore) ACLRole(role *structs.ACLRole) error {
|
|||
|
||||
// ACLBindingRules is used when saving a snapshot
|
||||
func (s *Snapshot) ACLBindingRules() (memdb.ResultIterator, error) {
|
||||
iter, err := s.tx.Get("acl-binding-rules", "id")
|
||||
iter, err := s.tx.Get(tableACLBindingRules, indexID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -850,9 +801,9 @@ func (s *Store) ACLTokenListExpired(local bool, asOf time.Time, max int) (struct
|
|||
|
||||
func (s *Store) expiresIndexName(local bool) string {
|
||||
if local {
|
||||
return "expires-local"
|
||||
return indexExpiresLocal
|
||||
}
|
||||
return "expires-global"
|
||||
return indexExpiresGlobal
|
||||
}
|
||||
|
||||
// ACLTokenDeleteBySecret is used to remove an existing ACL from the state store. If
|
||||
|
|
|
@ -75,7 +75,7 @@ func aclTokenGetFromIndex(tx ReadTxn, id string, index string, entMeta *structs.
|
|||
}
|
||||
|
||||
func aclTokenListAll(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get(tableACLTokens, "id")
|
||||
return tx.Get(tableACLTokens, indexID)
|
||||
}
|
||||
|
||||
func aclTokenListByPolicy(tx ReadTxn, policy string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
|
@ -167,12 +167,12 @@ func (s *Store) ACLRoleUpsertValidateEnterprise(role *structs.ACLRole, existing
|
|||
|
||||
func aclBindingRuleInsert(tx WriteTxn, rule *structs.ACLBindingRule) error {
|
||||
// insert the role into memdb
|
||||
if err := tx.Insert("acl-binding-rules", rule); err != nil {
|
||||
if err := tx.Insert(tableACLBindingRules, rule); err != nil {
|
||||
return fmt.Errorf("failed inserting acl role: %v", err)
|
||||
}
|
||||
|
||||
// update the overall acl-binding-rules index
|
||||
if err := indexUpdateMaxTxn(tx, rule.ModifyIndex, "acl-binding-rules"); err != nil {
|
||||
if err := indexUpdateMaxTxn(tx, rule.ModifyIndex, tableACLBindingRules); err != nil {
|
||||
return fmt.Errorf("failed updating acl binding-rules index: %v", err)
|
||||
}
|
||||
|
||||
|
@ -180,32 +180,32 @@ func aclBindingRuleInsert(tx WriteTxn, rule *structs.ACLBindingRule) error {
|
|||
}
|
||||
|
||||
func aclBindingRuleGetByID(tx ReadTxn, id string, _ *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) {
|
||||
return tx.FirstWatch("acl-binding-rules", "id", id)
|
||||
return tx.FirstWatch(tableACLBindingRules, indexID, id)
|
||||
}
|
||||
|
||||
func aclBindingRuleList(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get("acl-binding-rules", "id")
|
||||
return tx.Get(tableACLBindingRules, indexID)
|
||||
}
|
||||
|
||||
func aclBindingRuleListByAuthMethod(tx ReadTxn, method string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get("acl-binding-rules", "authmethod", method)
|
||||
return tx.Get(tableACLBindingRules, indexAuthMethod, Query{Value: method})
|
||||
}
|
||||
|
||||
func aclBindingRuleDeleteWithRule(tx WriteTxn, rule *structs.ACLBindingRule, idx uint64) error {
|
||||
// remove the rule
|
||||
if err := tx.Delete("acl-binding-rules", rule); err != nil {
|
||||
// remove the acl-binding-rule
|
||||
if err := tx.Delete(tableACLBindingRules, rule); err != nil {
|
||||
return fmt.Errorf("failed deleting acl binding rule: %v", err)
|
||||
}
|
||||
|
||||
// update the overall acl-binding-rules index
|
||||
if err := indexUpdateMaxTxn(tx, idx, "acl-binding-rules"); err != nil {
|
||||
if err := indexUpdateMaxTxn(tx, idx, tableACLBindingRules); err != nil {
|
||||
return fmt.Errorf("failed updating acl binding rules index: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func aclBindingRuleMaxIndex(tx ReadTxn, _ *structs.ACLBindingRule, entMeta *structs.EnterpriseMeta) uint64 {
|
||||
return maxIndexTxn(tx, "acl-binding-rules")
|
||||
return maxIndexTxn(tx, tableACLBindingRules)
|
||||
}
|
||||
|
||||
func aclBindingRuleUpsertValidateEnterprise(tx ReadTxn, rule *structs.ACLBindingRule, existing *structs.ACLBindingRule) error {
|
||||
|
|
|
@ -141,3 +141,34 @@ func testIndexerTableACLRoles() map[string]indexerTestCase {
|
|||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testIndexerTableACLBindingRules() map[string]indexerTestCase {
|
||||
obj := &structs.ACLBindingRule{
|
||||
ID: "123e4567-e89a-12d7-a456-426614174abc",
|
||||
AuthMethod: "BinDingRuLe",
|
||||
}
|
||||
encodedID := []byte{0x12, 0x3e, 0x45, 0x67, 0xe8, 0x9a, 0x12, 0xd7, 0xa4, 0x56, 0x42, 0x66, 0x14, 0x17, 0x4a, 0xbc}
|
||||
|
||||
return map[string]indexerTestCase{
|
||||
indexID: {
|
||||
read: indexValue{
|
||||
source: obj.ID,
|
||||
expected: encodedID,
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: encodedID,
|
||||
},
|
||||
},
|
||||
indexAuthMethod: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "BinDingRuLe"},
|
||||
expected: []byte("bindingrule\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("bindingrule\x00"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
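The indexAuthMethod expectation above ("bindingrule\x00" for "BinDingRuLe") follows from indexAuthMethodFromACLBindingRule later in this diff: the value is lowercased and, as the expected bytes show, the string index is NUL-terminated. A small standalone sketch of the same key construction (no memdb involved; the helper name here is made up):

package main

import (
	"bytes"
	"fmt"
	"strings"
)

// buildAuthMethodKey mirrors the write index for the authmethod column:
// lowercase the value and NUL-terminate it so prefix scans cannot match
// across value boundaries.
func buildAuthMethodKey(authMethod string) []byte {
	var b bytes.Buffer
	b.WriteString(strings.ToLower(authMethod))
	b.WriteByte(0)
	return b.Bytes()
}

func main() {
	fmt.Printf("%q\n", buildAuthMethodKey("BinDingRuLe")) // "bindingrule\x00"
}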
|
@ -16,12 +16,14 @@ const (
|
|||
tableACLBindingRules = "acl-binding-rules"
|
||||
tableACLAuthMethods = "acl-auth-methods"
|
||||
|
||||
indexAccessor = "accessor"
|
||||
indexPolicies = "policies"
|
||||
indexRoles = "roles"
|
||||
indexAuthMethod = "authmethod"
|
||||
indexLocality = "locality"
|
||||
indexName = "name"
|
||||
indexAccessor = "accessor"
|
||||
indexPolicies = "policies"
|
||||
indexRoles = "roles"
|
||||
indexAuthMethod = "authmethod"
|
||||
indexLocality = "locality"
|
||||
indexName = "name"
|
||||
indexExpiresGlobal = "expires-global"
|
||||
indexExpiresLocal = "expires-local"
|
||||
)
|
||||
|
||||
func tokensTableSchema() *memdb.TableSchema {
|
||||
|
@ -84,17 +86,23 @@ func tokensTableSchema() *memdb.TableSchema {
|
|||
writeIndex: writeIndex(indexLocalFromACLToken),
|
||||
},
|
||||
},
|
||||
"expires-global": {
|
||||
Name: "expires-global",
|
||||
indexExpiresGlobal: {
|
||||
Name: indexExpiresGlobal,
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: &TokenExpirationIndex{LocalFilter: false},
|
||||
Indexer: indexerSingle{
|
||||
readIndex: readIndex(indexFromTimeQuery),
|
||||
writeIndex: writeIndex(indexExpiresGlobalFromACLToken),
|
||||
},
|
||||
},
|
||||
"expires-local": {
|
||||
Name: "expires-local",
|
||||
indexExpiresLocal: {
|
||||
Name: indexExpiresLocal,
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: &TokenExpirationIndex{LocalFilter: true},
|
||||
Indexer: indexerSingle{
|
||||
readIndex: readIndex(indexFromTimeQuery),
|
||||
writeIndex: writeIndex(indexExpiresLocalFromACLToken),
|
||||
},
|
||||
},
|
||||
|
||||
//DEPRECATED (ACL-Legacy-Compat) - This index is only needed while we support upgrading v1 to v2 acls
|
||||
|
@ -260,23 +268,52 @@ func bindingRulesTableSchema() *memdb.TableSchema {
|
|||
Name: indexID,
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: &memdb.UUIDFieldIndex{
|
||||
Field: "ID",
|
||||
Indexer: indexerSingle{
|
||||
readIndex: readIndex(indexFromUUIDString),
|
||||
writeIndex: writeIndex(indexIDFromACLBindingRule),
|
||||
},
|
||||
},
|
||||
indexAuthMethod: {
|
||||
Name: indexAuthMethod,
|
||||
AllowMissing: false,
|
||||
Unique: false,
|
||||
Indexer: &memdb.StringFieldIndex{
|
||||
Field: "AuthMethod",
|
||||
Lowercase: true,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexAuthMethodFromACLBindingRule,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func indexIDFromACLBindingRule(raw interface{}) ([]byte, error) {
|
||||
p, ok := raw.(*structs.ACLBindingRule)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.ACLBindingRule index", raw)
|
||||
}
|
||||
vv, err := uuidStringToBytes(p.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return vv, err
|
||||
}
|
||||
|
||||
func indexAuthMethodFromACLBindingRule(raw interface{}) ([]byte, error) {
|
||||
p, ok := raw.(*structs.ACLBindingRule)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.ACLBindingRule index", raw)
|
||||
}
|
||||
|
||||
if p.AuthMethod == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(p.AuthMethod))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func authMethodsTableSchema() *memdb.TableSchema {
|
||||
return &memdb.TableSchema{
|
||||
Name: tableACLAuthMethods,
|
||||
|
@ -423,3 +460,42 @@ func indexLocalFromACLToken(raw interface{}) ([]byte, error) {
|
|||
b.Bool(p.Local)
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func indexFromTimeQuery(arg interface{}) ([]byte, error) {
|
||||
p, ok := arg.(*TimeQuery)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for TimeQuery index", arg)
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.Time(p.Value)
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func indexExpiresLocalFromACLToken(raw interface{}) ([]byte, error) {
|
||||
return indexExpiresFromACLToken(raw, true)
|
||||
}
|
||||
|
||||
func indexExpiresGlobalFromACLToken(raw interface{}) ([]byte, error) {
|
||||
return indexExpiresFromACLToken(raw, false)
|
||||
}
|
||||
|
||||
func indexExpiresFromACLToken(raw interface{}, local bool) ([]byte, error) {
|
||||
p, ok := raw.(*structs.ACLToken)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.ACLToken index", raw)
|
||||
}
|
||||
if p.Local != local {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
if !p.HasExpirationTime() {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
if p.ExpirationTime.Unix() < 0 {
|
||||
return nil, fmt.Errorf("token expiration time cannot be before the unix epoch: %s", p.ExpirationTime)
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.Time(*p.ExpirationTime)
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
|
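Both expiration indexes now funnel through indexBuilder.Time, which (per the indexer.go hunk later in this diff) encodes the Unix timestamp as 8 big-endian bytes, so byte-wise ordering in memdb matches chronological ordering. A quick standalone sketch of that property:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"time"
)

// encodeExpiration mirrors indexBuilder.Time: seconds since the Unix epoch,
// big-endian, fixed width, so lexicographic comparison equals time comparison.
func encodeExpiration(t time.Time) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(t.Unix()))
	return buf
}

func main() {
	earlier := encodeExpiration(time.Unix(1000, 0))
	later := encodeExpiration(time.Unix(2000, 0))
	fmt.Println(bytes.Compare(earlier, later) < 0) // true
}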
|
@ -3819,13 +3819,19 @@ func TestTokenPoliciesIndex(t *testing.T) {
|
|||
Name: "global",
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: &TokenExpirationIndex{LocalFilter: false},
|
||||
Indexer: indexerSingle{
|
||||
readIndex: readIndex(indexFromTimeQuery),
|
||||
writeIndex: writeIndex(indexExpiresGlobalFromACLToken),
|
||||
},
|
||||
}
|
||||
localIndex := &memdb.IndexSchema{
|
||||
Name: "local",
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: &TokenExpirationIndex{LocalFilter: true},
|
||||
Indexer: indexerSingle{
|
||||
readIndex: readIndex(indexFromTimeQuery),
|
||||
writeIndex: writeIndex(indexExpiresLocalFromACLToken),
|
||||
},
|
||||
}
|
||||
schema := &memdb.DBSchema{
|
||||
Tables: map[string]*memdb.TableSchema{
|
||||
|
@ -4207,7 +4213,7 @@ func TestStateStore_ACLBindingRules_Snapshot_Restore(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(2), idx)
|
||||
require.ElementsMatch(t, rules, res)
|
||||
require.Equal(t, uint64(2), s.maxIndex("acl-binding-rules"))
|
||||
require.Equal(t, uint64(2), s.maxIndex(tableACLBindingRules))
|
||||
}()
|
||||
}
|
||||
|
||||
|
|
|
@ -5,6 +5,9 @@ import (
|
|||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
// indexerSingle implements both memdb.Indexer and memdb.SingleIndexer. It may
|
||||
|
@ -137,3 +140,27 @@ func (b *indexBuilder) Bytes() []byte {
|
|||
func (b *indexBuilder) Bool(v bool) {
|
||||
b.Raw([]byte{intFromBool(v)})
|
||||
}
|
||||
|
||||
type TimeQuery struct {
|
||||
Value time.Time
|
||||
structs.EnterpriseMeta
|
||||
}
|
||||
|
||||
// NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer
|
||||
// receiver for this method. Remove once that is fixed.
|
||||
func (q TimeQuery) NamespaceOrDefault() string {
|
||||
return q.EnterpriseMeta.NamespaceOrDefault()
|
||||
}
|
||||
|
||||
// PartitionOrDefault exists because structs.EnterpriseMeta uses a pointer
|
||||
// receiver for this method. Remove once that is fixed.
|
||||
func (q TimeQuery) PartitionOrDefault() string {
|
||||
return q.EnterpriseMeta.PartitionOrDefault()
|
||||
}
|
||||
|
||||
func (b *indexBuilder) Time(t time.Time) {
|
||||
val := t.Unix()
|
||||
buf := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(buf, uint64(val))
|
||||
(*bytes.Buffer)(b).Write(buf)
|
||||
}
|
||||
|
|
|
@ -911,6 +911,7 @@ func intentionMatchOneTxn(tx ReadTxn, ws memdb.WatchSet,
|
|||
return result, nil
|
||||
}
|
||||
|
||||
// TODO(partitions): Update for partitions
|
||||
// intentionMatchGetParams returns the tx.Get parameters to find all the
|
||||
// intentions for a certain entry.
|
||||
func intentionMatchGetParams(entry structs.IntentionMatchEntry) ([][]interface{}, error) {
|
||||
|
|
|
@ -38,9 +38,10 @@ func TestNewDBSchema_Indexers(t *testing.T) {
|
|||
|
||||
var testcases = map[string]func() map[string]indexerTestCase{
|
||||
// acl
|
||||
tableACLPolicies: testIndexerTableACLPolicies,
|
||||
tableACLRoles: testIndexerTableACLRoles,
|
||||
tableACLTokens: testIndexerTableACLTokens,
|
||||
tableACLBindingRules: testIndexerTableACLBindingRules,
|
||||
tableACLPolicies: testIndexerTableACLPolicies,
|
||||
tableACLRoles: testIndexerTableACLRoles,
|
||||
tableACLTokens: testIndexerTableACLTokens,
|
||||
// catalog
|
||||
tableChecks: testIndexerTableChecks,
|
||||
tableServices: testIndexerTableServices,
|
||||
|
|
|
@ -98,7 +98,7 @@ func systemMetadataSetTxn(tx WriteTxn, idx uint64, entry *structs.SystemMetadata
|
|||
if err := tx.Insert(tableSystemMetadata, entry); err != nil {
|
||||
return fmt.Errorf("failed inserting system metadata: %s", err)
|
||||
}
|
||||
if err := tx.Insert("index", &IndexEntry{tableSystemMetadata, idx}); err != nil {
|
||||
if err := tx.Insert(tableIndex, &IndexEntry{tableSystemMetadata, idx}); err != nil {
|
||||
return fmt.Errorf("failed updating index: %v", err)
|
||||
}
|
||||
|
||||
|
@ -184,7 +184,7 @@ func systemMetadataDeleteTxn(tx WriteTxn, idx uint64, key string) error {
|
|||
if err := tx.Delete(tableSystemMetadata, existing); err != nil {
|
||||
return fmt.Errorf("failed removing system metadata: %s", err)
|
||||
}
|
||||
if err := tx.Insert("index", &IndexEntry{tableSystemMetadata, idx}); err != nil {
|
||||
if err := tx.Insert(tableIndex, &IndexEntry{tableSystemMetadata, idx}); err != nil {
|
||||
return fmt.Errorf("failed updating index: %s", err)
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -59,6 +59,7 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e
|
|||
Entries: []structs.IntentionMatchEntry{
|
||||
{
|
||||
Namespace: s.proxyID.NamespaceOrDefault(),
|
||||
Partition: s.proxyID.PartitionOrDefault(),
|
||||
Name: s.proxyCfg.DestinationServiceName,
|
||||
},
|
||||
},
|
||||
|
|
|
@ -139,6 +139,7 @@ func TestManager_BasicLifecycle(t *testing.T) {
|
|||
Entries: []structs.IntentionMatchEntry{
|
||||
{
|
||||
Namespace: structs.IntentionDefaultNamespace,
|
||||
Partition: structs.IntentionDefaultNamespace,
|
||||
Name: "web",
|
||||
},
|
||||
},
|
||||
|
|
|
@ -121,6 +121,7 @@ func (s *handlerTerminatingGateway) handleUpdate(ctx context.Context, u cache.Up
|
|||
Entries: []structs.IntentionMatchEntry{
|
||||
{
|
||||
Namespace: svc.Service.NamespaceOrDefault(),
|
||||
Partition: svc.Service.PartitionOrDefault(),
|
||||
Name: svc.Service.Name,
|
||||
},
|
||||
},
|
||||
|
|
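The proxycfg hunks above all add the partition to the intention match entry, mirroring the defaulting added to Intention.Match earlier in this diff. A sketch of the request shape those watches now build (struct and constant names taken from the surrounding code; the concrete values and the helper name are assumptions):

package agent

import "github.com/hashicorp/consul/agent/structs"

// intentionMatchRequest sketches a destination match query whose entry now
// carries the source partition alongside the namespace.
func intentionMatchRequest(partition, namespace, service string) *structs.IntentionQueryRequest {
	return &structs.IntentionQueryRequest{
		Datacenter: "dc1",
		Match: &structs.IntentionQueryMatch{
			Type: structs.IntentionMatchDestination,
			Entries: []structs.IntentionMatchEntry{{
				Partition: partition,
				Namespace: namespace,
				Name:      service,
			}},
		},
	}
}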
|
@ -216,10 +216,10 @@ func (s *ACLNodeIdentity) EstimateSize() int {
|
|||
return len(s.NodeName) + len(s.Datacenter)
|
||||
}
|
||||
|
||||
func (s *ACLNodeIdentity) SyntheticPolicy() *ACLPolicy {
|
||||
func (s *ACLNodeIdentity) SyntheticPolicy(entMeta *EnterpriseMeta) *ACLPolicy {
|
||||
// Given that we validate this string name before persisting, we do not
|
||||
// have to escape it before doing the following interpolation.
|
||||
rules := fmt.Sprintf(aclPolicyTemplateNodeIdentity, s.NodeName)
|
||||
rules := aclNodeIdentityRules(s.NodeName, entMeta)
|
||||
|
||||
hasher := fnv.New128a()
|
||||
hashID := fmt.Sprintf("%x", hasher.Sum([]byte(rules)))
|
||||
|
@ -231,8 +231,7 @@ func (s *ACLNodeIdentity) SyntheticPolicy() *ACLPolicy {
|
|||
policy.Rules = rules
|
||||
policy.Syntax = acl.SyntaxCurrent
|
||||
policy.Datacenters = []string{s.Datacenter}
|
||||
// TODO(partitions,acls): this needs to be fed the correct partition
|
||||
policy.EnterpriseMeta = *DefaultEnterpriseMetaInDefaultPartition()
|
||||
policy.EnterpriseMeta.Merge(entMeta)
|
||||
policy.SetHash(true)
|
||||
return policy
|
||||
}
|
||||
|
|
|
@ -62,6 +62,10 @@ func aclServiceIdentityRules(svc string, _ *EnterpriseMeta) string {
|
|||
return fmt.Sprintf(aclPolicyTemplateServiceIdentity, svc)
|
||||
}
|
||||
|
||||
func aclNodeIdentityRules(node string, _ *EnterpriseMeta) string {
|
||||
return fmt.Sprintf(aclPolicyTemplateNodeIdentity, node)
|
||||
}
|
||||
|
||||
func (p *ACLPolicy) EnterprisePolicyMeta() *acl.EnterprisePolicyMeta {
|
||||
return nil
|
||||
}
|
||||
|
|
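SyntheticPolicy now takes the owning token's enterprise meta so the generated policy lands in that token's partition and namespace instead of always the defaults. A brief sketch of the call (the rules text itself comes from aclPolicyTemplateNodeIdentity, which is not reproduced in this diff; values are illustrative):

package structs_test

import (
	"fmt"

	"github.com/hashicorp/consul/agent/structs"
)

func ExampleACLNodeIdentity_SyntheticPolicy() {
	ident := &structs.ACLNodeIdentity{NodeName: "test-node", Datacenter: "dc1"}

	// The enterprise meta of the owning token is merged into the policy; in
	// OSS builds this is simply the default partition and namespace.
	policy := ident.SyntheticPolicy(structs.DefaultEnterpriseMetaInDefaultPartition())

	// The policy only applies in the identity's datacenter.
	fmt.Println(policy.Datacenters) // [dc1]
}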
|
@ -41,6 +41,7 @@ type CheckDefinition struct {
|
|||
Timeout time.Duration
|
||||
TTL time.Duration
|
||||
SuccessBeforePassing int
|
||||
FailuresBeforeWarning int
|
||||
FailuresBeforeCritical int
|
||||
DeregisterCriticalServiceAfter time.Duration
|
||||
OutputMaxSize int
|
||||
|
@ -196,6 +197,7 @@ func (c *CheckDefinition) CheckType() *CheckType {
|
|||
Timeout: c.Timeout,
|
||||
TTL: c.TTL,
|
||||
SuccessBeforePassing: c.SuccessBeforePassing,
|
||||
FailuresBeforeWarning: c.FailuresBeforeWarning,
|
||||
FailuresBeforeCritical: c.FailuresBeforeCritical,
|
||||
DeregisterCriticalServiceAfter: c.DeregisterCriticalServiceAfter,
|
||||
}
|
||||
|
|
|
@ -49,6 +49,7 @@ type CheckType struct {
|
|||
Timeout time.Duration
|
||||
TTL time.Duration
|
||||
SuccessBeforePassing int
|
||||
FailuresBeforeWarning int
|
||||
FailuresBeforeCritical int
|
||||
|
||||
// Definition fields used when exposing checks through a proxy
|
||||
|
@ -182,6 +183,10 @@ func (c *CheckType) Validate() error {
|
|||
if c.OutputMaxSize < 0 {
|
||||
return fmt.Errorf("MaxOutputMaxSize must be positive")
|
||||
}
|
||||
if c.FailuresBeforeWarning > c.FailuresBeforeCritical {
|
||||
return fmt.Errorf("FailuresBeforeWarning can't be higher than FailuresBeforeCritical")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
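The new FailuresBeforeWarning field is only accepted when it does not exceed FailuresBeforeCritical, which the Validate clause above enforces. A small sketch of a definition that passes that validation (field names from the hunks above; the HTTP endpoint, interval, and thresholds are made-up examples):

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/agent/structs"
)

func main() {
	chk := &structs.CheckType{
		HTTP:                   "http://127.0.0.1:8080/health",
		Interval:               10 * time.Second,
		Timeout:                2 * time.Second,
		FailuresBeforeWarning:  2, // check turns warning after 2 consecutive failures
		FailuresBeforeCritical: 5, // and critical after 5
	}
	if err := chk.Validate(); err != nil {
		// Swapping the two thresholds above would trip the
		// "FailuresBeforeWarning can't be higher than FailuresBeforeCritical" error.
		fmt.Println("invalid check:", err)
		return
	}
	fmt.Println("check definition is valid")
}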
|
|
@ -171,7 +171,7 @@ func (e *IngressGatewayConfigEntry) Validate() error {
|
|||
serviceNames := make(map[ServiceID]struct{})
|
||||
for i, s := range listener.Services {
|
||||
if err := validateInnerEnterpriseMeta(&s.EnterpriseMeta, &e.EnterpriseMeta); err != nil {
|
||||
return fmt.Errorf("Services[%d].%v", i, err)
|
||||
return fmt.Errorf("services[%d]: %w", i, err)
|
||||
}
|
||||
sn := NewServiceName(s.Name, &s.EnterpriseMeta)
|
||||
if err := s.RequestHeaders.Validate(listener.Protocol); err != nil {
|
||||
|
@ -401,7 +401,7 @@ func (e *TerminatingGatewayConfigEntry) Validate() error {
|
|||
cid := NewServiceID(svc.Name, &svc.EnterpriseMeta)
|
||||
|
||||
if err := validateInnerEnterpriseMeta(&svc.EnterpriseMeta, &e.EnterpriseMeta); err != nil {
|
||||
return fmt.Errorf("Service %q: %v", cid.String(), err)
|
||||
return fmt.Errorf("service %q: %w", cid, err)
|
||||
}
|
||||
|
||||
// Check for duplicates within the entry
|
||||
|
|
|
@ -329,6 +329,14 @@ func (ixn *Intention) CanRead(authz acl.Authorizer) bool {
|
|||
}
|
||||
|
||||
func (ixn *Intention) CanWrite(authz acl.Authorizer) bool {
|
||||
if ixn.DestinationName == "" {
|
||||
// This is likely a strange form of legacy intention data validation
|
||||
// that happened within the authorization check, since intentions without
|
||||
// a destination cannot be written.
|
||||
// This may be able to be removed later.
|
||||
return false
|
||||
}
|
||||
|
||||
var authzContext acl.AuthorizerContext
|
||||
ixn.FillAuthzContext(&authzContext, true)
|
||||
return authz.IntentionWrite(ixn.DestinationName, &authzContext) == acl.Allow
|
||||
|
|
|
@ -1,8 +1,11 @@
|
|||
package structs
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
|
||||
"github.com/hashicorp/consul/lib"
|
||||
)
|
||||
|
||||
// ServiceDefinition is used to JSON decode the Service definitions. For
|
||||
|
@ -123,7 +126,11 @@ func (s *ServiceDefinition) Validate() error {
|
|||
if err := s.NodeService().Validate(); err != nil {
|
||||
result = multierror.Append(result, err)
|
||||
}
|
||||
|
||||
for _, c := range s.Checks {
|
||||
if err := c.Validate(); err != nil {
|
||||
return fmt.Errorf("check %q: %s", c.Name, err)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
|
|
|
@ -639,10 +639,9 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
|
|||
targetSpiffeID := connect.SpiffeIDService{
|
||||
Host: cfgSnap.Roots.TrustDomain,
|
||||
Namespace: target.Namespace,
|
||||
Partition: target.Partition,
|
||||
Datacenter: target.Datacenter,
|
||||
Service: target.Service,
|
||||
|
||||
// TODO(partitions) Store partition
|
||||
}
|
||||
|
||||
if failoverThroughMeshGateway {
|
||||
|
@ -676,10 +675,9 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
|
|||
id := connect.SpiffeIDService{
|
||||
Host: cfgSnap.Roots.TrustDomain,
|
||||
Namespace: target.Namespace,
|
||||
Partition: target.Partition,
|
||||
Datacenter: target.Datacenter,
|
||||
Service: target.Service,
|
||||
|
||||
// TODO(partitions) Store partition
|
||||
}
|
||||
|
||||
// Failover targets might be subsets of the same service, so these are deduplicated.
|
||||
|
|
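Rather than the removed TODO, the target's partition is now carried directly on the SPIFFE ID used to verify upstream certificates. A sketch of constructing one (values are illustrative; the exact URI layout is whatever connect.SpiffeIDService.URI produces):

package main

import (
	"fmt"

	"github.com/hashicorp/consul/agent/connect"
)

func main() {
	id := connect.SpiffeIDService{
		Host:       "11111111-2222-3333-4444-555555555555.consul",
		Partition:  "default",
		Namespace:  "default",
		Datacenter: "dc1",
		Service:    "web",
	}
	// The resulting spiffe:// URI is what the upstream cluster's TLS context
	// matches against the certificate presented by the upstream.
	fmt.Println(id.URI().String())
}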
|
@ -13,6 +13,7 @@ import (
|
|||
envoy_network_rbac_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/rbac/v3"
|
||||
envoy_matcher_v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
|
||||
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
|
@ -296,15 +297,16 @@ func (p *rbacPermission) Flatten() *envoy_rbac_v3.Permission {
|
|||
return andPermissions(parts)
|
||||
}
|
||||
|
||||
// simplifyNotSourceSlice will collapse NotSources elements together if any element is
|
||||
// a subset of another.
|
||||
// For example "default/web" is a subset of "default/*" because it is covered by the wildcard.
|
||||
func simplifyNotSourceSlice(notSources []structs.ServiceName) []structs.ServiceName {
|
||||
if len(notSources) <= 1 {
|
||||
return notSources
|
||||
}
|
||||
|
||||
// Collapse NotSources elements together if any element is a subset of
|
||||
// another.
|
||||
|
||||
// Sort, keeping the least wildcarded elements first.
|
||||
// More specific elements have a higher precedence over more wildcarded elements.
|
||||
sort.SliceStable(notSources, func(i, j int) bool {
|
||||
return countWild(notSources[i]) < countWild(notSources[j])
|
||||
})
|
||||
|
@ -457,6 +459,16 @@ func makeRBACRules(intentions structs.Intentions, intentionDefaultAllow bool, is
|
|||
return rbac, nil
|
||||
}
|
||||
|
||||
// removeSameSourceIntentions will iterate over intentions and remove any lower precedence
|
||||
// intentions that share the same source. Intentions are sorted by descending precedence
|
||||
// so once a source has been seen, additional intentions with the same source can be dropped.
|
||||
//
|
||||
// Example for the default/web service:
|
||||
// input: [(backend/* -> default/web), (backend/* -> default/*)]
|
||||
// output: [(backend/* -> default/web)]
|
||||
//
|
||||
// (backend/* -> default/*) was dropped because it is already known that any service
|
||||
// in the backend namespace can target default/web.
|
||||
func removeSameSourceIntentions(intentions structs.Intentions) structs.Intentions {
|
||||
if len(intentions) < 2 {
|
||||
return intentions
|
||||
|
@ -489,10 +501,11 @@ func removeSameSourceIntentions(intentions structs.Intentions) structs.Intention
|
|||
// 'against' service name via wildcard rules.
|
||||
//
|
||||
// For instance:
|
||||
// - (web, api) => false, because these have no wildcards
|
||||
// - (web, *) => true, because "all services" includes "web"
|
||||
// - (default/web, default/*) => true, because "all services in the default NS" includes "default/web"
|
||||
// - (default/*, */*) => true, "any service in any NS" includes "all services in the default NS"
|
||||
// - (web, api) => false, because these have no wildcards
|
||||
// - (web, *) => true, because "all services" includes "web"
|
||||
// - (default/web, default/*) => true, because "all services in the default NS" includes "default/web"
|
||||
// - (default/*, */*) => true, "any service in any NS" includes "all services in the default NS"
|
||||
// - (default/default/*, other/*/*) => false, "any service in "other" partition" does NOT include services in the default partition"
|
||||
func ixnSourceMatches(tester, against structs.ServiceName) bool {
|
||||
// We assume that we can't have the same intention twice before arriving
|
||||
// here.
|
||||
|
@ -505,13 +518,19 @@ func ixnSourceMatches(tester, against structs.ServiceName) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
matchesAP := tester.PartitionOrDefault() == against.PartitionOrDefault() || against.PartitionOrDefault() == structs.WildcardSpecifier
|
||||
matchesNS := tester.NamespaceOrDefault() == against.NamespaceOrDefault() || against.NamespaceOrDefault() == structs.WildcardSpecifier
|
||||
matchesName := tester.Name == against.Name || against.Name == structs.WildcardSpecifier
|
||||
return matchesNS && matchesName
|
||||
return matchesAP && matchesNS && matchesName
|
||||
}
|
||||
|
||||
// countWild counts the number of wildcard values in the given namespace and name.
|
||||
func countWild(src structs.ServiceName) int {
|
||||
// If Partition is wildcard, panic because it's not supported
|
||||
if src.PartitionOrDefault() == structs.WildcardSpecifier {
|
||||
panic("invalid state: intention references wildcard partition")
|
||||
}
|
||||
|
||||
// If NS is wildcard, it must be 2 since wildcards only follow exact
|
||||
if src.NamespaceOrDefault() == structs.WildcardSpecifier {
|
||||
return 2
|
||||
|
@ -546,7 +565,7 @@ func notPrincipal(id *envoy_rbac_v3.Principal) *envoy_rbac_v3.Principal {
|
|||
}
|
||||
|
||||
func idPrincipal(src structs.ServiceName) *envoy_rbac_v3.Principal {
|
||||
pattern := makeSpiffePattern(src.NamespaceOrDefault(), src.Name)
|
||||
pattern := makeSpiffePattern(src.PartitionOrDefault(), src.NamespaceOrDefault(), src.Name)
|
||||
|
||||
return &envoy_rbac_v3.Principal{
|
||||
Identifier: &envoy_rbac_v3.Principal_Authenticated_{
|
||||
|
@ -560,21 +579,41 @@ func idPrincipal(src structs.ServiceName) *envoy_rbac_v3.Principal {
|
|||
},
|
||||
}
|
||||
}
|
||||
func makeSpiffePattern(sourceNS, sourceName string) string {
|
||||
const (
|
||||
anyPath = `[^/]+`
|
||||
spiffeTemplate = `^spiffe://%s/ns/%s/dc/%s/svc/%s$`
|
||||
)
|
||||
switch {
|
||||
case sourceNS != structs.WildcardSpecifier && sourceName != structs.WildcardSpecifier:
|
||||
return fmt.Sprintf(spiffeTemplate, anyPath, sourceNS, anyPath, sourceName)
|
||||
case sourceNS != structs.WildcardSpecifier && sourceName == structs.WildcardSpecifier:
|
||||
return fmt.Sprintf(spiffeTemplate, anyPath, sourceNS, anyPath, anyPath)
|
||||
case sourceNS == structs.WildcardSpecifier && sourceName == structs.WildcardSpecifier:
|
||||
return fmt.Sprintf(spiffeTemplate, anyPath, anyPath, anyPath, anyPath)
|
||||
default:
|
||||
|
||||
func makeSpiffePattern(sourceAP, sourceNS, sourceName string) string {
|
||||
if sourceNS == structs.WildcardSpecifier && sourceName != structs.WildcardSpecifier {
|
||||
panic(fmt.Sprintf("not possible to have a wildcarded namespace %q but an exact service %q", sourceNS, sourceName))
|
||||
}
|
||||
if sourceAP == structs.WildcardSpecifier {
|
||||
panic("not possible to have a wildcarded source partition")
|
||||
}
|
||||
|
||||
const anyPath = `[^/]+`
|
||||
|
||||
// Match on any namespace or service if it is a wildcard, or on a specific value otherwise.
|
||||
ns := sourceNS
|
||||
if sourceNS == structs.WildcardSpecifier {
|
||||
ns = anyPath
|
||||
}
|
||||
|
||||
svc := sourceName
|
||||
if sourceName == structs.WildcardSpecifier {
|
||||
svc = anyPath
|
||||
}
|
||||
|
||||
id := connect.SpiffeIDService{
|
||||
Namespace: ns,
|
||||
Service: svc,
|
||||
|
||||
// Trust domain and datacenter are not verified by RBAC, so we match on any value.
|
||||
Host: anyPath,
|
||||
Datacenter: anyPath,
|
||||
|
||||
// Partition can only ever be an exact value.
|
||||
Partition: sourceAP,
|
||||
}
|
||||
|
||||
return fmt.Sprintf(`^%s://%s%s$`, id.URI().Scheme, id.Host, id.URI().Path)
|
||||
}
|
||||
|
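For reference, the rewritten helper derives the regular expression from the same SpiffeIDService type instead of a hand-written template: wildcard namespace/name collapse to the [^/]+ segment, trust domain and datacenter always match anything, and the partition is interpolated verbatim (it can never be a wildcard, hence the panic). An illustrative in-package sketch of the three shapes a caller can ask for (the partition and namespace values are assumptions):

// sketchSpiffePatterns is illustrative only and not part of the change.
func sketchSpiffePatterns() []string {
	return []string{
		// exact namespace and service: both segments are literal
		makeSpiffePattern("default", "default", "web"),
		// wildcard service: the service segment becomes [^/]+
		makeSpiffePattern("default", "default", structs.WildcardSpecifier),
		// wildcard namespace and service: both segments become [^/]+
		makeSpiffePattern("default", structs.WildcardSpecifier, structs.WildcardSpecifier),
	}
}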
||||
func anyPermission() *envoy_rbac_v3.Permission {
|
||||
|
|
|
@ -887,14 +887,3 @@ func makeServiceNameSlice(slice []string) []structs.ServiceName {
|
|||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func unmakeServiceNameSlice(slice []structs.ServiceName) []string {
|
||||
if len(slice) == 0 {
|
||||
return nil
|
||||
}
|
||||
var out []string
|
||||
for _, src := range slice {
|
||||
out = append(out, src.String())
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
|
|
@ -234,12 +234,20 @@ func TestAPI_ACLPolicy_List(t *testing.T) {
|
|||
}
|
||||
|
||||
func prepTokenPolicies(t *testing.T, acl *ACL) (policies []*ACLPolicy) {
|
||||
return prepTokenPoliciesInPartition(t, acl, "")
|
||||
}
|
||||
|
||||
func prepTokenPoliciesInPartition(t *testing.T, acl *ACL, partition string) (policies []*ACLPolicy) {
|
||||
var wqPart *WriteOptions
|
||||
if partition != "" {
|
||||
wqPart = &WriteOptions{Partition: partition}
|
||||
}
|
||||
policy, _, err := acl.PolicyCreate(&ACLPolicy{
|
||||
Name: "one",
|
||||
Description: "one description",
|
||||
Rules: `acl = "read"`,
|
||||
Datacenters: []string{"dc1", "dc2"},
|
||||
}, nil)
|
||||
}, wqPart)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, policy)
|
||||
|
@ -250,7 +258,7 @@ func prepTokenPolicies(t *testing.T, acl *ACL) (policies []*ACLPolicy) {
|
|||
Description: "two description",
|
||||
Rules: `node_prefix "" { policy = "read" }`,
|
||||
Datacenters: []string{"dc1", "dc2"},
|
||||
}, nil)
|
||||
}, wqPart)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, policy)
|
||||
|
@ -260,7 +268,7 @@ func prepTokenPolicies(t *testing.T, acl *ACL) (policies []*ACLPolicy) {
|
|||
Name: "three",
|
||||
Description: "three description",
|
||||
Rules: `service_prefix "" { policy = "read" }`,
|
||||
}, nil)
|
||||
}, wqPart)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, policy)
|
||||
|
@ -270,7 +278,7 @@ func prepTokenPolicies(t *testing.T, acl *ACL) (policies []*ACLPolicy) {
|
|||
Name: "four",
|
||||
Description: "four description",
|
||||
Rules: `agent "foo" { policy = "write" }`,
|
||||
}, nil)
|
||||
}, wqPart)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, policy)
|
||||
|
|
|
@ -330,6 +330,7 @@ type AgentServiceCheck struct {
|
|||
AliasNode string `json:",omitempty"`
|
||||
AliasService string `json:",omitempty"`
|
||||
SuccessBeforePassing int `json:",omitempty"`
|
||||
FailuresBeforeWarning int `json:",omitempty"`
|
||||
FailuresBeforeCritical int `json:",omitempty"`
|
||||
|
||||
// In Consul 0.7 and later, checks that are associated with a service
|
||||
|
|
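FailuresBeforeWarning is surfaced through the HTTP API client as well. A sketch of registering a service whose check uses it (service name, port, endpoint, and thresholds are illustrative):

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	err = client.Agent().ServiceRegister(&api.AgentServiceRegistration{
		Name: "web",
		Port: 8080,
		Check: &api.AgentServiceCheck{
			HTTP:                   "http://127.0.0.1:8080/health",
			Interval:               "10s",
			FailuresBeforeWarning:  2, // enter warning after 2 consecutive failures
			FailuresBeforeCritical: 5, // and critical after 5
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}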
api/api.go
|
@ -660,6 +660,14 @@ func NewClient(config *Config) (*Client, error) {
|
|||
}
|
||||
}
|
||||
|
||||
if config.Namespace == "" {
|
||||
config.Namespace = defConfig.Namespace
|
||||
}
|
||||
|
||||
if config.Partition == "" {
|
||||
config.Partition = defConfig.Partition
|
||||
}
|
||||
|
||||
parts := strings.SplitN(config.Address, "://", 2)
|
||||
if len(parts) == 2 {
|
||||
switch parts[0] {
|
||||
|
@ -1117,7 +1125,9 @@ func generateUnexpectedResponseCodeError(resp *http.Response) error {
|
|||
var buf bytes.Buffer
|
||||
io.Copy(&buf, resp.Body)
|
||||
closeResponseBody(resp)
|
||||
return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
|
||||
|
||||
trimmed := strings.TrimSpace(string(buf.Bytes()))
|
||||
return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, trimmed)
|
||||
}
|
||||
|
||||
func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) {
|
||||
|
|
|
@ -30,6 +30,7 @@ const (
|
|||
type ConfigEntry interface {
|
||||
GetKind() string
|
||||
GetName() string
|
||||
GetPartition() string
|
||||
GetNamespace() string
|
||||
GetMeta() map[string]string
|
||||
GetCreateIndex() uint64
|
||||
|
@ -133,6 +134,10 @@ type UpstreamConfiguration struct {
|
|||
type UpstreamConfig struct {
|
||||
// Name is only accepted within a service-defaults config entry.
|
||||
Name string `json:",omitempty"`
|
||||
|
||||
// Partition is only accepted within a service-defaults config entry.
|
||||
Partition string `json:",omitempty"`
|
||||
|
||||
// Namespace is only accepted within a service-defaults config entry.
|
||||
Namespace string `json:",omitempty"`
|
||||
|
||||
|
@ -205,6 +210,7 @@ type UpstreamLimits struct {
|
|||
type ServiceConfigEntry struct {
|
||||
Kind string
|
||||
Name string
|
||||
Partition string `json:",omitempty"`
|
||||
Namespace string `json:",omitempty"`
|
||||
Protocol string `json:",omitempty"`
|
||||
Mode ProxyMode `json:",omitempty"`
|
||||
|
@ -219,33 +225,18 @@ type ServiceConfigEntry struct {
|
|||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
func (s *ServiceConfigEntry) GetKind() string {
|
||||
return s.Kind
|
||||
}
|
||||
|
||||
func (s *ServiceConfigEntry) GetName() string {
|
||||
return s.Name
|
||||
}
|
||||
|
||||
func (s *ServiceConfigEntry) GetNamespace() string {
|
||||
return s.Namespace
|
||||
}
|
||||
|
||||
func (s *ServiceConfigEntry) GetMeta() map[string]string {
|
||||
return s.Meta
|
||||
}
|
||||
|
||||
func (s *ServiceConfigEntry) GetCreateIndex() uint64 {
|
||||
return s.CreateIndex
|
||||
}
|
||||
|
||||
func (s *ServiceConfigEntry) GetModifyIndex() uint64 {
|
||||
return s.ModifyIndex
|
||||
}
|
||||
func (s *ServiceConfigEntry) GetKind() string { return s.Kind }
|
||||
func (s *ServiceConfigEntry) GetName() string { return s.Name }
|
||||
func (s *ServiceConfigEntry) GetPartition() string { return s.Partition }
|
||||
func (s *ServiceConfigEntry) GetNamespace() string { return s.Namespace }
|
||||
func (s *ServiceConfigEntry) GetMeta() map[string]string { return s.Meta }
|
||||
func (s *ServiceConfigEntry) GetCreateIndex() uint64 { return s.CreateIndex }
|
||||
func (s *ServiceConfigEntry) GetModifyIndex() uint64 { return s.ModifyIndex }
|
||||
|
||||
type ProxyConfigEntry struct {
|
||||
Kind string
|
||||
Name string
|
||||
Partition string `json:",omitempty"`
|
||||
Namespace string `json:",omitempty"`
|
||||
Mode ProxyMode `json:",omitempty"`
|
||||
TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"`
|
||||
|
@ -257,29 +248,13 @@ type ProxyConfigEntry struct {
|
|||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
func (p *ProxyConfigEntry) GetKind() string {
|
||||
return p.Kind
|
||||
}
|
||||
|
||||
func (p *ProxyConfigEntry) GetName() string {
|
||||
return p.Name
|
||||
}
|
||||
|
||||
func (p *ProxyConfigEntry) GetNamespace() string {
|
||||
return p.Namespace
|
||||
}
|
||||
|
||||
func (p *ProxyConfigEntry) GetMeta() map[string]string {
|
||||
return p.Meta
|
||||
}
|
||||
|
||||
func (p *ProxyConfigEntry) GetCreateIndex() uint64 {
|
||||
return p.CreateIndex
|
||||
}
|
||||
|
||||
func (p *ProxyConfigEntry) GetModifyIndex() uint64 {
|
||||
return p.ModifyIndex
|
||||
}
|
||||
func (p *ProxyConfigEntry) GetKind() string { return p.Kind }
|
||||
func (p *ProxyConfigEntry) GetName() string { return p.Name }
|
||||
func (p *ProxyConfigEntry) GetPartition() string { return p.Partition }
|
||||
func (p *ProxyConfigEntry) GetNamespace() string { return p.Namespace }
|
||||
func (p *ProxyConfigEntry) GetMeta() map[string]string { return p.Meta }
|
||||
func (p *ProxyConfigEntry) GetCreateIndex() uint64 { return p.CreateIndex }
|
||||
func (p *ProxyConfigEntry) GetModifyIndex() uint64 { return p.ModifyIndex }
|
||||
|
||||
func makeConfigEntry(kind, name string) (ConfigEntry, error) {
|
||||
switch kind {
|
||||
|
|
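Every config entry kind in the api package now implements GetPartition alongside the existing getters, and partition-scoped entries round-trip through the same ConfigEntries endpoints. A sketch (the kind, service name, protocol, and partition values are illustrative; admin partitions are a Consul Enterprise feature):

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	entry := &api.ServiceConfigEntry{
		Kind:      api.ServiceDefaults,
		Name:      "web",
		Partition: "default",
		Protocol:  "http",
	}
	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}
	log.Println("wrote", entry.GetKind(), entry.GetName(), "in partition", entry.GetPartition())
}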
|
@ -2,41 +2,44 @@ package api
|
|||
|
||||
import "encoding/json"
|
||||
|
||||
// MeshConfigEntry manages the global configuration for all service mesh
|
||||
// proxies.
|
||||
type MeshConfigEntry struct {
|
||||
Namespace string `json:",omitempty"`
|
||||
// Partition is the partition the MeshConfigEntry applies to.
|
||||
// Partitioning is a Consul Enterprise feature.
|
||||
Partition string `json:",omitempty"`
|
||||
|
||||
// Namespace is the namespace the MeshConfigEntry applies to.
|
||||
// Namespacing is a Consul Enterprise feature.
|
||||
Namespace string `json:",omitempty"`
|
||||
|
||||
// TransparentProxy applies configuration specific to proxies
|
||||
// in transparent mode.
|
||||
TransparentProxy TransparentProxyMeshConfig `alias:"transparent_proxy"`
|
||||
Meta map[string]string `json:",omitempty"`
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
|
||||
Meta map[string]string `json:",omitempty"`
|
||||
|
||||
// CreateIndex is the Raft index this entry was created at. This is a
|
||||
// read-only field.
|
||||
CreateIndex uint64
|
||||
|
||||
// ModifyIndex is used for the Check-And-Set operations and can also be fed
|
||||
// back into the WaitIndex of the QueryOptions in order to perform blocking
|
||||
// queries.
|
||||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
type TransparentProxyMeshConfig struct {
|
||||
MeshDestinationsOnly bool `alias:"mesh_destinations_only"`
|
||||
}
|
||||
|
||||
func (e *MeshConfigEntry) GetKind() string {
|
||||
return MeshConfig
|
||||
}
|
||||
|
||||
func (e *MeshConfigEntry) GetName() string {
|
||||
return MeshConfigMesh
|
||||
}
|
||||
|
||||
func (e *MeshConfigEntry) GetNamespace() string {
|
||||
return e.Namespace
|
||||
}
|
||||
|
||||
func (e *MeshConfigEntry) GetMeta() map[string]string {
|
||||
return e.Meta
|
||||
}
|
||||
|
||||
func (e *MeshConfigEntry) GetCreateIndex() uint64 {
|
||||
return e.CreateIndex
|
||||
}
|
||||
|
||||
func (e *MeshConfigEntry) GetModifyIndex() uint64 {
|
||||
return e.ModifyIndex
|
||||
}
|
||||
func (e *MeshConfigEntry) GetKind() string { return MeshConfig }
|
||||
func (e *MeshConfigEntry) GetName() string { return MeshConfigMesh }
|
||||
func (e *MeshConfigEntry) GetPartition() string { return e.Partition }
|
||||
func (e *MeshConfigEntry) GetNamespace() string { return e.Namespace }
|
||||
func (e *MeshConfigEntry) GetMeta() map[string]string { return e.Meta }
|
||||
func (e *MeshConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
func (e *MeshConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
|
||||
|
||||
// MarshalJSON adds the Kind field so that the JSON can be decoded back into the
|
||||
// correct type.
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
type ServiceRouterConfigEntry struct {
|
||||
Kind string
|
||||
Name string
|
||||
Partition string `json:",omitempty"`
|
||||
Namespace string `json:",omitempty"`
|
||||
|
||||
Routes []ServiceRoute `json:",omitempty"`
|
||||
|
@ -19,6 +20,7 @@ type ServiceRouterConfigEntry struct {
|
|||
|
||||
func (e *ServiceRouterConfigEntry) GetKind() string { return e.Kind }
|
||||
func (e *ServiceRouterConfigEntry) GetName() string { return e.Name }
|
||||
func (e *ServiceRouterConfigEntry) GetPartition() string { return e.Partition }
|
||||
func (e *ServiceRouterConfigEntry) GetNamespace() string { return e.Namespace }
|
||||
func (e *ServiceRouterConfigEntry) GetMeta() map[string]string { return e.Meta }
|
||||
func (e *ServiceRouterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
|
@ -61,8 +63,9 @@ type ServiceRouteHTTPMatchQueryParam struct {
|
|||
}
|
||||
|
||||
type ServiceRouteDestination struct {
|
||||
Service string `json:",omitempty"`
|
||||
ServiceSubset string `json:",omitempty" alias:"service_subset"`
|
||||
Service string `json:",omitempty"`
|
||||
ServiceSubset string `json:",omitempty" alias:"service_subset"`
|
||||
// Referencing other partitions is not supported.
|
||||
Namespace string `json:",omitempty"`
|
||||
PrefixRewrite string `json:",omitempty" alias:"prefix_rewrite"`
|
||||
RequestTimeout time.Duration `json:",omitempty" alias:"request_timeout"`
|
||||
|
@ -112,6 +115,7 @@ func (e *ServiceRouteDestination) UnmarshalJSON(data []byte) error {
|
|||
type ServiceSplitterConfigEntry struct {
|
||||
Kind string
|
||||
Name string
|
||||
Partition string `json:",omitempty"`
|
||||
Namespace string `json:",omitempty"`
|
||||
|
||||
Splits []ServiceSplit `json:",omitempty"`
|
||||
|
@ -123,15 +127,17 @@ type ServiceSplitterConfigEntry struct {
|
|||
|
||||
func (e *ServiceSplitterConfigEntry) GetKind() string { return e.Kind }
|
||||
func (e *ServiceSplitterConfigEntry) GetName() string { return e.Name }
|
||||
func (e *ServiceSplitterConfigEntry) GetPartition() string { return e.Partition }
|
||||
func (e *ServiceSplitterConfigEntry) GetNamespace() string { return e.Namespace }
|
||||
func (e *ServiceSplitterConfigEntry) GetMeta() map[string]string { return e.Meta }
|
||||
func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
|
||||
|
||||
type ServiceSplit struct {
|
||||
Weight float32
|
||||
Service string `json:",omitempty"`
|
||||
ServiceSubset string `json:",omitempty" alias:"service_subset"`
|
||||
Weight float32
|
||||
Service string `json:",omitempty"`
|
||||
ServiceSubset string `json:",omitempty" alias:"service_subset"`
|
||||
// Referencing other partitions is not supported.
|
||||
Namespace string `json:",omitempty"`
|
||||
RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"`
|
||||
ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"`
|
||||
|
@ -140,6 +146,7 @@ type ServiceSplit struct {
|
|||
type ServiceResolverConfigEntry struct {
|
||||
Kind string
|
||||
Name string
|
||||
Partition string `json:",omitempty"`
|
||||
Namespace string `json:",omitempty"`
|
||||
|
||||
DefaultSubset string `json:",omitempty" alias:"default_subset"`
|
||||
|
@ -195,6 +202,7 @@ func (e *ServiceResolverConfigEntry) UnmarshalJSON(data []byte) error {
|
|||
|
||||
func (e *ServiceResolverConfigEntry) GetKind() string { return e.Kind }
|
||||
func (e *ServiceResolverConfigEntry) GetName() string { return e.Name }
|
||||
func (e *ServiceResolverConfigEntry) GetPartition() string { return e.Partition }
|
||||
func (e *ServiceResolverConfigEntry) GetNamespace() string { return e.Namespace }
|
||||
func (e *ServiceResolverConfigEntry) GetMeta() map[string]string { return e.Meta }
|
||||
func (e *ServiceResolverConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
|
@ -208,15 +216,17 @@ type ServiceResolverSubset struct {
|
|||
type ServiceResolverRedirect struct {
|
||||
Service string `json:",omitempty"`
|
||||
ServiceSubset string `json:",omitempty" alias:"service_subset"`
|
||||
Namespace string `json:",omitempty"`
|
||||
Datacenter string `json:",omitempty"`
|
||||
// Referencing other partitions is not supported.
|
||||
Namespace string `json:",omitempty"`
|
||||
Datacenter string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type ServiceResolverFailover struct {
|
||||
Service string `json:",omitempty"`
|
||||
ServiceSubset string `json:",omitempty" alias:"service_subset"`
|
||||
Namespace string `json:",omitempty"`
|
||||
Datacenters []string `json:",omitempty"`
|
||||
Service string `json:",omitempty"`
|
||||
ServiceSubset string `json:",omitempty" alias:"service_subset"`
|
||||
// Referencing other partitions is not supported.
|
||||
Namespace string `json:",omitempty"`
|
||||
Datacenters []string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// LoadBalancer determines the load balancing policy and configuration for services
|
||||
|
|
|
@ -139,6 +139,7 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
|
|||
entry: &ServiceResolverConfigEntry{
|
||||
Kind: ServiceResolver,
|
||||
Name: "test-failover",
|
||||
Partition: defaultPartition,
|
||||
Namespace: defaultNamespace,
|
||||
DefaultSubset: "v1",
|
||||
Subsets: map[string]ServiceResolverSubset{
|
||||
|
@ -171,6 +172,7 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
|
|||
entry: &ServiceResolverConfigEntry{
|
||||
Kind: ServiceResolver,
|
||||
Name: "test-redirect",
|
||||
Partition: defaultPartition,
|
||||
Namespace: defaultNamespace,
|
||||
Redirect: &ServiceResolverRedirect{
|
||||
Service: "test-failover",
|
||||
|
@ -186,6 +188,7 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
|
|||
entry: &ServiceSplitterConfigEntry{
|
||||
Kind: ServiceSplitter,
|
||||
Name: "test-split",
|
||||
Partition: defaultPartition,
|
||||
Namespace: defaultNamespace,
|
||||
Splits: []ServiceSplit{
|
||||
{
|
||||
|
@ -220,6 +223,7 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
|
|||
entry: &ServiceRouterConfigEntry{
|
||||
Kind: ServiceRouter,
|
||||
Name: "test-route",
|
||||
Partition: defaultPartition,
|
||||
Namespace: defaultNamespace,
|
||||
Routes: []ServiceRoute{
|
||||
{
|
||||
|
@ -329,6 +333,7 @@ func TestAPI_ConfigEntry_ServiceResolver_LoadBalancer(t *testing.T) {
|
|||
entry: &ServiceResolverConfigEntry{
|
||||
Kind: ServiceResolver,
|
||||
Name: "test-least-req",
|
||||
Partition: defaultPartition,
|
||||
Namespace: defaultNamespace,
|
||||
LoadBalancer: &LoadBalancer{
|
||||
Policy: "least_request",
|
||||
|
@ -343,6 +348,7 @@ func TestAPI_ConfigEntry_ServiceResolver_LoadBalancer(t *testing.T) {
|
|||
Kind: ServiceResolver,
|
||||
Name: "test-ring-hash",
|
||||
Namespace: defaultNamespace,
|
||||
Partition: defaultPartition,
|
||||
LoadBalancer: &LoadBalancer{
|
||||
Policy: "ring_hash",
|
||||
RingHashConfig: &RingHashConfig{
|
||||
|
|
|
@ -10,7 +10,11 @@ type IngressGatewayConfigEntry struct {
|
|||
// service. This should match the name provided in the service definition.
|
||||
Name string
|
||||
|
||||
// Namespace is the namespace the IngressGateway is associated with
|
||||
// Partition is the partition the IngressGateway is associated with.
|
||||
// Partitioning is a Consul Enterprise feature.
|
||||
Partition string `json:",omitempty"`
|
||||
|
||||
// Namespace is the namespace the IngressGateway is associated with.
|
||||
// Namespacing is a Consul Enterprise feature.
|
||||
Namespace string `json:",omitempty"`
|
||||
|
||||
|
@ -34,7 +38,7 @@ type IngressGatewayConfigEntry struct {
|
|||
}
|
||||
|
||||
type GatewayTLSConfig struct {
|
||||
// Indicates that TLS should be enabled for this gateway service
|
||||
// Indicates that TLS should be enabled for this gateway service.
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
|
@ -67,7 +71,7 @@ type IngressService struct {
|
|||
// protocol and means that the listener will forward traffic to all services.
|
||||
//
|
||||
// A name can be specified on multiple listeners, and will be exposed on both
|
||||
// of the listeners
|
||||
// of the listeners.
|
||||
Name string
|
||||
|
||||
// Hosts is a list of hostnames which should be associated to this service on
|
||||
|
@ -83,38 +87,24 @@ type IngressService struct {
|
|||
// using a "tcp" listener.
|
||||
Hosts []string
|
||||
|
||||
// Allow HTTP header manipulation to be configured.
|
||||
RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"`
|
||||
ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"`
|
||||
// Referencing other partitions is not supported.
|
||||
|
||||
// Namespace is the namespace where the service is located.
|
||||
// Namespacing is a Consul Enterprise feature.
|
||||
Namespace string `json:",omitempty"`
|
||||
|
||||
// Allow HTTP header manipulation to be configured.
|
||||
RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"`
|
||||
ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"`
|
||||
}
|
||||
|
||||
func (i *IngressGatewayConfigEntry) GetKind() string {
|
||||
return i.Kind
|
||||
}
|
||||
|
||||
func (i *IngressGatewayConfigEntry) GetName() string {
|
||||
return i.Name
|
||||
}
|
||||
|
||||
func (i *IngressGatewayConfigEntry) GetNamespace() string {
|
||||
return i.Namespace
|
||||
}
|
||||
|
||||
func (i *IngressGatewayConfigEntry) GetMeta() map[string]string {
|
||||
return i.Meta
|
||||
}
|
||||
|
||||
func (i *IngressGatewayConfigEntry) GetCreateIndex() uint64 {
|
||||
return i.CreateIndex
|
||||
}
|
||||
|
||||
func (i *IngressGatewayConfigEntry) GetModifyIndex() uint64 {
|
||||
return i.ModifyIndex
|
||||
}
|
||||
func (i *IngressGatewayConfigEntry) GetKind() string { return i.Kind }
|
||||
func (i *IngressGatewayConfigEntry) GetName() string { return i.Name }
|
||||
func (i *IngressGatewayConfigEntry) GetPartition() string { return i.Partition }
|
||||
func (i *IngressGatewayConfigEntry) GetNamespace() string { return i.Namespace }
|
||||
func (i *IngressGatewayConfigEntry) GetMeta() map[string]string { return i.Meta }
|
||||
func (i *IngressGatewayConfigEntry) GetCreateIndex() uint64 { return i.CreateIndex }
|
||||
func (i *IngressGatewayConfigEntry) GetModifyIndex() uint64 { return i.ModifyIndex }
|
||||
|
||||
// TerminatingGatewayConfigEntry manages the configuration for a terminating gateway
|
||||
// with the given name.
|
||||
|
@ -140,55 +130,45 @@ type TerminatingGatewayConfigEntry struct {
|
|||
// queries.
|
||||
ModifyIndex uint64
|
||||
|
||||
// Namespace is the namespace the config entry is associated with
|
||||
// Partition is the partition the config entry is associated with.
|
||||
// Partitioning is a Consul Enterprise feature.
|
||||
Partition string `json:",omitempty"`
|
||||
|
||||
// Namespace is the namespace the config entry is associated with.
|
||||
// Namespacing is a Consul Enterprise feature.
|
||||
Namespace string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// A LinkedService is a service represented by a terminating gateway
|
||||
type LinkedService struct {
|
||||
// The namespace the service is registered in
|
||||
// Referencing other partitions is not supported.
|
||||
|
||||
// Namespace is where the service is registered.
|
||||
Namespace string `json:",omitempty"`
|
||||
|
||||
// Name is the name of the service, as defined in Consul's catalog
|
||||
// Name is the name of the service, as defined in Consul's catalog.
|
||||
Name string `json:",omitempty"`
|
||||
|
||||
// CAFile is the optional path to a CA certificate to use for TLS connections
|
||||
// from the gateway to the linked service
|
||||
// from the gateway to the linked service.
|
||||
CAFile string `json:",omitempty" alias:"ca_file"`
|
||||
|
||||
// CertFile is the optional path to a client certificate to use for TLS connections
|
||||
// from the gateway to the linked service
|
||||
// from the gateway to the linked service.
|
||||
CertFile string `json:",omitempty" alias:"cert_file"`
|
||||
|
||||
// KeyFile is the optional path to a private key to use for TLS connections
|
||||
// from the gateway to the linked service
|
||||
// from the gateway to the linked service.
|
||||
KeyFile string `json:",omitempty" alias:"key_file"`
|
||||
|
||||
// SNI is the optional name to specify during the TLS handshake with a linked service
|
||||
// SNI is the optional name to specify during the TLS handshake with a linked service.
|
||||
SNI string `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (g *TerminatingGatewayConfigEntry) GetKind() string {
|
||||
return g.Kind
|
||||
}
|
||||
|
||||
func (g *TerminatingGatewayConfigEntry) GetName() string {
|
||||
return g.Name
|
||||
}
|
||||
|
||||
func (g *TerminatingGatewayConfigEntry) GetNamespace() string {
|
||||
return g.Namespace
|
||||
}
|
||||
|
||||
func (g *TerminatingGatewayConfigEntry) GetMeta() map[string]string {
|
||||
return g.Meta
|
||||
}
|
||||
|
||||
func (g *TerminatingGatewayConfigEntry) GetCreateIndex() uint64 {
|
||||
return g.CreateIndex
|
||||
}
|
||||
|
||||
func (g *TerminatingGatewayConfigEntry) GetModifyIndex() uint64 {
|
||||
return g.ModifyIndex
|
||||
}
|
||||
func (g *TerminatingGatewayConfigEntry) GetKind() string { return g.Kind }
|
||||
func (g *TerminatingGatewayConfigEntry) GetName() string { return g.Name }
|
||||
func (g *TerminatingGatewayConfigEntry) GetPartition() string { return g.Partition }
|
||||
func (g *TerminatingGatewayConfigEntry) GetNamespace() string { return g.Namespace }
|
||||
func (g *TerminatingGatewayConfigEntry) GetMeta() map[string]string { return g.Meta }
|
||||
func (g *TerminatingGatewayConfigEntry) GetCreateIndex() uint64 { return g.CreateIndex }
|
||||
func (g *TerminatingGatewayConfigEntry) GetModifyIndex() uint64 { return g.ModifyIndex }
|
||||
|
|
|
@ -5,6 +5,7 @@ import "time"
|
|||
type ServiceIntentionsConfigEntry struct {
|
||||
Kind string
|
||||
Name string
|
||||
Partition string `json:",omitempty"`
|
||||
Namespace string `json:",omitempty"`
|
||||
|
||||
Sources []*SourceIntention
|
||||
|
@ -17,6 +18,7 @@ type ServiceIntentionsConfigEntry struct {
|
|||
|
||||
type SourceIntention struct {
|
||||
Name string
|
||||
Partition string `json:",omitempty"`
|
||||
Namespace string `json:",omitempty"`
|
||||
Action IntentionAction `json:",omitempty"`
|
||||
Permissions []*IntentionPermission `json:",omitempty"`
|
||||
|
@ -30,29 +32,13 @@ type SourceIntention struct {
|
|||
LegacyUpdateTime *time.Time `json:",omitempty" alias:"legacy_update_time"`
|
||||
}
|
||||
|
||||
func (e *ServiceIntentionsConfigEntry) GetKind() string {
|
||||
return e.Kind
|
||||
}
|
||||
|
||||
func (e *ServiceIntentionsConfigEntry) GetName() string {
|
||||
return e.Name
|
||||
}
|
||||
|
||||
func (e *ServiceIntentionsConfigEntry) GetNamespace() string {
|
||||
return e.Namespace
|
||||
}
|
||||
|
||||
func (e *ServiceIntentionsConfigEntry) GetMeta() map[string]string {
|
||||
return e.Meta
|
||||
}
|
||||
|
||||
func (e *ServiceIntentionsConfigEntry) GetCreateIndex() uint64 {
|
||||
return e.CreateIndex
|
||||
}
|
||||
|
||||
func (e *ServiceIntentionsConfigEntry) GetModifyIndex() uint64 {
|
||||
return e.ModifyIndex
|
||||
}
|
||||
func (e *ServiceIntentionsConfigEntry) GetKind() string { return e.Kind }
|
||||
func (e *ServiceIntentionsConfigEntry) GetName() string { return e.Name }
|
||||
func (e *ServiceIntentionsConfigEntry) GetPartition() string { return e.Partition }
|
||||
func (e *ServiceIntentionsConfigEntry) GetNamespace() string { return e.Namespace }
|
||||
func (e *ServiceIntentionsConfigEntry) GetMeta() map[string]string { return e.Meta }
|
||||
func (e *ServiceIntentionsConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
func (e *ServiceIntentionsConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
|
||||
|
||||
type IntentionPermission struct {
|
||||
Action IntentionAction
|
||||
|
|
|
@@ -204,6 +204,7 @@ func TestAPI_ConfigEntries(t *testing.T) {
"foo": "bar",
"gir": "zim",
},
Partition: defaultPartition,
Namespace: defaultNamespace,
}
ce := c.ConfigEntries()

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})"
pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null
SCRIPT_DIR=$(pwd)

@@ -513,12 +513,17 @@ func generateStatsTags(args *BootstrapTplArgs, initialTags []string, omitDepreca
}
tagJSONs = append(tagJSONs, tags...)

// Default the namespace here since it is also done for cluster SNI
// Default the namespace and partition here since it is also done for cluster SNI
ns := args.Namespace
if ns == "" {
ns = api.IntentionDefaultNamespace
}

ap := args.Partition
if ap == "" {
ap = api.IntentionDefaultNamespace
}

// Add some default tags if not already overridden. Note this is a slice not a
// map since we need ordering to be deterministic.
defaults := []struct {
@@ -540,6 +545,10 @@ func generateStatsTags(args *BootstrapTplArgs, initialTags []string, omitDepreca
name: "consul.source.namespace",
val: ns,
},
{
name: "consul.source.partition",
val: ap,
},
{
name: "consul.source.datacenter",
val: args.Datacenter,
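For readers skimming the hunk above, the net effect of the new partition handling is that an unset partition is reported as `default` in the fixed stats tags, mirroring the existing namespace fallback. A minimal, self-contained sketch of that behaviour; the `statsTag` type and `defaultSourceTags` helper are illustrative, not part of the change:

```go
package main

import "fmt"

// statsTag is a hypothetical stand-in for the default-tag entries built by
// generateStatsTags; it only illustrates the fallback behaviour shown above.
type statsTag struct {
	name string
	val  string
}

// defaultSourceTags mirrors the defaulting in the hunk: an empty namespace or
// partition is reported as "default" so the emitted consul.source.* tags
// always carry a value.
func defaultSourceTags(namespace, partition, datacenter string) []statsTag {
	if namespace == "" {
		namespace = "default"
	}
	if partition == "" {
		partition = "default"
	}
	return []statsTag{
		{name: "consul.source.namespace", val: namespace},
		{name: "consul.source.partition", val: partition},
		{name: "consul.source.datacenter", val: datacenter},
	}
}

func main() {
	for _, tag := range defaultSourceTags("", "", "dc1") {
		fmt.Printf("%s=%s\n", tag.name, tag.val)
	}
}
```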
@@ -89,6 +89,10 @@ type BootstrapTplArgs struct {
// as registered with the Consul agent.
Namespace string

// Partition is the Consul Enterprise Partition of the proxy service instance
// as registered with the Consul agent.
Partition string

// Datacenter is the datacenter where the proxy service instance is registered.
Datacenter string

@@ -141,6 +145,7 @@ const bootstrapTemplate = `{
"id": "{{ .ProxyID }}",
"metadata": {
"namespace": "{{if ne .Namespace ""}}{{ .Namespace }}{{else}}default{{end}}",
"partition": "{{if ne .Partition ""}}{{ .Partition }}{{else}}default{{end}}",
"envoy_version": "{{ .EnvoyVersion }}"
}
},
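The template change is easiest to read in isolation: when `.Partition` (or `.Namespace`) is empty, the rendered metadata falls back to the literal `default`. A small standalone sketch using only the standard library `text/template` package; the trimmed-down template below is illustrative, not the full bootstrap template:

```go
package main

import (
	"os"
	"text/template"
)

// metadataTpl is a trimmed-down copy of the node metadata block from the
// bootstrap template above; only the namespace and partition lines are kept.
const metadataTpl = `"metadata": {
  "namespace": "{{if ne .Namespace ""}}{{ .Namespace }}{{else}}default{{end}}",
  "partition": "{{if ne .Partition ""}}{{ .Partition }}{{else}}default{{end}}"
}
`

func main() {
	tpl := template.Must(template.New("metadata").Parse(metadataTpl))
	// With no namespace or partition supplied, both render as "default",
	// matching the regenerated golden files further down.
	if err := tpl.Execute(os.Stdout, struct{ Namespace, Partition string }{}); err != nil {
		panic(err)
	}
}
```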
@@ -482,6 +482,7 @@ func (c *cmd) templateArgs() (*BootstrapTplArgs, error) {
Token: httpCfg.Token,
LocalAgentClusterName: xds.LocalAgentClusterName,
Namespace: httpCfg.Namespace,
Partition: httpCfg.Partition,
EnvoyVersion: c.envoyVersion,
Datacenter: httpCfg.Datacenter,
PrometheusBackendPort: c.prometheusBackendPort,
@@ -525,18 +526,20 @@ func (c *cmd) generateConfig() ([]byte, error) {
// Set the source service name from the proxy's own registration
args.ProxySourceService = svc.Service
}

// In most cases where namespaces and partitions are enabled they will already be set
// correctly because the http client that fetched this will provide them explicitly.
// However, if these arguments were not provided, they will be empty even
// though Namespaces and Partitions are actually being used.
// Overriding them ensures that we always set the Namespace and Partition args
// if the cluster is using them. This prevents us from defaulting to the "default"
// when a non-default partition or namespace was inferred from the ACL token.
if svc.Namespace != "" {
// In most cases where namespaces are enabled this will already be set
// correctly because the http client that fetched this will need to have
// had the namespace set on it which is also how we initially populate
// this. However in the case of "default" namespace being accessed because
// there was no namespace argument, args.Namespace will be empty even
// though Namespaces are actually being used and the namespace of the request was
// inferred from the ACL token or defaulted to the "default" namespace.
// Overriding it here ensures that we always set the Namespace arg if the
// cluster is using namespaces regardless.
args.Namespace = svc.Namespace
}
if svc.Partition != "" {
args.Partition = svc.Partition
}

if svc.Datacenter != "" {
// The agent will definitely have the definitive answer here.

@ -90,7 +90,7 @@ func testSetAndResetEnv(t *testing.T, env []string) func() {
|
|||
// save it as a nil so we know to remove again
|
||||
old[pair[0]] = nil
|
||||
}
|
||||
os.Setenv(pair[0], pair[1])
|
||||
require.NoError(t, os.Setenv(pair[0], pair[1]))
|
||||
}
|
||||
// Return a func that will reset to old values
|
||||
return func() {
|
||||
|
@ -106,6 +106,7 @@ func testSetAndResetEnv(t *testing.T, env []string) func() {
|
|||
|
||||
type generateConfigTestCase struct {
|
||||
Name string
|
||||
TLSServer bool
|
||||
Flags []string
|
||||
Env []string
|
||||
Files map[string]string
|
||||
|
@ -452,9 +453,10 @@ func TestGenerateConfig(t *testing.T) {
|
|||
WantErr: "Error loading CA File: open some/path: no such file or directory",
|
||||
},
|
||||
{
|
||||
Name: "existing-ca-file",
|
||||
Flags: []string{"-proxy-id", "test-proxy", "-ca-file", "../../../test/ca/root.cer"},
|
||||
Env: []string{"CONSUL_HTTP_SSL=1"},
|
||||
Name: "existing-ca-file",
|
||||
TLSServer: true,
|
||||
Flags: []string{"-proxy-id", "test-proxy", "-ca-file", "../../../test/ca/root.cer"},
|
||||
Env: []string{"CONSUL_HTTP_SSL=1"},
|
||||
WantArgs: BootstrapTplArgs{
|
||||
EnvoyVersion: defaultEnvoyVersion,
|
||||
ProxyCluster: "test-proxy",
|
||||
|
@ -499,9 +501,10 @@ func TestGenerateConfig(t *testing.T) {
|
|||
WantErr: "lstat some/path: no such file or directory",
|
||||
},
|
||||
{
|
||||
Name: "existing-ca-path",
|
||||
Flags: []string{"-proxy-id", "test-proxy", "-ca-path", "../../../test/ca_path/"},
|
||||
Env: []string{"CONSUL_HTTP_SSL=1"},
|
||||
Name: "existing-ca-path",
|
||||
TLSServer: true,
|
||||
Flags: []string{"-proxy-id", "test-proxy", "-ca-path", "../../../test/ca_path/"},
|
||||
Env: []string{"CONSUL_HTTP_SSL=1"},
|
||||
WantArgs: BootstrapTplArgs{
|
||||
EnvoyVersion: defaultEnvoyVersion,
|
||||
ProxyCluster: "test-proxy",
|
||||
|
@ -887,15 +890,21 @@ func TestGenerateConfig(t *testing.T) {
|
|||
|
||||
// Run a mock agent API that just always returns the proxy config in the
|
||||
// test.
|
||||
srv := httptest.NewServer(testMockAgent(tc))
|
||||
var srv *httptest.Server
|
||||
if tc.TLSServer {
|
||||
srv = httptest.NewTLSServer(testMockAgent(tc))
|
||||
} else {
|
||||
srv = httptest.NewServer(testMockAgent(tc))
|
||||
}
|
||||
defer srv.Close()
|
||||
client, err := api.NewClient(&api.Config{Address: srv.URL})
|
||||
require.NoError(err)
|
||||
|
||||
testDirPrefix := testDir + string(filepath.Separator)
|
||||
myEnv := copyAndReplaceAll(tc.Env, "@@TEMPDIR@@", testDirPrefix)
|
||||
defer testSetAndResetEnv(t, myEnv)()
|
||||
|
||||
client, err := api.NewClient(&api.Config{Address: srv.URL, TLSConfig: api.TLSConfig{InsecureSkipVerify: true}})
|
||||
require.NoError(err)
|
||||
|
||||
ui := cli.NewMockUi()
|
||||
c := New(ui)
|
||||
// explicitly set the client to one which can connect to the httptest.Server
|
||||
|
@ -1073,6 +1082,7 @@ func testMockAgentGatewayConfig(namespacesEnabled bool) http.HandlerFunc {
|
|||
|
||||
if namespacesEnabled {
|
||||
svc[string(kind)].Namespace = namespaceFromQuery(r)
|
||||
svc[string(kind)].Partition = partitionFromQuery(r)
|
||||
}
|
||||
|
||||
cfgJSON, err := json.Marshal(svc)
|
||||
|
@ -1094,6 +1104,15 @@ func namespaceFromQuery(r *http.Request) string {
|
|||
return "default"
|
||||
}
|
||||
|
||||
func partitionFromQuery(r *http.Request) string {
|
||||
// Use the partition in the request if there is one, otherwise
|
||||
// use-default.
|
||||
if queryAP := r.URL.Query().Get("partition"); queryAP != "" {
|
||||
return queryAP
|
||||
}
|
||||
return "default"
|
||||
}
|
||||
|
||||
func testMockAgentProxyConfig(cfg map[string]interface{}, namespacesEnabled bool) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
// Parse the proxy-id from the end of the URL (blindly assuming it's correct
|
||||
|
@ -1115,6 +1134,7 @@ func testMockAgentProxyConfig(cfg map[string]interface{}, namespacesEnabled bool
|
|||
|
||||
if namespacesEnabled {
|
||||
svc.Namespace = namespaceFromQuery(r)
|
||||
svc.Partition = partitionFromQuery(r)
|
||||
}
|
||||
|
||||
cfgJSON, err := json.Marshal(svc)
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -157,6 +158,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -144,6 +145,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -144,6 +145,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -144,6 +145,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -157,6 +158,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -157,6 +158,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -166,6 +167,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -157,6 +158,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -144,6 +145,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -144,6 +145,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -143,6 +144,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "ingress-gateway",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -230,6 +231,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "ingress-gateway",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -230,6 +231,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "my-gateway-123",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -230,6 +231,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "my-gateway",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -230,6 +231,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "ingress-gateway-1",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -230,6 +231,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -230,6 +231,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -144,6 +145,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -144,6 +145,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -144,6 +145,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -144,6 +145,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -144,6 +145,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"partition": "default",
|
||||
"envoy_version": "1.18.4"
|
||||
}
|
||||
},
|
||||
|
@ -168,6 +169,10 @@
|
|||
"tag_name": "consul.source.namespace",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.partition",
|
||||
"fixed_value": "default"
|
||||
},
|
||||
{
|
||||
"tag_name": "consul.source.datacenter",
|
||||
"fixed_value": "dc1"
|
||||
|
|
|
@@ -30,6 +30,7 @@ func CheckTypeToStructs(s CheckType) structs.CheckType {
t.TTL = s.TTL
t.SuccessBeforePassing = int(s.SuccessBeforePassing)
t.FailuresBeforeCritical = int(s.FailuresBeforeCritical)
t.FailuresBeforeWarning = int(s.FailuresBeforeWarning)
t.ProxyHTTP = s.ProxyHTTP
t.ProxyGRPC = s.ProxyGRPC
t.DeregisterCriticalServiceAfter = s.DeregisterCriticalServiceAfter
@@ -62,6 +63,7 @@ func NewCheckTypeFromStructs(t structs.CheckType) CheckType {
s.TTL = t.TTL
s.SuccessBeforePassing = int32(t.SuccessBeforePassing)
s.FailuresBeforeCritical = int32(t.FailuresBeforeCritical)
s.FailuresBeforeWarning = int32(t.FailuresBeforeWarning)
s.ProxyHTTP = t.ProxyHTTP
s.ProxyGRPC = t.ProxyGRPC
s.DeregisterCriticalServiceAfter = t.DeregisterCriticalServiceAfter

@@ -232,6 +232,8 @@ type CheckType struct {
// mog: func-to=int func-from=int32
SuccessBeforePassing int32 `protobuf:"varint,21,opt,name=SuccessBeforePassing,proto3" json:"SuccessBeforePassing,omitempty"`
// mog: func-to=int func-from=int32
FailuresBeforeWarning int32 `protobuf:"varint,29,opt,name=FailuresBeforeWarning,proto3" json:"FailuresBeforeWarning,omitempty"`
// mog: func-to=int func-from=int32
FailuresBeforeCritical int32 `protobuf:"varint,22,opt,name=FailuresBeforeCritical,proto3" json:"FailuresBeforeCritical,omitempty"`
// Definition fields used when exposing checks through a proxy
ProxyHTTP string `protobuf:"bytes,23,opt,name=ProxyHTTP,proto3" json:"ProxyHTTP,omitempty"`
@ -289,74 +291,75 @@ func init() {
|
|||
func init() { proto.RegisterFile("proto/pbservice/healthcheck.proto", fileDescriptor_8a6f7448747c9fbe) }
|
||||
|
||||
var fileDescriptor_8a6f7448747c9fbe = []byte{
|
||||
// 1062 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x41, 0x4f, 0xe3, 0x46,
|
||||
0x14, 0x8e, 0x09, 0x24, 0xf1, 0x64, 0x61, 0x61, 0x96, 0xa5, 0xb3, 0xec, 0xca, 0xa4, 0x74, 0x0f,
|
||||
0x54, 0xa5, 0x8e, 0x44, 0xd5, 0xaa, 0xad, 0xd4, 0x56, 0x84, 0xb0, 0x90, 0x0a, 0x68, 0xea, 0xb8,
|
||||
0x7b, 0xe8, 0xcd, 0x38, 0x93, 0xc4, 0xc2, 0xf1, 0x58, 0xe3, 0x31, 0x22, 0xfd, 0x15, 0x7b, 0xdc,
|
||||
0xff, 0xd0, 0x3f, 0xc2, 0x91, 0x63, 0xa5, 0x4a, 0xb4, 0x85, 0x7f, 0xd1, 0x53, 0x35, 0x6f, 0xec,
|
||||
0xe0, 0x6c, 0xbc, 0x25, 0x5d, 0x6d, 0x4f, 0xcc, 0xfb, 0xde, 0x7b, 0x33, 0x9e, 0x79, 0xdf, 0xf7,
|
||||
0x05, 0xf4, 0x61, 0xc8, 0x99, 0x60, 0xf5, 0xf0, 0x34, 0xa2, 0xfc, 0xdc, 0x73, 0x69, 0x7d, 0x40,
|
||||
0x1d, 0x5f, 0x0c, 0xdc, 0x01, 0x75, 0xcf, 0x4c, 0xc8, 0x61, 0x7d, 0x9c, 0x5c, 0x37, 0xfa, 0x8c,
|
||||
0xf5, 0x7d, 0x5a, 0x87, 0xc4, 0x69, 0xdc, 0xab, 0x77, 0x63, 0xee, 0x08, 0x8f, 0x05, 0xaa, 0x74,
|
||||
0xfd, 0x69, 0xba, 0x9b, 0xcb, 0x86, 0x43, 0x16, 0xd4, 0xd5, 0x9f, 0x24, 0xb9, 0xda, 0x67, 0x7d,
|
||||
0xa6, 0x0a, 0xe4, 0x4a, 0xa1, 0x9b, 0xbf, 0xcf, 0xa3, 0xea, 0x21, 0x9c, 0xb9, 0x27, 0xcf, 0xc4,
|
||||
0x18, 0xcd, 0x9f, 0xb0, 0x2e, 0x25, 0x5a, 0x4d, 0xdb, 0xd2, 0x2d, 0x58, 0xe3, 0x03, 0x54, 0x86,
|
||||
0x64, 0xab, 0x49, 0xe6, 0x24, 0xdc, 0xf8, 0xf4, 0xef, 0xeb, 0x8d, 0x8f, 0xfb, 0x9e, 0x18, 0xc4,
|
||||
0xa7, 0xa6, 0xcb, 0x86, 0xf5, 0x81, 0x13, 0x0d, 0x3c, 0x97, 0xf1, 0xb0, 0xee, 0xb2, 0x20, 0x8a,
|
||||
0xfd, 0xba, 0x18, 0x85, 0x34, 0x32, 0x93, 0x26, 0x2b, 0xed, 0x86, 0xcd, 0x9d, 0x21, 0x25, 0xc5,
|
||||
0x64, 0x73, 0x67, 0x48, 0xf1, 0x1a, 0x2a, 0x75, 0x84, 0x23, 0xe2, 0x88, 0xcc, 0x03, 0x9a, 0x44,
|
||||
0x78, 0x15, 0x2d, 0x9c, 0x30, 0x41, 0x23, 0xb2, 0x00, 0xb0, 0x0a, 0x64, 0xf5, 0x0f, 0xb1, 0x08,
|
||||
0x63, 0x41, 0x4a, 0xaa, 0x5a, 0x45, 0xf8, 0x19, 0xd2, 0x3b, 0xea, 0x91, 0x5a, 0x4d, 0x52, 0x86,
|
||||
0xd4, 0x1d, 0x80, 0x6b, 0xa8, 0x9a, 0x04, 0x70, 0x7c, 0x05, 0xf2, 0x59, 0x28, 0x53, 0x61, 0x3b,
|
||||
0xfd, 0x88, 0xe8, 0xb5, 0x62, 0xa6, 0x42, 0x42, 0xf2, 0xdb, 0xed, 0x51, 0x48, 0xc9, 0x03, 0xf5,
|
||||
0xed, 0x72, 0x8d, 0x5f, 0x20, 0xd4, 0xa4, 0x3d, 0x2f, 0xf0, 0xe4, 0x0c, 0x08, 0xaa, 0x69, 0x5b,
|
||||
0xd5, 0x9d, 0x9a, 0x39, 0x9e, 0x97, 0x99, 0x79, 0xd8, 0xbb, 0xba, 0xc6, 0xfc, 0xe5, 0xf5, 0x46,
|
||||
0xc1, 0xca, 0x74, 0xe2, 0xaf, 0x90, 0x6e, 0x39, 0x3d, 0xd1, 0x0a, 0xba, 0xf4, 0x82, 0x54, 0x61,
|
||||
0x9b, 0x15, 0x33, 0x19, 0xde, 0x38, 0xd1, 0xa8, 0xc8, 0xbe, 0xab, 0xeb, 0x0d, 0xcd, 0xba, 0xab,
|
||||
0xc6, 0x4d, 0xb4, 0xb4, 0x1f, 0x08, 0xca, 0x43, 0xee, 0x45, 0xf4, 0x98, 0x0a, 0x87, 0x2c, 0x42,
|
||||
0xff, 0x5a, 0xda, 0x3f, 0x99, 0x4d, 0x0e, 0x7f, 0xa3, 0x47, 0x5e, 0x7f, 0xff, 0x22, 0x64, 0x11,
|
||||
0xed, 0xb6, 0x19, 0x17, 0x64, 0xa9, 0xa6, 0x6d, 0x2d, 0x58, 0x59, 0x08, 0xaf, 0xa3, 0x4a, 0x4b,
|
||||
0xf6, 0x9c, 0x3b, 0x3e, 0x79, 0x08, 0x4f, 0x30, 0x8e, 0x31, 0x41, 0x65, 0xdb, 0x1b, 0x52, 0x16,
|
||||
0x0b, 0xb2, 0x0c, 0xa9, 0x34, 0xdc, 0xfc, 0x08, 0xc8, 0xd5, 0xa5, 0xfc, 0xa5, 0xe3, 0xc7, 0x54,
|
||||
0xce, 0x14, 0x16, 0x44, 0x83, 0xf7, 0x55, 0xc1, 0xe6, 0xab, 0x32, 0x7a, 0x9c, 0xfb, 0x52, 0xf2,
|
||||
0xcd, 0x0f, 0x6d, 0xbb, 0x9d, 0x92, 0x51, 0xae, 0xf1, 0x73, 0xb4, 0x68, 0x1f, 0x75, 0xe4, 0x64,
|
||||
0x28, 0x87, 0x69, 0x3e, 0x82, 0xe4, 0x24, 0x98, 0x56, 0x9d, 0x79, 0xe1, 0x4b, 0xca, 0xbd, 0xde,
|
||||
0x08, 0x88, 0x5b, 0xb1, 0x26, 0x41, 0xfc, 0x3d, 0x2a, 0xa9, 0xcf, 0x23, 0xc5, 0x5a, 0x71, 0xab,
|
||||
0xba, 0xb3, 0x7d, 0xdf, 0xec, 0x4c, 0x55, 0xbe, 0x1f, 0x08, 0x3e, 0x4a, 0x9e, 0x32, 0xd9, 0x41,
|
||||
0x32, 0xf3, 0x98, 0x8a, 0x01, 0xeb, 0xa6, 0x3c, 0x56, 0x91, 0xbc, 0x43, 0x83, 0x75, 0x47, 0x04,
|
||||
0xab, 0x3b, 0xc8, 0x35, 0x5e, 0x46, 0x45, 0x7b, 0xaf, 0x9d, 0x30, 0x5b, 0x2e, 0xf1, 0x77, 0x99,
|
||||
0xe7, 0x2d, 0xc1, 0x00, 0x9f, 0x98, 0x4a, 0xec, 0x66, 0x2a, 0x76, 0xb3, 0x99, 0x88, 0x5d, 0x11,
|
||||
0xe1, 0xf5, 0x1f, 0x1b, 0x5a, 0x66, 0x06, 0xcf, 0xd1, 0xa2, 0x92, 0xc2, 0xb1, 0x73, 0xd1, 0xf1,
|
||||
0x7e, 0xa1, 0x44, 0xaf, 0x69, 0x5b, 0x8b, 0xd6, 0x24, 0x88, 0xbf, 0xb9, 0x9b, 0x54, 0x79, 0xf6,
|
||||
0x53, 0xd2, 0x1e, 0x7c, 0x86, 0x8c, 0x26, 0xe5, 0xb4, 0xef, 0x45, 0x82, 0xf2, 0x3d, 0xee, 0x09,
|
||||
0xcf, 0x75, 0xfc, 0x44, 0x24, 0xbb, 0x3d, 0x41, 0x39, 0x48, 0x6b, 0xc6, 0x5d, 0xef, 0xd9, 0x0a,
|
||||
0x1b, 0x08, 0x75, 0x5c, 0xee, 0x85, 0x62, 0x97, 0xf7, 0x23, 0x82, 0x80, 0x31, 0x19, 0x04, 0x6f,
|
||||
0xa3, 0x95, 0x26, 0x73, 0xcf, 0x28, 0xdf, 0x63, 0x81, 0x70, 0xbc, 0x80, 0xf2, 0x56, 0x13, 0xc4,
|
||||
0xa3, 0x5b, 0xd3, 0x09, 0x49, 0xbd, 0xce, 0x80, 0xfa, 0x7e, 0xa2, 0x5f, 0x15, 0xc8, 0xa1, 0x1d,
|
||||
0xee, 0xb4, 0x5b, 0x27, 0x07, 0x64, 0x55, 0x0d, 0x4d, 0x45, 0x72, 0x68, 0x07, 0x56, 0x7b, 0x0f,
|
||||
0xb4, 0xa4, 0x5b, 0xb0, 0x96, 0xdf, 0x23, 0xff, 0xfe, 0x14, 0x51, 0xfb, 0xa8, 0x03, 0x12, 0xa9,
|
||||
0x58, 0x19, 0x44, 0x5a, 0xd0, 0xae, 0xef, 0x39, 0x11, 0xd8, 0xa7, 0x92, 0xc8, 0x1d, 0x80, 0x37,
|
||||
0xd1, 0x03, 0x08, 0x92, 0x2b, 0x26, 0x42, 0x99, 0xc0, 0xf0, 0xe7, 0xa8, 0x68, 0xdb, 0x47, 0x64,
|
||||
0x65, 0xf6, 0x37, 0x94, 0xf5, 0xeb, 0x3f, 0xa6, 0x22, 0x03, 0x5a, 0x4a, 0x72, 0x9d, 0xd1, 0x51,
|
||||
0xa2, 0x19, 0xb9, 0xc4, 0xdb, 0x68, 0xe1, 0x1c, 0x64, 0x37, 0x97, 0x58, 0xc3, 0x04, 0xcb, 0x53,
|
||||
0x75, 0x5a, 0xaa, 0xe8, 0xeb, 0xb9, 0x2f, 0xb5, 0xcd, 0x5f, 0x75, 0xa4, 0x03, 0xf5, 0xc1, 0xe6,
|
||||
0x32, 0xfe, 0xaf, 0xbd, 0x17, 0xff, 0x9f, 0xcb, 0xf5, 0xff, 0x62, 0xbe, 0xff, 0xcf, 0x67, 0xfd,
|
||||
0x7f, 0x92, 0x14, 0x0b, 0x53, 0xa4, 0x48, 0x1d, 0xa3, 0x94, 0x71, 0x8c, 0x6f, 0xc7, 0x2a, 0x5f,
|
||||
0x05, 0x95, 0x67, 0x1d, 0x7a, 0x7c, 0xc9, 0x99, 0x94, 0x5d, 0xce, 0x55, 0xf6, 0xfa, 0xb4, 0xb2,
|
||||
0x2b, 0xf9, 0xca, 0xd6, 0xdf, 0x45, 0xd9, 0x13, 0xbc, 0x42, 0xf7, 0xf1, 0xaa, 0x9a, 0xc3, 0xab,
|
||||
0x5c, 0xa5, 0x3c, 0xb8, 0x57, 0x29, 0x8b, 0xf9, 0x4a, 0x79, 0x96, 0xab, 0x94, 0xa5, 0xb7, 0x2a,
|
||||
0xe5, 0xe1, 0x94, 0x52, 0xa6, 0x2c, 0xfc, 0xe9, 0x4c, 0x16, 0xbe, 0x9c, 0x67, 0xe1, 0x19, 0x47,
|
||||
0x5b, 0x79, 0x07, 0x47, 0x4b, 0x24, 0x87, 0xff, 0x9b, 0xe4, 0xf0, 0x0e, 0x5a, 0xed, 0xc4, 0xae,
|
||||
0x4b, 0xa3, 0xa8, 0x41, 0x7b, 0x8c, 0xd3, 0xb6, 0x13, 0x45, 0x5e, 0xd0, 0x27, 0x8f, 0xe1, 0x87,
|
||||
0x33, 0x37, 0x87, 0xbf, 0x40, 0x6b, 0x2f, 0x1c, 0xcf, 0x8f, 0x39, 0x4d, 0x12, 0xa9, 0xeb, 0x91,
|
||||
0x35, 0xe8, 0x7a, 0x4b, 0x56, 0xce, 0xbf, 0xcd, 0xd9, 0xc5, 0x08, 0x78, 0xfd, 0x81, 0x9a, 0xff,
|
||||
0x18, 0x18, 0x67, 0x61, 0x08, 0x24, 0x93, 0x85, 0x49, 0xdc, 0x6f, 0xd8, 0x8f, 0xde, 0x9f, 0x61,
|
||||
0x4f, 0xfd, 0x04, 0x3d, 0x81, 0x7b, 0x4d, 0x82, 0xff, 0x83, 0x5b, 0x35, 0x8e, 0x2f, 0xff, 0x32,
|
||||
0x0a, 0x97, 0x37, 0x86, 0x76, 0x75, 0x63, 0x68, 0x7f, 0xde, 0x18, 0xda, 0xab, 0x5b, 0xa3, 0xf0,
|
||||
0xfa, 0xd6, 0x28, 0x5c, 0xdd, 0x1a, 0x85, 0xdf, 0x6e, 0x8d, 0xc2, 0xcf, 0x9f, 0xfc, 0x9b, 0x59,
|
||||
0xbd, 0xf1, 0x2f, 0xf8, 0x69, 0x09, 0x80, 0xcf, 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0xce, 0x2e,
|
||||
0x2d, 0x41, 0x9c, 0x0b, 0x00, 0x00,
|
||||
// 1076 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x5d, 0x4f, 0xe3, 0x46,
|
||||
0x14, 0x8d, 0x09, 0x24, 0xf1, 0x04, 0x58, 0x98, 0x05, 0x3a, 0xcb, 0x6e, 0x4d, 0x4a, 0xf7, 0x81,
|
||||
0xaa, 0x34, 0x91, 0xe8, 0x87, 0xda, 0x4a, 0x6d, 0x45, 0x08, 0x0b, 0xa9, 0x80, 0xa6, 0x4e, 0xba,
|
||||
0x95, 0xfa, 0x66, 0x9c, 0x49, 0x62, 0xe1, 0x78, 0xac, 0xf1, 0x18, 0x91, 0xfe, 0x8a, 0x7d, 0xdc,
|
||||
0x9f, 0xc4, 0x23, 0x8f, 0x95, 0x2a, 0xd1, 0x2e, 0xfc, 0x8b, 0x3e, 0x55, 0x73, 0xc7, 0x0e, 0xf6,
|
||||
0xc6, 0x5b, 0xd2, 0xd5, 0xee, 0x13, 0x73, 0xef, 0xb9, 0x77, 0xc6, 0x33, 0xf7, 0x9c, 0x13, 0xd0,
|
||||
0x47, 0x3e, 0x67, 0x82, 0xd5, 0xfc, 0xd3, 0x80, 0xf2, 0x73, 0xc7, 0xa6, 0xb5, 0x01, 0xb5, 0x5c,
|
||||
0x31, 0xb0, 0x07, 0xd4, 0x3e, 0xab, 0x02, 0x86, 0xf5, 0x31, 0xb8, 0x6e, 0xf4, 0x19, 0xeb, 0xbb,
|
||||
0xb4, 0x06, 0xc0, 0x69, 0xd8, 0xab, 0x75, 0x43, 0x6e, 0x09, 0x87, 0x79, 0xaa, 0x74, 0xfd, 0x71,
|
||||
0xbc, 0x9b, 0xcd, 0x86, 0x43, 0xe6, 0xd5, 0xd4, 0x9f, 0x08, 0x5c, 0xe9, 0xb3, 0x3e, 0x53, 0x05,
|
||||
0x72, 0xa5, 0xb2, 0x9b, 0x7f, 0xce, 0xa2, 0xf2, 0x21, 0x9c, 0xb9, 0x27, 0xcf, 0xc4, 0x18, 0xcd,
|
||||
0x9e, 0xb0, 0x2e, 0x25, 0x5a, 0x45, 0xdb, 0xd2, 0x4d, 0x58, 0xe3, 0x03, 0x54, 0x04, 0xb0, 0xd9,
|
||||
0x20, 0x33, 0x32, 0x5d, 0xff, 0xec, 0x9f, 0xeb, 0x8d, 0x4f, 0xfa, 0x8e, 0x18, 0x84, 0xa7, 0x55,
|
||||
0x9b, 0x0d, 0x6b, 0x03, 0x2b, 0x18, 0x38, 0x36, 0xe3, 0x7e, 0xcd, 0x66, 0x5e, 0x10, 0xba, 0x35,
|
||||
0x31, 0xf2, 0x69, 0x50, 0x8d, 0x9a, 0xcc, 0xb8, 0x1b, 0x36, 0xb7, 0x86, 0x94, 0xe4, 0xa3, 0xcd,
|
||||
0xad, 0x21, 0xc5, 0x6b, 0xa8, 0xd0, 0x16, 0x96, 0x08, 0x03, 0x32, 0x0b, 0xd9, 0x28, 0xc2, 0x2b,
|
||||
0x68, 0xee, 0x84, 0x09, 0x1a, 0x90, 0x39, 0x48, 0xab, 0x40, 0x56, 0xff, 0x14, 0x0a, 0x3f, 0x14,
|
||||
0xa4, 0xa0, 0xaa, 0x55, 0x84, 0x9f, 0x20, 0xbd, 0xad, 0x1e, 0xa9, 0xd9, 0x20, 0x45, 0x80, 0xee,
|
||||
0x12, 0xb8, 0x82, 0xca, 0x51, 0x00, 0xc7, 0x97, 0x00, 0x4f, 0xa6, 0x12, 0x15, 0x1d, 0xab, 0x1f,
|
||||
0x10, 0xbd, 0x92, 0x4f, 0x54, 0xc8, 0x94, 0xfc, 0xf6, 0xce, 0xc8, 0xa7, 0x64, 0x5e, 0x7d, 0xbb,
|
||||
0x5c, 0xe3, 0x67, 0x08, 0x35, 0x68, 0xcf, 0xf1, 0x1c, 0x39, 0x03, 0x82, 0x2a, 0xda, 0x56, 0x79,
|
||||
0xa7, 0x52, 0x1d, 0xcf, 0xab, 0x9a, 0x78, 0xd8, 0xbb, 0xba, 0xfa, 0xec, 0xe5, 0xf5, 0x46, 0xce,
|
||||
0x4c, 0x74, 0xe2, 0x6f, 0x90, 0x6e, 0x5a, 0x3d, 0xd1, 0xf4, 0xba, 0xf4, 0x82, 0x94, 0x61, 0x9b,
|
||||
0xe5, 0x6a, 0x34, 0xbc, 0x31, 0x50, 0x2f, 0xc9, 0xbe, 0xab, 0xeb, 0x0d, 0xcd, 0xbc, 0xab, 0xc6,
|
||||
0x0d, 0xb4, 0xb8, 0xef, 0x09, 0xca, 0x7d, 0xee, 0x04, 0xf4, 0x98, 0x0a, 0x8b, 0x2c, 0x40, 0xff,
|
||||
0x5a, 0xdc, 0x9f, 0x46, 0xa3, 0xc3, 0x5f, 0xeb, 0x91, 0xd7, 0xdf, 0xbf, 0xf0, 0x59, 0x40, 0xbb,
|
||||
0x2d, 0xc6, 0x05, 0x59, 0xac, 0x68, 0x5b, 0x73, 0x66, 0x32, 0x85, 0xd7, 0x51, 0xa9, 0x29, 0x7b,
|
||||
0xce, 0x2d, 0x97, 0x3c, 0x80, 0x27, 0x18, 0xc7, 0x98, 0xa0, 0x62, 0xc7, 0x19, 0x52, 0x16, 0x0a,
|
||||
0xb2, 0x04, 0x50, 0x1c, 0x6e, 0x7e, 0x0c, 0xe4, 0xea, 0x52, 0xfe, 0xdc, 0x72, 0x43, 0x2a, 0x67,
|
||||
0x0a, 0x0b, 0xa2, 0xc1, 0xfb, 0xaa, 0x60, 0xf3, 0x45, 0x11, 0xad, 0x66, 0xbe, 0x94, 0x7c, 0xf3,
|
||||
0xc3, 0x4e, 0xa7, 0x15, 0x93, 0x51, 0xae, 0xf1, 0x53, 0xb4, 0xd0, 0x39, 0x6a, 0xcb, 0xc9, 0x50,
|
||||
0x0e, 0xd3, 0x7c, 0x08, 0x60, 0x3a, 0x19, 0x57, 0x9d, 0x39, 0xfe, 0x73, 0xca, 0x9d, 0xde, 0x08,
|
||||
0x88, 0x5b, 0x32, 0xd3, 0x49, 0xfc, 0x23, 0x2a, 0xa8, 0xcf, 0x23, 0xf9, 0x4a, 0x7e, 0xab, 0xbc,
|
||||
0xb3, 0x7d, 0xdf, 0xec, 0xaa, 0xaa, 0x7c, 0xdf, 0x13, 0x7c, 0x14, 0x3d, 0x65, 0xb4, 0x83, 0x64,
|
||||
0xe6, 0x31, 0x15, 0x03, 0xd6, 0x8d, 0x79, 0xac, 0x22, 0x79, 0x87, 0x3a, 0xeb, 0x8e, 0x08, 0x56,
|
||||
0x77, 0x90, 0x6b, 0xbc, 0x84, 0xf2, 0x9d, 0xbd, 0x56, 0xc4, 0x6c, 0xb9, 0xc4, 0x3f, 0x24, 0x9e,
|
||||
0xb7, 0x00, 0x03, 0x7c, 0x54, 0x55, 0x62, 0xaf, 0xc6, 0x62, 0xaf, 0x36, 0x22, 0xb1, 0x2b, 0x22,
|
||||
0xbc, 0xfc, 0x6b, 0x43, 0x4b, 0xcc, 0xe0, 0x29, 0x5a, 0x50, 0x52, 0x38, 0xb6, 0x2e, 0xda, 0xce,
|
||||
0xef, 0x94, 0xe8, 0x15, 0x6d, 0x6b, 0xc1, 0x4c, 0x27, 0xf1, 0x77, 0x77, 0x93, 0x2a, 0x4e, 0x7f,
|
||||
0x4a, 0xdc, 0x83, 0xcf, 0x90, 0xd1, 0xa0, 0x9c, 0xf6, 0x9d, 0x40, 0x50, 0xbe, 0xc7, 0x1d, 0xe1,
|
||||
0xd8, 0x96, 0x1b, 0x89, 0x64, 0xb7, 0x27, 0x28, 0x07, 0x69, 0x4d, 0xb9, 0xeb, 0x3d, 0x5b, 0x61,
|
||||
0x03, 0xa1, 0xb6, 0xcd, 0x1d, 0x5f, 0xec, 0xf2, 0x7e, 0x40, 0x10, 0x30, 0x26, 0x91, 0xc1, 0xdb,
|
||||
0x68, 0xb9, 0xc1, 0xec, 0x33, 0xca, 0xf7, 0x98, 0x27, 0x2c, 0xc7, 0xa3, 0xbc, 0xd9, 0x00, 0xf1,
|
||||
0xe8, 0xe6, 0x24, 0x20, 0xa9, 0xd7, 0x1e, 0x50, 0xd7, 0x8d, 0xf4, 0xab, 0x02, 0x39, 0xb4, 0xc3,
|
||||
0x9d, 0x56, 0xf3, 0xe4, 0x80, 0xac, 0xa8, 0xa1, 0xa9, 0x48, 0x0e, 0xed, 0xc0, 0x6c, 0xed, 0x81,
|
||||
0x96, 0x74, 0x13, 0xd6, 0xf2, 0x7b, 0xe4, 0xdf, 0x5f, 0x02, 0xda, 0x39, 0x6a, 0x83, 0x44, 0x4a,
|
||||
0x66, 0x22, 0x23, 0x2d, 0x68, 0xd7, 0x75, 0xac, 0x00, 0xec, 0x53, 0x49, 0xe4, 0x2e, 0x81, 0x37,
|
||||
0xd1, 0x3c, 0x04, 0xd1, 0x15, 0x23, 0xa1, 0xa4, 0x72, 0xf8, 0x4b, 0x94, 0xef, 0x74, 0x8e, 0xc8,
|
||||
0xf2, 0xf4, 0x6f, 0x28, 0xeb, 0xd7, 0x7f, 0x8e, 0x45, 0x06, 0xb4, 0x94, 0xe4, 0x3a, 0xa3, 0xa3,
|
||||
0x48, 0x33, 0x72, 0x89, 0xb7, 0xd1, 0xdc, 0x39, 0xc8, 0x6e, 0x26, 0xb2, 0x86, 0x14, 0xcb, 0x63,
|
||||
0x75, 0x9a, 0xaa, 0xe8, 0xdb, 0x99, 0xaf, 0xb5, 0xcd, 0x57, 0x3a, 0xd2, 0x81, 0xfa, 0x60, 0x73,
|
||||
0x09, 0xff, 0xd7, 0xde, 0x89, 0xff, 0xcf, 0x64, 0xfa, 0x7f, 0x3e, 0xdb, 0xff, 0x67, 0x93, 0xfe,
|
||||
0x9f, 0x26, 0xc5, 0xdc, 0x04, 0x29, 0x62, 0xc7, 0x28, 0x24, 0x1c, 0xe3, 0xfb, 0xb1, 0xca, 0x57,
|
||||
0x40, 0xe5, 0x49, 0x87, 0x1e, 0x5f, 0x72, 0x2a, 0x65, 0x17, 0x33, 0x95, 0xbd, 0x3e, 0xa9, 0xec,
|
||||
0x52, 0xb6, 0xb2, 0xf5, 0xb7, 0x51, 0x76, 0x8a, 0x57, 0xe8, 0x3e, 0x5e, 0x95, 0x33, 0x78, 0x95,
|
||||
0xa9, 0x94, 0xf9, 0x7b, 0x95, 0xb2, 0x90, 0xad, 0x94, 0x27, 0x99, 0x4a, 0x59, 0x7c, 0xa3, 0x52,
|
||||
0x1e, 0x4c, 0x28, 0x65, 0xc2, 0xc2, 0x1f, 0x4f, 0x65, 0xe1, 0x4b, 0x59, 0x16, 0x9e, 0x70, 0xb4,
|
||||
0xe5, 0xb7, 0x70, 0xb4, 0x48, 0x72, 0xf8, 0xff, 0x49, 0x0e, 0xef, 0xa0, 0x95, 0x76, 0x68, 0xdb,
|
||||
0x34, 0x08, 0xea, 0xb4, 0xc7, 0x38, 0x6d, 0x59, 0x41, 0xe0, 0x78, 0x7d, 0xb2, 0x0a, 0x3f, 0x9c,
|
||||
0x99, 0x18, 0xfe, 0x02, 0xad, 0x3e, 0xb3, 0x1c, 0x37, 0xe4, 0x34, 0x02, 0x7e, 0xb5, 0xb8, 0x27,
|
||||
0x9b, 0x3e, 0x84, 0xa6, 0x6c, 0x10, 0x7f, 0x85, 0xd6, 0xd2, 0x40, 0xec, 0x95, 0x64, 0x0d, 0xda,
|
||||
0xde, 0x80, 0x4a, 0xd6, 0xb4, 0x38, 0xbb, 0x18, 0x81, 0x1a, 0x3e, 0x50, 0xac, 0x19, 0x27, 0xc6,
|
||||
0x28, 0x8c, 0x8e, 0x24, 0x50, 0x98, 0xdf, 0xfd, 0x36, 0xff, 0xf0, 0xdd, 0xd9, 0xfc, 0xc4, 0x0f,
|
||||
0xd7, 0x23, 0xb8, 0x57, 0x3a, 0xf9, 0x1e, 0x3c, 0xae, 0x7e, 0x7c, 0xf9, 0xca, 0xc8, 0x5d, 0xde,
|
||||
0x18, 0xda, 0xd5, 0x8d, 0xa1, 0xfd, 0x7d, 0x63, 0x68, 0x2f, 0x6e, 0x8d, 0xdc, 0xcb, 0x5b, 0x23,
|
||||
0x77, 0x75, 0x6b, 0xe4, 0xfe, 0xb8, 0x35, 0x72, 0xbf, 0x7d, 0xfa, 0x5f, 0x16, 0xf7, 0xda, 0x3f,
|
||||
0xee, 0xa7, 0x05, 0x48, 0x7c, 0xfe, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8b, 0x36, 0xc6, 0x76,
|
||||
0xd2, 0x0b, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *HealthCheck) Marshal() (dAtA []byte, err error) {
|
||||
|
@@ -757,6 +760,13 @@ func (m *CheckType) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.FailuresBeforeWarning != 0 {
i = encodeVarintHealthcheck(dAtA, i, uint64(m.FailuresBeforeWarning))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0xe8
}
if len(m.H2PING) > 0 {
i -= len(m.H2PING)
copy(dAtA[i:], m.H2PING)
@@ -1296,6 +1306,9 @@ func (m *CheckType) Size() (n int) {
if l > 0 {
n += 2 + l + sovHealthcheck(uint64(l))
}
if m.FailuresBeforeWarning != 0 {
n += 2 + sovHealthcheck(uint64(m.FailuresBeforeWarning))
}
return n
}

@@ -3656,6 +3669,25 @@ func (m *CheckType) Unmarshal(dAtA []byte) error {
}
m.H2PING = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 29:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field FailuresBeforeWarning", wireType)
}
m.FailuresBeforeWarning = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHealthcheck
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.FailuresBeforeWarning |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipHealthcheck(dAtA[iNdEx:])

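A quick sanity check on the generated marshal code above: it writes `0x1` and then `0xe8` at decreasing offsets (MarshalToSizedBuffer fills the buffer backwards), which lands as `0xe8 0x01` in the encoded output, exactly the varint encoding of the key for field number 29 with wire type 0. A small standalone sketch of that arithmetic:

```go
package main

import "fmt"

func main() {
	// Protobuf key for FailuresBeforeWarning: field number 29, wire type 0 (varint).
	key := uint64(29<<3 | 0) // 232

	// Varint-encode the key. 232 does not fit in seven bits, so it takes two bytes.
	var buf []byte
	for key >= 0x80 {
		buf = append(buf, byte(key)|0x80)
		key >>= 7
	}
	buf = append(buf, byte(key))

	fmt.Printf("% x\n", buf) // prints "e8 01"
}
```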
@@ -136,6 +136,8 @@ message CheckType {
// mog: func-to=int func-from=int32
int32 SuccessBeforePassing = 21;
// mog: func-to=int func-from=int32
int32 FailuresBeforeWarning = 29;
// mog: func-to=int func-from=int32
int32 FailuresBeforeCritical = 22;

// Definition fields used when exposing checks through a proxy
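The `.proto` change is the last piece of plumbing for the new field; the intent suggested by the name is that a check can be marked warning after a smaller number of consecutive failures than it takes to go critical. The following is only an illustrative counter sketching that behaviour, not the real `checks.StatusHandler` implementation:

```go
package main

import "fmt"

// nextStatus is an illustrative helper, not the real checks.StatusHandler: it
// maps a run of consecutive failures onto a health status, assuming the check
// warns after failuresBeforeWarning failures and goes critical after
// failuresBeforeCritical.
func nextStatus(consecutiveFailures, failuresBeforeWarning, failuresBeforeCritical int) string {
	switch {
	case consecutiveFailures >= failuresBeforeCritical:
		return "critical"
	case consecutiveFailures >= failuresBeforeWarning:
		return "warning"
	default:
		return "passing"
	}
}

func main() {
	// With failures_before_warning=2 and failures_before_critical=5 the check
	// degrades in two steps instead of jumping straight to critical.
	for failures := 0; failures <= 6; failures++ {
		fmt.Println(failures, nextStatus(failures, 2, 5))
	}
}
```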
@@ -14,6 +14,6 @@ func RequireErrorContains(t testing.TB, err error, expectedErrorMessage string)
t.Fatal("An error is expected but got nil.")
}
if !strings.Contains(err.Error(), expectedErrorMessage) {
t.Fatalf("unexpected error: %v", err)
t.Fatalf("expected err %v to contain %q", err, expectedErrorMessage)
}
}

@@ -16,7 +16,7 @@ export default class NspaceAbility extends BaseAbility {
}

get canChoose() {
return this.canUse && this.nspaces.length > 0;
return this.canUse;
}

get canUse() {

@@ -0,0 +1,25 @@
import BaseAbility from './base';
import { inject as service } from '@ember/service';

export default class PartitionAbility extends BaseAbility {
@service('env') env;

resource = 'operator';
segmented = false;

get canManage() {
return this.canCreate;
}

get canDelete() {
return this.item.Name !== 'default' && super.canDelete;
}

get canChoose() {
return this.canUse;
}

get canUse() {
return this.env.var('CONSUL_PARTITIONS_ENABLED');
}
}

@ -1,18 +1,19 @@
|
|||
import Adapter from './application';
|
||||
|
||||
export default class AuthMethodAdapter extends Adapter {
|
||||
requestForQuery(request, { dc, ns, index, id }) {
|
||||
requestForQuery(request, { dc, ns, partition, index, id }) {
|
||||
return request`
|
||||
GET /v1/acl/auth-methods?${{ dc }}
|
||||
|
||||
${{
|
||||
...this.formatNspace(ns),
|
||||
ns,
|
||||
partition,
|
||||
index,
|
||||
}}
|
||||
`;
|
||||
}
|
||||
|
||||
requestForQueryRecord(request, { dc, ns, index, id }) {
|
||||
requestForQueryRecord(request, { dc, ns, partition, index, id }) {
|
||||
if (typeof id === 'undefined') {
|
||||
throw new Error('You must specify an id');
|
||||
}
|
||||
|
@ -20,7 +21,8 @@ export default class AuthMethodAdapter extends Adapter {
|
|||
GET /v1/acl/auth-method/${id}?${{ dc }}
|
||||
|
||||
${{
|
||||
...this.formatNspace(ns),
|
||||
ns,
|
||||
partition,
|
||||
index,
|
||||
}}
|
||||
`;
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
import Adapter from './application';
|
||||
|
||||
export default class BindingRuleAdapter extends Adapter {
|
||||
requestForQuery(request, { dc, ns, authmethod, index, id }) {
|
||||
requestForQuery(request, { dc, ns, partition, authmethod, index }) {
|
||||
return request`
|
||||
GET /v1/acl/binding-rules?${{ dc, authmethod }}
|
||||
|
||||
${{
|
||||
...this.formatNspace(ns),
|
||||
ns,
|
||||
partition,
|
||||
index,
|
||||
}}
|
||||
`;
|
||||
|
|
|
@ -1,12 +1,15 @@
|
|||
import Adapter from './application';
|
||||
// TODO: Update to use this.formatDatacenter()
|
||||
export default class CoordinateAdapter extends Adapter {
|
||||
requestForQuery(request, { dc, index, uri }) {
|
||||
requestForQuery(request, { dc, partition, index, uri }) {
|
||||
return request`
|
||||
GET /v1/coordinate/nodes?${{ dc }}
|
||||
X-Request-ID: ${uri}
|
||||
|
||||
${{ index }}
|
||||
${{
|
||||
partition,
|
||||
index,
|
||||
}}
|
||||
`;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,7 +2,7 @@ import Adapter from './application';
|
|||
|
||||
// TODO: Update to use this.formatDatacenter()
|
||||
export default class DiscoveryChainAdapter extends Adapter {
|
||||
requestForQueryRecord(request, { dc, ns, index, id, uri }) {
|
||||
requestForQueryRecord(request, { dc, ns, partition, index, id, uri }) {
|
||||
if (typeof id === 'undefined') {
|
||||
throw new Error('You must specify an id');
|
||||
}
|
||||
|
@ -11,7 +11,8 @@ export default class DiscoveryChainAdapter extends Adapter {
|
|||
X-Request-ID: ${uri}
|
||||
|
||||
${{
|
||||
...this.formatNspace(ns),
|
||||
ns,
|
||||
partition,
|
||||
index,
|
||||
}}
|
||||
`;
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
import Adapter, { DATACENTER_QUERY_PARAM as API_DATACENTER_KEY } from './application';
|
||||
import Adapter from './application';
|
||||
import { get } from '@ember/object';
|
||||
import { FOREIGN_KEY as DATACENTER_KEY } from 'consul-ui/models/dc';
|
||||
|
||||
// Intentions have different namespacing to the rest of the UI in that the don't
|
||||
// have a Namespace property, the DestinationNS is essentially its namespace.
|
||||
|
@ -22,7 +21,8 @@ export default class IntentionAdapter extends Adapter {
|
|||
}
|
||||
|
||||
${{
|
||||
...this.formatNspace('*'),
|
||||
partition: '',
|
||||
ns: '*',
|
||||
index,
|
||||
filter,
|
||||
}}
|
||||
|
@ -76,7 +76,7 @@ export default class IntentionAdapter extends Adapter {
|
|||
PUT /v1/connect/intentions/exact?${{
|
||||
source: `${data.SourceNS}/${data.SourceName}`,
|
||||
destination: `${data.DestinationNS}/${data.DestinationName}`,
|
||||
[API_DATACENTER_KEY]: data[DATACENTER_KEY],
|
||||
dc: data.Datacenter,
|
||||
}}
|
||||
|
||||
${body}
|
||||
|
@ -95,7 +95,7 @@ export default class IntentionAdapter extends Adapter {
|
|||
DELETE /v1/connect/intentions/exact?${{
|
||||
source: `${data.SourceNS}/${data.SourceName}`,
|
||||
destination: `${data.DestinationNS}/${data.DestinationName}`,
|
||||
[API_DATACENTER_KEY]: data[DATACENTER_KEY],
|
||||
dc: data.Datacenter,
|
||||
}}
|
||||
`;
|
||||
}
|
||||
|
|
|
@ -2,28 +2,29 @@ import Adapter from './application';
import isFolder from 'consul-ui/utils/isFolder';
import keyToArray from 'consul-ui/utils/keyToArray';
import { SLUG_KEY } from 'consul-ui/models/kv';
import { FOREIGN_KEY as DATACENTER_KEY } from 'consul-ui/models/dc';
import { NSPACE_KEY } from 'consul-ui/models/nspace';

// TODO: Update to use this.formatDatacenter()
const API_KEYS_KEY = 'keys';

export default class KvAdapter extends Adapter {
requestForQuery(request, { dc, ns, index, id, separator }) {
async requestForQuery(request, { dc, ns, partition, index, id, separator }) {
if (typeof id === 'undefined') {
throw new Error('You must specify an id');
}
return request`
const respond = await request`
GET /v1/kv/${keyToArray(id)}?${{ [API_KEYS_KEY]: null, dc, separator }}

${{
...this.formatNspace(ns),
ns,
partition,
index,
}}
`;
await respond((headers, body) => delete headers['x-consul-index']);
return respond;
}

requestForQueryRecord(request, { dc, ns, index, id }) {
async requestForQueryRecord(request, { dc, ns, partition, index, id }) {
if (typeof id === 'undefined') {
throw new Error('You must specify an id');
}
@ -31,7 +32,8 @@ export default class KvAdapter extends Adapter {
GET /v1/kv/${keyToArray(id)}?${{ dc }}

${{
...this.formatNspace(ns),
ns,
partition,
index,
}}
`;
@ -41,8 +43,9 @@ export default class KvAdapter extends Adapter {
// https://github.com/hashicorp/consul/issues/3804
requestForCreateRecord(request, serialized, data) {
const params = {
...this.formatDatacenter(data[DATACENTER_KEY]),
...this.formatNspace(data[NSPACE_KEY]),
dc: data.Datacenter,
ns: data.Namespace,
partition: data.Partition,
};
return request`
PUT /v1/kv/${keyToArray(data[SLUG_KEY])}?${params}
@ -54,9 +57,10 @@ export default class KvAdapter extends Adapter {

requestForUpdateRecord(request, serialized, data) {
const params = {
...this.formatDatacenter(data[DATACENTER_KEY]),
dc: data.Datacenter,
ns: data.Namespace,
partition: data.Partition,
flags: data.Flags,
...this.formatNspace(data[NSPACE_KEY]),
};
return request`
PUT /v1/kv/${keyToArray(data[SLUG_KEY])}?${params}
@ -72,8 +76,9 @@ export default class KvAdapter extends Adapter {
recurse = null;
}
const params = {
...this.formatDatacenter(data[DATACENTER_KEY]),
...this.formatNspace(data[NSPACE_KEY]),
dc: data.Datacenter,
ns: data.Namespace,
partition: data.Partition,
recurse,
};
return request`

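The key listing becomes `async` so it can intercept the response and delete the `x-consul-index` header, leaving the UI nothing to long-poll against; KV listings therefore behave as one-off GETs rather than blocking queries. A minimal, self-contained sketch of that opt-out (not the real `request`/`respond` machinery):

```js
// Minimal sketch, assuming respond() hands the interceptor the response headers:
// once x-consul-index is gone there is no cursor left to long-poll with.
const headers = { 'x-consul-index': '42' };
const respond = handler => handler(headers, [{ Key: 'foo/' }]);
respond((h, body) => delete h['x-consul-index']);
console.log('x-consul-index' in headers); // => false
```
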
@ -10,19 +10,20 @@ import Adapter from './application';
// to the node.

export default class NodeAdapter extends Adapter {
requestForQuery(request, { dc, ns, index, id, uri }) {
requestForQuery(request, { dc, ns, partition, index, id, uri }) {
return request`
GET /v1/internal/ui/nodes?${{ dc }}
X-Request-ID: ${uri}

${{
...this.formatNspace(ns),
ns,
partition,
index,
}}
`;
}

requestForQueryRecord(request, { dc, ns, index, id, uri }) {
requestForQueryRecord(request, { dc, ns, partition, index, id, uri }) {
if (typeof id === 'undefined') {
throw new Error('You must specify an id');
}
@ -31,12 +32,14 @@ export default class NodeAdapter extends Adapter {
X-Request-ID: ${uri}

${{
...this.formatNspace(ns),
ns,
partition,
index,
}}
`;
}

// this does not require a partition parameter
requestForQueryLeader(request, { dc, uri }) {
return request`
GET /v1/status/leader?${{ dc }}

@ -3,29 +3,38 @@ import { SLUG_KEY } from 'consul-ui/models/nspace';

// namespaces aren't categorized by datacenter, therefore no dc
export default class NspaceAdapter extends Adapter {
requestForQuery(request, { index, uri }) {
requestForQuery(request, { dc, partition, index, uri }) {
return request`
GET /v1/namespaces
GET /v1/namespaces?${{ dc }}
X-Request-ID: ${uri}

${{ index }}
${{
partition,
index,
}}
`;
}

requestForQueryRecord(request, { index, id }) {
requestForQueryRecord(request, { dc, partition, index, id }) {
if (typeof id === 'undefined') {
throw new Error('You must specify a name');
}
return request`
GET /v1/namespace/${id}
GET /v1/namespace/${id}?${{ dc }}

${{ index }}
${{
partition,
index,
}}
`;
}

requestForCreateRecord(request, serialized, data) {
return request`
PUT /v1/namespace/${data[SLUG_KEY]}
PUT /v1/namespace/${data[SLUG_KEY]}?${{
dc: data.Datacenter,
partition: data.Partition,
}}

${{
Name: serialized.Name,
@ -40,7 +49,10 @@ export default class NspaceAdapter extends Adapter {

requestForUpdateRecord(request, serialized, data) {
return request`
PUT /v1/namespace/${data[SLUG_KEY]}
PUT /v1/namespace/${data[SLUG_KEY]}?${{
dc: data.Datacenter,
partition: data.Partition,
}}

${{
Description: serialized.Description,
@ -54,7 +66,10 @@ export default class NspaceAdapter extends Adapter {

requestForDeleteRecord(request, serialized, data) {
return request`
DELETE /v1/namespace/${data[SLUG_KEY]}
DELETE /v1/namespace/${data[SLUG_KEY]}?${{
dc: data.Datacenter,
partition: data.Partition,
}}
`;
}
}

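Namespace reads and writes now carry both `dc` and `partition`. For a made-up record, the update call above would target a URL along these lines (illustrative, not taken from the diff):

```js
// Illustrative only: the URL shape requestForUpdateRecord should now emit.
const data = { Name: 'team-1', Datacenter: 'dc1', Partition: 'default' };
const url = `PUT /v1/namespace/${data.Name}?dc=${data.Datacenter}&partition=${data.Partition}`;
console.log(url); // => "PUT /v1/namespace/team-1?dc=dc1&partition=default"
```
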
@ -1,47 +1,38 @@
import Adapter from './application';
import { inject as service } from '@ember/service';
import { env } from 'consul-ui/env';
import nonEmptySet from 'consul-ui/utils/non-empty-set';

let Namespace;
if (env('CONSUL_NSPACES_ENABLED')) {
Namespace = nonEmptySet('Namespace');
} else {
Namespace = () => ({});
}

export default class OidcProviderAdapter extends Adapter {
@service('env') env;

requestForQuery(request, { dc, ns, index, uri }) {
requestForQuery(request, { dc, ns, partition, index, uri }) {
return request`
GET /v1/internal/ui/oidc-auth-methods?${{ dc }}
X-Request-ID: ${uri}

${{
ns,
partition,
index,
...this.formatNspace(ns),
}}
`;
}

requestForQueryRecord(request, { dc, ns, id }) {
requestForQueryRecord(request, { dc, ns, partition, id }) {
if (typeof id === 'undefined') {
throw new Error('You must specify an id');
}
return request`
POST /v1/acl/oidc/auth-url?${{ dc }}
POST /v1/acl/oidc/auth-url?${{ dc, ns, partition }}
Cache-Control: no-store

${{
...Namespace(ns),
AuthMethod: id,
RedirectURI: `${this.env.var('CONSUL_BASE_UI_URL')}/oidc/callback`,
}}
`;
}

requestForAuthorize(request, { dc, ns, id, code, state }) {
requestForAuthorize(request, { dc, ns, partition, id, code, state }) {
if (typeof id === 'undefined') {
throw new Error('You must specify an id');
}
@ -52,11 +43,10 @@ export default class OidcProviderAdapter extends Adapter {
throw new Error('You must specify a state');
}
return request`
POST /v1/acl/oidc/callback?${{ dc }}
POST /v1/acl/oidc/callback?${{ dc, ns, partition }}
Cache-Control: no-store

${{
...Namespace(ns),
AuthMethod: id,
Code: code,
State: state,

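With the module-level `Namespace`/`nonEmptySet` setup gone, `ns` and `partition` are passed as query parameters on the auth-url and callback calls. A rough sketch of the resulting auth-url request, with made-up values (the body keys mirror what the adapter serialises):

```js
// Illustrative only: rough shape of the call after this change; all values are invented.
const query = new URLSearchParams({ dc: 'dc1', ns: 'default', partition: 'default' });
const body = {
  AuthMethod: 'okta',
  RedirectURI: 'https://consul.example.com/ui/oidc/callback',
};
console.log(`POST /v1/acl/oidc/auth-url?${query}`, JSON.stringify(body));
```
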
@ -0,0 +1,28 @@
import Adapter from './application';

// Blocking query support for partitions is currently disabled
export default class PartitionAdapter extends Adapter {
// FIXME: Check overall hierarchy again
async requestForQuery(request, { ns, dc, index }) {
const respond = await request`
GET /v1/partitions?${{ dc }}

${{ index }}
`;
await respond((headers, body) => delete headers['x-consul-index']);
return respond;
}

async requestForQueryRecord(request, { ns, dc, index, id }) {
if (typeof id === 'undefined') {
throw new Error('You must specify an id');
}
const respond = await request`
GET /v1/partition/${id}?${{ dc }}

${{ index }}
`;
await respond((headers, body) => delete headers['x-consul-index']);
return respond;
}
}

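The new adapter lists partitions from `/v1/partitions` and reads a single one from `/v1/partition/:id`, and, as with the KV listing above, it strips `x-consul-index` so neither call is ever long-polled, matching the comment that blocking-query support is disabled. A rough, self-contained sketch of consuming those endpoints with plain `fetch` (not the UI's request machinery):

```js
// Sketch only: URL shapes follow the adapter above; error handling omitted.
async function listPartitions(dc) {
  const res = await fetch(`/v1/partitions?dc=${encodeURIComponent(dc)}`);
  return res.json(); // one-off GET; the adapter drops x-consul-index on purpose
}

async function getPartition(dc, id) {
  const res = await fetch(`/v1/partition/${encodeURIComponent(id)}?dc=${encodeURIComponent(dc)}`);
  return res.json();
}
```
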
@ -4,18 +4,24 @@ import { inject as service } from '@ember/service';
export default class PermissionAdapter extends Adapter {
@service('env') env;

requestForAuthorize(request, { dc, ns, resources = [], index }) {
requestForAuthorize(request, { dc, ns, partition, resources = [], index }) {
// the authorize endpoint is slightly different to all others in that it
// ignores an ns parameter, but accepts a Namespace property on each
// resource. Here we hide this difference from the rest of the app as
// currently we never need to ask for permissions/resources for multiple
// different namespaces in one call so here we use the ns param and add
// this to the resources instead of passing through on the queryParameter
//
// ^ same goes for Partitions

if (this.env.var('CONSUL_NSPACES_ENABLED')) {
resources = resources.map(item => ({ ...item, Namespace: ns }));
}
if (this.env.var('CONSUL_PARTITIONS_ENABLED')) {
resources = resources.map(item => ({ ...item, Partition: partition }));
}
return request`
POST /v1/internal/acl/authorize?${{ dc, index }}
POST /v1/internal/acl/authorize?${{ dc }}

${resources}
`;

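Rather than sending `ns`/`partition` as query parameters, the authorize call stamps them onto every resource in the POSTed payload, and the blocking-query `index` is dropped from the query string. A worked example of that mapping (the resource shape is illustrative):

```js
// Worked example of the two map() calls above, with an invented resource.
const ns = 'default';
const partition = 'default';
let resources = [{ Resource: 'service', Segment: 'web', Access: 'read' }];
resources = resources.map(item => ({ ...item, Namespace: ns }));
resources = resources.map(item => ({ ...item, Partition: partition }));
console.log(resources[0]);
// => { Resource: 'service', Segment: 'web', Access: 'read',
//      Namespace: 'default', Partition: 'default' }
```
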
@ -1,31 +1,21 @@
import Adapter from './application';
import { SLUG_KEY } from 'consul-ui/models/policy';
import { FOREIGN_KEY as DATACENTER_KEY } from 'consul-ui/models/dc';
import { NSPACE_KEY } from 'consul-ui/models/nspace';
import { env } from 'consul-ui/env';
import nonEmptySet from 'consul-ui/utils/non-empty-set';

let Namespace;
if (env('CONSUL_NSPACES_ENABLED')) {
Namespace = nonEmptySet('Namespace');
} else {
Namespace = () => ({});
}

// TODO: Update to use this.formatDatacenter()
export default class PolicyAdapter extends Adapter {
requestForQuery(request, { dc, ns, index, id }) {
requestForQuery(request, { dc, ns, partition, index, id }) {
return request`
GET /v1/acl/policies?${{ dc }}

${{
...this.formatNspace(ns),
ns,
partition,
index,
}}
`;
}

requestForQueryRecord(request, { dc, ns, index, id }) {
requestForQueryRecord(request, { dc, ns, partition, index, id }) {
if (typeof id === 'undefined') {
throw new Error('You must specify an id');
}
@ -33,7 +23,8 @@ export default class PolicyAdapter extends Adapter {
GET /v1/acl/policy/${id}?${{ dc }}

${{
...this.formatNspace(ns),
ns,
partition,
index,
}}
`;
@ -41,7 +32,9 @@ export default class PolicyAdapter extends Adapter {

requestForCreateRecord(request, serialized, data) {
const params = {
...this.formatDatacenter(data[DATACENTER_KEY]),
...this.formatDatacenter(data.Datacenter),
ns: data.Namespace,
partition: data.Partition,
};
return request`
PUT /v1/acl/policy?${params}
@ -51,14 +44,15 @@ export default class PolicyAdapter extends Adapter {
Description: serialized.Description,
Rules: serialized.Rules,
Datacenters: serialized.Datacenters,
...Namespace(serialized.Namespace),
}}
`;
}

requestForUpdateRecord(request, serialized, data) {
const params = {
...this.formatDatacenter(data[DATACENTER_KEY]),
...this.formatDatacenter(data.Datacenter),
ns: data.Namespace,
partition: data.Partition,
};
return request`
PUT /v1/acl/policy/${data[SLUG_KEY]}?${params}
@ -68,15 +62,15 @@ export default class PolicyAdapter extends Adapter {
Description: serialized.Description,
Rules: serialized.Rules,
Datacenters: serialized.Datacenters,
...Namespace(serialized.Namespace),
}}
`;
}

requestForDeleteRecord(request, serialized, data) {
const params = {
...this.formatDatacenter(data[DATACENTER_KEY]),
...this.formatNspace(data[NSPACE_KEY]),
...this.formatDatacenter(data.Datacenter),
ns: data.Namespace,
partition: data.Partition,
};
return request`
DELETE /v1/acl/policy/${data[SLUG_KEY]}?${params}

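The policy adapter keeps `formatDatacenter()` but now feeds it `data.Datacenter` directly and adds plain `ns`/`partition` entries to its params objects, dropping the `formatNspace`/`Namespace` helpers. For a made-up record, the delete request would look roughly like this (illustrative values):

```js
// Illustrative only: the params object and URL shape for a policy delete.
const data = { ID: 'policy-1', Datacenter: 'dc1', Namespace: 'default', Partition: 'default' };
const params = new URLSearchParams({ dc: data.Datacenter, ns: data.Namespace, partition: data.Partition });
console.log(`DELETE /v1/acl/policy/${data.ID}?${params}`);
// => "DELETE /v1/acl/policy/policy-1?dc=dc1&ns=default&partition=default"
```
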
Some files were not shown because too many files have changed in this diff.