agent/consul: Rename RPCRate -> RPCRateLimit

so that the field name is consistent across config structs.
This commit is contained in:
Daniel Nephin 2020-09-16 13:29:59 -04:00
parent e5320c2db6
commit e8427a48ab
10 changed files with 38 additions and 25 deletions

4
.changelog/8696.txt Normal file
View File

@ -0,0 +1,4 @@
```release-note:bug
config: Fixed a bug where `rpc_max_conns_per_client` could not be changed by reloading the
config.
```

View File

@ -1143,7 +1143,7 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co
// Rate limiting for RPC calls. // Rate limiting for RPC calls.
if runtimeCfg.RPCRateLimit > 0 { if runtimeCfg.RPCRateLimit > 0 {
cfg.RPCRate = runtimeCfg.RPCRateLimit cfg.RPCRateLimit = runtimeCfg.RPCRateLimit
} }
if runtimeCfg.RPCMaxBurst > 0 { if runtimeCfg.RPCMaxBurst > 0 {
cfg.RPCMaxBurst = runtimeCfg.RPCMaxBurst cfg.RPCMaxBurst = runtimeCfg.RPCMaxBurst

View File

@ -21,6 +21,7 @@ import (
"github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/debug" "github.com/hashicorp/consul/agent/debug"
"github.com/hashicorp/consul/agent/local" "github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
@ -37,6 +38,7 @@ import (
"github.com/hashicorp/serf/serf" "github.com/hashicorp/serf/serf"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"golang.org/x/time/rate"
) )
func makeReadOnlyAgentACL(t *testing.T, srv *HTTPHandlers) string { func makeReadOnlyAgentACL(t *testing.T, srv *HTTPHandlers) string {
@ -1452,6 +1454,8 @@ func TestAgent_Reload(t *testing.T) {
`, `,
}) })
shim := &delegateConfigReloadShim{delegate: a.delegate}
a.delegate = shim
if err := a.reloadConfigInternal(cfg2); err != nil { if err := a.reloadConfigInternal(cfg2); err != nil {
t.Fatalf("got error %v want nil", err) t.Fatalf("got error %v want nil", err)
} }
@ -1459,13 +1463,8 @@ func TestAgent_Reload(t *testing.T) {
t.Fatal("missing redis-reloaded service") t.Fatal("missing redis-reloaded service")
} }
if a.config.RPCRateLimit != 2 { require.Equal(t, rate.Limit(2), shim.newCfg.RPCRateLimit)
t.Fatalf("RPC rate not set correctly. Got %v. Want 2", a.config.RPCRateLimit) require.Equal(t, 200, shim.newCfg.RPCMaxBurst)
}
if a.config.RPCMaxBurst != 200 {
t.Fatalf("RPC max burst not set correctly. Got %v. Want 200", a.config.RPCMaxBurst)
}
for _, wp := range a.watchPlans { for _, wp := range a.watchPlans {
if !wp.IsStopped() { if !wp.IsStopped() {
@ -1474,6 +1473,16 @@ func TestAgent_Reload(t *testing.T) {
} }
} }
type delegateConfigReloadShim struct {
delegate
newCfg consul.ReloadableConfig
}
func (s *delegateConfigReloadShim) ReloadConfig(cfg consul.ReloadableConfig) error {
s.newCfg = cfg
return s.delegate.ReloadConfig(cfg)
}
// TestAgent_ReloadDoesNotTriggerWatch Ensure watches not triggered after reload // TestAgent_ReloadDoesNotTriggerWatch Ensure watches not triggered after reload
// see https://github.com/hashicorp/consul/issues/7446 // see https://github.com/hashicorp/consul/issues/7446
func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) { func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) {

View File

@ -921,8 +921,8 @@ type RuntimeConfig struct {
// RPCRateLimit and RPCMaxBurst control how frequently RPC calls are allowed // RPCRateLimit and RPCMaxBurst control how frequently RPC calls are allowed
// to happen. In any large enough time interval, rate limiter limits the // to happen. In any large enough time interval, rate limiter limits the
// rate to RPCRate tokens per second, with a maximum burst size of // rate to RPCRateLimit tokens per second, with a maximum burst size of
// RPCMaxBurst events. As a special case, if RPCRate == Inf (the infinite // RPCMaxBurst events. As a special case, if RPCRateLimit == Inf (the infinite
// rate), RPCMaxBurst is ignored. // rate), RPCMaxBurst is ignored.
// //
// See https://en.wikipedia.org/wiki/Token_bucket for more about token // See https://en.wikipedia.org/wiki/Token_bucket for more about token

View File

@ -113,7 +113,7 @@ func NewClient(config *Config, deps Deps) (*Client, error) {
tlsConfigurator: deps.TLSConfigurator, tlsConfigurator: deps.TLSConfigurator,
} }
c.rpcLimiter.Store(rate.NewLimiter(config.RPCRate, config.RPCMaxBurst)) c.rpcLimiter.Store(rate.NewLimiter(config.RPCRateLimit, config.RPCMaxBurst))
if err := c.initEnterprise(); err != nil { if err := c.initEnterprise(); err != nil {
c.Shutdown() c.Shutdown()

View File

@ -533,7 +533,7 @@ func TestClient_RPC_RateLimit(t *testing.T) {
testrpc.WaitForLeader(t, s1.RPC, "dc1") testrpc.WaitForLeader(t, s1.RPC, "dc1")
_, conf2 := testClientConfig(t) _, conf2 := testClientConfig(t)
conf2.RPCRate = 2 conf2.RPCRateLimit = 2
conf2.RPCMaxBurst = 2 conf2.RPCMaxBurst = 2
c1 := newClient(t, conf2) c1 := newClient(t, conf2)
@ -602,7 +602,7 @@ func TestClient_SnapshotRPC_RateLimit(t *testing.T) {
testrpc.WaitForLeader(t, s1.RPC, "dc1") testrpc.WaitForLeader(t, s1.RPC, "dc1")
_, conf1 := testClientConfig(t) _, conf1 := testClientConfig(t)
conf1.RPCRate = 2 conf1.RPCRateLimit = 2
conf1.RPCMaxBurst = 2 conf1.RPCMaxBurst = 2
c1 := newClient(t, conf1) c1 := newClient(t, conf1)
@ -765,7 +765,7 @@ func TestClientServer_UserEvent(t *testing.T) {
func TestClient_ReloadConfig(t *testing.T) { func TestClient_ReloadConfig(t *testing.T) {
_, cfg := testClientConfig(t) _, cfg := testClientConfig(t)
cfg.RPCRate = rate.Limit(500) cfg.RPCRateLimit = rate.Limit(500)
cfg.RPCMaxBurst = 5000 cfg.RPCMaxBurst = 5000
deps := newDefaultDeps(t, &Config{NodeName: "node1", Datacenter: "dc1"}) deps := newDefaultDeps(t, &Config{NodeName: "node1", Datacenter: "dc1"})
c, err := NewClient(cfg, deps) c, err := NewClient(cfg, deps)

View File

@ -416,15 +416,15 @@ type Config struct {
// place, and a small jitter is applied to avoid a thundering herd. // place, and a small jitter is applied to avoid a thundering herd.
RPCHoldTimeout time.Duration RPCHoldTimeout time.Duration
// RPCRate and RPCMaxBurst control how frequently RPC calls are allowed // RPCRateLimit and RPCMaxBurst control how frequently RPC calls are allowed
// to happen. In any large enough time interval, rate limiter limits the // to happen. In any large enough time interval, rate limiter limits the
// rate to RPCRate tokens per second, with a maximum burst size of // rate to RPCRateLimit tokens per second, with a maximum burst size of
// RPCMaxBurst events. As a special case, if RPCRate == Inf (the infinite // RPCMaxBurst events. As a special case, if RPCRateLimit == Inf (the infinite
// rate), RPCMaxBurst is ignored. // rate), RPCMaxBurst is ignored.
// //
// See https://en.wikipedia.org/wiki/Token_bucket for more about token // See https://en.wikipedia.org/wiki/Token_bucket for more about token
// buckets. // buckets.
RPCRate rate.Limit RPCRateLimit rate.Limit
RPCMaxBurst int RPCMaxBurst int
// RPCMaxConnsPerClient is the limit of how many concurrent connections are // RPCMaxConnsPerClient is the limit of how many concurrent connections are
@ -582,7 +582,7 @@ func DefaultConfig() *Config {
CheckOutputMaxSize: checks.DefaultBufSize, CheckOutputMaxSize: checks.DefaultBufSize,
RPCRate: rate.Inf, RPCRateLimit: rate.Inf,
RPCMaxBurst: 1000, RPCMaxBurst: 1000,
TLSMinVersion: "tls10", TLSMinVersion: "tls10",

View File

@ -730,7 +730,7 @@ func TestRPC_RPCMaxConnsPerClient(t *testing.T) {
// Reload config with higher limit // Reload config with higher limit
rc := ReloadableConfig{ rc := ReloadableConfig{
RPCRateLimit: s1.config.RPCRate, RPCRateLimit: s1.config.RPCRateLimit,
RPCMaxBurst: s1.config.RPCMaxBurst, RPCMaxBurst: s1.config.RPCMaxBurst,
RPCMaxConnsPerClient: 10, RPCMaxConnsPerClient: 10,
} }

View File

@ -395,7 +395,7 @@ func NewServer(config *Config, flat Deps) (*Server, error) {
return nil, err return nil, err
} }
s.rpcLimiter.Store(rate.NewLimiter(config.RPCRate, config.RPCMaxBurst)) s.rpcLimiter.Store(rate.NewLimiter(config.RPCRateLimit, config.RPCMaxBurst))
configReplicatorConfig := ReplicatorConfig{ configReplicatorConfig := ReplicatorConfig{
Name: logging.ConfigEntry, Name: logging.ConfigEntry,

View File

@ -1479,7 +1479,7 @@ func TestServer_ReloadConfig(t *testing.T) {
dir1, s := testServerWithConfig(t, func(c *Config) { dir1, s := testServerWithConfig(t, func(c *Config) {
c.Build = "1.5.0" c.Build = "1.5.0"
c.RPCRate = 500 c.RPCRateLimit = 500
c.RPCMaxBurst = 5000 c.RPCMaxBurst = 5000
}) })
defer os.RemoveAll(dir1) defer os.RemoveAll(dir1)
@ -1520,7 +1520,7 @@ func TestServer_RPC_RateLimit(t *testing.T) {
t.Parallel() t.Parallel()
_, conf1 := testServerConfig(t) _, conf1 := testServerConfig(t)
conf1.RPCRate = 2 conf1.RPCRateLimit = 2
conf1.RPCMaxBurst = 2 conf1.RPCMaxBurst = 2
s1, err := newServer(t, conf1) s1, err := newServer(t, conf1)
if err != nil { if err != nil {