Try DRYing up createCluster in integration tests (#16199)
parent cfc546eeec
commit 840497933e
@@ -66,7 +66,7 @@ func NewN(t TestingT, conf Config, count int) (*Cluster, error) {
 func New(t TestingT, configs []Config) (*Cluster, error) {
 	id, err := shortid.Generate()
 	if err != nil {
-		return nil, fmt.Errorf("could not cluster id: %w", err)
+		return nil, fmt.Errorf("could not generate cluster id: %w", err)
 	}
 
 	name := fmt.Sprintf("consul-int-cluster-%s", id)
@@ -114,7 +114,7 @@ func (c *Cluster) AddN(conf Config, count int, join bool) error {
 	return c.Add(configs, join)
 }
 
-// Add starts an agent with the given configuration and joins it with the existing cluster
+// Add starts agents with the given configurations and joins them to the existing cluster
 func (c *Cluster) Add(configs []Config, serfJoin bool) (xe error) {
 	if c.Index == 0 && !serfJoin {
 		return fmt.Errorf("the first call to Cluster.Add must have serfJoin=true")
@@ -160,9 +160,11 @@ func (c *Cluster) Add(configs []Config, serfJoin bool) (xe error) {
 func (c *Cluster) Join(agents []Agent) error {
 	return c.join(agents, false)
 }
 
 func (c *Cluster) JoinExternally(agents []Agent) error {
 	return c.join(agents, true)
 }
 
 func (c *Cluster) join(agents []Agent, skipSerfJoin bool) error {
 	if len(agents) == 0 {
 		return nil // no change
@@ -489,7 +489,7 @@ func startContainer(ctx context.Context, req testcontainers.ContainerRequest) (t
 	})
 }
 
-const pauseImage = "k8s.gcr.io/pause:3.3"
+const pauseImage = "registry.k8s.io/pause:3.3"
 
 type containerOpts struct {
 	configFile string
@@ -9,7 +9,7 @@ import (
 	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
 	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
 	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
-	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
+	"github.com/hashicorp/consul/test/integration/consul-container/test"
 )
 
 // TestBasicConnectService Summary
@@ -23,7 +23,14 @@ import (
 // - Make sure a call to the client sidecar local bind port returns a response from the upstream, static-server
 func TestBasicConnectService(t *testing.T) {
 	t.Parallel()
-	cluster := createCluster(t)
+
+	buildOptions := &libcluster.BuildOptions{
+		InjectAutoEncryption:   true,
+		InjectGossipEncryption: true,
+		// TODO(rb): fix the test to not need the service/envoy stack to use :8500
+		AllowHTTPAnyway: true,
+	}
+	cluster := test.CreateCluster(t, "", nil, buildOptions, true)
 
 	clientService := createServices(t, cluster)
 	_, port := clientService.GetAddr()
@@ -37,38 +44,6 @@ func TestBasicConnectService(t *testing.T) {
 	libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
 }
 
-func createCluster(t *testing.T) *libcluster.Cluster {
-	opts := libcluster.BuildOptions{
-		InjectAutoEncryption:   true,
-		InjectGossipEncryption: true,
-		// TODO: fix the test to not need the service/envoy stack to use :8500
-		AllowHTTPAnyway: true,
-	}
-	ctx := libcluster.NewBuildContext(t, opts)
-
-	conf := libcluster.NewConfigBuilder(ctx).
-		ToAgentConfig(t)
-	t.Logf("Cluster config:\n%s", conf.JSON)
-
-	configs := []libcluster.Config{*conf}
-
-	cluster, err := libcluster.New(t, configs)
-	require.NoError(t, err)
-
-	node := cluster.Agents[0]
-	client := node.GetClient()
-
-	libcluster.WaitForLeader(t, cluster, client)
-	libcluster.WaitForMembers(t, client, 1)
-
-	// Default Proxy Settings
-	ok, err := utils.ApplyDefaultProxySettings(client)
-	require.NoError(t, err)
-	require.True(t, ok)
-
-	return cluster
-}
-
 func createServices(t *testing.T, cluster *libcluster.Cluster) libservice.Service {
 	node := cluster.Agents[0]
 	client := node.GetClient()
@@ -0,0 +1,68 @@
+package test
+
+import (
+	"testing"
+
+	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
+	"github.com/stretchr/testify/require"
+	"github.com/testcontainers/testcontainers-go"
+)
+
+type TestLogConsumer struct {
+	Msgs []string
+}
+
+func (g *TestLogConsumer) Accept(l testcontainers.Log) {
+	g.Msgs = append(g.Msgs, string(l.Content))
+}
+
+// Creates a cluster with options for basic customization. All args except t
+// are optional and will use sensible defaults when not provided.
+func CreateCluster(
+	t *testing.T,
+	cmd string,
+	logConsumer *TestLogConsumer,
+	buildOptions *libcluster.BuildOptions,
+	applyDefaultProxySettings bool) *libcluster.Cluster {
+
+	// optional
+	if buildOptions == nil {
+		buildOptions = &libcluster.BuildOptions{
+			InjectAutoEncryption:   true,
+			InjectGossipEncryption: true,
+		}
+	}
+	ctx := libcluster.NewBuildContext(t, *buildOptions)
+
+	conf := libcluster.NewConfigBuilder(ctx).ToAgentConfig(t)
+
+	// optional
+	if logConsumer != nil {
+		conf.LogConsumer = logConsumer
+	}
+
+	t.Logf("Cluster config:\n%s", conf.JSON)
+
+	// optional custom cmd
+	if cmd != "" {
+		conf.Cmd = append(conf.Cmd, cmd)
+	}
+
+	cluster, err := libcluster.New(t, []libcluster.Config{*conf})
+	require.NoError(t, err)
+
+	client, err := cluster.GetClient(nil, true)
+
+	require.NoError(t, err)
+	libcluster.WaitForLeader(t, cluster, client)
+	libcluster.WaitForMembers(t, client, 1)
+
+	if applyDefaultProxySettings {
+		ok, err := utils.ApplyDefaultProxySettings(client)
+		require.NoError(t, err)
+		require.True(t, ok)
+	}
+
+	return cluster
+}
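Taken together with the call sites above, this helper centralizes the cluster bootstrap that each test previously copied. A minimal sketch of a consumer, assuming hypothetical package and test names (only test.CreateCluster, its parameters, cluster.GetClient, and cluster.Terminate come from this diff):

package example_test // hypothetical package; real callers live in the consul-container test tree

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/test/integration/consul-container/test"
)

func TestUsingSharedHelper(t *testing.T) {
	t.Parallel()

	// Zero values pick the defaults: nil buildOptions enables gossip and auto
	// encryption, "" adds no extra agent command, nil skips log capture, and
	// false skips ApplyDefaultProxySettings.
	cluster := test.CreateCluster(t, "", nil, nil, false)
	defer func() { _ = cluster.Terminate() }()

	client, err := cluster.GetClient(nil, true)
	require.NoError(t, err)
	_ = client // drive the rest of the test through the API client
}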
@@ -7,11 +7,11 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/require"
-	"github.com/testcontainers/testcontainers-go"
 
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
+	"github.com/hashicorp/consul/test/integration/consul-container/test"
 )
 
 const (
@@ -25,7 +25,7 @@ const (
 // - read_rate - returns 429 - was blocked and returns retryable error
 // - write_rate - returns 503 - was blocked and is not retryable
 // - on each
-// - fires metrics forexceeding
+// - fires metrics for exceeding
 // - logs for exceeding
 
 func TestServerRequestRateLimit(t *testing.T) {
@@ -69,7 +69,7 @@ func TestServerRequestRateLimit(t *testing.T) {
 	testCases := []testCase{
 		// HTTP & net/RPC
 		{
-			description: "HTTP & net/RPC / Mode: disabled - errors: no / exceeded logs: no / metrics: no",
+			description: "HTTP & net-RPC | Mode: disabled - errors: no | exceeded logs: no | metrics: no",
 			cmd:         `-hcl=limits { request_limits { mode = "disabled" read_rate = 0 write_rate = 0 }}`,
 			mode:        "disabled",
 			operations: []operation{
@@ -88,7 +88,7 @@ func TestServerRequestRateLimit(t *testing.T) {
 			},
 		},
 		{
-			description: "HTTP & net/RPC / Mode: permissive - errors: no / exceeded logs: yes / metrics: yes",
+			description: "HTTP & net-RPC | Mode: permissive - errors: no | exceeded logs: yes | metrics: yes",
 			cmd:         `-hcl=limits { request_limits { mode = "permissive" read_rate = 0 write_rate = 0 }}`,
 			mode:        "permissive",
 			operations: []operation{
@@ -107,7 +107,7 @@ func TestServerRequestRateLimit(t *testing.T) {
 			},
 		},
 		{
-			description: "HTTP & net/RPC / Mode: enforcing - errors: yes / exceeded logs: yes / metrics: yes",
+			description: "HTTP & net-RPC | Mode: enforcing - errors: yes | exceeded logs: yes | metrics: yes",
 			cmd:         `-hcl=limits { request_limits { mode = "enforcing" read_rate = 0 write_rate = 0 }}`,
 			mode:        "enforcing",
 			operations: []operation{
@@ -128,8 +128,8 @@ func TestServerRequestRateLimit(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.description, func(t *testing.T) {
-			logConsumer := &TestLogConsumer{}
-			cluster := createCluster(t, tc.cmd, logConsumer)
+			logConsumer := &test.TestLogConsumer{}
+			cluster := test.CreateCluster(t, tc.cmd, logConsumer, nil, false)
 			defer terminate(t, cluster)
 
 			client, err := cluster.GetClient(nil, true)
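The "exceeded logs: yes/no" expectations in the cases above imply the test scans the lines captured by the shared test.TestLogConsumer. A minimal sketch of such a check, assuming a hypothetical helper name and log wording (neither is taken from this diff):

package ratelimit_test // hypothetical package name

import (
	"strings"

	"github.com/hashicorp/consul/test/integration/consul-container/test"
)

// containsExceededLog reports whether any captured agent log line mentions the
// rate limit being exceeded. The matched substring is an assumption.
func containsExceededLog(logConsumer *test.TestLogConsumer) bool {
	for _, msg := range logConsumer.Msgs {
		if strings.Contains(msg, "rate limit exceeded") { // assumed wording
			return true
		}
	}
	return false
}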
@@ -218,44 +218,3 @@ func terminate(t *testing.T, cluster *libcluster.Cluster) {
 	err := cluster.Terminate()
 	require.NoError(t, err)
 }
-
-type TestLogConsumer struct {
-	Msgs []string
-}
-
-func (g *TestLogConsumer) Accept(l testcontainers.Log) {
-	g.Msgs = append(g.Msgs, string(l.Content))
-}
-
-// createCluster
-func createCluster(t *testing.T, cmd string, logConsumer *TestLogConsumer) *libcluster.Cluster {
-	opts := libcluster.BuildOptions{
-		InjectAutoEncryption:   true,
-		InjectGossipEncryption: true,
-	}
-	ctx := libcluster.NewBuildContext(t, opts)
-
-	conf := libcluster.NewConfigBuilder(ctx).ToAgentConfig(t)
-	conf.LogConsumer = logConsumer
-
-	t.Logf("Cluster config:\n%s", conf.JSON)
-
-	parsedConfigs := []libcluster.Config{*conf}
-
-	cfgs := []libcluster.Config{}
-	for _, cfg := range parsedConfigs {
-		// add command
-		cfg.Cmd = append(cfg.Cmd, cmd)
-		cfgs = append(cfgs, cfg)
-	}
-	cluster, err := libcluster.New(t, cfgs)
-	require.NoError(t, err)
-
-	client, err := cluster.GetClient(nil, true)
-
-	require.NoError(t, err)
-	libcluster.WaitForLeader(t, cluster, client)
-	libcluster.WaitForMembers(t, client, 1)
-
-	return cluster
-}