Add versions compatibility tests between Consul (#12702)
* add a sample * Consul cluster test * add build dockerfile * add tests to cover mixed versions tests * use flag to pass docker image name * remove default config and rely on flags to inject the right image to test * add cluster abstraction * fix imports and remove old files * fix imports and remove old files * fix dockerIgnore * make a `Node interface` and encapsulate ConsulContainer * fix a test bug where we only check the leader against a single node. * add upgrade tests to CI * fix yaml alignment * fix alignment take 2 * fix flag naming * fix image to build * fix test run and go mod tidy * add a debug command * run without RYUK * fix parallel run * add skip reaper code * make tempdir in local dir * chmod the temp dir to 0777 * chmod the right dir name * change executor to use machine instead of docker * add docker layer caching * remove setup docker * add gotestsum * install go version * use variable for GO installed version * add environment * add environment in the right place * do not disable RYUK in CI * add service check to tests * assertions outside routines * add queryBackend to the api query meta. * check if we are using the right backend for those tests (streaming) * change the tested endpoint to use one that have streaming. * refactor to test multiple scenarios for streaming * Fix dockerfile Co-authored-by: FFMMM <FFMMM@users.noreply.github.com> * rename Clients to clients Co-authored-by: FFMMM <FFMMM@users.noreply.github.com> * check if cluster have 0 node * tidy code and add some doc strings * use uuid instead of random string * add doc strings to tests * add queryBackend to the api query meta. * add a changelog * fix for api backend query * add missing require * fix q.QueryBackend * Revert "fix q.QueryBackend" This reverts commit cd0e5f7b1a1730e191673d624f8e89b591871c05. 
* fix circle ci config * tidy go mod after merging main * rename package and fix test scenario * update go download url * address review comments * rename flag in CI * add readme to the upgrade tests * fix golang download url * fix golang arch downloaded * fix AddNodes to handle an empty cluster case * use `parseBool` * rename circle job and add comment * update testcontainer to 0.13 * fix circle ci config * remove build docker file and use `make dev-docker` instead * Apply suggestions from code review Co-authored-by: Dan Upton <daniel@floppy.co> * fix a typo Co-authored-by: FFMMM <FFMMM@users.noreply.github.com> Co-authored-by: Dan Upton <daniel@floppy.co>
This commit is contained in:
parent
9dc5200155
commit
fe22a002e1
|
@ -12,18 +12,8 @@ parameters:
|
|||
description: "Boolean whether to run the load test workflow"
|
||||
|
||||
references:
|
||||
images:
|
||||
# When updating the Go version, remember to also update the versions in the
|
||||
# workflows section for go-test-lib jobs.
|
||||
go: &GOLANG_IMAGE docker.mirror.hashicorp.services/cimg/go:1.18.1
|
||||
ember: &EMBER_IMAGE docker.mirror.hashicorp.services/circleci/node:14-browsers
|
||||
|
||||
paths:
|
||||
test-results: &TEST_RESULTS_DIR /tmp/test-results
|
||||
|
||||
cache:
|
||||
yarn: &YARN_CACHE_KEY consul-ui-v7-{{ checksum "ui/yarn.lock" }}
|
||||
|
||||
environment: &ENVIRONMENT
|
||||
TEST_RESULTS_DIR: *TEST_RESULTS_DIR
|
||||
EMAIL: noreply@hashicorp.com
|
||||
|
@ -32,6 +22,14 @@ references:
|
|||
S3_ARTIFACT_BUCKET: consul-dev-artifacts-v2
|
||||
BASH_ENV: .circleci/bash_env.sh
|
||||
VAULT_BINARY_VERSION: 1.9.4
|
||||
GO_VERSION: 1.18.1
|
||||
images:
|
||||
# When updating the Go version, remember to also update the versions in the
|
||||
# workflows section for go-test-lib jobs.
|
||||
go: &GOLANG_IMAGE docker.mirror.hashicorp.services/cimg/go:1.18.1
|
||||
ember: &EMBER_IMAGE docker.mirror.hashicorp.services/circleci/node:14-browsers
|
||||
cache:
|
||||
yarn: &YARN_CACHE_KEY consul-ui-v7-{{ checksum "ui/yarn.lock" }}
|
||||
|
||||
steps:
|
||||
install-gotestsum: &install-gotestsum
|
||||
|
@ -257,8 +255,8 @@ jobs:
|
|||
- run:
|
||||
command: |
|
||||
sudo rm -rf /usr/local/go
|
||||
wget https://dl.google.com/go/go1.18.1.linux-arm64.tar.gz
|
||||
sudo tar -C /usr/local -xzvf go1.18.1.linux-arm64.tar.gz
|
||||
wget https://dl.google.com/go/go${GO_VERSION}.linux-arm64.tar.gz
|
||||
sudo tar -C /usr/local -xzvf go${GO_VERSION}.linux-arm64.tar.gz
|
||||
- run: *install-gotestsum
|
||||
- run: go mod download
|
||||
- run:
|
||||
|
@ -803,6 +801,54 @@ jobs:
|
|||
command: make test-coverage-ci
|
||||
- run: *notify-slack-failure
|
||||
|
||||
compatibility-integration-test:
|
||||
machine:
|
||||
image: ubuntu-2004:202101-01
|
||||
docker_layer_caching: true
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
# Get go binary from workspace
|
||||
- attach_workspace:
|
||||
at: .
|
||||
# Build the consul-dev image from the already built binary
|
||||
- run:
|
||||
command: |
|
||||
sudo rm -rf /usr/local/go
|
||||
wget https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz
|
||||
sudo tar -C /usr/local -xzvf go${GO_VERSION}.linux-amd64.tar.gz
|
||||
environment:
|
||||
<<: *ENVIRONMENT
|
||||
- run: *install-gotestsum
|
||||
- run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile .
|
||||
- run:
|
||||
name: Compatibility Integration Tests
|
||||
command: |
|
||||
subtests=$(ls -d test/integration/consul-container/*/ | grep -v libs | xargs -n 1 basename | circleci tests split)
|
||||
echo "Running $(echo $subtests | wc -w) subtests"
|
||||
echo "$subtests"
|
||||
subtests_pipe_sepr=$(echo "$subtests" | xargs | sed 's/ /|/g')
|
||||
mkdir -p /tmp/test-results/
|
||||
docker run consul:local consul version
|
||||
cd ./test/integration/consul-container
|
||||
gotestsum -- -timeout=30m ./$subtests_pipe_sepr --target-version local --latest-version latest
|
||||
ls -lrt
|
||||
environment:
|
||||
# this is needed because of incompatibility between RYUK container and circleci
|
||||
GOTESTSUM_JUNITFILE: /tmp/test-results/results.xml
|
||||
GOTESTSUM_FORMAT: standard-verbose
|
||||
COMPOSE_INTERACTIVE_NO_CLI: 1
|
||||
# tput complains if this isn't set to something.
|
||||
TERM: ansi
|
||||
- store_artifacts:
|
||||
path: ./test/integration/consul-container/upgrade/workdir/logs
|
||||
destination: container-logs
|
||||
- store_test_results:
|
||||
path: *TEST_RESULTS_DIR
|
||||
- store_artifacts:
|
||||
path: *TEST_RESULTS_DIR
|
||||
- run: *notify-slack-failure
|
||||
|
||||
envoy-integration-test-1_19_3: &ENVOY_TESTS
|
||||
machine:
|
||||
image: ubuntu-2004:202101-01
|
||||
|
@ -1110,6 +1156,9 @@ workflows:
|
|||
- envoy-integration-test-1_22_0:
|
||||
requires:
|
||||
- dev-build
|
||||
- compatibility-integration-test:
|
||||
requires:
|
||||
- dev-build
|
||||
|
||||
frontend:
|
||||
unless: << pipeline.parameters.trigger-load-test >>
|
||||
|
|
|
@ -158,7 +158,8 @@ dev-docker: linux
|
|||
@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
|
||||
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
|
||||
@echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)"
|
||||
@DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
|
||||
# 'consul:local' tag is needed to run the integration tests
|
||||
@DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' -t 'consul:local' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
|
||||
|
||||
# In CircleCI, the linux binary will be attached from a previous step at bin/. This make target
|
||||
# should only run in CI and not locally.
|
||||
|
|
|
@ -0,0 +1,30 @@
|
|||
module github.com/hashicorp/consul/integration/consul-container
|
||||
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/armon/go-metrics v0.3.10 // indirect
|
||||
github.com/docker/docker v20.10.11+incompatible
|
||||
github.com/hashicorp/consul/api v1.11.0
|
||||
github.com/hashicorp/consul/sdk v0.8.0
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-hclog v0.16.2 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
|
||||
github.com/hashicorp/go-msgpack v0.5.5 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
|
||||
github.com/hashicorp/go-uuid v1.0.2
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/hashicorp/memberlist v0.3.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.2 // indirect
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/testcontainers/testcontainers-go v0.13.0
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
|
||||
google.golang.org/grpc v1.41.0 // indirect
|
||||
)
|
||||
|
||||
replace github.com/hashicorp/consul/api => ../../../api
|
||||
|
||||
replace github.com/hashicorp/consul/sdk => ../../../sdk
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,88 @@
|
|||
package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/consul/integration/consul-container/libs/node"
|
||||
)
|
||||
|
||||
// Cluster provides an interface for creating and controlling a Consul cluster
// in integration tests, with nodes running in containers.
type Cluster struct {
	// Nodes holds every node that has been joined to the cluster, in join order.
	// The first node's address is used as the join target for later additions.
	Nodes []node.Node
}
|
||||
|
||||
// New creates a Consul cluster. A node will be started for each of the given
|
||||
// configs and joined to the cluster.
|
||||
func New(configs []node.Config) (*Cluster, error) {
|
||||
cluster := Cluster{}
|
||||
|
||||
nodes := make([]node.Node, len(configs))
|
||||
for idx, c := range configs {
|
||||
n, err := node.NewConsulContainer(context.Background(), c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodes[idx] = n
|
||||
}
|
||||
if err := cluster.AddNodes(nodes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &cluster, nil
|
||||
}
|
||||
|
||||
// AddNodes joins the given nodes to the cluster.
|
||||
func (c *Cluster) AddNodes(nodes []node.Node) error {
|
||||
var joinAddr string
|
||||
if len(c.Nodes) >= 1 {
|
||||
joinAddr, _ = c.Nodes[0].GetAddr()
|
||||
} else if len(nodes) >= 1 {
|
||||
joinAddr, _ = nodes[0].GetAddr()
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
err := node.GetClient().Agent().Join(joinAddr, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.Nodes = append(c.Nodes, node)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Terminate will attempt to terminate all nodes in the cluster. If any node
|
||||
// termination fails, Terminate will abort and return an error.
|
||||
func (c *Cluster) Terminate() error {
|
||||
for _, n := range c.Nodes {
|
||||
err := n.Terminate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Leader returns the cluster leader node, or an error if no leader is
|
||||
// available.
|
||||
func (c *Cluster) Leader() (node.Node, error) {
|
||||
if len(c.Nodes) < 1 {
|
||||
return nil, fmt.Errorf("no node available")
|
||||
}
|
||||
n0 := c.Nodes[0]
|
||||
leaderAdd, err := n0.GetClient().Status().Leader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if leaderAdd == "" {
|
||||
return nil, fmt.Errorf("no leader available")
|
||||
}
|
||||
for _, n := range c.Nodes {
|
||||
addr, _ := n.GetAddr()
|
||||
if strings.Contains(leaderAdd, addr) {
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("leader not found")
|
||||
}
|
|
@ -0,0 +1,129 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/testcontainers/testcontainers-go"
|
||||
"github.com/testcontainers/testcontainers-go/wait"
|
||||
|
||||
"github.com/hashicorp/consul/integration/consul-container/libs/utils"
|
||||
)
|
||||
|
||||
const bootLogLine = "Consul agent running"
|
||||
const disableRYUKEnv = "TESTCONTAINERS_RYUK_DISABLED"
|
||||
|
||||
// consulContainerNode implements the Node interface by running a Consul node
// in a container.
type consulContainerNode struct {
	ctx       context.Context          // context used for all container operations, including Terminate
	client    *api.Client              // API client pointed at the host-mapped HTTP port
	container testcontainers.Container // handle to the running container
	ip        string                   // container-internal IP, returned by GetAddr for joins
	port      int                      // host-mapped port for container port 8500
}
|
||||
|
||||
// NewConsulContainer starts a Consul node in a container with the given config.
|
||||
func NewConsulContainer(ctx context.Context, config Config) (Node, error) {
|
||||
|
||||
name := utils.RandName("consul-")
|
||||
tmpDir, err := ioutils.TempDir("", name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = os.Chmod(tmpDir, 0777)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = os.Mkdir(tmpDir+"/config", 0777)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
configFile := tmpDir + "/config/config.hcl"
|
||||
err = os.WriteFile(configFile, []byte(config.HCL), 0644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
skipReaper := isRYUKDisabled()
|
||||
req := testcontainers.ContainerRequest{
|
||||
Image: "consul:" + config.Version,
|
||||
ExposedPorts: []string{"8500/tcp"},
|
||||
WaitingFor: wait.ForLog(bootLogLine).WithStartupTimeout(10 * time.Second),
|
||||
AutoRemove: false,
|
||||
Name: name,
|
||||
Mounts: testcontainers.ContainerMounts{testcontainers.ContainerMount{Source: testcontainers.DockerBindMountSource{HostPath: configFile}, Target: "/consul/config/config.hcl"}},
|
||||
Cmd: config.Cmd,
|
||||
SkipReaper: skipReaper,
|
||||
}
|
||||
container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
|
||||
ContainerRequest: req,
|
||||
Started: true,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localIP, err := container.Host(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mappedPort, err := container.MappedPort(ctx, "8500")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ip, err := container.ContainerIP(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uri := fmt.Sprintf("http://%s:%s", localIP, mappedPort.Port())
|
||||
c := new(consulContainerNode)
|
||||
c.container = container
|
||||
c.ip = ip
|
||||
c.port = mappedPort.Int()
|
||||
apiConfig := api.DefaultConfig()
|
||||
apiConfig.Address = uri
|
||||
c.client, err = api.NewClient(apiConfig)
|
||||
c.ctx = ctx
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// GetClient returns an API client that can be used to communicate with the Node.
// The client targets the host-mapped HTTP port set up in NewConsulContainer.
func (c *consulContainerNode) GetClient() *api.Client {
	return c.client
}
|
||||
|
||||
// GetAddr returns the network address associated with the Node: the
// container-internal IP and the host-mapped HTTP port.
func (c *consulContainerNode) GetAddr() (string, int) {
	return c.ip, c.port
}
|
||||
|
||||
// Terminate attempts to terminate the container. On failure, an error will be
// returned and the reaper process (RYUK) will handle cleanup.
// Uses the context captured at construction time.
func (c *consulContainerNode) Terminate() error {
	return c.container.Terminate(c.ctx)
}
|
||||
|
||||
// isRYUKDisabled returns whether the reaper process (RYUK) has been disabled
|
||||
// by an environment variable.
|
||||
//
|
||||
// https://github.com/testcontainers/moby-ryuk
|
||||
func isRYUKDisabled() bool {
|
||||
skipReaperStr := os.Getenv(disableRYUKEnv)
|
||||
skipReaper, err := strconv.ParseBool(skipReaperStr)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return skipReaper
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
package node
|
||||
|
||||
import "github.com/hashicorp/consul/api"
|
||||
|
||||
// Node represents a Consul node abstraction.
type Node interface {
	// Terminate stops the node; on failure an error is returned.
	Terminate() error
	// GetClient returns an API client configured to talk to this node.
	GetClient() *api.Client
	// GetAddr returns the node's network address and port.
	GetAddr() (string, int)
}
|
||||
|
||||
// Config is a set of configurations required to create a Node.
type Config struct {
	// HCL is the agent configuration written to the node's config file.
	HCL string
	// Version selects the consul docker image tag to run.
	Version string
	// Cmd is the command line passed to the container (e.g. "agent", flags).
	Cmd []string
}
|
|
@ -0,0 +1,13 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/go-uuid"
|
||||
)
|
||||
|
||||
func RandName(name string) string {
|
||||
generateUUID, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return name + generateUUID
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
# Consul Upgrade Integration tests
|
||||
## Local run
|
||||
- run `make dev-docker`
|
||||
- run the tests.
|
||||
|
||||
To specify the target and latest images, pass `target-version` and `latest-version` to the tests. By default, the tests use the `consul` docker image with the `local` and `latest` tags, respectively.
|
|
@ -0,0 +1,274 @@
|
|||
package consul_container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
|
||||
"github.com/hashicorp/consul/integration/consul-container/libs/cluster"
|
||||
"github.com/hashicorp/consul/integration/consul-container/libs/node"
|
||||
|
||||
"github.com/hashicorp/consul/integration/consul-container/libs/utils"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Command-line flags selecting the docker image tags under test: the "target"
// image is the unit under test (typically the locally built image) and the
// "latest" image is the release to test compatibility against.
var targetImage = flag.String("target-version", "local", "docker image to be used as UUT (unit under test)")
var latestImage = flag.String("latest-version", "latest", "docker image to be used as latest")

// Retry cadence used when waiting for leadership and cluster membership.
const retryTimeout = 10 * time.Second
const retryFrequency = 500 * time.Millisecond
|
||||
|
||||
// Test health check GRPC call using Current Servers and Latest GA Clients
|
||||
func TestCurrentServersWithLatestGAClients(t *testing.T) {
|
||||
t.Parallel()
|
||||
numServers := 3
|
||||
cluster, err := serversCluster(t, numServers, *targetImage)
|
||||
require.NoError(t, err)
|
||||
defer Terminate(t, cluster)
|
||||
numClients := 1
|
||||
|
||||
clients, err := clientsCreate(numClients)
|
||||
client := cluster.Nodes[0].GetClient()
|
||||
err = cluster.AddNodes(clients)
|
||||
retry.RunWith(&retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}, t, func(r *retry.R) {
|
||||
leader, err := cluster.Leader()
|
||||
require.NoError(r, err)
|
||||
require.NotEmpty(r, leader)
|
||||
members, err := client.Agent().Members(false)
|
||||
require.Len(r, members, 4)
|
||||
})
|
||||
serviceName := "api"
|
||||
err, index := serviceCreate(t, client, serviceName)
|
||||
|
||||
ch := make(chan []*api.ServiceEntry)
|
||||
errCh := make(chan error)
|
||||
|
||||
go func() {
|
||||
service, q, err := client.Health().Service(serviceName, "", false, &api.QueryOptions{WaitIndex: index})
|
||||
if q.QueryBackend != api.QueryBackendStreaming {
|
||||
err = fmt.Errorf("invalid backend for this test %s", q.QueryBackend)
|
||||
}
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
} else {
|
||||
ch <- service
|
||||
}
|
||||
}()
|
||||
err = client.Agent().ServiceRegister(&api.AgentServiceRegistration{Name: serviceName, Port: 9998})
|
||||
timer := time.NewTimer(1 * time.Second)
|
||||
select {
|
||||
case err := <-errCh:
|
||||
require.NoError(t, err)
|
||||
case service := <-ch:
|
||||
require.Len(t, service, 1)
|
||||
require.Equal(t, serviceName, service[0].Service.Service)
|
||||
require.Equal(t, 9998, service[0].Service.Port)
|
||||
case <-timer.C:
|
||||
t.Fatalf("test timeout")
|
||||
}
|
||||
}
|
||||
|
||||
// Test health check GRPC call using Mixed (majority latest) Servers and Latest GA Clients
|
||||
func TestMixedServersMajorityLatestGAClient(t *testing.T) {
|
||||
t.Parallel()
|
||||
var configs []node.Config
|
||||
configs = append(configs,
|
||||
node.Config{
|
||||
HCL: `node_name="` + utils.RandName("consul-server") + `"
|
||||
log_level="TRACE"
|
||||
server=true`,
|
||||
Cmd: []string{"agent", "-client=0.0.0.0"},
|
||||
Version: *targetImage,
|
||||
})
|
||||
|
||||
for i := 1; i < 3; i++ {
|
||||
configs = append(configs,
|
||||
node.Config{
|
||||
HCL: `node_name="` + utils.RandName("consul-server") + `"
|
||||
log_level="TRACE"
|
||||
bootstrap_expect=3
|
||||
server=true`,
|
||||
Cmd: []string{"agent", "-client=0.0.0.0"},
|
||||
Version: *latestImage,
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
cluster, err := cluster.New(configs)
|
||||
require.NoError(t, err)
|
||||
defer Terminate(t, cluster)
|
||||
|
||||
numClients := 1
|
||||
clients, err := clientsCreate(numClients)
|
||||
client := clients[0].GetClient()
|
||||
err = cluster.AddNodes(clients)
|
||||
retry.RunWith(&retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}, t, func(r *retry.R) {
|
||||
leader, err := cluster.Leader()
|
||||
require.NoError(r, err)
|
||||
require.NotEmpty(r, leader)
|
||||
members, err := client.Agent().Members(false)
|
||||
require.Len(r, members, 4)
|
||||
})
|
||||
|
||||
serviceName := "api"
|
||||
err, index := serviceCreate(t, client, serviceName)
|
||||
|
||||
ch := make(chan []*api.ServiceEntry)
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
service, q, err := client.Health().Service(serviceName, "", false, &api.QueryOptions{WaitIndex: index})
|
||||
if q.QueryBackend != api.QueryBackendStreaming {
|
||||
err = fmt.Errorf("invalid backend for this test %s", q.QueryBackend)
|
||||
}
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
} else {
|
||||
ch <- service
|
||||
}
|
||||
}()
|
||||
err = client.Agent().ServiceRegister(&api.AgentServiceRegistration{Name: serviceName, Port: 9998})
|
||||
timer := time.NewTimer(1 * time.Second)
|
||||
select {
|
||||
case err := <-errCh:
|
||||
require.NoError(t, err)
|
||||
case service := <-ch:
|
||||
require.Len(t, service, 1)
|
||||
require.Equal(t, serviceName, service[0].Service.Service)
|
||||
require.Equal(t, 9998, service[0].Service.Port)
|
||||
case <-timer.C:
|
||||
t.Fatalf("test timeout")
|
||||
}
|
||||
}
|
||||
|
||||
// Test health check GRPC call using Mixed (majority current) Servers and Latest GA Clients
|
||||
func TestMixedServersMajorityCurrentGAClient(t *testing.T) {
|
||||
t.Parallel()
|
||||
var configs []node.Config
|
||||
for i := 0; i < 2; i++ {
|
||||
configs = append(configs,
|
||||
node.Config{
|
||||
HCL: `node_name="` + utils.RandName("consul-server") + `"
|
||||
log_level="TRACE"
|
||||
bootstrap_expect=3
|
||||
server=true`,
|
||||
Cmd: []string{"agent", "-client=0.0.0.0"},
|
||||
Version: *targetImage,
|
||||
})
|
||||
|
||||
}
|
||||
configs = append(configs,
|
||||
node.Config{
|
||||
HCL: `node_name="` + utils.RandName("consul-server") + `"
|
||||
log_level="TRACE"
|
||||
server=true`,
|
||||
Cmd: []string{"agent", "-client=0.0.0.0"},
|
||||
Version: *latestImage,
|
||||
})
|
||||
|
||||
cluster, err := cluster.New(configs)
|
||||
require.NoError(t, err)
|
||||
defer Terminate(t, cluster)
|
||||
|
||||
numClients := 1
|
||||
clients, err := clientsCreate(numClients)
|
||||
client := clients[0].GetClient()
|
||||
err = cluster.AddNodes(clients)
|
||||
retry.RunWith(&retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}, t, func(r *retry.R) {
|
||||
leader, err := cluster.Leader()
|
||||
require.NoError(r, err)
|
||||
require.NotEmpty(r, leader)
|
||||
members, err := client.Agent().Members(false)
|
||||
require.Len(r, members, 4)
|
||||
})
|
||||
|
||||
serviceName := "api"
|
||||
err, index := serviceCreate(t, client, serviceName)
|
||||
|
||||
ch := make(chan []*api.ServiceEntry)
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
service, q, err := client.Health().Service(serviceName, "", false, &api.QueryOptions{WaitIndex: index})
|
||||
if q.QueryBackend != api.QueryBackendStreaming {
|
||||
err = fmt.Errorf("invalid backend for this test %s", q.QueryBackend)
|
||||
}
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
} else {
|
||||
ch <- service
|
||||
}
|
||||
}()
|
||||
err = client.Agent().ServiceRegister(&api.AgentServiceRegistration{Name: serviceName, Port: 9998})
|
||||
timer := time.NewTimer(1 * time.Second)
|
||||
select {
|
||||
case err := <-errCh:
|
||||
require.NoError(t, err)
|
||||
case service := <-ch:
|
||||
require.Len(t, service, 1)
|
||||
require.Equal(t, serviceName, service[0].Service.Service)
|
||||
require.Equal(t, 9998, service[0].Service.Port)
|
||||
case <-timer.C:
|
||||
t.Fatalf("test timeout")
|
||||
}
|
||||
}
|
||||
|
||||
func clientsCreate(numClients int) ([]node.Node, error) {
|
||||
clients := make([]node.Node, numClients)
|
||||
var err error
|
||||
for i := 0; i < numClients; i++ {
|
||||
clients[i], err = node.NewConsulContainer(context.Background(),
|
||||
node.Config{
|
||||
HCL: `node_name="` + utils.RandName("consul-client") + `"
|
||||
log_level="TRACE"`,
|
||||
Cmd: []string{"agent", "-client=0.0.0.0"},
|
||||
Version: *targetImage,
|
||||
})
|
||||
}
|
||||
return clients, err
|
||||
}
|
||||
|
||||
func serviceCreate(t *testing.T, client *api.Client, serviceName string) (error, uint64) {
|
||||
err := client.Agent().ServiceRegister(&api.AgentServiceRegistration{Name: serviceName, Port: 9999})
|
||||
require.NoError(t, err)
|
||||
service, meta, err := client.Catalog().Service(serviceName, "", &api.QueryOptions{})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, service, 1)
|
||||
require.Equal(t, serviceName, service[0].ServiceName)
|
||||
require.Equal(t, 9999, service[0].ServicePort)
|
||||
return err, meta.LastIndex
|
||||
}
|
||||
|
||||
func serversCluster(t *testing.T, numServers int, image string) (*cluster.Cluster, error) {
|
||||
var err error
|
||||
var configs []node.Config
|
||||
for i := 0; i < numServers; i++ {
|
||||
configs = append(configs, node.Config{
|
||||
HCL: `node_name="` + utils.RandName("consul-server") + `"
|
||||
log_level="TRACE"
|
||||
bootstrap_expect=3
|
||||
server=true`,
|
||||
Cmd: []string{"agent", "-client=0.0.0.0"},
|
||||
Version: image,
|
||||
})
|
||||
}
|
||||
cluster, err := cluster.New(configs)
|
||||
require.NoError(t, err)
|
||||
retry.RunWith(&retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}, t, func(r *retry.R) {
|
||||
leader, err := cluster.Leader()
|
||||
require.NoError(r, err)
|
||||
require.NotEmpty(r, leader)
|
||||
members, err := cluster.Nodes[0].GetClient().Agent().Members(false)
|
||||
require.Len(r, members, numServers)
|
||||
})
|
||||
return cluster, err
|
||||
}
|
||||
|
||||
// Terminate tears down every node in the cluster and fails the test if any
// node cannot be terminated. Intended for use with defer right after cluster
// creation.
func Terminate(t *testing.T, cluster *cluster.Cluster) {
	err := cluster.Terminate()
	require.NoError(t, err)
}
|
Loading…
Reference in New Issue