package consul_container

import (
	"context"
	"flag"
	"fmt"
	"testing"
	"time"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/integration/consul-container/libs/cluster"
	"github.com/hashicorp/consul/integration/consul-container/libs/node"
	"github.com/hashicorp/consul/integration/consul-container/libs/utils"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/stretchr/testify/require"
)

var targetImage = flag.String("target-version", "local", "docker image to be used as UUT (unit under test)")
var latestImage = flag.String("latest-version", "latest", "docker image to be used as the latest GA release")

const retryTimeout = 10 * time.Second
const retryFrequency = 500 * time.Millisecond

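// These tests are driven by the flags above. A typical invocation might look
// like the following (illustrative only; adjust the run pattern and image
// tags to your environment):
//
//	go test -run TestCurrentServersWithLatestGAClients -args \
//	    -target-version=local -latest-version=latest
//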
// TestCurrentServersWithLatestGAClients tests a health check gRPC call
// against current-version servers joined by a latest GA client.
func TestCurrentServersWithLatestGAClients(t *testing.T) {
	t.Parallel()
	numServers := 3
	cluster, err := serversCluster(t, numServers, *targetImage)
	require.NoError(t, err)
	defer Terminate(t, cluster)
	numClients := 1

	clients, err := clientsCreate(numClients)
	require.NoError(t, err)
	client := cluster.Nodes[0].GetClient()
	err = cluster.AddNodes(clients)
	require.NoError(t, err)
	retry.RunWith(&retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}, t, func(r *retry.R) {
		leader, err := cluster.Leader()
		require.NoError(r, err)
		require.NotEmpty(r, leader)
		members, err := client.Agent().Members(false)
		require.NoError(r, err)
		require.Len(r, members, 4)
	})
	serviceName := "api"
	index, err := serviceCreate(t, client, serviceName)
	require.NoError(t, err)

	ch := make(chan []*api.ServiceEntry)
	errCh := make(chan error)

	// Issue a blocking query from the last observed index; it must be served
	// by the streaming backend and unblock on the next registration.
	go func() {
		service, q, err := client.Health().Service(serviceName, "", false, &api.QueryOptions{WaitIndex: index})
		if err == nil && q.QueryBackend != api.QueryBackendStreaming {
			err = fmt.Errorf("invalid backend for this test: %s", q.QueryBackend)
		}
		if err != nil {
			errCh <- err
		} else {
			ch <- service
		}
	}()
	err = client.Agent().ServiceRegister(&api.AgentServiceRegistration{Name: serviceName, Port: 9998})
	require.NoError(t, err)
	timer := time.NewTimer(1 * time.Second)
	select {
	case err := <-errCh:
		require.NoError(t, err)
	case service := <-ch:
		require.Len(t, service, 1)
		require.Equal(t, serviceName, service[0].Service.Service)
		require.Equal(t, 9998, service[0].Service.Port)
	case <-timer.C:
		t.Fatalf("test timeout")
	}
}

// TestMixedServersMajorityLatestGAClient tests a health check gRPC call
// against a mixed cluster where the majority of servers run the latest GA
// version and one server runs the current (target) version.
func TestMixedServersMajorityLatestGAClient(t *testing.T) {
	t.Parallel()
	var configs []node.Config
	configs = append(configs,
		node.Config{
			HCL: `node_name="` + utils.RandName("consul-server") + `"
				log_level="TRACE"
				server=true`,
			Cmd:     []string{"agent", "-client=0.0.0.0"},
			Version: *targetImage,
		})

	for i := 1; i < 3; i++ {
		configs = append(configs,
			node.Config{
				HCL: `node_name="` + utils.RandName("consul-server") + `"
				log_level="TRACE"
				bootstrap_expect=3
				server=true`,
				Cmd:     []string{"agent", "-client=0.0.0.0"},
				Version: *latestImage,
			})
	}

	cluster, err := cluster.New(configs)
	require.NoError(t, err)
	defer Terminate(t, cluster)

	numClients := 1
	clients, err := clientsCreate(numClients)
	require.NoError(t, err)
	client := clients[0].GetClient()
	err = cluster.AddNodes(clients)
	require.NoError(t, err)
	retry.RunWith(&retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}, t, func(r *retry.R) {
		leader, err := cluster.Leader()
		require.NoError(r, err)
		require.NotEmpty(r, leader)
		members, err := client.Agent().Members(false)
		require.NoError(r, err)
		require.Len(r, members, 4)
	})

	serviceName := "api"
	index, err := serviceCreate(t, client, serviceName)
	require.NoError(t, err)

	ch := make(chan []*api.ServiceEntry)
	errCh := make(chan error)
	go func() {
		service, q, err := client.Health().Service(serviceName, "", false, &api.QueryOptions{WaitIndex: index})
		if err == nil && q.QueryBackend != api.QueryBackendStreaming {
			err = fmt.Errorf("invalid backend for this test: %s", q.QueryBackend)
		}
		if err != nil {
			errCh <- err
		} else {
			ch <- service
		}
	}()
	err = client.Agent().ServiceRegister(&api.AgentServiceRegistration{Name: serviceName, Port: 9998})
	require.NoError(t, err)
	timer := time.NewTimer(1 * time.Second)
	select {
	case err := <-errCh:
		require.NoError(t, err)
	case service := <-ch:
		require.Len(t, service, 1)
		require.Equal(t, serviceName, service[0].Service.Service)
		require.Equal(t, 9998, service[0].Service.Port)
	case <-timer.C:
		t.Fatalf("test timeout")
	}
}

// TestMixedServersMajorityCurrentGAClient tests a health check gRPC call
// against a mixed cluster where the majority of servers run the current
// (target) version and one server runs the latest GA version.
func TestMixedServersMajorityCurrentGAClient(t *testing.T) {
	t.Parallel()
	var configs []node.Config
	for i := 0; i < 2; i++ {
		configs = append(configs,
			node.Config{
				HCL: `node_name="` + utils.RandName("consul-server") + `"
				log_level="TRACE"
				bootstrap_expect=3
				server=true`,
				Cmd:     []string{"agent", "-client=0.0.0.0"},
				Version: *targetImage,
			})
	}
	configs = append(configs,
		node.Config{
			HCL: `node_name="` + utils.RandName("consul-server") + `"
				log_level="TRACE"
				server=true`,
			Cmd:     []string{"agent", "-client=0.0.0.0"},
			Version: *latestImage,
		})

	cluster, err := cluster.New(configs)
	require.NoError(t, err)
	defer Terminate(t, cluster)

	numClients := 1
	clients, err := clientsCreate(numClients)
	require.NoError(t, err)
	client := clients[0].GetClient()
	err = cluster.AddNodes(clients)
	require.NoError(t, err)
	retry.RunWith(&retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}, t, func(r *retry.R) {
		leader, err := cluster.Leader()
		require.NoError(r, err)
		require.NotEmpty(r, leader)
		members, err := client.Agent().Members(false)
		require.NoError(r, err)
		require.Len(r, members, 4)
	})

	serviceName := "api"
	index, err := serviceCreate(t, client, serviceName)
	require.NoError(t, err)

	ch := make(chan []*api.ServiceEntry)
	errCh := make(chan error)
	go func() {
		service, q, err := client.Health().Service(serviceName, "", false, &api.QueryOptions{WaitIndex: index})
		if err == nil && q.QueryBackend != api.QueryBackendStreaming {
			err = fmt.Errorf("invalid backend for this test: %s", q.QueryBackend)
		}
		if err != nil {
			errCh <- err
		} else {
			ch <- service
		}
	}()
	err = client.Agent().ServiceRegister(&api.AgentServiceRegistration{Name: serviceName, Port: 9998})
	require.NoError(t, err)
	timer := time.NewTimer(1 * time.Second)
	select {
	case err := <-errCh:
		require.NoError(t, err)
	case service := <-ch:
		require.Len(t, service, 1)
		require.Equal(t, serviceName, service[0].Service.Service)
		require.Equal(t, 9998, service[0].Service.Port)
	case <-timer.C:
		t.Fatalf("test timeout")
	}
}

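// clientsCreate starts numClients Consul client agent containers running the
// target image and returns them as nodes that can be added to a cluster.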
func clientsCreate(numClients int) ([]node.Node, error) {
	clients := make([]node.Node, numClients)
	var err error
	for i := 0; i < numClients; i++ {
		clients[i], err = node.NewConsulContainer(context.Background(),
			node.Config{
				HCL: `node_name="` + utils.RandName("consul-client") + `"
				log_level="TRACE"`,
				Cmd:     []string{"agent", "-client=0.0.0.0"},
				Version: *targetImage,
			})
		// Stop at the first failure instead of letting a later iteration
		// overwrite err and leave a nil node in the slice.
		if err != nil {
			return nil, err
		}
	}
	return clients, nil
}

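// serviceCreate registers a service named serviceName on the given agent,
// asserts that it shows up in the catalog, and returns the catalog index to
// be used as the WaitIndex of a subsequent blocking query.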
func serviceCreate(t *testing.T, client *api.Client, serviceName string) (uint64, error) {
	err := client.Agent().ServiceRegister(&api.AgentServiceRegistration{Name: serviceName, Port: 9999})
	require.NoError(t, err)
	service, meta, err := client.Catalog().Service(serviceName, "", &api.QueryOptions{})
	require.NoError(t, err)
	require.Len(t, service, 1)
	require.Equal(t, serviceName, service[0].ServiceName)
	require.Equal(t, 9999, service[0].ServicePort)
	return meta.LastIndex, nil
}

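// serversCluster creates a cluster of numServers Consul server agents from
// the given docker image and waits until a leader is elected and every
// server appears in the member list.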
func serversCluster(t *testing.T, numServers int, image string) (*cluster.Cluster, error) {
	var configs []node.Config
	for i := 0; i < numServers; i++ {
		configs = append(configs, node.Config{
			HCL: `node_name="` + utils.RandName("consul-server") + `"
				log_level="TRACE"
				bootstrap_expect=3
				server=true`,
			Cmd:     []string{"agent", "-client=0.0.0.0"},
			Version: image,
		})
	}
	cluster, err := cluster.New(configs)
	require.NoError(t, err)
	retry.RunWith(&retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}, t, func(r *retry.R) {
		leader, err := cluster.Leader()
		require.NoError(r, err)
		require.NotEmpty(r, leader)
		members, err := cluster.Nodes[0].GetClient().Agent().Members(false)
		require.NoError(r, err)
		require.Len(r, members, numServers)
	})
	return cluster, nil
}

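// Terminate shuts down every node in the cluster and fails the test if
// cleanup does not succeed.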
func Terminate(t *testing.T, cluster *cluster.Cluster) {
	err := cluster.Terminate()
	require.NoError(t, err)
}