initial code (#16296)

parent df1106e1fb
commit 29c56a1b5e
@@ -11,6 +11,7 @@ import (
 	"time"
 
 	"github.com/hashicorp/consul/sdk/testutil/retry"
+	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
 	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
 	"github.com/hashicorp/go-cleanhttp"
 	"github.com/stretchr/testify/assert"
@@ -70,7 +71,11 @@ func AssertUpstreamEndpointStatus(t *testing.T, adminPort int, clusterName, heal
 		filter := fmt.Sprintf(`.cluster_statuses[] | select(.name|contains("%s")) | [.host_statuses[].health_status.eds_health_status] | [select(.[] == "%s")] | length`, clusterName, healthStatus)
 		results, err := utils.JQFilter(clusters, filter)
 		require.NoErrorf(r, err, "could not found cluster name %s", clusterName)
-		require.Equal(r, count, len(results))
+
+		resultToString := strings.Join(results, " ")
+		result, err := strconv.Atoi(resultToString)
+		assert.NoError(r, err)
+		require.Equal(r, count, result)
 	})
 }
 
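Note on the assertion change above: the jq filter already reduces the matches to a count (its trailing `length`), so the result arrives as a string that must be parsed rather than measured with `len`. A minimal standalone sketch of that parse step (the `results` value is illustrative):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Illustrative jq output: the filter's trailing `length` yields one count.
	results := []string{"2"}

	// Mirror the diff: join the tokens, then parse the count as an integer.
	resultToString := strings.Join(results, " ")
	result, err := strconv.Atoi(resultToString)
	if err != nil {
		// The test uses assert.NoError(r, err); a multi-token join would fail here.
		panic(err)
	}
	fmt.Println(result) // 2
}
```

If the filter matched more than one cluster, the joined string would contain a space and `strconv.Atoi` would error, so the assertion implicitly requires exactly one matching cluster.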
@@ -251,3 +256,14 @@ func sanitizeResult(s string) []string {
 	result := strings.Split(strings.ReplaceAll(s, `,`, " "), " ")
 	return append(result[:0], result[1:]...)
 }
+
+// AssertServiceHasHealthyInstances asserts the number of instances of service equals count for a given service.
+// https://developer.hashicorp.com/consul/docs/connect/config-entries/service-resolver#onlypassing
+func AssertServiceHasHealthyInstances(t *testing.T, node libcluster.Agent, service string, onlypassing bool, count int) {
+	services, _, err := node.GetClient().Health().Service(service, "", onlypassing, nil)
+	require.NoError(t, err)
+	for _, v := range services {
+		fmt.Printf("%s service status: %s\n", v.Service.ID, v.Checks.AggregatedStatus())
+	}
+	require.Equal(t, count, len(services))
+}
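A hedged sketch of how the new `AssertServiceHasHealthyInstances` helper is meant to be called (the one-agent cluster setup is assumed; counts are illustrative):

```go
package upgrade

import (
	"testing"

	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
)

// checkHealthyCounts is hypothetical; `node` would come from a running cluster.
func checkHealthyCounts(t *testing.T, node libcluster.Agent) {
	// onlypassing=true counts only instances whose checks all pass.
	libassert.AssertServiceHasHealthyInstances(t, node, "static-server", true, 1)
	// onlypassing=false also counts instances in "warning" state.
	libassert.AssertServiceHasHealthyInstances(t, node, "static-server", false, 2)
}
```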
@@ -600,6 +600,20 @@ func (c *Cluster) ConfigEntryWrite(entry api.ConfigEntry) error {
 	return err
 }
+
+func (c *Cluster) ConfigEntryDelete(entry api.ConfigEntry) error {
+	client, err := c.GetClient(nil, true)
+	if err != nil {
+		return err
+	}
+
+	entries := client.ConfigEntries()
+	_, err = entries.Delete(entry.GetKind(), entry.GetName(), nil)
+	if err != nil {
+		return fmt.Errorf("error deleting config entry: %v", err)
+	}
+	return err
+}
 
 func extractSecretIDFrom(tokenOutput string) (string, error) {
 	lines := strings.Split(tokenOutput, "\n")
 	for _, line := range lines {
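Taken together with the existing `ConfigEntryWrite`, the new `ConfigEntryDelete` lets a test apply a config entry and later revert it, which is exactly how the upgrade test below toggles `OnlyPassing`. A sketch of the round trip (cluster construction omitted; values mirror the test added later in this commit):

```go
package upgrade

import (
	"time"

	"github.com/hashicorp/consul/api"
	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
)

// toggleResolver is hypothetical: write a service-resolver entry, then delete
// it so resolution falls back to the default behavior.
func toggleResolver(cluster *libcluster.Cluster) error {
	resolver := &api.ServiceResolverConfigEntry{
		Kind:          api.ServiceResolver,
		Name:          "static-server",
		DefaultSubset: "test",
		Subsets: map[string]api.ServiceResolverSubset{
			"test": {OnlyPassing: true},
		},
		ConnectTimeout: 120 * time.Second,
	}
	if err := cluster.ConfigEntryWrite(resolver); err != nil {
		return err
	}
	// ... assertions that only passing instances are served would go here ...
	return cluster.ConfigEntryDelete(resolver)
}
```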
@@ -3,6 +3,7 @@ package service
 import (
 	"context"
 	"fmt"
+
 	"github.com/hashicorp/consul/api"
 	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
 	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
@@ -14,20 +15,60 @@ const (
 	StaticClientServiceName = "static-client"
 )
 
+type Checks struct {
+	Name string
+	TTL  string
+}
+
+type SidecarService struct {
+	Port int
+}
+
 type ServiceOpts struct {
 	Name     string
 	ID       string
 	Meta     map[string]string
 	HTTPPort int
 	GRPCPort int
+	Checks   Checks
+	Connect  SidecarService
 }
 
-func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts *ServiceOpts) (Service, Service, error) {
+// createAndRegisterStaticServerAndSidecar register the services and launch static-server containers
+func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, grpcPort int, svc *api.AgentServiceRegistration) (Service, Service, error) {
 	// Do some trickery to ensure that partial completion is correctly torn
 	// down, but successful execution is not.
 	var deferClean utils.ResettableDefer
 	defer deferClean.Execute()
 
+	if err := node.GetClient().Agent().ServiceRegister(svc); err != nil {
+		return nil, nil, err
+	}
+
+	// Create a service and proxy instance
+	serverService, err := NewExampleService(context.Background(), svc.ID, svc.Port, grpcPort, node)
+	if err != nil {
+		return nil, nil, err
+	}
+	deferClean.Add(func() {
+		_ = serverService.Terminate()
+	})
+
+	serverConnectProxy, err := NewConnectService(context.Background(), fmt.Sprintf("%s-sidecar", svc.ID), svc.ID, []int{svc.Port}, node) // bindPort not used
+	if err != nil {
+		return nil, nil, err
+	}
+	deferClean.Add(func() {
+		_ = serverConnectProxy.Terminate()
+	})
+
+	// disable cleanup functions now that we have an object with a Terminate() function
+	deferClean.Reset()
+
+	return serverService, serverConnectProxy, nil
+}
+
+func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts *ServiceOpts) (Service, Service, error) {
 	// Register the static-server service and sidecar first to prevent race with sidecar
 	// trying to get xDS before it's ready
 	req := &api.AgentServiceRegistration{
@@ -47,32 +88,32 @@ func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts
 		},
 		Meta: serviceOpts.Meta,
 	}
-
-	if err := node.GetClient().Agent().ServiceRegister(req); err != nil {
-		return nil, nil, err
-	}
-
-	// Create a service and proxy instance
-	serverService, err := NewExampleService(context.Background(), serviceOpts.ID, serviceOpts.HTTPPort, serviceOpts.GRPCPort, node)
-	if err != nil {
-		return nil, nil, err
-	}
-	deferClean.Add(func() {
-		_ = serverService.Terminate()
-	})
-
-	serverConnectProxy, err := NewConnectService(context.Background(), fmt.Sprintf("%s-sidecar", serviceOpts.ID), serviceOpts.ID, []int{serviceOpts.HTTPPort}, node) // bindPort not used
-	if err != nil {
-		return nil, nil, err
-	}
-	deferClean.Add(func() {
-		_ = serverConnectProxy.Terminate()
-	})
-
-	// disable cleanup functions now that we have an object with a Terminate() function
-	deferClean.Reset()
-
-	return serverService, serverConnectProxy, nil
+	return createAndRegisterStaticServerAndSidecar(node, serviceOpts.GRPCPort, req)
+}
+
+func CreateAndRegisterStaticServerAndSidecarWithChecks(node libcluster.Agent, serviceOpts *ServiceOpts) (Service, Service, error) {
+	// Register the static-server service and sidecar first to prevent race with sidecar
+	// trying to get xDS before it's ready
+	req := &api.AgentServiceRegistration{
+		Name: serviceOpts.Name,
+		ID:   serviceOpts.ID,
+		Port: serviceOpts.HTTPPort,
+		Connect: &api.AgentServiceConnect{
+			SidecarService: &api.AgentServiceRegistration{
+				Proxy: &api.AgentServiceConnectProxyConfig{},
+				Port:  serviceOpts.Connect.Port,
+			},
+		},
+		Checks: api.AgentServiceChecks{
+			{
+				Name: serviceOpts.Checks.Name,
+				TTL:  serviceOpts.Checks.TTL,
+			},
+		},
+		Meta: serviceOpts.Meta,
+	}
+
+	return createAndRegisterStaticServerAndSidecar(node, serviceOpts.GRPCPort, req)
 }
 
 func CreateAndRegisterStaticClientSidecar(
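The refactor above moves registration and container startup into the unexported `createAndRegisterStaticServerAndSidecar`, so the two exported helpers differ only in how they build the `api.AgentServiceRegistration`. A sketch of calling the new `WithChecks` variant with the new `ServiceOpts` fields (values mirror the upgrade test below; the `node` setup is assumed):

```go
package upgrade

import (
	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
)

// registerV1WithTTLCheck is hypothetical; `node` would come from a running cluster.
func registerV1WithTTLCheck(node libcluster.Agent) (libservice.Service, libservice.Service, error) {
	opts := &libservice.ServiceOpts{
		Name:     "static-server",
		ID:       "static-server-v1",
		Meta:     map[string]string{"version": "v1"},
		HTTPPort: 8081,
		GRPCPort: 8078,
		// Fields introduced by this commit: a TTL check and an explicit sidecar port.
		Checks:  libservice.Checks{Name: "main", TTL: "30m"},
		Connect: libservice.SidecarService{Port: 21011},
	}
	// Registers the service and sidecar, then launches both containers.
	return libservice.CreateAndRegisterStaticServerAndSidecarWithChecks(node, opts)
}
```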
@@ -18,7 +18,7 @@ import (
 	"gotest.tools/assert"
 )
 
-// TestTrafficManagement_Upgrade Summary
+// TestTrafficManagement_ServiceResolverDefaultSubset Summary
 // This test starts up 3 servers and 1 client in the same datacenter.
 //
 // Steps:
@@ -26,7 +26,7 @@ import (
 // - Create one static-server and 2 subsets and 1 client and sidecar, then register them with Consul
 // - Validate static-server and 2 subsets are and proxy admin endpoint is healthy - 3 instances
 // - Validate static servers proxy listeners should be up and have right certs
-func TestTrafficManagement_ServiceWithSubsets(t *testing.T) {
+func TestTrafficManagement_ServiceResolverDefaultSubset(t *testing.T) {
 	t.Parallel()
 
 	var responseFormat = map[string]string{"format": "json"}
@@ -151,8 +151,8 @@ func createService(t *testing.T, cluster *libcluster.Cluster) (libservice.Servic
 		GRPCPort: 8079,
 	}
 	_, serverConnectProxy, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts)
-	libassert.CatalogServiceExists(t, client, "static-server")
 	require.NoError(t, err)
+	libassert.CatalogServiceExists(t, client, "static-server")
 
 	serviceOptsV1 := &libservice.ServiceOpts{
 		Name: libservice.StaticServerServiceName,
@@ -162,8 +162,8 @@ func createService(t *testing.T, cluster *libcluster.Cluster) (libservice.Servic
 		GRPCPort: 8078,
 	}
 	_, serverConnectProxyV1, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOptsV1)
-	libassert.CatalogServiceExists(t, client, "static-server")
 	require.NoError(t, err)
+	libassert.CatalogServiceExists(t, client, "static-server")
 
 	serviceOptsV2 := &libservice.ServiceOpts{
 		Name: libservice.StaticServerServiceName,
@@ -173,8 +173,8 @@ func createService(t *testing.T, cluster *libcluster.Cluster) (libservice.Servic
 		GRPCPort: 8077,
 	}
 	_, serverConnectProxyV2, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOptsV2)
-	libassert.CatalogServiceExists(t, client, "static-server")
 	require.NoError(t, err)
+	libassert.CatalogServiceExists(t, client, "static-server")
 
 	// Create a client proxy instance with the server as an upstream
 	clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false)
@@ -0,0 +1,196 @@
+package upgrade
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/consul/api"
+	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
+	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
+	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
+	libutils "github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
+	"github.com/hashicorp/go-version"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// TestTrafficManagement_ServiceResolverSubsetOnlyPassing Summary
+// This test starts up 2 servers and 1 client in the same datacenter.
+//
+// Steps:
+// - Create a single agent cluster.
+// - Create one static-server, 1 subset server 1 client and sidecars for all services, then register them with Consul
+func TestTrafficManagement_ServiceResolverSubsetOnlyPassing(t *testing.T) {
+	t.Parallel()
+
+	responseFormat := map[string]string{"format": "json"}
+
+	type testcase struct {
+		oldversion    string
+		targetVersion string
+	}
+	tcs := []testcase{
+		{
+			oldversion:    "1.13",
+			targetVersion: utils.TargetVersion,
+		},
+		{
+			oldversion:    "1.14",
+			targetVersion: utils.TargetVersion,
+		},
+	}
+
+	run := func(t *testing.T, tc testcase) {
+		buildOpts := &libcluster.BuildOptions{
+			ConsulVersion:        tc.oldversion,
+			Datacenter:           "dc1",
+			InjectAutoEncryption: true,
+		}
+		// If version < 1.14 disable AutoEncryption
+		oldVersion, _ := version.NewVersion(tc.oldversion)
+		if oldVersion.LessThan(libutils.Version_1_14) {
+			buildOpts.InjectAutoEncryption = false
+		}
+		cluster, _, _ := topology.NewPeeringCluster(t, 1, buildOpts)
+		node := cluster.Agents[0]
+
+		// Register service resolver
+		serviceResolver := &api.ServiceResolverConfigEntry{
+			Kind:          api.ServiceResolver,
+			Name:          libservice.StaticServerServiceName,
+			DefaultSubset: "test",
+			Subsets: map[string]api.ServiceResolverSubset{
+				"test": {
+					OnlyPassing: true,
+				},
+			},
+			ConnectTimeout: 120 * time.Second,
+		}
+		err := cluster.ConfigEntryWrite(serviceResolver)
+		require.NoError(t, err)
+
+		serverConnectProxy, serverConnectProxyV1, clientConnectProxy := createServiceAndSubset(t, cluster)
+
+		_, port := clientConnectProxy.GetAddr()
+		_, adminPort := clientConnectProxy.GetAdminAddr()
+		_, serverAdminPort := serverConnectProxy.GetAdminAddr()
+		_, serverAdminPortV1 := serverConnectProxyV1.GetAdminAddr()
+
+		// Upgrade cluster, restart sidecars then begin service traffic validation
+		require.NoError(t, cluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
+		require.NoError(t, clientConnectProxy.Restart())
+		require.NoError(t, serverConnectProxy.Restart())
+		require.NoError(t, serverConnectProxyV1.Restart())
+
+		// force static-server-v1 into a warning state
+		err = node.GetClient().Agent().UpdateTTL("service:static-server-v1", "", "warn")
+		assert.NoError(t, err)
+
+		// validate static-client is up and running
+		libassert.AssertContainerState(t, clientConnectProxy, "running")
+		libassert.HTTPServiceEchoes(t, "localhost", port, "")
+
+		// validate static-client proxy admin is up
+		_, clientStatusCode, err := libassert.GetEnvoyOutput(adminPort, "stats", responseFormat)
+		require.NoError(t, err)
+		assert.Equal(t, http.StatusOK, clientStatusCode, fmt.Sprintf("service cannot be reached %v", clientStatusCode))
+
+		// validate static-server proxy admin is up
+		_, serverStatusCode, err := libassert.GetEnvoyOutput(serverAdminPort, "stats", responseFormat)
+		require.NoError(t, err)
+		assert.Equal(t, http.StatusOK, serverStatusCode, fmt.Sprintf("service cannot be reached %v", serverStatusCode))
+
+		// validate static-server-v1 proxy admin is up
+		_, serverStatusCodeV1, err := libassert.GetEnvoyOutput(serverAdminPortV1, "stats", responseFormat)
+		require.NoError(t, err)
+		assert.Equal(t, http.StatusOK, serverStatusCodeV1, fmt.Sprintf("service cannot be reached %v", serverStatusCodeV1))
+
+		// certs are valid
+		libassert.AssertEnvoyPresentsCertURI(t, adminPort, libservice.StaticClientServiceName)
+		libassert.AssertEnvoyPresentsCertURI(t, serverAdminPort, libservice.StaticServerServiceName)
+		libassert.AssertEnvoyPresentsCertURI(t, serverAdminPortV1, libservice.StaticServerServiceName)
+
+		// ###########################
+		// ## with onlypassing=true
+		// assert only one static-server proxy is healthy
+		libassert.AssertServiceHasHealthyInstances(t, node, libservice.StaticServerServiceName, true, 1)
+
+		// static-client upstream should have 1 healthy endpoint for test.static-server
+		libassert.AssertUpstreamEndpointStatus(t, adminPort, "test.static-server.default", "HEALTHY", 1)
+
+		// static-client upstream should have 1 unhealthy endpoint for test.static-server
+		libassert.AssertUpstreamEndpointStatus(t, adminPort, "test.static-server.default", "UNHEALTHY", 1)
+
+		// static-client upstream should connect to static-server-v2 because the default subset value is to v2 set in the service resolver
+		libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), libservice.StaticServerServiceName)
+
+		// ###########################
+		// ## with onlypassing=false
+		// revert to OnlyPassing=false by deleting the config
+		err = cluster.ConfigEntryDelete(serviceResolver)
+		require.NoError(t, err)
+
+		// Consul health check assert only one static-server proxy is healthy when onlyPassing is false
+		libassert.AssertServiceHasHealthyInstances(t, node, libservice.StaticServerServiceName, false, 2)
+
+		// Although the service status is in warning state, when onlypassing is set to false Envoy
+		// health check returns all service instances with "warning" or "passing" state as Healthy enpoints
+		libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "HEALTHY", 2)
+
+		// static-client upstream should have 0 unhealthy endpoint for static-server
+		libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "UNHEALTHY", 0)
+	}
+
+	for _, tc := range tcs {
+		t.Run(fmt.Sprintf("upgrade from %s to %s", tc.oldversion, tc.targetVersion),
+			func(t *testing.T) {
+				run(t, tc)
+			})
+	}
+}
+
+// create 2 servers and 1 client
+func createServiceAndSubset(t *testing.T, cluster *libcluster.Cluster) (libservice.Service, libservice.Service, libservice.Service) {
+	node := cluster.Agents[0]
+	client := node.GetClient()
+
+	serviceOpts := &libservice.ServiceOpts{
+		Name:     libservice.StaticServerServiceName,
+		ID:       libservice.StaticServerServiceName,
+		HTTPPort: 8080,
+		GRPCPort: 8079,
+	}
+	_, serverConnectProxy, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts)
+	require.NoError(t, err)
+	libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName)
+
+	serviceOptsV1 := &libservice.ServiceOpts{
+		Name:     libservice.StaticServerServiceName,
+		ID:       "static-server-v1",
+		Meta:     map[string]string{"version": "v1"},
+		HTTPPort: 8081,
+		GRPCPort: 8078,
+		Checks: libservice.Checks{
+			Name: "main",
+			TTL:  "30m",
+		},
+		Connect: libservice.SidecarService{
+			Port: 21011,
+		},
+	}
+	_, serverConnectProxyV1, err := libservice.CreateAndRegisterStaticServerAndSidecarWithChecks(node, serviceOptsV1)
+	require.NoError(t, err)
+	libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName)
+
+	// Create a client proxy instance with the server as an upstream
+	clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false)
+	require.NoError(t, err)
+	libassert.CatalogServiceExists(t, client, fmt.Sprintf("%s-sidecar-proxy", libservice.StaticClientServiceName))
+
+	return serverConnectProxy, serverConnectProxyV1, clientConnectProxy
+}
@@ -64,10 +64,11 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
 		GRPCPort: 8078,
 	}
 	_, serverConnectProxy, err := libservice.CreateAndRegisterStaticServerAndSidecar(c.Clients()[0], serviceOpts)
-	libassert.CatalogServiceExists(t, c.Clients()[0].GetClient(), libservice.StaticServer2ServiceName)
 	if err != nil {
 		return nil, nil, nil, err
 	}
+	libassert.CatalogServiceExists(t, c.Clients()[0].GetClient(), libservice.StaticServer2ServiceName)
+
 	err = c.ConfigEntryWrite(&api.ProxyConfigEntry{
 		Kind: api.ProxyDefaults,
 		Name: "global",