NET-2292: port ingress-gateway test case "http" from BATS addendum (#16490)

Nick Irvine 2023-03-01 12:45:27 -08:00, committed by GitHub
parent c898a26ba0
commit f3d8341d05
12 changed files with 360 additions and 31 deletions

View File

@@ -122,8 +122,10 @@ func ServiceLogContains(t *testing.T, service libservice.Service, target string)
// has a `FORTIO_NAME` env variable set. This validates that the client is sending
// traffic to the right envoy proxy.
//
+// If reqHost is set, the Host field of the HTTP request will be set to its value.
+//
// It retries with timeout defaultHTTPTimeout and wait defaultHTTPWait.
-func AssertFortioName(t *testing.T, urlbase string, name string) {
+func AssertFortioName(t *testing.T, urlbase string, name string, reqHost string) {
t.Helper()
var fortioNameRE = regexp.MustCompile(("\nFORTIO_NAME=(.+)\n"))
client := &http.Client{
@@ -133,11 +135,13 @@ func AssertFortioName(t *testing.T, urlbase string, name string) {
}
retry.RunWith(&retry.Timer{Timeout: defaultHTTPTimeout, Wait: defaultHTTPWait}, t, func(r *retry.R) {
fullurl := fmt.Sprintf("%s/debug?env=dump", urlbase)
t.Logf("making call to %s", fullurl)
req, err := http.NewRequest("GET", fullurl, nil)
if err != nil {
r.Fatal("could not make request to service ", fullurl)
}
+if reqHost != "" {
+req.Host = reqHost
+}
resp, err := client.Do(req)
if err != nil {

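For illustration, the two call shapes the extended signature supports — a sketch assembled from callers elsewhere in this commit (variable names are those used at the call sites):

// Existing callers pass "" so the request's Host header is left untouched:
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")

// The new ingress-gateway test overrides Host so Envoy's ingress listener
// routes the request to the "router" virtual service:
libassert.AssertFortioName(t,
	fmt.Sprintf("http://localhost:%d/%s", portRouterMapped.Int(), nameS1),
	nameS1, fmt.Sprintf("router.ingress.consul:%d", portRouter))
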
View File

@@ -125,10 +125,10 @@ func (c *Cluster) Add(configs []Config, serfJoin bool, ports ...int) (xe error)
// Each agent gets its own area in the cluster scratch.
conf.ScratchDir = filepath.Join(c.ScratchDir, strconv.Itoa(c.Index))
if err := os.MkdirAll(conf.ScratchDir, 0777); err != nil {
-return err
+return fmt.Errorf("container %d: %w", idx, err)
}
if err := os.Chmod(conf.ScratchDir, 0777); err != nil {
-return err
+return fmt.Errorf("container %d: %w", idx, err)
}
n, err := NewConsulContainer(
@@ -138,7 +138,7 @@ func (c *Cluster) Add(configs []Config, serfJoin bool, ports ...int) (xe error)
ports...,
)
if err != nil {
-return fmt.Errorf("could not add container index %d: %w", idx, err)
+return fmt.Errorf("container %d: %w", idx, err)
}
}
agents = append(agents, n)
c.Index++

View File

@@ -44,12 +44,12 @@ func BasicPeeringTwoClustersSetup(
peeringThroughMeshgateway bool,
) (*BuiltCluster, *BuiltCluster) {
// acceptingCluster, acceptingCtx, acceptingClient := NewPeeringCluster(t, "dc1", 3, consulVersion, true)
-acceptingCluster, acceptingCtx, acceptingClient := NewPeeringCluster(t, 3, &libcluster.BuildOptions{
+acceptingCluster, acceptingCtx, acceptingClient := NewPeeringCluster(t, 3, 1, &libcluster.BuildOptions{
Datacenter: "dc1",
ConsulVersion: consulVersion,
InjectAutoEncryption: true,
})
-dialingCluster, dialingCtx, dialingClient := NewPeeringCluster(t, 1, &libcluster.BuildOptions{
+dialingCluster, dialingCtx, dialingClient := NewPeeringCluster(t, 1, 1, &libcluster.BuildOptions{
Datacenter: "dc2",
ConsulVersion: consulVersion,
InjectAutoEncryption: true,
@@ -133,7 +133,7 @@ func BasicPeeringTwoClustersSetup(
libassert.AssertUpstreamEndpointStatus(t, adminPort, fmt.Sprintf("static-server.default.%s.external", DialingPeerName), "HEALTHY", 1)
_, port := clientSidecarService.GetAddr()
libassert.HTTPServiceEchoes(t, "localhost", port, "")
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
return &BuiltCluster{
Cluster: acceptingCluster,
@@ -210,6 +210,7 @@ func NewDialingCluster(
func NewPeeringCluster(
t *testing.T,
numServers int,
+numClients int,
buildOpts *libcluster.BuildOptions,
) (*libcluster.Cluster, *libcluster.BuildContext, *api.Client) {
require.NotEmpty(t, buildOpts.Datacenter)
@@ -239,7 +240,7 @@ func NewPeeringCluster(
retryJoin = append(retryJoin, fmt.Sprintf("agent-%d", i))
}
-// Add a stable client to register the service
+// Add numClients static clients to register the service
configbuilder := libcluster.NewConfigBuilder(ctx).
Client().
Peering(true).
@@ -247,13 +248,13 @@ func NewPeeringCluster(
clientConf := configbuilder.ToAgentConfig(t)
t.Logf("%s client config: \n%s", opts.Datacenter, clientConf.JSON)
-require.NoError(t, cluster.AddN(*clientConf, 1, true))
+require.NoError(t, cluster.AddN(*clientConf, numClients, true))
// Use the client agent as the HTTP endpoint since we will not rotate it in many tests.
clientNode := cluster.Agents[numServers]
client := clientNode.GetClient()
libcluster.WaitForLeader(t, cluster, client)
-libcluster.WaitForMembers(t, client, numServers+1)
+libcluster.WaitForMembers(t, client, numServers+numClients)
// Default Proxy Settings
ok, err := utils.ApplyDefaultProxySettings(client)
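
For illustration, the updated call shape with the new numClients parameter, as used by the ingress-gateway test added later in this commit (one server plus two client agents):

// Builds a one-server cluster and two client agents; WaitForMembers now
// expects numServers+numClients members (3 here).
cluster, _, client := topology.NewPeeringCluster(t, 1, 2, &libcluster.BuildOptions{
	Datacenter:    "dc1",
	ConsulVersion: oldVersion,
})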

View File

@@ -41,7 +41,7 @@ func TestBasicConnectService(t *testing.T) {
libassert.AssertContainerState(t, clientService, "running")
libassert.HTTPServiceEchoes(t, "localhost", port, "")
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
}
func createServices(t *testing.T, cluster *libcluster.Cluster) libservice.Service {

View File

@@ -45,7 +45,7 @@ func TestAccessLogs(t *testing.T) {
t.Skip()
}
-cluster, _, _ := topology.NewPeeringCluster(t, 1, &libcluster.BuildOptions{
+cluster, _, _ := topology.NewPeeringCluster(t, 1, 1, &libcluster.BuildOptions{
Datacenter: "dc1",
InjectAutoEncryption: true,
})
@@ -70,7 +70,7 @@ func TestAccessLogs(t *testing.T) {
// Validate Custom JSON
require.Eventually(t, func() bool {
libassert.HTTPServiceEchoes(t, "localhost", port, "banana")
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
client := libassert.ServiceLogContains(t, clientService, "\"banana_path\":\"/banana\"")
server := libassert.ServiceLogContains(t, serverService, "\"banana_path\":\"/banana\"")
return client && server
@@ -112,7 +112,7 @@ func TestAccessLogs(t *testing.T) {
_, port = clientService.GetAddr()
require.Eventually(t, func() bool {
libassert.HTTPServiceEchoes(t, "localhost", port, "orange")
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
client := libassert.ServiceLogContains(t, clientService, "Orange you glad I didn't say banana: /orange, -")
server := libassert.ServiceLogContains(t, serverService, "Orange you glad I didn't say banana: /orange, -")
return client && server

View File

@@ -94,7 +94,7 @@ func TestPeering_RotateServerAndCAThenFail_(t *testing.T) {
_, port := clientSidecarService.GetAddr()
libassert.HTTPServiceEchoes(t, "localhost", port, "")
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
}
testutil.RunStep(t, "rotate exporting cluster's root CA", func(t *testing.T) {
@@ -144,7 +144,7 @@ func TestPeering_RotateServerAndCAThenFail_(t *testing.T) {
// Connectivity should still be maintained
_, port := clientSidecarService.GetAddr()
libassert.HTTPServiceEchoes(t, "localhost", port, "")
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
verifySidecarHasTwoRootCAs(t, clientSidecarService)
})
@@ -166,7 +166,7 @@ func TestPeering_RotateServerAndCAThenFail_(t *testing.T) {
_, port := clientSidecarService.GetAddr()
libassert.HTTPServiceEchoes(t, "localhost", port, "")
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
})
}

View File

@@ -18,7 +18,7 @@ import (
func TestTroubleshootProxy(t *testing.T) {
t.Parallel()
-cluster, _, _ := topology.NewPeeringCluster(t, 1, &libcluster.BuildOptions{
+cluster, _, _ := topology.NewPeeringCluster(t, 1, 1, &libcluster.BuildOptions{
Datacenter: "dc1",
InjectAutoEncryption: true,
})

View File

@@ -36,7 +36,7 @@ func TestACL_Upgrade_Node_Token(t *testing.T) {
run := func(t *testing.T, tc testcase) {
// NOTE: Disable auto.encrypt due to its conflict with ACL token during bootstrap
-cluster, _, _ := libtopology.NewPeeringCluster(t, 1, &libcluster.BuildOptions{
+cluster, _, _ := libtopology.NewPeeringCluster(t, 1, 1, &libcluster.BuildOptions{
Datacenter: "dc1",
ConsulVersion: tc.oldversion,
InjectAutoEncryption: false,

View File

@@ -0,0 +1,324 @@
package upgrade
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"testing"
"time"
"github.com/docker/go-connections/nat"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
libutils "github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// These tests adapt BATS-based tests from test/integration/connect/case-ingress-gateway*
// TestIngressGateway_UpgradeToTarget_fromLatest:
// - starts a cluster with 2 static services,
// - configures an ingress gateway + router
// - performs tests to ensure our routing rules work (namely header manipulation)
// - upgrades the cluster
// - performs these tests again
func TestIngressGateway_UpgradeToTarget_fromLatest(t *testing.T) {
t.Parallel()
type testcase struct {
oldversion string
targetVersion string
}
tcs := []testcase{
{
oldversion: "1.13",
targetVersion: libutils.TargetVersion,
},
{
oldversion: "1.14",
targetVersion: libutils.TargetVersion,
},
}
run := func(t *testing.T, oldVersion, targetVersion string) {
// setup
// TODO? we don't need a peering cluster, so maybe this is overkill
cluster, _, client := topology.NewPeeringCluster(t, 1, 2, &libcluster.BuildOptions{
Datacenter: "dc1",
ConsulVersion: oldVersion,
// TODO? InjectAutoEncryption: true,
})
// upsert config entry making http default protocol for global
require.NoError(t, cluster.ConfigEntryWrite(&api.ProxyConfigEntry{
Name: api.ProxyConfigGlobal,
Kind: api.ProxyDefaults,
Config: map[string]interface{}{
"protocol": "http",
},
}))
const (
nameIG = "ingress-gateway"
nameRouter = "router"
)
// upsert config entry for `service-router` `router`:
// - prefix matching `/$nameS1` goes to service s1
// - prefix matching `/$nameS2` goes to service s2
const nameS1 = libservice.StaticServerServiceName
const nameS2 = libservice.StaticServer2ServiceName
require.NoError(t, cluster.ConfigEntryWrite(&api.ServiceRouterConfigEntry{
Kind: api.ServiceRouter,
// This is a "virtual" service name and will not have a backing
// service definition. It must match the name defined in the ingress
// configuration.
Name: nameRouter,
Routes: []api.ServiceRoute{
{
Match: &api.ServiceRouteMatch{
HTTP: &api.ServiceRouteHTTPMatch{
PathPrefix: fmt.Sprintf("/%s/", nameS1),
},
},
Destination: &api.ServiceRouteDestination{
Service: nameS1,
PrefixRewrite: "/",
},
},
{
Match: &api.ServiceRouteMatch{
HTTP: &api.ServiceRouteHTTPMatch{
PathPrefix: fmt.Sprintf("/%s/", nameS2),
},
},
Destination: &api.ServiceRouteDestination{
Service: nameS2,
PrefixRewrite: "/",
},
},
},
}))
igw, err := libservice.NewGatewayService(context.Background(), nameIG, "ingress", cluster.Servers()[0])
require.NoError(t, err)
t.Logf("created gateway: %#v", igw)
// upsert config entry for ingress-gateway ig1, protocol http, service s1
// - listener points at service `router`
// - add request headers: 1 new, 1 existing
// - set request headers: 1 existing, 1 new, to client IP
// - add response headers: 1 new, 1 existing
// - set response headers: 1 existing
// - remove response header: 1 existing
// this must be one of the externally-mapped ports from
// https://github.com/hashicorp/consul/blob/c5e729e86576771c4c22c6da1e57aaa377319323/test/integration/consul-container/libs/cluster/container.go#L521-L525
const portRouter = 8080
require.NoError(t, cluster.ConfigEntryWrite(&api.IngressGatewayConfigEntry{
Kind: api.IngressGateway,
Name: nameIG,
Listeners: []api.IngressListener{
{
Port: portRouter,
Protocol: "http",
Services: []api.IngressService{
{
Name: nameRouter,
// TODO: extract these header values to consts to test
RequestHeaders: &api.HTTPHeaderModifiers{
Add: map[string]string{
"x-foo": "bar-req",
"x-existing-1": "appended-req",
},
Set: map[string]string{
"x-existing-2": "replaced-req",
"x-client-ip": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%",
},
Remove: []string{"x-bad-req"},
},
ResponseHeaders: &api.HTTPHeaderModifiers{
Add: map[string]string{
"x-foo": "bar-resp",
"x-existing-1": "appended-resp",
},
Set: map[string]string{
"x-existing-2": "replaced-resp",
},
Remove: []string{"x-bad-resp"},
},
},
},
},
},
}))
// create s1
_, _, err = libservice.CreateAndRegisterStaticServerAndSidecar(
cluster.Clients()[0],
&libservice.ServiceOpts{
Name: nameS1,
ID: nameS1,
HTTPPort: 8080,
GRPCPort: 8079,
},
)
require.NoError(t, err)
libassert.CatalogServiceExists(t, client, nameS1)
// create s2
_, _, err = libservice.CreateAndRegisterStaticServerAndSidecar(
cluster.Clients()[1],
&libservice.ServiceOpts{
Name: nameS2,
ID: nameS2,
HTTPPort: 8080,
GRPCPort: 8079,
},
)
require.NoError(t, err)
libassert.CatalogServiceExists(t, client, nameS2)
// checks
// TODO: other checks from verify.bats
// ingress-gateway proxy admin up
// s1 proxy admin up
// s2 proxy admin up
// s1 proxy listener has right cert
// s2 proxy listener has right cert
// ig1 has healthy endpoints for s1
// ig1 has healthy endpoints for s2
// TODO ^ ??? s1 and s2 aren't direct listeners, only in `router`, so why are they endpoints?
// tests
tests := func(t *testing.T) {
// fortio name should be $nameS<X> for /$nameS<X> prefix on router
portRouterMapped, err := cluster.Servers()[0].GetPod().MappedPort(
context.Background(),
nat.Port(fmt.Sprintf("%d/tcp", portRouter)),
)
require.NoError(t, err)
reqHost := fmt.Sprintf("router.ingress.consul:%d", portRouter)
libassert.AssertFortioName(t,
fmt.Sprintf("http://localhost:%d/%s", portRouterMapped.Int(), nameS1), nameS1, reqHost)
libassert.AssertFortioName(t,
fmt.Sprintf("http://localhost:%d/%s", portRouterMapped.Int(), nameS2), nameS2, reqHost)
urlbaseS2 := fmt.Sprintf("http://%s/%s", reqHost, nameS2)
t.Run("request header manipulation", func(t *testing.T) {
resp := mappedHTTPGET(t, fmt.Sprintf("%s/debug?env=dump", urlbaseS2), portRouterMapped.Int(), http.Header(map[string][]string{
"X-Existing-1": {"original"},
"X-Existing-2": {"original"},
"X-Bad-Req": {"true"},
}))
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
// The following assertions check the body, which should echo the headers
// received by the fortio container
assert.Contains(t, string(body), "X-Foo: bar-req",
"Ingress should have added the new request header")
assert.Contains(t, string(body), "X-Existing-1: original,appended-req",
"Ingress should have appended the first existing header - both should be present")
assert.Contains(t, string(body), "X-Existing-2: replaced-req",
"Ingress should have replaced the second existing header")
// TODO: This 172. is the prefix of the IP for the gateway for our docker network.
// Perhaps there's some way to look this up.
// This is a deviation from BATS, because their tests run inside Docker, and ours run outside.
assert.Contains(t, string(body), "X-Client-Ip: 172.",
"Ingress should have set the client ip from dynamic Envoy variable")
assert.NotContains(t, string(body), "X-Bad-Req: true",
"Ingress should have removed the bad request header")
})
t.Run("response header manipulation", func(t *testing.T) {
const params = "?header=x-bad-resp:true&header=x-existing-1:original&header=x-existing-2:original"
resp := mappedHTTPGET(t,
fmt.Sprintf("%s/echo%s", urlbaseS2, params),
portRouterMapped.Int(),
nil,
)
defer resp.Body.Close()
assert.Contains(t, resp.Header.Values("x-foo"), "bar-resp",
"Ingress should have added the new response header")
assert.Contains(t, resp.Header.Values("x-existing-1"), "original",
"Ingress should have appended the first existing header - both should be present")
assert.Contains(t, resp.Header.Values("x-existing-1"), "appended-resp",
"Ingress should have appended the first existing header - both should be present")
assert.Contains(t, resp.Header.Values("x-existing-2"), "replaced-resp",
"Ingress should have replaced the second existing header")
assert.NotContains(t, resp.Header.Values("x-existing-2"), "original",
"x-existing-2 response header should have been overridden")
assert.NotContains(t, resp.Header.Values("x-bad-resp"), "true",
"X-Bad-Resp response header should have been stripped")
})
}
t.Run(fmt.Sprintf("pre-upgrade from %s to %s", oldVersion, targetVersion), func(t *testing.T) {
tests(t)
})
if t.Failed() {
t.Fatal("failing fast: failed assertions pre-upgrade")
}
// Upgrade the cluster to targetVersion
t.Logf("Upgrade to version %s", targetVersion)
err = cluster.StandardUpgrade(t, context.Background(), targetVersion)
require.NoError(t, err)
require.NoError(t, igw.Restart())
t.Run(fmt.Sprintf("post-upgrade from %s to %s", oldVersion, targetVersion), func(t *testing.T) {
tests(t)
})
}
for _, tc := range tcs {
// copy to avoid lint loopclosure
tc := tc
t.Run(fmt.Sprintf("upgrade from %s to %s", tc.oldversion, tc.targetVersion),
func(t *testing.T) {
t.Parallel()
run(t, tc.oldversion, tc.targetVersion)
})
time.Sleep(3 * time.Second)
}
}
func mappedHTTPGET(t *testing.T, uri string, mappedPort int, header http.Header) *http.Response {
t.Helper()
u, err := url.Parse(uri)
require.NoError(t, err)
hostHdr := u.Host
u.Host = fmt.Sprintf("localhost:%d", mappedPort)
uri = u.String()
client := &http.Client{
Transport: &http.Transport{
DisableKeepAlives: true,
},
}
var resp *http.Response
retry.RunWith(&retry.Timer{Timeout: 1 * time.Minute, Wait: 50 * time.Millisecond}, t, func(r *retry.R) {
req, err := http.NewRequest("GET", uri, nil)
if err != nil {
r.Fatal("could not make request to service ", uri)
}
// Only mutate the request after confirming it was created successfully.
if header != nil {
req.Header = header
}
if hostHdr != "" {
req.Host = hostHdr
}
resp, err = client.Do(req)
if err != nil {
r.Fatal("could not make call to service ", uri)
}
})
return resp
}
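
For illustration, how the header-manipulation tests above invoke this helper — a sketch using values from the test above; the host part of the URI becomes the Host header while the TCP connection goes to localhost on the mapped port:

// urlbaseS2 is roughly "http://router.ingress.consul:8080/static-server-2";
// its host part ends up in req.Host, and the request is actually sent
// to localhost:<portRouterMapped>.
resp := mappedHTTPGET(t,
	fmt.Sprintf("%s/debug?env=dump", urlbaseS2),
	portRouterMapped.Int(),
	http.Header{"X-Existing-1": {"original"}},
)
defer resp.Body.Close()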

View File

@@ -121,7 +121,7 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
libassert.AssertUpstreamEndpointStatus(t, adminPort, "v2.static-server.default", "HEALTHY", 1)
// static-client upstream should connect to static-server-v2 because the default subset is set to v2 in the service resolver
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server-v2")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server-v2", "")
},
},
{
@@ -194,7 +194,7 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
libassert.AssertUpstreamEndpointStatus(t, adminPort, "test.static-server.default", "UNHEALTHY", 1)
// static-client upstream should connect to static-server since it is passing
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), libservice.StaticServerServiceName)
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), libservice.StaticServerServiceName, "")
// ###########################
// ## with onlypassing=false
@@ -318,7 +318,7 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
_, appPort := clientConnectProxy.GetAddr()
_, adminPort := clientConnectProxy.GetAdminAddr()
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPort), "static-server-2-v2")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPort), "static-server-2-v2", "")
libassert.AssertUpstreamEndpointStatus(t, adminPort, "v2.static-server-2.default", "HEALTHY", 1)
},
},
@@ -335,7 +335,7 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
if oldVersionTmp.LessThan(libutils.Version_1_14) {
buildOpts.InjectAutoEncryption = false
}
-cluster, _, _ := topology.NewPeeringCluster(t, 1, buildOpts)
+cluster, _, _ := topology.NewPeeringCluster(t, 1, 1, buildOpts)
node := cluster.Agents[0]
client := node.GetClient()

View File

@@ -102,7 +102,7 @@ func TestPeering_Upgrade_ControlPlane_MGW(t *testing.T) {
require.NoError(t, clientSidecarService.Restart())
libassert.AssertUpstreamEndpointStatus(t, adminPort, fmt.Sprintf("static-server.default.%s.external", libtopology.DialingPeerName), "HEALTHY", 1)
libassert.HTTPServiceEchoes(t, "localhost", port, "")
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
}
for _, tc := range tcs {

View File

@@ -100,7 +100,7 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
return serverConnectProxy, nil, func() {}, err
},
extraAssertion: func(clientUpstreamPort int) {
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d/static-server-2", clientUpstreamPort), "static-server-2")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d/static-server-2", clientUpstreamPort), "static-server-2", "")
},
},
{
@@ -301,14 +301,14 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
_, appPorts := clientConnectProxy.GetAddrs()
assertionFn := func() {
// assert traffic can fail over to static-server in the peered cluster and restore to the local static-server
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[0]), "static-server-dialing")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[0]), "static-server-dialing", "")
require.NoError(t, serverConnectProxy.Stop())
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[0]), "static-server")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[0]), "static-server", "")
require.NoError(t, serverConnectProxy.Start())
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[0]), "static-server-dialing")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[0]), "static-server-dialing", "")
// assert peer-static-server resolves to static-server in the peered cluster
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[1]), "static-server")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[1]), "static-server", "")
}
return serverConnectProxy, clientConnectProxy, assertionFn, nil
},
@@ -376,7 +376,7 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
_, adminPort := clientSidecarService.GetAdminAddr()
libassert.AssertUpstreamEndpointStatus(t, adminPort, fmt.Sprintf("static-server.default.%s.external", libtopology.DialingPeerName), "HEALTHY", 1)
libassert.HTTPServiceEchoes(t, "localhost", port, "")
-libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
+libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
// TODO: restart static-server-2's sidecar
tc.extraAssertion(appPort)