TProxy integration test (#17103)
* TProxy integration test

* Fix GHA compatibility integration test command

Previously, when test splitting allocated multiple test directories to a runner, the workflow ran `go test "./test/dir1 ./test/dir2"`, which fails with a directory-not-found error because the quoted, space-joined list is passed to `go test` as a single package path. This fixes that by passing each directory as its own argument.
parent 8082ca612c
commit 69e9e21bf4
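To make the quoting bug concrete, here is a minimal sketch (not part of the commit; the directory names are hypothetical) of how a quoted, space-joined list reaches the command as one argument, while an unquoted expansion becomes separate arguments:

package main

import (
    "fmt"
    "os/exec"
)

func main() {
    // Broken: the whole quoted string is ONE argv element, so `go test`
    // looks for a single package literally named "./test/dir1 ./test/dir2".
    broken := exec.Command("go", "test", "./test/dir1 ./test/dir2")

    // Fixed: each directory is its own argv element, which is what dropping
    // the quotes around ${{ matrix.test-cases }} achieves in the workflow.
    fixed := exec.Command("go", "test", "./test/dir1", "./test/dir2")

    fmt.Println(len(broken.Args), len(fixed.Args)) // prints: 3 4
}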
@@ -309,7 +309,7 @@ jobs:
           docker run --rm ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local consul version
           echo "Running $(sed 's,|, ,g' <<< "${{ matrix.test-cases }}" |wc -w) subtests"
           # shellcheck disable=SC2001
-          sed 's,|,\n,g' <<< "${{ matrix.test-cases }}"
+          sed 's, ,\n,g' <<< "${{ matrix.test-cases }}"
           go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \
             --raw-command \
             --format=short-verbose \
@@ -321,7 +321,7 @@ jobs:
             -tags "${{ env.GOTAGS }}" \
             -timeout=30m \
             -json \
-            "${{ matrix.test-cases }}" \
+            ${{ matrix.test-cases }} \
             --target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \
             --target-version local \
             --latest-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \
@@ -5,4 +5,17 @@ ARG ENVOY_VERSION
 FROM ${CONSUL_IMAGE} as consul
 
 FROM docker.mirror.hashicorp.services/envoyproxy/envoy:v${ENVOY_VERSION}
+
+# Install iptables and sudo, needed for tproxy.
+RUN apt update -y \
+    && apt install -y iptables sudo curl dnsutils \
+    && adduser envoy sudo \
+    && echo 'envoy ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+
+COPY tproxy-startup.sh /bin/tproxy-startup.sh
+RUN chmod +x /bin/tproxy-startup.sh \
+    && chown envoy:envoy /bin/tproxy-startup.sh
+
 COPY --from=consul /bin/consul /bin/consul
+
+USER envoy
@@ -0,0 +1,15 @@
+#!/usr/bin/env sh
+
+set -ex
+
+# HACK: UID of consul in the consul-client container
+# This is conveniently also the UID of apt in the envoy container
+CONSUL_UID=100
+ENVOY_UID=$(id -u)
+
+sudo consul connect redirect-traffic \
+    -proxy-uid $ENVOY_UID \
+    -exclude-uid $CONSUL_UID \
+    $REDIRECT_TRAFFIC_ARGS
+
+exec "$@"
@@ -21,7 +21,10 @@ const (
 )
 
 //go:embed assets/Dockerfile-consul-envoy
-var consulEnvoyDockerfile string
+var consulEnvoyDockerfile []byte
+
+//go:embed assets/tproxy-startup.sh
+var tproxyStartupScript []byte
 
 // getDevContainerDockerfile returns the necessary context to build a combined consul and
 // envoy image for running "consul connect envoy ..."
@@ -29,18 +32,29 @@ func getDevContainerDockerfile() (testcontainers.FromDockerfile, error) {
     var buf bytes.Buffer
     tw := tar.NewWriter(&buf)
 
-    dockerfileBytes := []byte(consulEnvoyDockerfile)
-
     hdr := &tar.Header{
         Name: "Dockerfile",
         Mode: 0600,
-        Size: int64(len(dockerfileBytes)),
+        Size: int64(len(consulEnvoyDockerfile)),
     }
     if err := tw.WriteHeader(hdr); err != nil {
         return testcontainers.FromDockerfile{}, err
     }
 
-    if _, err := tw.Write(dockerfileBytes); err != nil {
+    if _, err := tw.Write(consulEnvoyDockerfile); err != nil {
         return testcontainers.FromDockerfile{}, err
     }
 
+    hdr = &tar.Header{
+        Name: "tproxy-startup.sh",
+        Mode: 0600,
+        Size: int64(len(tproxyStartupScript)),
+    }
+    if err := tw.WriteHeader(hdr); err != nil {
+        return testcontainers.FromDockerfile{}, err
+    }
+
+    if _, err := tw.Write(tproxyStartupScript); err != nil {
+        return testcontainers.FromDockerfile{}, err
+    }
+
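Aside: the hunk above repeats the same header-then-write sequence for each file, and switching the embed from string to []byte lets both assets share one write path. A self-contained sketch of the pattern (stdlib only; the addFile helper and the buildContext shape are illustrative assumptions, since the diff cuts off before the archive is handed to testcontainers):

package main

import (
    "archive/tar"
    "bytes"
    "fmt"
)

// addFile writes one in-memory file into the tar stream: a header carrying
// the name, mode, and exact size, followed by the file bytes.
func addFile(tw *tar.Writer, name string, data []byte) error {
    hdr := &tar.Header{Name: name, Mode: 0600, Size: int64(len(data))}
    if err := tw.WriteHeader(hdr); err != nil {
        return err
    }
    _, err := tw.Write(data)
    return err
}

// buildContext packs a Dockerfile and a startup script into a single
// in-memory tar archive, the shape a Docker image build context takes.
func buildContext(dockerfile, script []byte) (*bytes.Buffer, error) {
    var buf bytes.Buffer
    tw := tar.NewWriter(&buf)
    if err := addFile(tw, "Dockerfile", dockerfile); err != nil {
        return nil, err
    }
    if err := addFile(tw, "tproxy-startup.sh", script); err != nil {
        return nil, err
    }
    // Close flushes the tar footer; without it the archive is truncated.
    return &buf, tw.Close()
}

func main() {
    buf, err := buildContext([]byte("FROM scratch\n"), []byte("#!/bin/sh\n"))
    if err != nil {
        panic(err)
    }
    fmt.Println(buf.Len(), "bytes of tar build context")
}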
@@ -10,6 +10,7 @@ import (
     "io"
     "path/filepath"
     "strconv"
+    "strings"
     "time"
 
     "github.com/testcontainers/testcontainers-go"
@@ -146,9 +147,10 @@ func (g ConnectContainer) GetStatus() (string, error) {
 }
 
 type SidecarConfig struct {
-    Name      string
-    ServiceID string
-    Namespace string
+    Name         string
+    ServiceID    string
+    Namespace    string
+    EnableTProxy bool
 }
 
 // NewConnectService returns a container that runs envoy sidecar, launched by
@@ -199,6 +201,26 @@ func NewConnectService(ctx context.Context, sidecarCfg SidecarConfig, serviceBin
         Env: make(map[string]string),
     }
 
+    if sidecarCfg.EnableTProxy {
+        req.Entrypoint = []string{"/bin/tproxy-startup.sh"}
+        req.Env["REDIRECT_TRAFFIC_ARGS"] = strings.Join(
+            []string{
+                "-exclude-inbound-port", fmt.Sprint(internalAdminPort),
+                "-exclude-inbound-port", "8300",
+                "-exclude-inbound-port", "8301",
+                "-exclude-inbound-port", "8302",
+                "-exclude-inbound-port", "8500",
+                "-exclude-inbound-port", "8502",
+                "-exclude-inbound-port", "8600",
+                "-consul-dns-ip", "127.0.0.1",
+                "-consul-dns-port", "8600",
+                "-proxy-id", fmt.Sprintf("%s-sidecar-proxy", sidecarCfg.ServiceID),
+            },
+            " ",
+        )
+        req.CapAdd = append(req.CapAdd, "NET_ADMIN")
+    }
+
     nodeInfo := node.GetInfo()
     if nodeInfo.UseTLSForAPI || nodeInfo.UseTLSForGRPC {
         req.Mounts = append(req.Mounts, testcontainers.ContainerMount{
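Note the contract between this hunk and tproxy-startup.sh above: the flags are joined into a single space-separated env var, and the script expands $REDIRECT_TRAFFIC_ARGS unquoted, so the shell word-splits it back into individual flags. A small sketch of that round-trip (not from the commit; the flag list is shortened and the proxy ID is hypothetical):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Shortened version of the flag list built in NewConnectService.
    args := strings.Join([]string{
        "-exclude-inbound-port", "19000",
        "-consul-dns-ip", "127.0.0.1",
        "-consul-dns-port", "8600",
        "-proxy-id", "static-client-sidecar-proxy",
    }, " ")

    // tproxy-startup.sh runs `sudo consul connect redirect-traffic ...
    // $REDIRECT_TRAFFIC_ARGS` with the variable unquoted, so this single
    // string becomes eight separate CLI arguments again.
    fmt.Println("REDIRECT_TRAFFIC_ARGS=" + args)
}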
@@ -27,7 +27,12 @@ type Checks struct {
 }
 
 type SidecarService struct {
-    Port int
+    Port  int
+    Proxy ConnectProxy
 }
 
+type ConnectProxy struct {
+    Mode string
+}
+
 type ServiceOpts struct {
@@ -63,9 +68,10 @@ func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, httpPort int
         _ = serverService.Terminate()
     })
     sidecarCfg := SidecarConfig{
-        Name:      fmt.Sprintf("%s-sidecar", svc.ID),
-        ServiceID: svc.ID,
-        Namespace: svc.Namespace,
+        Name:         fmt.Sprintf("%s-sidecar", svc.ID),
+        ServiceID:    svc.ID,
+        Namespace:    svc.Namespace,
+        EnableTProxy: svc.Proxy != nil && svc.Proxy.Mode == "transparent",
     }
     serverConnectProxy, err := NewConnectService(context.Background(), sidecarCfg, []int{svc.Port}, node) // bindPort not used
     if err != nil {
@@ -102,7 +108,9 @@ func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts
         Port: p,
         Connect: &api.AgentServiceConnect{
             SidecarService: &api.AgentServiceRegistration{
-                Proxy: &api.AgentServiceConnectProxyConfig{},
+                Proxy: &api.AgentServiceConnectProxyConfig{
+                    Mode: api.ProxyMode(serviceOpts.Connect.Proxy.Mode),
+                },
             },
         },
         Namespace: serviceOpts.Namespace,
@@ -141,15 +149,34 @@ func CreateAndRegisterStaticClientSidecar(
     node libcluster.Agent,
     peerName string,
     localMeshGateway bool,
+    enableTProxy bool,
 ) (*ConnectContainer, error) {
     // Do some trickery to ensure that partial completion is correctly torn
     // down, but successful execution is not.
     var deferClean utils.ResettableDefer
     defer deferClean.Execute()
 
-    mgwMode := api.MeshGatewayModeRemote
-    if localMeshGateway {
-        mgwMode = api.MeshGatewayModeLocal
+    var proxy *api.AgentServiceConnectProxyConfig
+    if enableTProxy {
+        proxy = &api.AgentServiceConnectProxyConfig{
+            Mode: "transparent",
+        }
+    } else {
+        mgwMode := api.MeshGatewayModeRemote
+        if localMeshGateway {
+            mgwMode = api.MeshGatewayModeLocal
+        }
+        proxy = &api.AgentServiceConnectProxyConfig{
+            Upstreams: []api.Upstream{{
+                DestinationName:  StaticServerServiceName,
+                DestinationPeer:  peerName,
+                LocalBindAddress: "0.0.0.0",
+                LocalBindPort:    libcluster.ServiceUpstreamLocalBindPort,
+                MeshGateway: api.MeshGatewayConfig{
+                    Mode: mgwMode,
+                },
+            }},
+        }
     }
 
     // Register the static-client service and sidecar first to prevent race with sidecar
@@ -159,17 +186,7 @@ func CreateAndRegisterStaticClientSidecar(
         Port: 8080,
         Connect: &api.AgentServiceConnect{
             SidecarService: &api.AgentServiceRegistration{
-                Proxy: &api.AgentServiceConnectProxyConfig{
-                    Upstreams: []api.Upstream{{
-                        DestinationName:  StaticServerServiceName,
-                        DestinationPeer:  peerName,
-                        LocalBindAddress: "0.0.0.0",
-                        LocalBindPort:    libcluster.ServiceUpstreamLocalBindPort,
-                        MeshGateway: api.MeshGatewayConfig{
-                            Mode: mgwMode,
-                        },
-                    }},
-                },
+                Proxy: proxy,
             },
         },
     }
@@ -180,8 +197,9 @@ func CreateAndRegisterStaticClientSidecar(
 
     // Create a service and proxy instance
     sidecarCfg := SidecarConfig{
-        Name:      fmt.Sprintf("%s-sidecar", StaticClientServiceName),
-        ServiceID: StaticClientServiceName,
+        Name:         fmt.Sprintf("%s-sidecar", StaticClientServiceName),
+        ServiceID:    StaticClientServiceName,
+        EnableTProxy: enableTProxy,
     }
 
     clientConnectProxy, err := NewConnectService(context.Background(), sidecarCfg, []int{libcluster.ServiceUpstreamLocalBindPort}, node)
@@ -140,7 +140,7 @@ func BasicPeeringTwoClustersSetup(
 
     // Create a service and proxy instance
     var err error
-    clientSidecarService, err = libservice.CreateAndRegisterStaticClientSidecar(clientNode, DialingPeerName, true)
+    clientSidecarService, err = libservice.CreateAndRegisterStaticClientSidecar(clientNode, DialingPeerName, true, false)
     require.NoError(t, err)
 
     libassert.CatalogServiceExists(t, dialingClient, "static-client-sidecar-proxy", nil)
@@ -45,7 +45,7 @@ func CreateServices(t *testing.T, cluster *libcluster.Cluster) (libservice.Servi
     libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName, nil)
 
     // Create a client proxy instance with the server as an upstream
-    clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false)
+    clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false, false)
     require.NoError(t, err)
 
     libassert.CatalogServiceExists(t, client, fmt.Sprintf("%s-sidecar-proxy", libservice.StaticClientServiceName), nil)
@@ -71,7 +71,7 @@ func createServices(t *testing.T, cluster *libcluster.Cluster) libservice.Servic
     libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName, nil)
 
     // Create a client proxy instance with the server as an upstream
-    clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false)
+    clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false, false)
     require.NoError(t, err)
 
     libassert.CatalogServiceExists(t, client, "static-client-sidecar-proxy", nil)
@@ -0,0 +1,131 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tproxy
+
+import (
+    "context"
+    "testing"
+    "time"
+
+    "github.com/stretchr/testify/require"
+
+    "github.com/hashicorp/consul/sdk/testutil/retry"
+    libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
+    libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
+    libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
+    "github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
+)
+
+// TestTProxyService
+// This test makes sure two services in the same datacenter have connectivity
+// with transparent proxy enabled.
+//
+// Steps:
+//   - Create a single server cluster.
+//   - Create the example static-server and sidecar containers, then register them both with Consul
+//   - Create an example static-client sidecar, then register both the service and sidecar with Consul
+//   - Make sure a request from static-client to the virtual address (<svc>.virtual.consul) returns a
+//     response from the upstream.
+func TestTProxyService(t *testing.T) {
+    t.Parallel()
+
+    cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
+        NumServers:                1,
+        NumClients:                2,
+        ApplyDefaultProxySettings: true,
+        BuildOpts: &libcluster.BuildOptions{
+            Datacenter:             "dc1",
+            InjectAutoEncryption:   true,
+            InjectGossipEncryption: true,
+            // TODO(rb): fix the test to not need the service/envoy stack to use :8500
+            AllowHTTPAnyway: true,
+        },
+    })
+
+    clientService := createServices(t, cluster)
+    _, adminPort := clientService.GetAdminAddr()
+
+    libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "HEALTHY", 1)
+    libassert.AssertContainerState(t, clientService, "running")
+    assertHTTPRequestToVirtualAddress(t, clientService)
+}
+
+func assertHTTPRequestToVirtualAddress(t *testing.T, clientService libservice.Service) {
+    timer := &retry.Timer{Timeout: 120 * time.Second, Wait: 500 * time.Millisecond}
+
+    retry.RunWith(timer, t, func(r *retry.R) {
+        // Test that we can make a request to the virtual ip to reach the upstream.
+        //
+        // This uses a workaround for DNS because I had trouble modifying
+        // /etc/resolv.conf. There is a --dns option to docker run, but it
+        // didn't seem to be exposed via testcontainers. I'm not sure if it would
+        // do what I want. In any case, Docker sets up /etc/resolv.conf for certain
+        // functionality so it seems better to leave DNS alone.
+        //
+        // But, that means DNS queries aren't redirected to Consul out of the box.
+        // As a workaround, we `dig @localhost:53` which is iptables-redirected to
+        // localhost:8600 where the Consul client responds with the virtual ip.
+        //
+        // In tproxy tests, Envoy is not configured with a unique listener for each
+        // upstream. This means the usual approach for non-tproxy tests doesn't
+        // work - where we send the request to a host address mapped in to Envoy's
+        // upstream listener. Instead, we exec into the container and run curl.
+        //
+        // We must make this request with a non-envoy user. The envoy and consul
+        // users are excluded from traffic redirection rules, so instead we
+        // make the request as root.
+        out, err := clientService.Exec(
+            context.Background(),
+            []string{"sudo", "sh", "-c", `
+                set -e
+                VIRTUAL=$(dig @localhost +short static-server.virtual.consul)
+                echo "Virtual IP: $VIRTUAL"
+                curl -s "$VIRTUAL/debug?env=dump"
+                `,
+            },
+        )
+        t.Logf("making call to upstream\nerr = %v\nout = %s", err, out)
+        require.NoError(r, err)
+        require.Regexp(r, `Virtual IP: 240.0.0.\d+`, out)
+        require.Contains(r, out, "FORTIO_NAME=static-server")
+    })
+}
+
+func createServices(t *testing.T, cluster *libcluster.Cluster) libservice.Service {
+    {
+        node := cluster.Agents[1]
+        client := node.GetClient()
+        // Create a service and proxy instance
+        serviceOpts := &libservice.ServiceOpts{
+            Name:     libservice.StaticServerServiceName,
+            ID:       "static-server",
+            HTTPPort: 8080,
+            GRPCPort: 8079,
+            Connect: libservice.SidecarService{
+                Proxy: libservice.ConnectProxy{
+                    Mode: "transparent",
+                },
+            },
+        }
+
+        // Create a service and proxy instance
+        _, _, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts)
+        require.NoError(t, err)
+
+        libassert.CatalogServiceExists(t, client, "static-server-sidecar-proxy", nil)
+        libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName, nil)
+    }
+
+    {
+        node := cluster.Agents[2]
+        client := node.GetClient()
+
+        // Create a client proxy instance with the server as an upstream
+        clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false, true)
+        require.NoError(t, err)
+
+        libassert.CatalogServiceExists(t, client, "static-client-sidecar-proxy", nil)
+        return clientConnectProxy
+    }
+}
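Why the test expects `Virtual IP: 240.0.0.\d+`: Consul allocates service virtual IPs from the reserved 240.0.0.0/4 block (starting at 240.0.0.1), so the dig lookup of static-server.virtual.consul should return one of those addresses. The same check in isolation (the captured output string is a hypothetical example):

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Hypothetical output captured from the exec'd dig/curl script above.
    out := "Virtual IP: 240.0.0.3\nFORTIO_NAME=static-server"

    // The same pattern the test asserts via require.Regexp.
    re := regexp.MustCompile(`Virtual IP: 240.0.0.\d+`)
    fmt.Println(re.MatchString(out)) // prints: true
}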
@@ -91,7 +91,7 @@ func TestIngressGateway_GRPC_UpgradeToTarget_fromLatest(t *testing.T) {
     serverNodes := cluster.Servers()
     require.NoError(t, err)
     require.True(t, len(serverNodes) > 0)
-    staticClientSvcSidecar, err := libservice.CreateAndRegisterStaticClientSidecar(serverNodes[0], "", true)
+    staticClientSvcSidecar, err := libservice.CreateAndRegisterStaticClientSidecar(serverNodes[0], "", true, false)
     require.NoError(t, err)
 
     tests := func(t *testing.T) {
@@ -344,7 +344,7 @@ func setup(t *testing.T) (*libcluster.Cluster, libservice.Service, libservice.Se
     require.NoError(t, err)
 
     // Create a client proxy instance with the server as an upstream
-    staticClientProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false)
+    staticClientProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false, false)
     require.NoError(t, err)
 
     require.NoError(t, err)
@@ -80,7 +80,7 @@ func TestPeering_ControlPlaneMGW(t *testing.T) {
         "upstream_cx_total", 1)
     require.NoError(t, accepting.Gateway.Start())
 
-    clientSidecarService, err := libservice.CreateAndRegisterStaticClientSidecar(dialingCluster.Servers()[0], libtopology.DialingPeerName, true)
+    clientSidecarService, err := libservice.CreateAndRegisterStaticClientSidecar(dialingCluster.Servers()[0], libtopology.DialingPeerName, true, false)
     require.NoError(t, err)
     _, port := clientSidecarService.GetAddr()
     _, adminPort := clientSidecarService.GetAdminAddr()
@@ -324,7 +324,7 @@ func peeringUpgrade(t *testing.T, accepting, dialing *libtopology.BuiltCluster,
 func peeringPostUpgradeValidation(t *testing.T, dialing *libtopology.BuiltCluster) {
     t.Helper()
 
-    clientSidecarService, err := libservice.CreateAndRegisterStaticClientSidecar(dialing.Cluster.Servers()[0], libtopology.DialingPeerName, true)
+    clientSidecarService, err := libservice.CreateAndRegisterStaticClientSidecar(dialing.Cluster.Servers()[0], libtopology.DialingPeerName, true, false)
     require.NoError(t, err)
     _, port := clientSidecarService.GetAddr()
     _, adminPort := clientSidecarService.GetAdminAddr()
@@ -57,7 +57,7 @@ func TestPeering_WanFedSecondaryDC(t *testing.T) {
     require.NoError(t, service.Export("default", "alpha-to-secondary", c3Agent.GetClient()))
 
     // Create a testing sidecar to proxy requests through
-    clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(c2Agent, "secondary-to-alpha", false)
+    clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(c2Agent, "secondary-to-alpha", false, false)
     require.NoError(t, err)
     libassert.CatalogServiceExists(t, c2Agent.GetClient(), "static-client-sidecar-proxy", nil)