Basic Go-based API gateway spinup test (#16278)
* wip, proof of concept, gateway service being registered, don't know how to hit it
* checkpoint
* Fix up API Gateway go tests (#16297)
* checkpoint, getting InvalidDiscoveryChain route protocol does not match targeted service protocol
* checkpoint
* httproute hittable
* tests working, one header test failing
* differentiate services by status code, minor cleanup
* working tests
* updated GetPort interface
* fix getport

---------

Co-authored-by: Andrew Stucki <andrew.stucki@hashicorp.com>
parent 4f3bfdbb91
commit 2c942b089e
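This change threads optional extra listener ports through the whole container test harness: libcluster.New, Cluster.Add, NewConsulContainer, and newContainerRequest now accept a variadic ports ...int so the agent pod exposes those ports, NewGatewayService records the resulting container-to-host port mappings, and the new Service.GetPort(port) method resolves the host-mapped port for a given listener port. The sketch below is a minimal illustration of how a test wires these pieces together, distilled from the tests added in this diff; the test name TestGatewayPortPlumbing and the trimmed-down BuildOptions are invented for illustration, and the leader wait, config-entry registration, and HTTP assertions performed by the real tests are omitted here.

package gateways

import (
    "context"
    "testing"

    libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
    libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
    "github.com/stretchr/testify/require"
)

// TestGatewayPortPlumbing is a hypothetical example (not part of this diff)
// showing how the new variadic port arguments and Service.GetPort fit together.
func TestGatewayPortPlumbing(t *testing.T) {
    listenerPort := 6000

    // Build a one-agent cluster; the extra port is threaded down through
    // Cluster.Add -> NewConsulContainer -> newContainerRequest, so the agent
    // pod exposes 6000/tcp in addition to its usual ports. The real tests also
    // enable gossip/auto-encrypt and wait for a leader before continuing.
    ctx := libcluster.NewBuildContext(t, libcluster.BuildOptions{AllowHTTPAnyway: true})
    conf := libcluster.NewConfigBuilder(ctx).ToAgentConfig(t)
    cluster, err := libcluster.New(t, []libcluster.Config{*conf}, listenerPort)
    require.NoError(t, err)

    // NewGatewayService records a container-port -> host-port mapping for each
    // extra port it is given.
    gateway, err := libservice.NewGatewayService(context.Background(), "api-gateway", "api", cluster.Agents[0], listenerPort)
    require.NoError(t, err)

    // GetPort resolves the host port bound to the gateway's listener port; the
    // real tests then register config entries and hit this port, e.g. with
    // libassert.HTTPServiceEchoes(t, "localhost", hostPort, "").
    hostPort, err := gateway.GetPort(listenerPort)
    require.NoError(t, err)
    t.Logf("gateway listener %d is reachable on host port %d", listenerPort, hostPort)
}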
@@ -22,6 +22,7 @@ type Agent interface {
 	GetConfig() Config
 	GetInfo() AgentInfo
 	GetDatacenter() string
+	GetNetwork() string
 	IsServer() bool
 	RegisterTermination(func() error)
 	Terminate() error
@@ -63,7 +63,7 @@ func NewN(t TestingT, conf Config, count int) (*Cluster, error) {
 //
 // The provided TestingT is used to register a cleanup function to terminate
 // the cluster.
-func New(t TestingT, configs []Config) (*Cluster, error) {
+func New(t TestingT, configs []Config, ports ...int) (*Cluster, error) {
 	id, err := shortid.Generate()
 	if err != nil {
 		return nil, fmt.Errorf("could not generate cluster id: %w", err)
@@ -99,7 +99,7 @@ func New(t TestingT, configs []Config) (*Cluster, error) {
 		_ = cluster.Terminate()
 	})

-	if err := cluster.Add(configs, true); err != nil {
+	if err := cluster.Add(configs, true, ports...); err != nil {
 		return nil, fmt.Errorf("could not start or join all agents: %w", err)
 	}

@@ -115,7 +115,7 @@ func (c *Cluster) AddN(conf Config, count int, join bool) error {
 }

 // Add starts agents with the given configurations and joins them to the existing cluster
-func (c *Cluster) Add(configs []Config, serfJoin bool) (xe error) {
+func (c *Cluster) Add(configs []Config, serfJoin bool, ports ...int) (xe error) {
 	if c.Index == 0 && !serfJoin {
 		return fmt.Errorf("the first call to Cluster.Add must have serfJoin=true")
 	}
@@ -135,6 +135,7 @@ func (c *Cluster) Add(configs []Config, serfJoin bool) (xe error) {
 			context.Background(),
 			conf,
 			c,
+			ports...,
 		)
 		if err != nil {
 			return fmt.Errorf("could not add container index %d: %w", idx, err)
@@ -73,7 +73,7 @@ func (c *consulContainerNode) ClaimAdminPort() (int, error) {
 }

 // NewConsulContainer starts a Consul agent in a container with the given config.
-func NewConsulContainer(ctx context.Context, config Config, cluster *Cluster) (Agent, error) {
+func NewConsulContainer(ctx context.Context, config Config, cluster *Cluster, ports ...int) (Agent, error) {
 	network := cluster.NetworkName
 	index := cluster.Index
 	if config.ScratchDir == "" {
@@ -128,7 +128,7 @@ func NewConsulContainer(ctx context.Context, config Config, cluster *Cluster) (Agent, error) {
 		addtionalNetworks: []string{"bridge", network},
 		hostname:          fmt.Sprintf("agent-%d", index),
 	}
-	podReq, consulReq := newContainerRequest(config, opts)
+	podReq, consulReq := newContainerRequest(config, opts, ports...)

 	// Do some trickery to ensure that partial completion is correctly torn
 	// down, but successful execution is not.
@@ -291,6 +291,10 @@ func NewConsulContainer(ctx context.Context, config Config, cluster *Cluster) (Agent, error) {
 	return node, nil
 }

+func (c *consulContainerNode) GetNetwork() string {
+	return c.network
+}
+
 func (c *consulContainerNode) GetName() string {
 	if c.container == nil {
 		return c.consulReq.Name // TODO: is this safe to do all the time?
@@ -501,7 +505,7 @@ type containerOpts struct {
 	addtionalNetworks []string
 }

-func newContainerRequest(config Config, opts containerOpts) (podRequest, consulRequest testcontainers.ContainerRequest) {
+func newContainerRequest(config Config, opts containerOpts, ports ...int) (podRequest, consulRequest testcontainers.ContainerRequest) {
 	skipReaper := isRYUKDisabled()

 	pod := testcontainers.ContainerRequest{
@@ -541,6 +545,10 @@ func newContainerRequest(config Config, opts containerOpts) (podRequest, consulRequest testcontainers.ContainerRequest) {
 		pod.ExposedPorts = append(pod.ExposedPorts, fmt.Sprintf("%d/tcp", basePort+i))
 	}

+	for _, port := range ports {
+		pod.ExposedPorts = append(pod.ExposedPorts, fmt.Sprintf("%d/tcp", port))
+	}
+
 	// For handshakes like auto-encrypt, it can take 10's of seconds for the agent to become "ready".
 	// If we only wait until the log stream starts, subsequent commands to agents will fail.
 	// TODO: optimize the wait strategy
@@ -2,6 +2,7 @@ package service

 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"path/filepath"
@@ -58,6 +59,10 @@ func (g ConnectContainer) GetAddrs() (string, []int) {
 	return g.ip, g.appPort
 }

+func (g ConnectContainer) GetPort(port int) (int, error) {
+	return 0, errors.New("not implemented")
+}
+
 func (g ConnectContainer) Restart() error {
 	_, err := g.GetStatus()
 	if err != nil {
@@ -68,6 +68,10 @@ func (g exampleContainer) GetAddrs() (string, []int) {
 	return "", nil
 }

+func (g exampleContainer) GetPort(port int) (int, error) {
+	return 0, nil
+}
+
 func (g exampleContainer) Restart() error {
 	return fmt.Errorf("Restart Unimplemented by ConnectContainer")
 }
@@ -121,7 +125,7 @@ func (c exampleContainer) GetStatus() (string, error) {
 	return state.Status, err
 }

-func NewExampleService(ctx context.Context, name string, httpPort int, grpcPort int, node libcluster.Agent) (Service, error) {
+func NewExampleService(ctx context.Context, name string, httpPort int, grpcPort int, node libcluster.Agent, containerArgs ...string) (Service, error) {
 	namePrefix := fmt.Sprintf("%s-service-example-%s", node.GetDatacenter(), name)
 	containerName := utils.RandName(namePrefix)

@@ -135,18 +139,22 @@ func NewExampleService(ctx context.Context, name string, httpPort int, grpcPort int, node libcluster.Agent, containerArgs ...string) (Service, error) {
 		grpcPortStr = strconv.Itoa(grpcPort)
 	)

+	command := []string{
+		"server",
+		"-http-port", httpPortStr,
+		"-grpc-port", grpcPortStr,
+		"-redirect-port", "-disabled",
+	}
+
+	command = append(command, containerArgs...)
+
 	req := testcontainers.ContainerRequest{
 		Image:      hashicorpDockerProxy + "/fortio/fortio",
 		WaitingFor: wait.ForLog("").WithStartupTimeout(10 * time.Second),
 		AutoRemove: false,
 		Name:       containerName,
-		Cmd: []string{
-			"server",
-			"-http-port", httpPortStr,
-			"-grpc-port", grpcPortStr,
-			"-redirect-port", "-disabled",
-		},
-		Env: map[string]string{"FORTIO_NAME": name},
+		Cmd:        command,
+		Env:        map[string]string{"FORTIO_NAME": name},
 	}

 	info, err := cluster.LaunchContainerOnNode(ctx, node, req, []string{httpPortStr, grpcPortStr})
@@ -20,12 +20,13 @@ import (

 // gatewayContainer
 type gatewayContainer struct {
-	ctx         context.Context
-	container   testcontainers.Container
-	ip          string
-	port        int
-	adminPort   int
-	serviceName string
+	ctx          context.Context
+	container    testcontainers.Container
+	ip           string
+	port         int
+	adminPort    int
+	serviceName  string
+	portMappings map[int]int
 }

 var _ Service = (*gatewayContainer)(nil)
@@ -105,6 +106,15 @@ func (g gatewayContainer) GetAdminAddr() (string, int) {
 	return "localhost", g.adminPort
 }

+func (g gatewayContainer) GetPort(port int) (int, error) {
+	p, ok := g.portMappings[port]
+	if !ok {
+		return 0, fmt.Errorf("port does not exist")
+	}
+	return p, nil
+
+}
+
 func (g gatewayContainer) Restart() error {
 	_, err := g.container.State(g.ctx)
 	if err != nil {
@@ -130,7 +140,7 @@ func (g gatewayContainer) GetStatus() (string, error) {
 	return state.Status, err
 }

-func NewGatewayService(ctx context.Context, name string, kind string, node libcluster.Agent) (Service, error) {
+func NewGatewayService(ctx context.Context, name string, kind string, node libcluster.Agent, ports ...int) (Service, error) {
 	nodeConfig := node.GetConfig()
 	if nodeConfig.ScratchDir == "" {
 		return nil, fmt.Errorf("node ScratchDir is required")
@@ -207,21 +217,33 @@ func NewGatewayService(ctx context.Context, name string, kind string, node libcluster.Agent, ports ...int) (Service, error) {
 		adminPortStr = strconv.Itoa(adminPort)
 	)

-	info, err := cluster.LaunchContainerOnNode(ctx, node, req, []string{
+	extraPorts := []string{}
+	for _, port := range ports {
+		extraPorts = append(extraPorts, strconv.Itoa(port))
+	}
+
+	info, err := cluster.LaunchContainerOnNode(ctx, node, req, append(
+		extraPorts,
 		portStr,
 		adminPortStr,
-	})
+	))
 	if err != nil {
 		return nil, err
 	}

+	portMappings := make(map[int]int)
+	for _, port := range ports {
+		portMappings[port] = info.MappedPorts[strconv.Itoa(port)].Int()
+	}
+
 	out := &gatewayContainer{
-		ctx:         ctx,
-		container:   info.Container,
-		ip:          info.IP,
-		port:        info.MappedPorts[portStr].Int(),
-		adminPort:   info.MappedPorts[adminPortStr].Int(),
-		serviceName: name,
+		ctx:          ctx,
+		container:    info.Container,
+		ip:           info.IP,
+		port:         info.MappedPorts[portStr].Int(),
+		adminPort:    info.MappedPorts[adminPortStr].Int(),
+		serviceName:  name,
+		portMappings: portMappings,
 	}

 	return out, nil
@@ -35,7 +35,7 @@ type ServiceOpts struct {
 }

 // createAndRegisterStaticServerAndSidecar register the services and launch static-server containers
-func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, grpcPort int, svc *api.AgentServiceRegistration) (Service, Service, error) {
+func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, grpcPort int, svc *api.AgentServiceRegistration, containerArgs ...string) (Service, Service, error) {
 	// Do some trickery to ensure that partial completion is correctly torn
 	// down, but successful execution is not.
 	var deferClean utils.ResettableDefer
@@ -46,7 +46,7 @@ func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, grpcPort int, svc *api.AgentServiceRegistration, containerArgs ...string) (Service, Service, error) {
 	}

 	// Create a service and proxy instance
-	serverService, err := NewExampleService(context.Background(), svc.ID, svc.Port, grpcPort, node)
+	serverService, err := NewExampleService(context.Background(), svc.ID, svc.Port, grpcPort, node, containerArgs...)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -68,7 +68,7 @@ func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, grpcPort int, svc *api.AgentServiceRegistration, containerArgs ...string) (Service, Service, error) {
 	return serverService, serverConnectProxy, nil
 }

-func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts *ServiceOpts) (Service, Service, error) {
+func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts *ServiceOpts, containerArgs ...string) (Service, Service, error) {
 	// Register the static-server service and sidecar first to prevent race with sidecar
 	// trying to get xDS before it's ready
 	req := &api.AgentServiceRegistration{
@@ -88,7 +88,7 @@ func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts *ServiceOpts, containerArgs ...string) (Service, Service, error) {
 		},
 		Meta:      serviceOpts.Meta,
 	}
-	return createAndRegisterStaticServerAndSidecar(node, serviceOpts.GRPCPort, req)
+	return createAndRegisterStaticServerAndSidecar(node, serviceOpts.GRPCPort, req, containerArgs...)
 }

 func CreateAndRegisterStaticServerAndSidecarWithChecks(node libcluster.Agent, serviceOpts *ServiceOpts) (Service, Service, error) {
@@ -2,6 +2,7 @@ package service

 import (
 	"context"
+
 	"github.com/hashicorp/consul/api"
 )
@@ -13,6 +14,7 @@ type Service interface {
 	Export(partition, peer string, client *api.Client) error
 	GetAddr() (string, int)
 	GetAddrs() (string, []int)
+	GetPort(port int) (int, error)
 	// GetAdminAddr returns the external admin address
 	GetAdminAddr() (string, int)
 	GetLogs() (string, error)
@@ -0,0 +1,250 @@
package gateways

import (
	"context"
	"fmt"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
	"github.com/hashicorp/go-cleanhttp"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"io"
	"net/http"
	"strings"
	"testing"
	"time"
)

var (
	checkTimeout  = 1 * time.Minute
	checkInterval = 1 * time.Second
)

// Creates a gateway service and tests to see if it is routable
func TestAPIGatewayCreate(t *testing.T) {
	t.Skip()
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	listenerPortOne := 6000

	cluster := createCluster(t, listenerPortOne)

	client := cluster.APIClient(0)

	//setup
	apiGateway := &api.APIGatewayConfigEntry{
		Kind: "api-gateway",
		Name: "api-gateway",
		Listeners: []api.APIGatewayListener{
			{
				Port:     listenerPortOne,
				Protocol: "tcp",
			},
		},
	}
	_, _, err := client.ConfigEntries().Set(apiGateway, nil)
	assert.NoError(t, err)

	tcpRoute := &api.TCPRouteConfigEntry{
		Kind: "tcp-route",
		Name: "api-gateway-route",
		Parents: []api.ResourceReference{
			{
				Kind: "api-gateway",
				Name: "api-gateway",
			},
		},
		Services: []api.TCPService{
			{
				Name: libservice.StaticServerServiceName,
			},
		},
	}

	_, _, err = client.ConfigEntries().Set(tcpRoute, nil)
	assert.NoError(t, err)

	// Create a client proxy instance with the server as an upstream
	_, gatewayService := createServices(t, cluster, listenerPortOne)

	//check statuses
	gatewayReady := false
	routeReady := false

	//make sure the gateway/route come online
	require.Eventually(t, func() bool {
		entry, _, err := client.ConfigEntries().Get("api-gateway", "api-gateway", nil)
		assert.NoError(t, err)
		apiEntry := entry.(*api.APIGatewayConfigEntry)
		gatewayReady = isAccepted(apiEntry.Status.Conditions)

		e, _, err := client.ConfigEntries().Get("tcp-route", "api-gateway-route", nil)
		assert.NoError(t, err)
		routeEntry := e.(*api.TCPRouteConfigEntry)
		routeReady = isBound(routeEntry.Status.Conditions)

		return gatewayReady && routeReady
	}, time.Second*10, time.Second*1)

	port, err := gatewayService.GetPort(listenerPortOne)
	assert.NoError(t, err)
	libassert.HTTPServiceEchoes(t, "localhost", port, "")
}

func isAccepted(conditions []api.Condition) bool {
	return conditionStatusIsValue("Accepted", "True", conditions)
}

func isBound(conditions []api.Condition) bool {
	return conditionStatusIsValue("Bound", "True", conditions)
}

func conditionStatusIsValue(typeName string, statusValue string, conditions []api.Condition) bool {
	for _, c := range conditions {
		if c.Type == typeName && c.Status == statusValue {
			return true
		}
	}
	return false
}

// TODO this code is just copy pasted from elsewhere, it is likely we will need to modify it some
func createCluster(t *testing.T, ports ...int) *libcluster.Cluster {
	opts := libcluster.BuildOptions{
		InjectAutoEncryption:   true,
		InjectGossipEncryption: true,
		AllowHTTPAnyway:        true,
	}
	ctx := libcluster.NewBuildContext(t, opts)

	conf := libcluster.NewConfigBuilder(ctx).
		ToAgentConfig(t)
	t.Logf("Cluster config:\n%s", conf.JSON)

	configs := []libcluster.Config{*conf}

	cluster, err := libcluster.New(t, configs, ports...)
	require.NoError(t, err)

	node := cluster.Agents[0]
	client := node.GetClient()

	libcluster.WaitForLeader(t, cluster, client)
	libcluster.WaitForMembers(t, client, 1)

	// Default Proxy Settings
	ok, err := utils.ApplyDefaultProxySettings(client)
	require.NoError(t, err)
	require.True(t, ok)

	require.NoError(t, err)

	return cluster
}

func createService(t *testing.T, cluster *libcluster.Cluster, serviceOpts *libservice.ServiceOpts, containerArgs []string) libservice.Service {
	node := cluster.Agents[0]
	client := node.GetClient()
	// Create a service and proxy instance
	service, _, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts, containerArgs...)
	assert.NoError(t, err)

	libassert.CatalogServiceExists(t, client, serviceOpts.Name+"-sidecar-proxy")
	libassert.CatalogServiceExists(t, client, serviceOpts.Name)

	return service

}
func createServices(t *testing.T, cluster *libcluster.Cluster, ports ...int) (libservice.Service, libservice.Service) {
	node := cluster.Agents[0]
	client := node.GetClient()
	// Create a service and proxy instance
	serviceOpts := &libservice.ServiceOpts{
		Name:     libservice.StaticServerServiceName,
		ID:       "static-server",
		HTTPPort: 8080,
		GRPCPort: 8079,
	}

	clientConnectProxy := createService(t, cluster, serviceOpts, nil)

	gatewayService, err := libservice.NewGatewayService(context.Background(), "api-gateway", "api", cluster.Agents[0], ports...)
	require.NoError(t, err)
	libassert.CatalogServiceExists(t, client, "api-gateway")

	return clientConnectProxy, gatewayService
}

// checkRoute, customized version of libassert.RouteEchos to allow for headers/distinguishing between the server instances

type checkOptions struct {
	debug      bool
	statusCode int
	testName   string
}

func checkRoute(t *testing.T, ip string, port int, path string, headers map[string]string, expected checkOptions) {
	const phrase = "hello"

	failer := func() *retry.Timer {
		return &retry.Timer{Timeout: time.Second * 60, Wait: time.Second * 60}
	}

	client := cleanhttp.DefaultClient()
	url := fmt.Sprintf("http://%s:%d", ip, port)

	if path != "" {
		url += "/" + path
	}

	retry.RunWith(failer(), t, func(r *retry.R) {
		t.Logf("making call to %s", url)
		reader := strings.NewReader(phrase)
		req, err := http.NewRequest("POST", url, reader)
		assert.NoError(t, err)
		headers["content-type"] = "text/plain"

		for k, v := range headers {
			req.Header.Set(k, v)

			if k == "Host" {
				req.Host = v
			}
		}
		res, err := client.Do(req)
		if err != nil {
			t.Log(err)
			r.Fatal("could not make call to service ", url)
		}
		defer res.Body.Close()

		body, err := io.ReadAll(res.Body)
		if err != nil {
			r.Fatal("could not read response body ", url)
		}

		assert.Equal(t, expected.statusCode, res.StatusCode)
		if expected.statusCode != res.StatusCode {
			r.Fatal("unexpected response code returned")
		}

		//if debug is expected, debug should be in the response body
		assert.Equal(t, expected.debug, strings.Contains(string(body), "debug"))
		if expected.statusCode != res.StatusCode {
			r.Fatal("unexpected response body returned")
		}

		if !strings.Contains(string(body), phrase) {
			r.Fatal("received an incorrect response ", string(body))
		}

	})
}
@@ -0,0 +1,258 @@
package gateways

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"github.com/hashicorp/consul/api"
	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"testing"
	"time"
)

func getNamespace() string {
	return ""
}

// randomName generates a random name of n length with the provided
// prefix. If prefix is omitted, the then entire name is random char.
func randomName(prefix string, n int) string {
	if n == 0 {
		n = 32
	}
	if len(prefix) >= n {
		return prefix
	}
	p := make([]byte, n)
	rand.Read(p)
	return fmt.Sprintf("%s-%s", prefix, hex.EncodeToString(p))[:n]
}

func TestHTTPRouteFlattening(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}
	t.Parallel()

	//infrastructure set up
	listenerPort := 6000
	//create cluster
	cluster := createCluster(t, listenerPort)
	client := cluster.Agents[0].GetClient()
	service1ResponseCode := 200
	service2ResponseCode := 418
	serviceOne := createService(t, cluster, &libservice.ServiceOpts{
		Name:     "service1",
		ID:       "service1",
		HTTPPort: 8080,
		GRPCPort: 8079,
	}, []string{
		//customizes response code so we can distinguish between which service is responding
		"-echo-server-default-params", fmt.Sprintf("status=%d", service1ResponseCode),
	})
	serviceTwo := createService(t, cluster, &libservice.ServiceOpts{
		Name:     "service2",
		ID:       "service2",
		HTTPPort: 8081,
		GRPCPort: 8082,
	}, []string{
		"-echo-server-default-params", fmt.Sprintf("status=%d", service2ResponseCode),
	},
	)

	//TODO this should only matter in consul enterprise I believe?
	namespace := getNamespace()
	gatewayName := randomName("gw", 16)
	routeOneName := randomName("route", 16)
	routeTwoName := randomName("route", 16)
	path1 := "/"
	path2 := "/v2"

	//write config entries
	proxyDefaults := &api.ProxyConfigEntry{
		Kind:      api.ProxyDefaults,
		Name:      api.ProxyConfigGlobal,
		Namespace: namespace,
		Config: map[string]interface{}{
			"protocol": "http",
		},
	}

	_, _, err := client.ConfigEntries().Set(proxyDefaults, nil)
	assert.NoError(t, err)

	apiGateway := &api.APIGatewayConfigEntry{
		Kind: "api-gateway",
		Name: gatewayName,
		Listeners: []api.APIGatewayListener{
			{
				Name:     "listener",
				Port:     listenerPort,
				Protocol: "http",
			},
		},
	}

	routeOne := &api.HTTPRouteConfigEntry{
		Kind: api.HTTPRoute,
		Name: routeOneName,
		Parents: []api.ResourceReference{
			{
				Kind:      api.APIGateway,
				Name:      gatewayName,
				Namespace: namespace,
			},
		},
		Hostnames: []string{
			"test.foo",
			"test.example",
		},
		Namespace: namespace,
		Rules: []api.HTTPRouteRule{
			{
				Services: []api.HTTPService{
					{
						Name:      serviceOne.GetServiceName(),
						Namespace: namespace,
					},
				},
				Matches: []api.HTTPMatch{
					{
						Path: api.HTTPPathMatch{
							Match: api.HTTPPathMatchPrefix,
							Value: path1,
						},
					},
				},
			},
		},
	}

	routeTwo := &api.HTTPRouteConfigEntry{
		Kind: api.HTTPRoute,
		Name: routeTwoName,
		Parents: []api.ResourceReference{
			{
				Kind:      api.APIGateway,
				Name:      gatewayName,
				Namespace: namespace,
			},
		},
		Hostnames: []string{
			"test.foo",
		},
		Namespace: namespace,
		Rules: []api.HTTPRouteRule{
			{
				Services: []api.HTTPService{
					{
						Name:      serviceTwo.GetServiceName(),
						Namespace: namespace,
					},
				},
				Matches: []api.HTTPMatch{
					{
						Path: api.HTTPPathMatch{
							Match: api.HTTPPathMatchPrefix,
							Value: path2,
						},
					},
					{
						Headers: []api.HTTPHeaderMatch{{
							Match: api.HTTPHeaderMatchExact,
							Name:  "x-v2",
							Value: "v2",
						}},
					},
				},
			},
		},
	}

	_, _, err = client.ConfigEntries().Set(apiGateway, nil)
	assert.NoError(t, err)
	_, _, err = client.ConfigEntries().Set(routeOne, nil)
	assert.NoError(t, err)
	_, _, err = client.ConfigEntries().Set(routeTwo, nil)
	assert.NoError(t, err)

	//create gateway service
	gatewayService, err := libservice.NewGatewayService(context.Background(), gatewayName, "api", cluster.Agents[0], listenerPort)
	require.NoError(t, err)
	libassert.CatalogServiceExists(t, client, gatewayName)

	//make sure config entries have been properly created
	require.Eventually(t, func() bool {
		entry, _, err := client.ConfigEntries().Get(api.APIGateway, gatewayName, &api.QueryOptions{Namespace: namespace})
		assert.NoError(t, err)
		if entry == nil {
			return false
		}
		apiEntry := entry.(*api.APIGatewayConfigEntry)
		t.Log(entry)
		return isAccepted(apiEntry.Status.Conditions)
	}, time.Second*10, time.Second*1)

	require.Eventually(t, func() bool {
		entry, _, err := client.ConfigEntries().Get(api.HTTPRoute, routeOneName, &api.QueryOptions{Namespace: namespace})
		assert.NoError(t, err)
		if entry == nil {
			return false
		}

		apiEntry := entry.(*api.HTTPRouteConfigEntry)
		t.Log(entry)
		return isBound(apiEntry.Status.Conditions)
	}, time.Second*10, time.Second*1)

	require.Eventually(t, func() bool {
		entry, _, err := client.ConfigEntries().Get(api.HTTPRoute, routeTwoName, nil)
		assert.NoError(t, err)
		if entry == nil {
			return false
		}

		apiEntry := entry.(*api.HTTPRouteConfigEntry)
		return isBound(apiEntry.Status.Conditions)
	}, time.Second*10, time.Second*1)

	//gateway resolves routes
	ip := "localhost"
	gatewayPort, err := gatewayService.GetPort(listenerPort)
	assert.NoError(t, err)

	//Same v2 path with and without header
	checkRoute(t, ip, gatewayPort, "v2", map[string]string{
		"Host": "test.foo",
		"x-v2": "v2",
	}, checkOptions{statusCode: service2ResponseCode, testName: "service2 header and path"})
	checkRoute(t, ip, gatewayPort, "v2", map[string]string{
		"Host": "test.foo",
	}, checkOptions{statusCode: service2ResponseCode, testName: "service2 just path match"})

	////v1 path with the header
	checkRoute(t, ip, gatewayPort, "check", map[string]string{
		"Host": "test.foo",
		"x-v2": "v2",
	}, checkOptions{statusCode: service2ResponseCode, testName: "service2 just header match"})

	checkRoute(t, ip, gatewayPort, "v2/path/value", map[string]string{
		"Host": "test.foo",
		"x-v2": "v2",
	}, checkOptions{statusCode: service2ResponseCode, testName: "service2 v2 with path"})

	//hit service 1 by hitting root path
	checkRoute(t, ip, gatewayPort, "", map[string]string{
		"Host": "test.foo",
	}, checkOptions{debug: false, statusCode: service1ResponseCode, testName: "service1 root prefix"})

	//hit service 1 by hitting v2 path with v1 hostname
	checkRoute(t, ip, gatewayPort, "v2", map[string]string{
		"Host": "test.example",
	}, checkOptions{debug: false, statusCode: service1ResponseCode, testName: "service1, v2 path with v2 hostname"})

}