integ test: remove hardcoded upstream local bind port and max number of envoy sidecars (#16092)

cskh 2023-01-27 10:19:10 -05:00 committed by GitHub
parent 77d805ceb6
commit c5f771b87c
5 changed files with 32 additions and 21 deletions


@@ -16,7 +16,7 @@ type Agent interface {
 	GetClient() *api.Client
 	GetName() string
 	GetPod() testcontainers.Container
-	ClaimAdminPort() int
+	ClaimAdminPort() (int, error)
 	GetConfig() Config
 	GetInfo() AgentInfo
 	GetDatacenter() string


@@ -23,6 +23,10 @@ import (
 const bootLogLine = "Consul agent running"
 const disableRYUKEnv = "TESTCONTAINERS_RYUK_DISABLED"
 
+// Exposed ports info
+const MaxEnvoyOnNode = 10 // the max number of Envoy sidecars that can run alongside the agent; admin ports start at 19000
+const ServiceUpstreamLocalBindPort = 5000 // local bind port of a service's upstream
+
 // consulContainerNode implements the Agent interface by running a Consul agent
 // in a container.
 type consulContainerNode struct {
@@ -55,10 +59,14 @@ func (c *consulContainerNode) GetPod() testcontainers.Container {
 	return c.pod
 }
 
-func (c *consulContainerNode) ClaimAdminPort() int {
+func (c *consulContainerNode) ClaimAdminPort() (int, error) {
+	if c.nextAdminPortOffset >= MaxEnvoyOnNode {
+		return 0, fmt.Errorf("running out of envoy admin ports: max %d, already claimed %d",
+			MaxEnvoyOnNode, c.nextAdminPortOffset)
+	}
 	p := 19000 + c.nextAdminPortOffset
 	c.nextAdminPortOffset++
-	return p
+	return p, nil
 }
 
 // NewConsulContainer starts a Consul agent in a container with the given config.
@@ -425,32 +433,29 @@ func newContainerRequest(config Config, opts containerOpts) (podRequest, consulR
 		Name:       opts.name + "-pod",
 		SkipReaper: skipReaper,
 		ExposedPorts: []string{
-			"8500/tcp",
-			"8501/tcp",
+			"8500/tcp", // Consul HTTP API
+			"8501/tcp", // Consul HTTPs API
 			"8443/tcp", // Envoy Gateway Listener
-			"5000/tcp", // Envoy App Listener
 			"8079/tcp", // Envoy App Listener
 			"8080/tcp", // Envoy App Listener
 			"9998/tcp", // Envoy App Listener
 			"9999/tcp", // Envoy App Listener
-			"19000/tcp", // Envoy Admin Port
-			"19001/tcp", // Envoy Admin Port
-			"19002/tcp", // Envoy Admin Port
-			"19003/tcp", // Envoy Admin Port
-			"19004/tcp", // Envoy Admin Port
-			"19005/tcp", // Envoy Admin Port
-			"19006/tcp", // Envoy Admin Port
-			"19007/tcp", // Envoy Admin Port
-			"19008/tcp", // Envoy Admin Port
-			"19009/tcp", // Envoy Admin Port
 		},
 		Hostname: opts.hostname,
 		Networks: opts.addtionalNetworks,
 	}
 
+	// Envoy upstream listener
+	pod.ExposedPorts = append(pod.ExposedPorts, fmt.Sprintf("%d/tcp", ServiceUpstreamLocalBindPort))
+
+	// Reserve the exposed ports for Envoy admin ports, e.g., 19000 - 19009
+	basePort := 19000
+	for i := 0; i < MaxEnvoyOnNode; i++ {
+		pod.ExposedPorts = append(pod.ExposedPorts, fmt.Sprintf("%d/tcp", basePort+i))
+	}
+
 	// For handshakes like auto-encrypt, it can take 10's of seconds for the agent to become "ready".
 	// If we only wait until the log stream starts, subsequent commands to agents will fail.
 	// TODO: optimize the wait strategy
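
To make the new behavior concrete, here is a minimal, self-contained sketch of the claiming pattern (illustrative names only, not the library code): each node hands out admin ports 19000 through 19009, and the eleventh claim returns an error instead of silently moving on to a port that was never exposed on the pod.

package main

import "fmt"

const maxEnvoyOnNode = 10 // assumption: mirrors MaxEnvoyOnNode in the diff above

// node is a stand-in for consulContainerNode; only the port counter matters here.
type node struct {
	nextAdminPortOffset int
}

// claimAdminPort mimics the claiming logic above: hand out 19000, 19001, ...
// and fail once maxEnvoyOnNode ports have been claimed.
func (n *node) claimAdminPort() (int, error) {
	if n.nextAdminPortOffset >= maxEnvoyOnNode {
		return 0, fmt.Errorf("running out of envoy admin ports: max %d, already claimed %d",
			maxEnvoyOnNode, n.nextAdminPortOffset)
	}
	p := 19000 + n.nextAdminPortOffset
	n.nextAdminPortOffset++
	return p, nil
}

func main() {
	n := &node{}
	for i := 0; i <= maxEnvoyOnNode; i++ {
		port, err := n.claimAdminPort()
		if err != nil {
			fmt.Println("claim failed:", err) // the 11th claim fails fast
			return
		}
		fmt.Println("claimed admin port", port) // 19000 .. 19009
	}
}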


@@ -107,7 +107,10 @@ func NewConnectService(ctx context.Context, name string, serviceName string, ser
 	}
 	dockerfileCtx.BuildArgs = buildargs
 
-	adminPort := node.ClaimAdminPort()
+	adminPort, err := node.ClaimAdminPort()
+	if err != nil {
+		return nil, err
+	}
 
 	req := testcontainers.ContainerRequest{
 		FromDockerfile: dockerfileCtx,

@@ -101,7 +101,10 @@ func NewGatewayService(ctx context.Context, name string, kind string, node libcl
 	}
 	dockerfileCtx.BuildArgs = buildargs
 
-	adminPort := node.ClaimAdminPort()
+	adminPort, err := node.ClaimAdminPort()
+	if err != nil {
+		return nil, err
+	}
 
 	req := testcontainers.ContainerRequest{
 		FromDockerfile: dockerfileCtx,

@@ -96,7 +96,7 @@ func CreateAndRegisterStaticClientSidecar(
 				DestinationName:  StaticServerServiceName,
 				DestinationPeer:  peerName,
 				LocalBindAddress: "0.0.0.0",
-				LocalBindPort:    5000,
+				LocalBindPort:    libcluster.ServiceUpstreamLocalBindPort,
 				MeshGateway: api.MeshGatewayConfig{
 					Mode: mgwMode,
 				},
@@ -111,7 +111,7 @@ func CreateAndRegisterStaticClientSidecar(
 	}
 
 	// Create a service and proxy instance
-	clientConnectProxy, err := NewConnectService(context.Background(), fmt.Sprintf("%s-sidecar", StaticClientServiceName), StaticClientServiceName, 5000, node)
+	clientConnectProxy, err := NewConnectService(context.Background(), fmt.Sprintf("%s-sidecar", StaticClientServiceName), StaticClientServiceName, libcluster.ServiceUpstreamLocalBindPort, node)
 	if err != nil {
 		return nil, err
 	}
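
As a closing illustration, a small self-contained sketch (hypothetical wiring, not the actual helpers) of what the shared constant buys: the registered upstream's LocalBindPort, the port handed to the sidecar container, and the "<port>/tcp" entry exposed on the pod all have to agree, and defining the value once as libcluster.ServiceUpstreamLocalBindPort keeps those call sites from drifting apart.

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

const serviceUpstreamLocalBindPort = 5000 // assumption: mirrors libcluster.ServiceUpstreamLocalBindPort

func main() {
	// 1. The upstream registered for the static client's sidecar proxy.
	upstream := api.Upstream{
		DestinationName:  "static-server",
		LocalBindAddress: "0.0.0.0",
		LocalBindPort:    serviceUpstreamLocalBindPort,
	}

	// 2. The listener port the sidecar container is created with
	//    (the value passed to NewConnectService in the diff above).
	sidecarListenerPort := serviceUpstreamLocalBindPort

	// 3. The port exposed on the pod so the test can reach the upstream.
	exposedPodPort := fmt.Sprintf("%d/tcp", serviceUpstreamLocalBindPort)

	fmt.Println(upstream.LocalBindPort, sidecarListenerPort, exposedPodPort)
}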