# open-consul/test/integration/connect/envoy/docker-compose.yml

version: '3.4'

x-workdir:
  &workdir-volume
  type: volume
  source: workdir
  target: /workdir
  volume:
    nocopy: true
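
# The anchor above (&workdir-volume) lets every service below mount this same
# named volume by referencing *workdir-volume instead of repeating the mapping.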

volumes:
  workdir:

services:
  # This is a dummy container that we use to create the volume and keep it
  # accessible while other containers are down.
  workdir:
    image: alpine
    volumes:
      - *workdir-volume
    command:
      - sleep
      - "86400"

  consul-primary:
    image: "consul-dev"
    command:
      - "agent"
      - "-dev"
      - "-datacenter"
      - "primary"
      - "-config-dir"
      - "/workdir/primary/consul"
      - "-client"
      - "0.0.0.0"
    volumes:
      - *workdir-volume
    ports:
      # Exposing to the host makes debugging locally a bit easier
      - "8500:8500"
      - "8502:8502"
      # For zipkin, which uses this container's network
      - 9411:9411
      # Jaeger UI
      - 16686:16686
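    # For example, with the stack up, the agent can be queried from the host:
    #   curl localhost:8500/v1/status/leader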

  s1:
    depends_on:
      - consul-primary
    image: "fortio/fortio"
    environment:
      - "FORTIO_NAME=s1"
    command:
      - "server"
      - "-http-port"
      - ":8080"
      - "-grpc-port"
      - ":8079"
      - "-redirect-port"
      - "disabled"
    network_mode: service:consul-primary

  s2:
    depends_on:
      - consul-primary
    image: "fortio/fortio"
    environment:
      - "FORTIO_NAME=s2"
    command:
      - "server"
      - "-http-port"
      - ":8181"
      - "-grpc-port"
      - ":8179"
      - "-redirect-port"
      - "disabled"
    network_mode: service:consul-primary

  s2-v1:
    depends_on:
      - consul-primary
    image: "fortio/fortio"
    environment:
      - "FORTIO_NAME=s2-v1"
    command:
      - "server"
      - "-http-port"
      - ":8182"
      - "-grpc-port"
      - ":8178"
      - "-redirect-port"
      - "disabled"
    network_mode: service:consul-primary

  s2-v2:
    depends_on:
      - consul-primary
    image: "fortio/fortio"
    environment:
      - "FORTIO_NAME=s2-v2"
    command:
      - "server"
      - "-http-port"
      - ":8183"
      - "-grpc-port"
      - ":8177"
      - "-redirect-port"
      - "disabled"
    network_mode: service:consul-primary

  s3:
    depends_on:
      - consul-primary
    image: "fortio/fortio"
    environment:
      - "FORTIO_NAME=s3"
    command:
      - "server"
      - "-http-port"
      - ":8282"
      - "-grpc-port"
      - ":8279"
      - "-redirect-port"
      - "disabled"
    network_mode: service:consul-primary

  s3-v1:
    depends_on:
      - consul-primary
    image: "fortio/fortio"
    environment:
      - "FORTIO_NAME=s3-v1"
    command:
      - "server"
      - "-http-port"
      - ":8283"
      - "-grpc-port"
      - ":8278"
      - "-redirect-port"
      - "disabled"
    network_mode: service:consul-primary

  s3-v2:
    depends_on:
      - consul-primary
    image: "fortio/fortio"
    environment:
      - "FORTIO_NAME=s3-v2"
    command:
      - "server"
      - "-http-port"
      - ":8284"
      - "-grpc-port"
      - ":8277"
      - "-redirect-port"
      - "disabled"
    network_mode: service:consul-primary

  s3-alt:
    depends_on:
      - consul-primary
    image: "fortio/fortio"
    environment:
      - "FORTIO_NAME=s3-alt"
    command:
      - "server"
      - "-http-port"
      - ":8286"
      - "-grpc-port"
      - ":8280"
      - "-redirect-port"
      - "disabled"
    network_mode: service:consul-primary

  s1-sidecar-proxy:
    depends_on:
      - consul-primary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/primary/envoy/s1-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since the envoys seem to interact with each other
      # despite running in separate containers that don't share an IPC
      # namespace. Not quite sure how this happens, but it may be due to the
      # unix socket being in some shared location?
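      # (A likely explanation: all of these proxies share consul-primary's
      # network namespace via network_mode, and Envoy's hot-restart
      # coordination uses abstract-namespace unix sockets, which are scoped
      # to the network namespace rather than the filesystem.)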
- "--disable-hot-restart"
- "--drain-time-s"
- "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary

  s2-sidecar-proxy:
    depends_on:
      - consul-primary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/primary/envoy/s2-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since the envoys seem to interact with each other
      # despite running in separate containers that don't share an IPC
      # namespace. Not quite sure how this happens, but it may be due to the
      # unix socket being in some shared location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary

  s2-v1-sidecar-proxy:
    depends_on:
      - consul-primary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/primary/envoy/s2-v1-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since the envoys seem to interact with each other
      # despite running in separate containers that don't share an IPC
      # namespace. Not quite sure how this happens, but it may be due to the
      # unix socket being in some shared location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary

  s2-v2-sidecar-proxy:
    depends_on:
      - consul-primary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/primary/envoy/s2-v2-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since the envoys seem to interact with each other
      # despite running in separate containers that don't share an IPC
      # namespace. Not quite sure how this happens, but it may be due to the
      # unix socket being in some shared location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary

  s3-sidecar-proxy:
    depends_on:
      - consul-primary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/primary/envoy/s3-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since the envoys seem to interact with each other
      # despite running in separate containers that don't share an IPC
      # namespace. Not quite sure how this happens, but it may be due to the
      # unix socket being in some shared location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary

  s3-v1-sidecar-proxy:
    depends_on:
      - consul-primary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/primary/envoy/s3-v1-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since the envoys seem to interact with each other
      # despite running in separate containers that don't share an IPC
      # namespace. Not quite sure how this happens, but it may be due to the
      # unix socket being in some shared location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary

  s3-v2-sidecar-proxy:
    depends_on:
      - consul-primary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/primary/envoy/s3-v2-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since the envoys seem to interact with each other
      # despite running in separate containers that don't share an IPC
      # namespace. Not quite sure how this happens, but it may be due to the
      # unix socket being in some shared location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary

  s3-alt-sidecar-proxy:
    depends_on:
      - consul-primary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/primary/envoy/s3-alt-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since the envoys seem to interact with each other
      # despite running in separate containers that don't share an IPC
      # namespace. Not quite sure how this happens, but it may be due to the
      # unix socket being in some shared location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary
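
  # Unlike the other sidecars, which run a pre-rendered bootstrap JSON, this
  # service exercises the `consul connect envoy` CLI bootstrap path; the
  # Dockerfile-consul-envoy image is assumed to bundle both the consul and
  # envoy binaries.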
  s1-sidecar-proxy-consul-exec:
    depends_on:
      - consul-primary
    build:
      context: .
      dockerfile: Dockerfile-consul-envoy
      args:
        ENVOY_VERSION: ${ENVOY_VERSION}
    image: consul-dev-envoy:${ENVOY_VERSION}
    command:
      - "consul"
      - "connect"
      - "envoy"
      - "-sidecar-for"
      - "s1"
      - "-envoy-version"
      - ${ENVOY_VERSION}
      - "--"
      - "-l"
      - "debug"
    network_mode: service:consul-primary

  fake-statsd:
    depends_on:
      - consul-primary
    image: "alpine/socat"
    command:
      - -u
      - UDP-RECVFROM:8125,fork,reuseaddr
      # This magic incantation is needed since Envoy doesn't add newlines, so
      # each packet is passed to echo to append a newline before it is written
      # to the log.
      - SYSTEM:'xargs -0 echo >> /workdir/primary/statsd/statsd.log'
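      # The captured metrics can then be read from any container mounting the
      # volume, e.g. (hypothetical invocation):
      #   docker-compose exec consul-primary cat /workdir/primary/statsd/statsd.log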
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary
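
  # Resets the shared state between runs; assumed usage is
  # `docker-compose run wipe-volumes` from the test harness.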
  wipe-volumes:
    volumes:
      - *workdir-volume
    image: alpine
    command:
      - sh
      - -c
      - 'rm -rf /workdir/*'

  # This is a debugging tool; run `docker-compose up dump-volumes` to copy the
  # current state of the shared volume into ./workdir on the host.
  dump-volumes:
    volumes:
      - *workdir-volume
      - ./:/cwd
    image: alpine
    command:
      - cp
      - -r
      - /workdir/.
      - /cwd/workdir/

  zipkin:
    volumes:
      - *workdir-volume
    image: openzipkin/zipkin
    network_mode: service:consul-primary
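
  # Jaeger's all-in-one image can also ingest zipkin-format spans; the flag
  # below has it listen on 9411 (the standard zipkin port, exposed on
  # consul-primary above), so a zipkin tracer configuration can point at
  # either backend.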
  jaeger:
    volumes:
      - *workdir-volume
    image: jaegertracing/all-in-one:1.11
    network_mode: service:consul-primary
    command:
      - --collector.zipkin.http-port=9411

  consul-secondary:
    image: "consul-dev"
    command:
      - "agent"
      - "-dev"
      - "-datacenter"
      - "secondary"
      - "-config-dir"
      - "/workdir/secondary/consul"
      - "-client"
      - "0.0.0.0"
    volumes:
      - *workdir-volume
    ports:
      # Exposing to the host makes debugging locally a bit easier
      - "9500:8500"
      - "9502:8502"

  s1-secondary:
    depends_on:
      - consul-secondary
    image: "fortio/fortio"
    environment:
- "FORTIO_NAME=s1-secondary"
command:
- "server"
- "-http-port"
- ":8080"
- "-grpc-port"
- ":8079"
- "-redirect-port"
- "disabled"
network_mode: service:consul-secondary

  s2-secondary:
    depends_on:
      - consul-secondary
    image: "fortio/fortio"
    environment:
- "FORTIO_NAME=s2-secondary"
command:
- "server"
- "-http-port"
- ":8181"
- "-grpc-port"
- ":8179"
- "-redirect-port"
- "disabled"
network_mode: service:consul-secondary

  s1-sidecar-proxy-secondary:
    depends_on:
      - consul-secondary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/secondary/envoy/s1-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since the envoys seem to interact with each other
      # despite running in separate containers that don't share an IPC
      # namespace. Not quite sure how this happens, but it may be due to the
      # unix socket being in some shared location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-secondary

  s2-sidecar-proxy-secondary:
    depends_on:
      - consul-secondary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/secondary/envoy/s2-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since the envoys seem to interact with each other
      # despite running in separate containers that don't share an IPC
      # namespace. Not quite sure how this happens, but it may be due to the
      # unix socket being in some shared location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-secondary

  gateway-primary:
    depends_on:
      - consul-primary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/primary/envoy/mesh-gateway-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since the envoys seem to interact with each other
      # despite running in separate containers that don't share an IPC
      # namespace. Not quite sure how this happens, but it may be due to the
      # unix socket being in some shared location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary

  gateway-secondary:
    depends_on:
      - consul-secondary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/secondary/envoy/mesh-gateway-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since the envoys seem to interact with each other
      # despite running in separate containers that don't share an IPC
      # namespace. Not quite sure how this happens, but it may be due to the
      # unix socket being in some shared location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-secondary

  ingress-gateway-primary:
    depends_on:
      - consul-primary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/primary/envoy/ingress-gateway-bootstrap.json"
      - "-l"
      - "debug"
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary

  terminating-gateway-primary:
    depends_on:
      - consul-primary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/primary/envoy/terminating-gateway-bootstrap.json"
      - "-l"
      - "debug"
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary

  verify-primary:
    depends_on:
      - consul-primary
    build:
      context: .
      dockerfile: Dockerfile-bats
    tty: true
    environment:
      - ENVOY_VERSION
    command:
      - "--pretty"
      - "/workdir/primary/bats"
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary
    pid: host

  verify-secondary:
    depends_on:
      - consul-secondary
    build:
      context: .
      dockerfile: Dockerfile-bats
    tty: true
    environment:
      - ENVOY_VERSION
    command:
      - "--pretty"
      - "/workdir/secondary/bats"
    volumes:
      - *workdir-volume
    network_mode: service:consul-secondary
    pid: host

  tcpdump-primary:
    depends_on:
      - consul-primary
    build:
      context: .
      dockerfile: Dockerfile-tcpdump
    # We can't run this in CircleCI; it's only here to enable temporarily for
    # local debugging.
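    # (Hypothetical local invocation: `LOG_DIR=logs docker-compose up -d
    # tcpdump-primary`, with ./logs existing on the host first.)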
    volumes:
      - type: bind
        source: ./${LOG_DIR}
        target: /data
    command: -v -i any -w /data/primary.pcap
    network_mode: service:consul-primary

  tcpdump-secondary:
    depends_on:
      - consul-secondary
    build:
      context: .
      dockerfile: Dockerfile-tcpdump
    # We can't run this in CircleCI; it's only here to enable temporarily for
    # local debugging.
    volumes:
      - type: bind
        source: ./${LOG_DIR}
        target: /data
    command: -v -i any -w /data/secondary.pcap
    network_mode: service:consul-secondary