feat(ingress-gateway): support configuring limits in ingress-gateway config entry (#14749)

* feat(ingress-gateway): support configuring limits in the ingress-gateway config entry
  - A new `Defaults` field with `max_connections`, `max_pending_requests`, and `max_concurrent_requests` is added to the ingress gateway config entry.
  - New `max_connections`, `max_pending_requests`, and `max_concurrent_requests` fields on individual services override the values in `Defaults`.
  - Added unit tests and an integration test.
  - Updated the documentation.

Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com>
Co-authored-by: Dan Stough <dan.stough@hashicorp.com>
parent 099f29387b, commit 4ece020bf1
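In practice the new limits are set in the `ingress-gateway` config entry. A minimal HCL sketch with illustrative service name, port, and values (the shape follows the integration-test fixture and docs updated later in this diff):

```hcl
Kind = "ingress-gateway"
Name = "ingress-gateway"

# Gateway-wide limits applied to every upstream cluster.
Defaults {
  MaxConnections        = 2048
  MaxPendingRequests    = 512
  MaxConcurrentRequests = 4096
}

Listeners = [
  {
    Port     = 8080
    Protocol = "http"
    Services = [
      {
        Name = "web"
        # Per-service limits override the corresponding value in Defaults.
        MaxConnections = 4096
      }
    ]
  }
]
```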
@@ -0,0 +1,3 @@
```release-note:feature
config-entry(ingress-gateway): Added support for `max_connections` for upstream clusters
```
@@ -98,6 +98,9 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent,
    snap.IngressGateway.GatewayConfigLoaded = true
    snap.IngressGateway.TLSConfig = gatewayConf.TLS
    if gatewayConf.Defaults != nil {
        snap.IngressGateway.Defaults = *gatewayConf.Defaults
    }

    // Load each listener's config from the config entry so we don't have to
    // pass listener config through "upstreams" types as that grows.
@@ -619,6 +619,9 @@ type configSnapshotIngressGateway struct {
    // Listeners is the original listener config from the ingress-gateway config
    // entry to save us trying to pass fields through Upstreams
    Listeners map[IngressListenerKey]structs.IngressListener

    // Defaults is the default configuration for upstream service instances
    Defaults structs.IngressServiceConfig
}

// isEmpty is a test helper
@@ -31,11 +31,20 @@ type IngressGatewayConfigEntry struct {
    // what services to associated to those ports.
    Listeners []IngressListener

    // Defaults contains default configuration for all upstream service instances
    Defaults *IngressServiceConfig `json:",omitempty"`

    Meta map[string]string `json:",omitempty"`
    acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
    RaftIndex
}

type IngressServiceConfig struct {
    MaxConnections        uint32
    MaxPendingRequests    uint32
    MaxConcurrentRequests uint32
}

type IngressListener struct {
    // Port declares the port on which the ingress gateway should listen for traffic.
    Port int

@@ -90,6 +99,10 @@ type IngressService struct {
    RequestHeaders  *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"`
    ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"`

    MaxConnections        uint32 `json:",omitempty" alias:"max_connections"`
    MaxPendingRequests    uint32 `json:",omitempty" alias:"max_pending_requests"`
    MaxConcurrentRequests uint32 `json:",omitempty" alias:"max_concurrent_requests"`

    Meta map[string]string `json:",omitempty"`
    acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
}
@@ -630,7 +630,7 @@ func (s *ResourceGenerator) injectGatewayDestinationAddons(cfgSnap *proxycfg.Con
func (s *ResourceGenerator) clustersFromSnapshotIngressGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
    var clusters []proto.Message
    createdClusters := make(map[proxycfg.UpstreamID]bool)
    for _, upstreams := range cfgSnap.IngressGateway.Upstreams {
    for listenerKey, upstreams := range cfgSnap.IngressGateway.Upstreams {
        for _, u := range upstreams {
            uid := proxycfg.NewUpstreamID(&u)

@@ -658,6 +658,7 @@ func (s *ResourceGenerator) clustersFromSnapshotIngressGateway(cfgSnap *proxycfg
            }

            for _, c := range upstreamClusters {
                s.configIngressUpstreamCluster(c, cfgSnap, listenerKey, &u)
                clusters = append(clusters, c)
            }
            createdClusters[uid] = true

@@ -666,6 +667,50 @@ func (s *ResourceGenerator) clustersFromSnapshotIngressGateway(cfgSnap *proxycfg
    return clusters, nil
}

func (s *ResourceGenerator) configIngressUpstreamCluster(c *envoy_cluster_v3.Cluster, cfgSnap *proxycfg.ConfigSnapshot, listenerKey proxycfg.IngressListenerKey, u *structs.Upstream) {
    var threshold *envoy_cluster_v3.CircuitBreakers_Thresholds
    setThresholdLimit := func(limitType string, limit int) {
        if limit <= 0 {
            return
        }

        if threshold == nil {
            threshold = &envoy_cluster_v3.CircuitBreakers_Thresholds{}
        }

        switch limitType {
        case "max_connections":
            threshold.MaxConnections = makeUint32Value(limit)
        case "max_pending_requests":
            threshold.MaxPendingRequests = makeUint32Value(limit)
        case "max_requests":
            threshold.MaxRequests = makeUint32Value(limit)
        }
    }

    setThresholdLimit("max_connections", int(cfgSnap.IngressGateway.Defaults.MaxConnections))
    setThresholdLimit("max_pending_requests", int(cfgSnap.IngressGateway.Defaults.MaxPendingRequests))
    setThresholdLimit("max_requests", int(cfgSnap.IngressGateway.Defaults.MaxConcurrentRequests))

    // Adjust the limit for upstream service
    // Lookup listener and service config details from ingress gateway
    // definition.
    var svc *structs.IngressService
    if lCfg, ok := cfgSnap.IngressGateway.Listeners[listenerKey]; ok {
        svc = findIngressServiceMatchingUpstream(lCfg, *u)
    }

    if svc != nil {
        setThresholdLimit("max_connections", int(svc.MaxConnections))
        setThresholdLimit("max_pending_requests", int(svc.MaxPendingRequests))
        setThresholdLimit("max_requests", int(svc.MaxConcurrentRequests))
    }

    if threshold != nil {
        c.CircuitBreakers.Thresholds = []*envoy_cluster_v3.CircuitBreakers_Thresholds{threshold}
    }
}

func (s *ResourceGenerator) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot, name, pathProtocol string, port int) (*envoy_cluster_v3.Cluster, error) {
    var c *envoy_cluster_v3.Cluster
    var err error
@@ -493,6 +493,45 @@ func TestClustersFromSnapshot(t *testing.T) {
                "simple", nil, nil, nil)
        },
    },
    {
        name: "ingress-with-service-max-connections",
        create: func(t testinf.T) *proxycfg.ConfigSnapshot {
            return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
                "simple", nil,
                func(entry *structs.IngressGatewayConfigEntry) {
                    entry.Listeners[0].Services[0].MaxConnections = 4096
                }, nil)
        },
    },
    {
        name: "ingress-with-defaults-service-max-connections",
        create: func(t testinf.T) *proxycfg.ConfigSnapshot {
            return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
                "simple", nil,
                func(entry *structs.IngressGatewayConfigEntry) {
                    entry.Defaults = &structs.IngressServiceConfig{
                        MaxConnections:        2048,
                        MaxPendingRequests:    512,
                        MaxConcurrentRequests: 4096,
                    }
                }, nil)
        },
    },
    {
        name: "ingress-with-overwrite-defaults-service-max-connections",
        create: func(t testinf.T) *proxycfg.ConfigSnapshot {
            return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
                "simple", nil,
                func(entry *structs.IngressGatewayConfigEntry) {
                    entry.Defaults = &structs.IngressServiceConfig{
                        MaxConnections:     2048,
                        MaxPendingRequests: 512,
                    }
                    entry.Listeners[0].Services[0].MaxConnections = 4096
                    entry.Listeners[0].Services[0].MaxPendingRequests = 2048
                }, nil)
        },
    },
    {
        name: "ingress-with-chain-external-sni",
        create: func(t testinf.T) *proxycfg.ConfigSnapshot {
agent/xds/testdata/clusters/ingress-with-defaults-service-max-connections.latest.golden (new file, 71 lines, vendored)
@@ -0,0 +1,71 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "type": "EDS",
      "edsClusterConfig": {
        "edsConfig": {
          "ads": {

          },
          "resourceApiVersion": "V3"
        }
      },
      "connectTimeout": "33s",
      "circuitBreakers": {
        "thresholds":[
          {
            "maxConnections": 2048,
            "maxPendingRequests": 512,
            "maxRequests": 4096
          }
        ]
      },
      "outlierDetection": {

      },
      "commonLbConfig": {
        "healthyPanicThreshold": {

        }
      },
      "transportSocket": {
        "name": "tls",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
          "commonTlsContext": {
            "tlsParams": {

            },
            "tlsCertificates": [
              {
                "certificateChain": {
                  "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
                },
                "privateKey": {
                  "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
                }
              }
            ],
            "validationContext": {
              "trustedCa": {
                "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
              },
              "matchSubjectAltNames": [
                {
                  "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db"
                }
              ]
            }
          },
          "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
        }
      }
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
  "nonce": "00000001"
}
@@ -0,0 +1,70 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "type": "EDS",
      "edsClusterConfig": {
        "edsConfig": {
          "ads": {

          },
          "resourceApiVersion": "V3"
        }
      },
      "connectTimeout": "33s",
      "circuitBreakers": {
        "thresholds":[
          {
            "maxConnections": 4096,
            "maxPendingRequests": 2048
          }
        ]
      },
      "outlierDetection": {

      },
      "commonLbConfig": {
        "healthyPanicThreshold": {

        }
      },
      "transportSocket": {
        "name": "tls",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
          "commonTlsContext": {
            "tlsParams": {

            },
            "tlsCertificates": [
              {
                "certificateChain": {
                  "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
                },
                "privateKey": {
                  "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
                }
              }
            ],
            "validationContext": {
              "trustedCa": {
                "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
              },
              "matchSubjectAltNames": [
                {
                  "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db"
                }
              ]
            }
          },
          "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
        }
      }
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
  "nonce": "00000001"
}
agent/xds/testdata/clusters/ingress-with-service-max-connections.latest.golden (new file, 69 lines, vendored)
@@ -0,0 +1,69 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "type": "EDS",
      "edsClusterConfig": {
        "edsConfig": {
          "ads": {

          },
          "resourceApiVersion": "V3"
        }
      },
      "connectTimeout": "33s",
      "circuitBreakers": {
        "thresholds":[
          {
            "maxConnections": 4096
          }
        ]
      },
      "outlierDetection": {

      },
      "commonLbConfig": {
        "healthyPanicThreshold": {

        }
      },
      "transportSocket": {
        "name": "tls",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
          "commonTlsContext": {
            "tlsParams": {

            },
            "tlsCertificates": [
              {
                "certificateChain": {
                  "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
                },
                "privateKey": {
                  "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
                }
              }
            ],
            "validationContext": {
              "trustedCa": {
                "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
              },
              "matchSubjectAltNames": [
                {
                  "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db"
                }
              ]
            }
          },
          "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
        }
      }
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
  "nonce": "00000001"
}
@@ -27,6 +27,9 @@ type IngressGatewayConfigEntry struct {
    Meta map[string]string `json:",omitempty"`

    // Defaults is default configuration for all upstream services
    Defaults *IngressServiceConfig `json:",omitempty"`

    // CreateIndex is the Raft index this entry was created at. This is a
    // read-only field.
    CreateIndex uint64

@@ -37,6 +40,12 @@ type IngressGatewayConfigEntry struct {
    ModifyIndex uint64
}

type IngressServiceConfig struct {
    MaxConnections        *uint32
    MaxPendingRequests    *uint32
    MaxConcurrentRequests *uint32
}

type GatewayTLSConfig struct {
    // Indicates that TLS should be enabled for this gateway service.
    Enabled bool

@@ -124,6 +133,10 @@ type IngressService struct {
    // Allow HTTP header manipulation to be configured.
    RequestHeaders  *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"`
    ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"`

    MaxConnections        *uint32 `json:",omitempty" alias:"max_connections"`
    MaxPendingRequests    *uint32 `json:",omitempty" alias:"max_pending_requests"`
    MaxConcurrentRequests *uint32 `json:",omitempty" alias:"max_concurrent_requests"`
}

func (i *IngressGatewayConfigEntry) GetKind() string { return i.Kind }
@@ -29,6 +29,10 @@ func TestAPI_ConfigEntries_IngressGateway(t *testing.T) {
            Enabled:       true,
            TLSMinVersion: "TLSv1_2",
        },
        Defaults: &IngressServiceConfig{
            MaxConnections:     uint32Pointer(2048),
            MaxPendingRequests: uint32Pointer(4096),
        },
    }

    global := &ProxyConfigEntry{

@@ -93,6 +97,9 @@ func TestAPI_ConfigEntries_IngressGateway(t *testing.T) {
                    CertResource: "bar",
                },
            },
            MaxConnections:        uint32Pointer(5120),
            MaxPendingRequests:    uint32Pointer(512),
            MaxConcurrentRequests: uint32Pointer(2048),
        },
    },
    TLS: &GatewayTLSConfig{

@@ -168,6 +175,9 @@ func TestAPI_ConfigEntries_IngressGateway(t *testing.T) {
    require.True(t, ok)
    require.Equal(t, ingress2.Kind, readIngress.Kind)
    require.Equal(t, ingress2.Name, readIngress.Name)
    require.Equal(t, *ingress2.Defaults.MaxConnections, *readIngress.Defaults.MaxConnections)
    require.Equal(t, uint32(4096), *readIngress.Defaults.MaxPendingRequests)
    require.Equal(t, uint32(0), *readIngress.Defaults.MaxConcurrentRequests)
    require.Len(t, readIngress.Listeners, 1)
    require.Len(t, readIngress.Listeners[0].Services, 1)
    // Set namespace and partition to blank so that OSS and ent can utilize the same tests
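Outside the test, writing an entry with these fields through the Go API client could look like the sketch below (the agent address comes from `api.DefaultConfig()`; the service name, port, and limit values are illustrative, and `uint32Pointer` is a local helper mirroring the one used in the test above):

```go
package main

import (
    "log"

    "github.com/hashicorp/consul/api"
)

// uint32Pointer returns a pointer to its argument, since the API types use *uint32.
func uint32Pointer(i uint32) *uint32 { return &i }

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    entry := &api.IngressGatewayConfigEntry{
        Kind: api.IngressGateway,
        Name: "ingress-gateway",
        // Gateway-wide limits for every upstream cluster.
        Defaults: &api.IngressServiceConfig{
            MaxConnections:     uint32Pointer(2048),
            MaxPendingRequests: uint32Pointer(512),
        },
        Listeners: []api.IngressListener{{
            Port:     8080,
            Protocol: "http",
            Services: []api.IngressService{{
                Name: "web",
                // Per-service limit overriding Defaults for this upstream only.
                MaxConnections: uint32Pointer(4096),
            }},
        }},
    }

    // Set writes (creates or updates) the config entry.
    if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
        log.Fatal(err)
    }
}
```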
@@ -141,6 +141,11 @@ func IngressGatewayToStructs(s *IngressGateway, t *structs.IngressGatewayConfigE
            }
        }
    }
    if s.Defaults != nil {
        var x structs.IngressServiceConfig
        IngressServiceConfigToStructs(s.Defaults, &x)
        t.Defaults = &x
    }
    t.Meta = s.Meta
}
func IngressGatewayFromStructs(t *structs.IngressGatewayConfigEntry, s *IngressGateway) {

@@ -162,6 +167,11 @@ func IngressGatewayFromStructs(t *structs.IngressGatewayConfigEntry, s *IngressG
            }
        }
    }
    if t.Defaults != nil {
        var x IngressServiceConfig
        IngressServiceConfigFromStructs(t.Defaults, &x)
        s.Defaults = &x
    }
    s.Meta = t.Meta
}
func IngressListenerToStructs(s *IngressListener, t *structs.IngressListener) {

@@ -227,6 +237,9 @@ func IngressServiceToStructs(s *IngressService, t *structs.IngressService) {
        HTTPHeaderModifiersToStructs(s.ResponseHeaders, &x)
        t.ResponseHeaders = &x
    }
    t.MaxConnections = s.MaxConnections
    t.MaxPendingRequests = s.MaxPendingRequests
    t.MaxConcurrentRequests = s.MaxConcurrentRequests
    t.Meta = s.Meta
    t.EnterpriseMeta = enterpriseMetaToStructs(s.EnterpriseMeta)
}

@@ -251,9 +264,28 @@ func IngressServiceFromStructs(t *structs.IngressService, s *IngressService) {
        HTTPHeaderModifiersFromStructs(t.ResponseHeaders, &x)
        s.ResponseHeaders = &x
    }
    s.MaxConnections = t.MaxConnections
    s.MaxPendingRequests = t.MaxPendingRequests
    s.MaxConcurrentRequests = t.MaxConcurrentRequests
    s.Meta = t.Meta
    s.EnterpriseMeta = enterpriseMetaFromStructs(t.EnterpriseMeta)
}
func IngressServiceConfigToStructs(s *IngressServiceConfig, t *structs.IngressServiceConfig) {
    if s == nil {
        return
    }
    t.MaxConnections = s.MaxConnections
    t.MaxPendingRequests = s.MaxPendingRequests
    t.MaxConcurrentRequests = s.MaxConcurrentRequests
}
func IngressServiceConfigFromStructs(t *structs.IngressServiceConfig, s *IngressServiceConfig) {
    if s == nil {
        return
    }
    s.MaxConnections = t.MaxConnections
    s.MaxPendingRequests = t.MaxPendingRequests
    s.MaxConcurrentRequests = t.MaxConcurrentRequests
}
func IntentionHTTPHeaderPermissionToStructs(s *IntentionHTTPHeaderPermission, t *structs.IntentionHTTPHeaderPermission) {
    if s == nil {
        return
@@ -187,6 +187,16 @@ func (msg *IngressGateway) UnmarshalBinary(b []byte) error {
    return proto.Unmarshal(b, msg)
}

// MarshalBinary implements encoding.BinaryMarshaler
func (msg *IngressServiceConfig) MarshalBinary() ([]byte, error) {
    return proto.Marshal(msg)
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (msg *IngressServiceConfig) UnmarshalBinary(b []byte) error {
    return proto.Unmarshal(b, msg)
}

// MarshalBinary implements encoding.BinaryMarshaler
func (msg *GatewayTLSConfig) MarshalBinary() ([]byte, error) {
    return proto.Marshal(msg)
(File diff suppressed because it is too large.)
@@ -228,6 +228,18 @@ message IngressGateway {
  GatewayTLSConfig TLS = 1;
  repeated IngressListener Listeners = 2;
  map<string, string> Meta = 3;
  IngressServiceConfig Defaults = 4;
}

// mog annotation:
//
// target=github.com/hashicorp/consul/agent/structs.IngressServiceConfig
// output=config_entry.gen.go
// name=Structs
message IngressServiceConfig {
  uint32 MaxConnections = 1;
  uint32 MaxPendingRequests = 2;
  uint32 MaxConcurrentRequests = 3;
}

// mog annotation:

@@ -283,6 +295,9 @@ message IngressService {
  map<string, string> Meta = 6;
  // mog: func-to=enterpriseMetaToStructs func-from=enterpriseMetaFromStructs
  common.EnterpriseMeta EnterpriseMeta = 7;
  uint32 MaxConnections = 8;
  uint32 MaxPendingRequests = 9;
  uint32 MaxConcurrentRequests = 10;
}

// mog annotation:
@@ -11,6 +11,11 @@ config_entries {
    kind = "ingress-gateway"
    name = "ingress-gateway"

    Defaults {
      MaxConnections = 10
      MaxPendingRequests = 20
      MaxConcurrentRequests = 30
    }
    listeners = [
      {
        port = 9999

@@ -28,6 +33,9 @@ config_entries {
          {
            name = "s1"
            hosts = ["test.example.com"]
            MaxConnections = 100
            MaxPendingRequests = 200
            MaxConcurrentRequests = 300
          }
        ]
      }
@@ -26,10 +26,44 @@ load helpers
  assert_upstream_has_endpoints_in_status 127.0.0.1:20000 s1 HEALTHY 1
}

@test "s1 proxy should have been configured with max_connections in services" {
  CLUSTER_THRESHOLD=$(get_envoy_cluster_config 127.0.0.1:20000 s1.default.primary | jq '.circuit_breakers.thresholds[0]')
  echo $CLUSTER_THRESHOLD

  MAX_CONNS=$(echo $CLUSTER_THRESHOLD | jq --raw-output '.max_connections')
  MAX_PENDING_REQS=$(echo $CLUSTER_THRESHOLD | jq --raw-output '.max_pending_requests')
  MAX_REQS=$(echo $CLUSTER_THRESHOLD | jq --raw-output '.max_requests')

  echo "MAX_CONNS = $MAX_CONNS"
  echo "MAX_PENDING_REQS = $MAX_PENDING_REQS"
  echo "MAX_REQS = $MAX_REQS"

  [ "$MAX_CONNS" = "100" ]
  [ "$MAX_PENDING_REQS" = "200" ]
  [ "$MAX_REQS" = "300" ]
}

@test "ingress-gateway should have healthy endpoints for s2" {
  assert_upstream_has_endpoints_in_status 127.0.0.1:20000 s2 HEALTHY 1
}

@test "s2 proxy should have been configured with max_connections using defaults" {
  CLUSTER_THRESHOLD=$(get_envoy_cluster_config 127.0.0.1:20000 s2.default.primary | jq '.circuit_breakers.thresholds[0]')
  echo $CLUSTER_THRESHOLD

  MAX_CONNS=$(echo $CLUSTER_THRESHOLD | jq --raw-output '.max_connections')
  MAX_PENDING_REQS=$(echo $CLUSTER_THRESHOLD | jq --raw-output '.max_pending_requests')
  MAX_REQS=$(echo $CLUSTER_THRESHOLD | jq --raw-output '.max_requests')

  echo "MAX_CONNS = $MAX_CONNS"
  echo "MAX_PENDING_REQS = $MAX_PENDING_REQS"
  echo "MAX_REQS = $MAX_REQS"

  [ "$MAX_CONNS" = "10" ]
  [ "$MAX_PENDING_REQS" = "20" ]
  [ "$MAX_REQS" = "30" ]
}

@test "ingress should be able to connect to s1 using Host header" {
  assert_expected_fortio_name s1 s1.ingress.consul 9999
}
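Outside the bats harness, the same thresholds can be spot-checked directly against Envoy's admin API. A sketch, assuming the gateway's admin endpoint is reachable at `127.0.0.1:20000` as in this suite's helpers:

```bash
# Dump the active Envoy config and list every cluster that carries
# circuit-breaker thresholds (sketch; not part of the change).
curl -s http://127.0.0.1:20000/config_dump \
  | jq '[.. | objects | select(has("circuit_breakers")) | {name, circuit_breakers}]'
```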
@@ -328,6 +328,7 @@ In the following example, two listeners are configured on an ingress gateway nam
- The first listener is configured to listen on port `8080` and uses a wildcard (`*`) to proxy traffic to all services in the datacenter.
- The second listener exposes the `api` and `web` services on port `4567` at user-provided hosts.
- TLS is enabled on every listener.
- The `max_connections` of the ingress gateway proxy to each upstream cluster is set to 4096.

The Consul Enterprise version implements the following additional configurations:

@@ -346,6 +347,10 @@ TLS {
  Enabled = true
}

Defaults {
  MaxConnections = 4096
}

Listeners = [
  {
    Port = 8080
@@ -1041,6 +1046,38 @@ You can specify the following parameters to configure ingress gateway configurat
        },
      ],
    },
    {
      name: 'Defaults',
      type: 'IngressServiceConfig: <optional>',
      description: `Default configuration that applies to all upstreams.`,
      children: [
        {
          name: 'MaxConnections',
          type: 'int: 0',
          description: `The maximum number of connections a service instance
          will be allowed to establish against the given upstream. Use this to limit
          HTTP/1.1 traffic, since HTTP/1.1 has a request per connection.
          If not specified, the proxy's default value is used (for example, 1024 for Envoy).`,
        },
        {
          name: 'MaxPendingRequests',
          type: 'int: 0',
          description: `The maximum number of requests that will be queued
          while waiting for a connection to be established. For this configuration to
          be respected, an L7 protocol must be defined in the \`protocol\` field.
          If not specified, the proxy's default value is used (for example, 1024 for Envoy).`,
        },
        {
          name: 'MaxConcurrentRequests',
          type: 'int: 0',
          description: `The maximum number of concurrent requests that
          will be allowed at a single point in time. Use this to limit HTTP/2 traffic,
          since HTTP/2 has many requests per connection. For this configuration to be
          respected, an L7 protocol must be defined in the \`protocol\` field.
          If not specified, the proxy's default value is used (for example, 1024 for Envoy).`,
        },
      ],
    },
    {
      name: 'Listeners',
      type: 'array<IngressListener>: <optional>',

@@ -1156,6 +1193,21 @@ You can specify the following parameters to configure ingress gateway configurat
            },
          ],
        },
        {
          name: 'MaxConnections',
          type: 'int: 0',
          description: 'Overrides the corresponding limit in the [`Defaults` field](#available-fields).',
        },
        {
          name: 'MaxPendingRequests',
          type: 'int: 0',
          description: 'Overrides the corresponding limit in the [`Defaults` field](#available-fields).',
        },
        {
          name: 'MaxConcurrentRequests',
          type: 'int: 0',
          description: 'Overrides the corresponding limit in the [`Defaults` field](#available-fields).',
        },
      ],
    },
    {
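To make the service-level override concrete, the same fields in the JSON form of the config entry might look like the following sketch (values and service name are illustrative; key names follow the struct tags earlier in this diff, and the per-service `MaxConnections` takes precedence over `Defaults` for that upstream only):

```json
{
  "Kind": "ingress-gateway",
  "Name": "ingress-gateway",
  "Defaults": {
    "MaxConnections": 4096,
    "MaxPendingRequests": 512,
    "MaxConcurrentRequests": 2048
  },
  "Listeners": [
    {
      "Port": 4567,
      "Protocol": "http",
      "Services": [
        {
          "Name": "api",
          "MaxConnections": 8192
        }
      ]
    }
  ]
}
```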