Plan rejections occur when a scheduler worker and the leader's plan applier disagree on the feasibility of a plan. This can happen for valid reasons: since Nomad schedules in parallel, different workers are expected to hold slightly different state when computing placements. By the time a plan reaches the leader's plan applier, it may no longer be valid because concurrent scheduling has taken up the intended resources. In these situations the plan applier notifies the worker that the plan was rejected and that it should refresh its state before trying again.

In some rare and unexpected circumstances, workers have been observed to repeatedly submit the same plan even though it is always rejected. While the root cause is still unknown, a mitigation has been put in place: the plan applier now tracks the history of plan rejections per client node and includes in the plan result a list of node IDs that should be marked ineligible if the number of rejections within a given time window crosses a threshold. The window size and threshold value can be adjusted in the server configuration. To avoid marking several nodes as ineligible at once, the operation is rate limited to 5 nodes every 30 minutes, with an initial burst of 10 operations.
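To make the mechanism above concrete, here is a minimal Go sketch of how such a tracker could behave; it is not Nomad's actual implementation, and the badNodeTracker and rejectionHistory names are hypothetical. It counts rejections per node within a rolling window (corresponding to node_threshold and node_window in the server configuration below) and, once the threshold is crossed, only marks the node ineligible if a rate limiter (5 nodes every 30 minutes, burst of 10) allows it.

// A minimal sketch (not Nomad's actual implementation) of tracking plan
// rejections per node and deciding when a node should be marked ineligible.
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

type rejectionHistory struct {
	count       int
	windowStart time.Time
}

type badNodeTracker struct {
	threshold int           // e.g. plan_rejection_tracker.node_threshold
	window    time.Duration // e.g. plan_rejection_tracker.node_window
	limiter   *rate.Limiter // caps how many nodes may be marked ineligible
	history   map[string]*rejectionHistory
}

func newBadNodeTracker(threshold int, window time.Duration) *badNodeTracker {
	return &badNodeTracker{
		threshold: threshold,
		window:    window,
		// Rate limit: 5 nodes every 30 minutes, with an initial burst of 10.
		limiter: rate.NewLimiter(rate.Every(30*time.Minute/5), 10),
		history: make(map[string]*rejectionHistory),
	}
}

// Add records a plan rejection for nodeID and reports whether the node
// should now be marked ineligible for scheduling.
func (t *badNodeTracker) Add(nodeID string) bool {
	now := time.Now()
	h, ok := t.history[nodeID]
	if !ok || now.Sub(h.windowStart) > t.window {
		// Start a fresh window for this node.
		h = &rejectionHistory{windowStart: now}
		t.history[nodeID] = h
	}
	h.count++

	if h.count < t.threshold {
		return false
	}
	// Threshold crossed: only mark the node ineligible if the rate
	// limiter allows it, to avoid mass ineligibility events.
	return t.limiter.Allow()
}

func main() {
	tracker := newBadNodeTracker(100, 31*time.Minute)
	for i := 0; i < 100; i++ {
		if tracker.Add("node-1234") {
			fmt.Println("node-1234 should be marked ineligible")
		}
	}
}

The configuration file below shows where the hypothetical threshold and window values would come from: the plan_rejection_tracker block inside the server stanza.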
{
  "autopilot": {
    "cleanup_dead_servers": true
  },
  "acl": {
    "enabled": true
  },
  "audit": {
    "enabled": true,
    "sink": [
      {
        "file": {
          "type": "file",
          "format": "json",
          "delivery_guarantee": "enforced",
          "path": "/opt/nomad/audit.log",
          "rotate_bytes": 100,
          "rotate_duration": "24h",
          "rotate_max_files": 10
        }
      }
    ],
    "filter": [
      {
        "default": [
          {
            "endpoints": ["/v1/metrics"],
            "operations": ["*"],
            "stages": ["*"],
            "type": "HTTPEvent"
          }
        ]
      }
    ]
  },
  "advertise": {
    "http": "host.example.com",
    "rpc": "host.example.com",
    "serf": "host.example.com"
  },
  "bind_addr": "0.0.0.0",
  "consul": {
    "server_auto_join": false,
    "client_auto_join": false,
    "token": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
  },
  "data_dir": "/opt/data/nomad/data",
  "datacenter": "dc1",
  "enable_syslog": true,
  "leave_on_interrupt": true,
  "leave_on_terminate": true,
  "log_level": "INFO",
  "region": "global",
  "server": {
    "bootstrap_expect": 3,
    "enabled": true,
    "encrypt": "sHck3WL6cxuhuY7Mso9BHA==",
    "plan_rejection_tracker": {
      "node_threshold": 100,
      "node_window": "31m"
    },
    "retry_join": [
      "10.0.0.101",
      "10.0.0.102",
      "10.0.0.103"
    ]
  },
  "syslog_facility": "LOCAL0",
  "telemetry": {
    "collection_interval": "60s",
    "disable_hostname": true,
    "prometheus_metrics": true,
    "publish_allocation_metrics": true,
    "publish_node_metrics": true
  },
  "tls": {
    "ca_file": "/opt/data/nomad/certs/nomad-ca.pem",
    "cert_file": "/opt/data/nomad/certs/server.pem",
    "http": true,
    "key_file": "/opt/data/nomad/certs/server-key.pem",
    "rpc": true,
    "verify_server_hostname": true
  },
  "vault": {
    "address": "http://host.example.com:8200",
    "create_from_role": "nomad-cluster",
    "enabled": true
  }
}