2015-03-02 18:48:53 +00:00
|
|
|
package physical
|
|
|
|
|
2015-04-03 23:44:32 +00:00
|
|
|
import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/go-cleanhttp"
	"github.com/hashicorp/vault/helper/strutil"
	"github.com/hashicorp/vault/helper/tlsutil"
	log "github.com/mgutz/logxi/v1"
)
|
|
|
|
|
Teach Vault how to register with Consul
Vault will now register itself with Consul. The active node can be found using `active.vault.service.consul`. All standby vaults are available via `standby.vault.service.consul`. All unsealed vaults are considered healthy and available via `vault.service.consul`. Change in status and registration is event driven and should happen at the speed of a write to Consul (~network RTT + ~1x fsync(2)).
Healthy/active:
```
curl -X GET 'http://127.0.0.1:8500/v1/health/service/vault?pretty' && echo;
[
{
"Node": {
"Node": "vm1",
"Address": "127.0.0.1",
"TaggedAddresses": {
"wan": "127.0.0.1"
},
"CreateIndex": 3,
"ModifyIndex": 20
},
"Service": {
"ID": "vault:127.0.0.1:8200",
"Service": "vault",
"Tags": [
"active"
],
"Address": "127.0.0.1",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm1",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm1",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.1:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Healthy/standby:
```
[snip]
"Service": {
"ID": "vault:127.0.0.2:8200",
"Service": "vault",
"Tags": [
"standby"
],
"Address": "127.0.0.2",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Sealed:
```
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "critical",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "Vault Sealed",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 38
}
]
```
2016-04-24 00:15:05 +00:00
|
|
|
const (
|
|
|
|
// checkJitterFactor specifies the jitter factor used to stagger checks
|
|
|
|
checkJitterFactor = 16
|
|
|
|
|
|
|
|
// checkMinBuffer specifies provides a guarantee that a check will not
|
|
|
|
// be executed too close to the TTL check timeout
|
|
|
|
checkMinBuffer = 100 * time.Millisecond
|
|
|
|
|
2016-04-28 18:04:49 +00:00
|
|
|
// consulRetryInterval specifies the retry duration to use when an
|
|
|
|
// API call to the Consul agent fails.
|
|
|
|
consulRetryInterval = 1 * time.Second
|
|
|
|
|
Teach Vault how to register with Consul
Vault will now register itself with Consul. The active node can be found using `active.vault.service.consul`. All standby vaults are available via `standby.vault.service.consul`. All unsealed vaults are considered healthy and available via `vault.service.consul`. Change in status and registration is event driven and should happen at the speed of a write to Consul (~network RTT + ~1x fsync(2)).
Healthy/active:
```
curl -X GET 'http://127.0.0.1:8500/v1/health/service/vault?pretty' && echo;
[
{
"Node": {
"Node": "vm1",
"Address": "127.0.0.1",
"TaggedAddresses": {
"wan": "127.0.0.1"
},
"CreateIndex": 3,
"ModifyIndex": 20
},
"Service": {
"ID": "vault:127.0.0.1:8200",
"Service": "vault",
"Tags": [
"active"
],
"Address": "127.0.0.1",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm1",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm1",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.1:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Healthy/standby:
```
[snip]
"Service": {
"ID": "vault:127.0.0.2:8200",
"Service": "vault",
"Tags": [
"standby"
],
"Address": "127.0.0.2",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Sealed:
```
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "critical",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "Vault Sealed",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 38
}
]
```
2016-04-24 00:15:05 +00:00
|
|
|
// defaultCheckTimeout changes the timeout of TTL checks
|
|
|
|
defaultCheckTimeout = 5 * time.Second
|
|
|
|
|
2016-07-21 23:04:43 +00:00
|
|
|
// DefaultServiceName is the default Consul service name used when
|
Teach Vault how to register with Consul
Vault will now register itself with Consul. The active node can be found using `active.vault.service.consul`. All standby vaults are available via `standby.vault.service.consul`. All unsealed vaults are considered healthy and available via `vault.service.consul`. Change in status and registration is event driven and should happen at the speed of a write to Consul (~network RTT + ~1x fsync(2)).
Healthy/active:
```
curl -X GET 'http://127.0.0.1:8500/v1/health/service/vault?pretty' && echo;
[
{
"Node": {
"Node": "vm1",
"Address": "127.0.0.1",
"TaggedAddresses": {
"wan": "127.0.0.1"
},
"CreateIndex": 3,
"ModifyIndex": 20
},
"Service": {
"ID": "vault:127.0.0.1:8200",
"Service": "vault",
"Tags": [
"active"
],
"Address": "127.0.0.1",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm1",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm1",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.1:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Healthy/standby:
```
[snip]
"Service": {
"ID": "vault:127.0.0.2:8200",
"Service": "vault",
"Tags": [
"standby"
],
"Address": "127.0.0.2",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Sealed:
```
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "critical",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "Vault Sealed",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 38
}
]
```
2016-04-24 00:15:05 +00:00
|
|
|
// advertising a Vault instance.
|
2016-07-21 23:04:43 +00:00
|
|
|
DefaultServiceName = "vault"
|
2016-04-24 14:04:51 +00:00
|
|
|
|
2016-04-28 18:04:49 +00:00
|
|
|
// reconcileTimeout is how often Vault should query Consul to detect
|
|
|
|
// and fix any state drift.
|
|
|
|
reconcileTimeout = 60 * time.Second
|
2017-01-19 22:36:33 +00:00
|
|
|
|
|
|
|
// consistencyModeDefault is the configuration value used to tell
|
|
|
|
// consul to use default consistency.
|
|
|
|
consistencyModeDefault = "default"
|
|
|
|
|
|
|
|
// consistencyModeStrong is the configuration value used to tell
|
|
|
|
// consul to use strong consistency.
|
|
|
|
consistencyModeStrong = "strong"
|
Teach Vault how to register with Consul
Vault will now register itself with Consul. The active node can be found using `active.vault.service.consul`. All standby vaults are available via `standby.vault.service.consul`. All unsealed vaults are considered healthy and available via `vault.service.consul`. Change in status and registration is event driven and should happen at the speed of a write to Consul (~network RTT + ~1x fsync(2)).
Healthy/active:
```
curl -X GET 'http://127.0.0.1:8500/v1/health/service/vault?pretty' && echo;
[
{
"Node": {
"Node": "vm1",
"Address": "127.0.0.1",
"TaggedAddresses": {
"wan": "127.0.0.1"
},
"CreateIndex": 3,
"ModifyIndex": 20
},
"Service": {
"ID": "vault:127.0.0.1:8200",
"Service": "vault",
"Tags": [
"active"
],
"Address": "127.0.0.1",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm1",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm1",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.1:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Healthy/standby:
```
[snip]
"Service": {
"ID": "vault:127.0.0.2:8200",
"Service": "vault",
"Tags": [
"standby"
],
"Address": "127.0.0.2",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Sealed:
```
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "critical",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "Vault Sealed",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 38
}
]
```
2016-04-24 00:15:05 +00:00
|
|
|
)
|
|
|
|
|
2016-04-28 18:04:49 +00:00
|
|
|
// notifyEvent is a zero-size sentinel value sent on notification
// channels (presumably the active/sealed channels of ConsulBackend —
// TODO confirm against the channel senders, which are outside this view).
type notifyEvent struct{}
|
|
|
|
|
2015-03-02 18:48:53 +00:00
|
|
|
// ConsulBackend is a physical backend that stores data at specific
|
|
|
|
// prefix within Consul. It is used for most production situations as
|
|
|
|
// it allows Vault to run on multiple machines in a highly-available manner.
|
|
|
|
type ConsulBackend struct {
|
2016-04-24 02:53:21 +00:00
|
|
|
path string
|
2016-08-19 20:45:17 +00:00
|
|
|
logger log.Logger
|
2016-04-24 02:53:21 +00:00
|
|
|
client *api.Client
|
|
|
|
kv *api.KV
|
|
|
|
permitPool *PermitPool
|
|
|
|
serviceLock sync.RWMutex
|
2016-08-15 13:42:42 +00:00
|
|
|
redirectHost string
|
|
|
|
redirectPort int64
|
2016-04-24 02:53:21 +00:00
|
|
|
serviceName string
|
2016-07-22 08:37:46 +00:00
|
|
|
serviceTags []string
|
2016-04-24 02:53:21 +00:00
|
|
|
disableRegistration bool
|
|
|
|
checkTimeout time.Duration
|
2017-01-19 22:36:33 +00:00
|
|
|
consistencyMode string
|
2016-04-28 18:04:49 +00:00
|
|
|
|
|
|
|
notifyActiveCh chan notifyEvent
|
|
|
|
notifySealedCh chan notifyEvent
|
2015-03-02 18:48:53 +00:00
|
|
|
}
|
|
|
|
|
2015-03-05 21:47:10 +00:00
|
|
|
// newConsulBackend constructs a Consul backend using the given API client
|
2015-03-02 18:48:53 +00:00
|
|
|
// and the prefix in the KV store.
|
2016-08-19 20:45:17 +00:00
|
|
|
func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error) {
|
2015-04-03 23:44:32 +00:00
|
|
|
// Get the path in Consul
|
|
|
|
path, ok := conf["path"]
|
2015-04-04 00:05:18 +00:00
|
|
|
if !ok {
|
|
|
|
path = "vault/"
|
2015-04-03 23:44:32 +00:00
|
|
|
}
|
2016-08-19 20:45:17 +00:00
|
|
|
if logger.IsDebug() {
|
|
|
|
logger.Debug("physical/consul: config path set", "path", path)
|
|
|
|
}
|
2015-04-04 00:05:18 +00:00
|
|
|
|
|
|
|
// Ensure path is suffixed but not prefixed
|
2015-04-03 23:44:32 +00:00
|
|
|
if !strings.HasSuffix(path, "/") {
|
2016-08-19 20:45:17 +00:00
|
|
|
logger.Warn("physical/consul: appending trailing forward slash to path")
|
2015-04-03 23:44:32 +00:00
|
|
|
path += "/"
|
|
|
|
}
|
2015-04-04 00:05:18 +00:00
|
|
|
if strings.HasPrefix(path, "/") {
|
2016-08-19 20:45:17 +00:00
|
|
|
logger.Warn("physical/consul: trimming path of its forward slash")
|
2015-04-04 00:05:18 +00:00
|
|
|
path = strings.TrimPrefix(path, "/")
|
|
|
|
}
|
2015-04-03 23:44:32 +00:00
|
|
|
|
2016-04-24 02:53:21 +00:00
|
|
|
// Allow admins to disable consul integration
|
|
|
|
disableReg, ok := conf["disable_registration"]
|
|
|
|
var disableRegistration bool
|
|
|
|
if ok && disableReg != "" {
|
|
|
|
b, err := strconv.ParseBool(disableReg)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errwrap.Wrapf("failed parsing disable_registration parameter: {{err}}", err)
|
|
|
|
}
|
|
|
|
disableRegistration = b
|
|
|
|
}
|
2016-08-19 20:45:17 +00:00
|
|
|
if logger.IsDebug() {
|
|
|
|
logger.Debug("physical/consul: config disable_registration set", "disable_registration", disableRegistration)
|
|
|
|
}
|
2016-04-24 02:53:21 +00:00
|
|
|
|
Teach Vault how to register with Consul
Vault will now register itself with Consul. The active node can be found using `active.vault.service.consul`. All standby vaults are available via `standby.vault.service.consul`. All unsealed vaults are considered healthy and available via `vault.service.consul`. Change in status and registration is event driven and should happen at the speed of a write to Consul (~network RTT + ~1x fsync(2)).
Healthy/active:
```
curl -X GET 'http://127.0.0.1:8500/v1/health/service/vault?pretty' && echo;
[
{
"Node": {
"Node": "vm1",
"Address": "127.0.0.1",
"TaggedAddresses": {
"wan": "127.0.0.1"
},
"CreateIndex": 3,
"ModifyIndex": 20
},
"Service": {
"ID": "vault:127.0.0.1:8200",
"Service": "vault",
"Tags": [
"active"
],
"Address": "127.0.0.1",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm1",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm1",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.1:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Healthy/standby:
```
[snip]
"Service": {
"ID": "vault:127.0.0.2:8200",
"Service": "vault",
"Tags": [
"standby"
],
"Address": "127.0.0.2",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Sealed:
```
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "critical",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "Vault Sealed",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 38
}
]
```
2016-04-24 00:15:05 +00:00
|
|
|
// Get the service name to advertise in Consul
|
|
|
|
service, ok := conf["service"]
|
|
|
|
if !ok {
|
2016-07-21 23:04:43 +00:00
|
|
|
service = DefaultServiceName
|
Teach Vault how to register with Consul
Vault will now register itself with Consul. The active node can be found using `active.vault.service.consul`. All standby vaults are available via `standby.vault.service.consul`. All unsealed vaults are considered healthy and available via `vault.service.consul`. Change in status and registration is event driven and should happen at the speed of a write to Consul (~network RTT + ~1x fsync(2)).
Healthy/active:
```
curl -X GET 'http://127.0.0.1:8500/v1/health/service/vault?pretty' && echo;
[
{
"Node": {
"Node": "vm1",
"Address": "127.0.0.1",
"TaggedAddresses": {
"wan": "127.0.0.1"
},
"CreateIndex": 3,
"ModifyIndex": 20
},
"Service": {
"ID": "vault:127.0.0.1:8200",
"Service": "vault",
"Tags": [
"active"
],
"Address": "127.0.0.1",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm1",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm1",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.1:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Healthy/standby:
```
[snip]
"Service": {
"ID": "vault:127.0.0.2:8200",
"Service": "vault",
"Tags": [
"standby"
],
"Address": "127.0.0.2",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Sealed:
```
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "critical",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "Vault Sealed",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 38
}
]
```
2016-04-24 00:15:05 +00:00
|
|
|
}
|
2016-08-19 20:45:17 +00:00
|
|
|
if logger.IsDebug() {
|
|
|
|
logger.Debug("physical/consul: config service set", "service", service)
|
|
|
|
}
|
Teach Vault how to register with Consul
Vault will now register itself with Consul. The active node can be found using `active.vault.service.consul`. All standby vaults are available via `standby.vault.service.consul`. All unsealed vaults are considered healthy and available via `vault.service.consul`. Change in status and registration is event driven and should happen at the speed of a write to Consul (~network RTT + ~1x fsync(2)).
Healthy/active:
```
curl -X GET 'http://127.0.0.1:8500/v1/health/service/vault?pretty' && echo;
[
{
"Node": {
"Node": "vm1",
"Address": "127.0.0.1",
"TaggedAddresses": {
"wan": "127.0.0.1"
},
"CreateIndex": 3,
"ModifyIndex": 20
},
"Service": {
"ID": "vault:127.0.0.1:8200",
"Service": "vault",
"Tags": [
"active"
],
"Address": "127.0.0.1",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm1",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm1",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.1:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Healthy/standby:
```
[snip]
"Service": {
"ID": "vault:127.0.0.2:8200",
"Service": "vault",
"Tags": [
"standby"
],
"Address": "127.0.0.2",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Sealed:
```
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "critical",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "Vault Sealed",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 38
}
]
```
2016-04-24 00:15:05 +00:00
|
|
|
|
2016-07-22 08:37:46 +00:00
|
|
|
// Get the additional tags to attach to the registered service name
|
2016-08-15 13:42:42 +00:00
|
|
|
tags := conf["service_tags"]
|
2016-07-22 08:37:46 +00:00
|
|
|
|
2016-08-19 20:45:17 +00:00
|
|
|
if logger.IsDebug() {
|
|
|
|
logger.Debug("physical/consul: config service_tags set", "service_tags", tags)
|
|
|
|
}
|
2016-07-22 08:37:46 +00:00
|
|
|
|
Teach Vault how to register with Consul
Vault will now register itself with Consul. The active node can be found using `active.vault.service.consul`. All standby vaults are available via `standby.vault.service.consul`. All unsealed vaults are considered healthy and available via `vault.service.consul`. Change in status and registration is event driven and should happen at the speed of a write to Consul (~network RTT + ~1x fsync(2)).
Healthy/active:
```
curl -X GET 'http://127.0.0.1:8500/v1/health/service/vault?pretty' && echo;
[
{
"Node": {
"Node": "vm1",
"Address": "127.0.0.1",
"TaggedAddresses": {
"wan": "127.0.0.1"
},
"CreateIndex": 3,
"ModifyIndex": 20
},
"Service": {
"ID": "vault:127.0.0.1:8200",
"Service": "vault",
"Tags": [
"active"
],
"Address": "127.0.0.1",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm1",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm1",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.1:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Healthy/standby:
```
[snip]
"Service": {
"ID": "vault:127.0.0.2:8200",
"Service": "vault",
"Tags": [
"standby"
],
"Address": "127.0.0.2",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Sealed:
```
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "critical",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "Vault Sealed",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 38
}
]
```
2016-04-24 00:15:05 +00:00
|
|
|
checkTimeout := defaultCheckTimeout
|
|
|
|
checkTimeoutStr, ok := conf["check_timeout"]
|
|
|
|
if ok {
|
|
|
|
d, err := time.ParseDuration(checkTimeoutStr)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
min, _ := lib.DurationMinusBufferDomain(d, checkMinBuffer, checkJitterFactor)
|
2016-04-24 01:05:56 +00:00
|
|
|
if min < checkMinBuffer {
|
Teach Vault how to register with Consul
Vault will now register itself with Consul. The active node can be found using `active.vault.service.consul`. All standby vaults are available via `standby.vault.service.consul`. All unsealed vaults are considered healthy and available via `vault.service.consul`. Change in status and registration is event driven and should happen at the speed of a write to Consul (~network RTT + ~1x fsync(2)).
Healthy/active:
```
curl -X GET 'http://127.0.0.1:8500/v1/health/service/vault?pretty' && echo;
[
{
"Node": {
"Node": "vm1",
"Address": "127.0.0.1",
"TaggedAddresses": {
"wan": "127.0.0.1"
},
"CreateIndex": 3,
"ModifyIndex": 20
},
"Service": {
"ID": "vault:127.0.0.1:8200",
"Service": "vault",
"Tags": [
"active"
],
"Address": "127.0.0.1",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm1",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm1",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.1:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Healthy/standby:
```
[snip]
"Service": {
"ID": "vault:127.0.0.2:8200",
"Service": "vault",
"Tags": [
"standby"
],
"Address": "127.0.0.2",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Sealed:
```
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "critical",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "Vault Sealed",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 38
}
]
```
2016-04-24 00:15:05 +00:00
|
|
|
return nil, fmt.Errorf("Consul check_timeout must be greater than %v", min)
|
|
|
|
}
|
|
|
|
|
|
|
|
checkTimeout = d
|
2016-08-19 20:45:17 +00:00
|
|
|
if logger.IsDebug() {
|
|
|
|
logger.Debug("physical/consul: config check_timeout set", "check_timeout", d)
|
|
|
|
}
|
Teach Vault how to register with Consul
Vault will now register itself with Consul. The active node can be found using `active.vault.service.consul`. All standby vaults are available via `standby.vault.service.consul`. All unsealed vaults are considered healthy and available via `vault.service.consul`. Change in status and registration is event driven and should happen at the speed of a write to Consul (~network RTT + ~1x fsync(2)).
Healthy/active:
```
curl -X GET 'http://127.0.0.1:8500/v1/health/service/vault?pretty' && echo;
[
{
"Node": {
"Node": "vm1",
"Address": "127.0.0.1",
"TaggedAddresses": {
"wan": "127.0.0.1"
},
"CreateIndex": 3,
"ModifyIndex": 20
},
"Service": {
"ID": "vault:127.0.0.1:8200",
"Service": "vault",
"Tags": [
"active"
],
"Address": "127.0.0.1",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm1",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm1",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.1:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Healthy/standby:
```
[snip]
"Service": {
"ID": "vault:127.0.0.2:8200",
"Service": "vault",
"Tags": [
"standby"
],
"Address": "127.0.0.2",
"Port": 8200,
"EnableTagOverride": false,
"CreateIndex": 17,
"ModifyIndex": 20
},
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "passing",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 19
}
]
}
]
```
Sealed:
```
"Checks": [
{
"Node": "vm2",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"CreateIndex": 3,
"ModifyIndex": 3
},
{
"Node": "vm2",
"CheckID": "vault-sealed-check",
"Name": "Vault Sealed Status",
"Status": "critical",
"Notes": "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
"Output": "Vault Sealed",
"ServiceID": "vault:127.0.0.2:8200",
"ServiceName": "vault",
"CreateIndex": 19,
"ModifyIndex": 38
}
]
```
2016-04-24 00:15:05 +00:00
|
|
|
}
|
|
|
|
|
2015-04-03 23:44:32 +00:00
|
|
|
// Configure the client
|
|
|
|
consulConf := api.DefaultConfig()
|
2015-10-15 20:09:45 +00:00
|
|
|
|
2015-04-03 23:44:32 +00:00
|
|
|
if addr, ok := conf["address"]; ok {
|
|
|
|
consulConf.Address = addr
|
2016-08-19 20:45:17 +00:00
|
|
|
if logger.IsDebug() {
|
|
|
|
logger.Debug("physical/consul: config address set", "address", addr)
|
|
|
|
}
|
2015-04-03 23:44:32 +00:00
|
|
|
}
|
|
|
|
if scheme, ok := conf["scheme"]; ok {
|
|
|
|
consulConf.Scheme = scheme
|
2016-08-19 20:45:17 +00:00
|
|
|
if logger.IsDebug() {
|
|
|
|
logger.Debug("physical/consul: config scheme set", "scheme", scheme)
|
|
|
|
}
|
2015-04-03 23:44:32 +00:00
|
|
|
}
|
|
|
|
if token, ok := conf["token"]; ok {
|
|
|
|
consulConf.Token = token
|
2016-08-19 20:45:17 +00:00
|
|
|
logger.Debug("physical/consul: config token set")
|
2015-04-03 23:44:32 +00:00
|
|
|
}
|
2015-07-28 09:00:42 +00:00
|
|
|
|
|
|
|
if consulConf.Scheme == "https" {
|
|
|
|
tlsClientConfig, err := setupTLSConfig(conf)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-02-17 21:53:30 +00:00
|
|
|
transport := cleanhttp.DefaultPooledTransport()
|
|
|
|
transport.MaxIdleConnsPerHost = 4
|
2015-12-17 20:23:13 +00:00
|
|
|
transport.TLSClientConfig = tlsClientConfig
|
|
|
|
consulConf.HttpClient.Transport = transport
|
2016-08-19 20:45:17 +00:00
|
|
|
logger.Debug("physical/consul: configured TLS")
|
2015-07-28 09:00:42 +00:00
|
|
|
}
|
|
|
|
|
2015-04-03 23:44:32 +00:00
|
|
|
client, err := api.NewClient(consulConf)
|
|
|
|
if err != nil {
|
2015-11-03 20:26:07 +00:00
|
|
|
return nil, errwrap.Wrapf("client setup failed: {{err}}", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
maxParStr, ok := conf["max_parallel"]
|
|
|
|
var maxParInt int
|
|
|
|
if ok {
|
|
|
|
maxParInt, err = strconv.Atoi(maxParStr)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
|
|
|
|
}
|
2016-08-19 20:45:17 +00:00
|
|
|
if logger.IsDebug() {
|
|
|
|
logger.Debug("physical/consul: max_parallel set", "max_parallel", maxParInt)
|
|
|
|
}
|
2015-04-03 23:44:32 +00:00
|
|
|
}
|
|
|
|
|
2017-01-19 22:36:33 +00:00
|
|
|
consistencyMode, ok := conf["consistency_mode"]
|
2017-01-13 17:49:04 +00:00
|
|
|
if ok {
|
2017-01-19 22:36:33 +00:00
|
|
|
switch consistencyMode {
|
|
|
|
case consistencyModeDefault, consistencyModeStrong:
|
|
|
|
default:
|
|
|
|
return nil, fmt.Errorf("invalid consistency_mode value: %s", consistencyMode)
|
2017-01-13 17:49:04 +00:00
|
|
|
}
|
2017-01-19 22:36:33 +00:00
|
|
|
} else {
|
|
|
|
consistencyMode = consistencyModeDefault
|
2017-01-13 17:49:04 +00:00
|
|
|
}
|
|
|
|
|
2015-04-03 23:44:32 +00:00
|
|
|
// Setup the backend
|
|
|
|
c := &ConsulBackend{
|
2016-04-24 02:53:21 +00:00
|
|
|
path: path,
|
2016-04-26 03:10:32 +00:00
|
|
|
logger: logger,
|
2016-04-24 02:53:21 +00:00
|
|
|
client: client,
|
|
|
|
kv: client.KV(),
|
|
|
|
permitPool: NewPermitPool(maxParInt),
|
|
|
|
serviceName: service,
|
2016-08-03 18:18:22 +00:00
|
|
|
serviceTags: strutil.ParseDedupAndSortStrings(tags, ","),
|
2016-04-24 02:53:21 +00:00
|
|
|
checkTimeout: checkTimeout,
|
|
|
|
disableRegistration: disableRegistration,
|
2017-01-19 22:36:33 +00:00
|
|
|
consistencyMode: consistencyMode,
|
2015-04-03 23:44:32 +00:00
|
|
|
}
|
|
|
|
return c, nil
|
|
|
|
}
|
|
|
|
|
2015-07-28 09:00:42 +00:00
|
|
|
func setupTLSConfig(conf map[string]string) (*tls.Config, error) {
|
|
|
|
serverName := strings.Split(conf["address"], ":")
|
|
|
|
|
|
|
|
insecureSkipVerify := false
|
|
|
|
if _, ok := conf["tls_skip_verify"]; ok {
|
2015-07-28 12:06:56 +00:00
|
|
|
insecureSkipVerify = true
|
2015-07-28 09:00:42 +00:00
|
|
|
}
|
|
|
|
|
2016-07-12 23:56:35 +00:00
|
|
|
tlsMinVersionStr, ok := conf["tls_min_version"]
|
|
|
|
if !ok {
|
|
|
|
// Set the default value
|
|
|
|
tlsMinVersionStr = "tls12"
|
|
|
|
}
|
|
|
|
|
|
|
|
tlsMinVersion, ok := tlsutil.TLSLookup[tlsMinVersionStr]
|
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("invalid 'tls_min_version'")
|
|
|
|
}
|
|
|
|
|
2015-07-28 09:00:42 +00:00
|
|
|
tlsClientConfig := &tls.Config{
|
2016-07-12 23:56:35 +00:00
|
|
|
MinVersion: tlsMinVersion,
|
2015-07-28 09:00:42 +00:00
|
|
|
InsecureSkipVerify: insecureSkipVerify,
|
|
|
|
ServerName: serverName[0],
|
|
|
|
}
|
|
|
|
|
|
|
|
_, okCert := conf["tls_cert_file"]
|
2015-10-15 20:09:45 +00:00
|
|
|
_, okKey := conf["tls_key_file"]
|
2015-07-28 09:00:42 +00:00
|
|
|
|
2015-07-28 12:06:56 +00:00
|
|
|
if okCert && okKey {
|
|
|
|
tlsCert, err := tls.LoadX509KeyPair(conf["tls_cert_file"], conf["tls_key_file"])
|
2015-07-28 09:00:42 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("client tls setup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
|
2015-07-28 12:06:56 +00:00
|
|
|
}
|
2015-07-28 09:00:42 +00:00
|
|
|
|
2015-07-28 12:06:56 +00:00
|
|
|
if tlsCaFile, ok := conf["tls_ca_file"]; ok {
|
2015-07-28 12:31:30 +00:00
|
|
|
caPool := x509.NewCertPool()
|
|
|
|
|
2015-07-28 12:06:56 +00:00
|
|
|
data, err := ioutil.ReadFile(tlsCaFile)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("failed to read CA file: %v", err)
|
|
|
|
}
|
2015-07-28 09:00:42 +00:00
|
|
|
|
2015-07-28 12:06:56 +00:00
|
|
|
if !caPool.AppendCertsFromPEM(data) {
|
2015-07-28 12:31:30 +00:00
|
|
|
return nil, fmt.Errorf("failed to parse CA certificate")
|
2015-07-28 12:06:56 +00:00
|
|
|
}
|
2015-07-28 09:00:42 +00:00
|
|
|
|
2015-07-28 12:31:30 +00:00
|
|
|
tlsClientConfig.RootCAs = caPool
|
|
|
|
}
|
2015-07-28 09:00:42 +00:00
|
|
|
|
2015-07-28 12:06:56 +00:00
|
|
|
return tlsClientConfig, nil
|
2015-07-28 09:00:42 +00:00
|
|
|
}
|
|
|
|
|
2015-04-03 23:44:32 +00:00
|
|
|
// Put is used to insert or update an entry
|
|
|
|
func (c *ConsulBackend) Put(entry *Entry) error {
|
2015-04-14 18:09:24 +00:00
|
|
|
defer metrics.MeasureSince([]string{"consul", "put"}, time.Now())
|
2015-04-03 23:44:32 +00:00
|
|
|
pair := &api.KVPair{
|
|
|
|
Key: c.path + entry.Key,
|
|
|
|
Value: entry.Value,
|
|
|
|
}
|
2015-11-03 16:47:16 +00:00
|
|
|
|
|
|
|
c.permitPool.Acquire()
|
|
|
|
defer c.permitPool.Release()
|
|
|
|
|
2015-04-03 23:44:32 +00:00
|
|
|
_, err := c.kv.Put(pair, nil)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get is used to fetch an entry
|
|
|
|
func (c *ConsulBackend) Get(key string) (*Entry, error) {
|
2015-04-14 18:09:24 +00:00
|
|
|
defer metrics.MeasureSince([]string{"consul", "get"}, time.Now())
|
2015-11-03 16:47:16 +00:00
|
|
|
|
|
|
|
c.permitPool.Acquire()
|
|
|
|
defer c.permitPool.Release()
|
|
|
|
|
2017-01-13 17:49:04 +00:00
|
|
|
var queryOptions *api.QueryOptions
|
2017-01-19 22:36:33 +00:00
|
|
|
if c.consistencyMode == consistencyModeStrong {
|
2017-01-13 17:49:04 +00:00
|
|
|
queryOptions = &api.QueryOptions{
|
2017-01-19 22:36:33 +00:00
|
|
|
RequireConsistent: true,
|
2017-01-13 17:49:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pair, _, err := c.kv.Get(c.path+key, queryOptions)
|
2015-04-03 23:44:32 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if pair == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
ent := &Entry{
|
|
|
|
Key: key,
|
|
|
|
Value: pair.Value,
|
|
|
|
}
|
|
|
|
return ent, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete is used to permanently delete an entry
|
|
|
|
func (c *ConsulBackend) Delete(key string) error {
|
2015-04-14 18:09:24 +00:00
|
|
|
defer metrics.MeasureSince([]string{"consul", "delete"}, time.Now())
|
2015-11-03 16:47:16 +00:00
|
|
|
|
|
|
|
c.permitPool.Acquire()
|
|
|
|
defer c.permitPool.Release()
|
|
|
|
|
2015-04-03 23:44:32 +00:00
|
|
|
_, err := c.kv.Delete(c.path+key, nil)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2015-05-09 06:37:16 +00:00
|
|
|
// List is used to list all the keys under a given
|
2015-04-03 23:44:32 +00:00
|
|
|
// prefix, up to the next prefix.
|
|
|
|
func (c *ConsulBackend) List(prefix string) ([]string, error) {
|
2015-04-14 18:09:24 +00:00
|
|
|
defer metrics.MeasureSince([]string{"consul", "list"}, time.Now())
|
2015-04-03 23:44:32 +00:00
|
|
|
scan := c.path + prefix
|
2015-11-03 16:47:16 +00:00
|
|
|
|
2016-01-19 22:05:01 +00:00
|
|
|
// The TrimPrefix call below will not work correctly if we have "//" at the
|
|
|
|
// end. This can happen in cases where you are e.g. listing the root of a
|
|
|
|
// prefix in a logical backend via "/" instead of ""
|
|
|
|
if strings.HasSuffix(scan, "//") {
|
|
|
|
scan = scan[:len(scan)-1]
|
|
|
|
}
|
|
|
|
|
2015-11-03 16:47:16 +00:00
|
|
|
c.permitPool.Acquire()
|
|
|
|
defer c.permitPool.Release()
|
|
|
|
|
2015-04-03 23:44:32 +00:00
|
|
|
out, _, err := c.kv.Keys(scan, "/", nil)
|
|
|
|
for idx, val := range out {
|
|
|
|
out[idx] = strings.TrimPrefix(val, scan)
|
|
|
|
}
|
2016-01-19 22:05:01 +00:00
|
|
|
|
2015-04-03 23:44:32 +00:00
|
|
|
return out, err
|
2015-03-02 18:48:53 +00:00
|
|
|
}
|
2015-04-14 18:49:46 +00:00
|
|
|
|
|
|
|
// Lock is used for mutual exclusion based on the given key.
|
2015-04-14 23:36:53 +00:00
|
|
|
func (c *ConsulBackend) LockWith(key, value string) (Lock, error) {
|
2015-04-14 18:49:46 +00:00
|
|
|
// Create the lock
|
|
|
|
opts := &api.LockOptions{
|
2015-12-01 05:08:14 +00:00
|
|
|
Key: c.path + key,
|
|
|
|
Value: []byte(value),
|
|
|
|
SessionName: "Vault Lock",
|
|
|
|
MonitorRetries: 5,
|
2015-04-14 18:49:46 +00:00
|
|
|
}
|
|
|
|
lock, err := c.client.LockOpts(opts)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("failed to create lock: %v", err)
|
|
|
|
}
|
2015-04-14 23:36:53 +00:00
|
|
|
cl := &ConsulLock{
|
2017-01-19 22:36:33 +00:00
|
|
|
client: c.client,
|
|
|
|
key: c.path + key,
|
|
|
|
lock: lock,
|
|
|
|
consistencyMode: c.consistencyMode,
|
2015-04-14 23:36:53 +00:00
|
|
|
}
|
|
|
|
return cl, nil
|
|
|
|
}
|
|
|
|
|
2016-07-18 17:19:58 +00:00
|
|
|
// HAEnabled indicates whether the HA functionality should be exposed.
// Currently always returns true: the Consul backend supports distributed
// locking (see LockWith), so HA coordination is always available.
func (c *ConsulBackend) HAEnabled() bool {
	return true
}
|
|
|
|
|
2015-05-02 22:34:39 +00:00
|
|
|
// DetectHostAddr is used to detect the host address by asking the Consul agent
|
|
|
|
func (c *ConsulBackend) DetectHostAddr() (string, error) {
|
|
|
|
agent := c.client.Agent()
|
|
|
|
self, err := agent.Self()
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
2016-04-24 00:17:07 +00:00
|
|
|
addr, ok := self["Member"]["Addr"].(string)
|
|
|
|
if !ok {
|
|
|
|
return "", fmt.Errorf("Unable to convert an address to string")
|
|
|
|
}
|
2015-05-02 22:34:39 +00:00
|
|
|
return addr, nil
|
|
|
|
}
|
|
|
|
|
2015-04-14 23:36:53 +00:00
|
|
|
// ConsulLock is used to provide the Lock interface backed by Consul
type ConsulLock struct {
	client          *api.Client // Consul API client, used for KV reads in Value()
	key             string      // full KV path: backend path prefix + lock key
	lock            *api.Lock   // underlying Consul lock handle
	consistencyMode string      // one of consistencyModeDefault/consistencyModeStrong; strong forces consistent reads
}
|
|
|
|
|
|
|
|
// Lock acquires the lock, delegating to the underlying Consul lock. It
// blocks until the lock is held or stopCh is closed; the returned channel
// signals loss of the lock (per the Consul api.Lock contract).
func (c *ConsulLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
	return c.lock.Lock(stopCh)
}
|
|
|
|
|
|
|
|
// Unlock releases the lock by delegating to the underlying Consul lock.
func (c *ConsulLock) Unlock() error {
	return c.lock.Unlock()
}
|
|
|
|
|
|
|
|
func (c *ConsulLock) Value() (bool, string, error) {
|
|
|
|
kv := c.client.KV()
|
2015-11-03 16:47:16 +00:00
|
|
|
|
2017-01-13 20:22:14 +00:00
|
|
|
var queryOptions *api.QueryOptions
|
2017-01-19 22:36:33 +00:00
|
|
|
if c.consistencyMode == consistencyModeStrong {
|
2017-01-13 20:22:14 +00:00
|
|
|
queryOptions = &api.QueryOptions{
|
2017-01-19 22:36:33 +00:00
|
|
|
RequireConsistent: true,
|
2017-01-13 20:22:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pair, _, err := kv.Get(c.key, queryOptions)
|
2015-04-14 23:36:53 +00:00
|
|
|
if err != nil {
|
|
|
|
return false, "", err
|
|
|
|
}
|
|
|
|
if pair == nil {
|
|
|
|
return false, "", nil
|
|
|
|
}
|
|
|
|
held := pair.Session != ""
|
|
|
|
value := string(pair.Value)
|
|
|
|
return held, value, nil
|
2015-04-14 18:49:46 +00:00
|
|
|
}
|
2016-04-28 18:04:49 +00:00
|
|
|
|
|
|
|
// NotifyActiveStateChange asks the event demuxer to reconcile with Consul
// immediately. The send is non-blocking: if a notification is already
// pending the event is dropped and the periodic reconcileTimer will catch
// up instead. Always returns nil.
func (c *ConsulBackend) NotifyActiveStateChange() error {
	select {
	case c.notifyActiveCh <- notifyEvent{}:
	default:
		// NOTE: If this occurs Vault's active status could be out of
		// sync with Consul until reconcileTimer expires.
		c.logger.Warn("physical/consul: Concurrent state change notify dropped")
	}

	return nil
}
|
|
|
|
|
|
|
|
// NotifySealedStateChange asks the event demuxer to push a TTL check update
// immediately. The send is non-blocking: if a notification is already
// pending the event is dropped and the periodic checkTimer will catch up
// instead. Always returns nil.
func (c *ConsulBackend) NotifySealedStateChange() error {
	select {
	case c.notifySealedCh <- notifyEvent{}:
	default:
		// NOTE: If this occurs Vault's sealed status could be out of
		// sync with Consul until checkTimer expires.
		c.logger.Warn("physical/consul: Concurrent sealed state change notify dropped")
	}

	return nil
}
|
|
|
|
|
|
|
|
// checkDuration returns how long to wait before the next TTL check update:
// the configured checkTimeout shortened by a minimum buffer and random
// jitter (via lib.DurationMinusBuffer) so the update lands before the TTL
// expires.
func (c *ConsulBackend) checkDuration() time.Duration {
	return lib.DurationMinusBuffer(c.checkTimeout, checkMinBuffer, checkJitterFactor)
}
|
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
func (c *ConsulBackend) RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc activeFunction, sealedFunc sealedFunction) (err error) {
|
|
|
|
if err := c.setRedirectAddr(redirectAddr); err != nil {
|
2016-04-28 18:04:49 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-08-01 15:15:25 +00:00
|
|
|
// 'server' command will wait for the below goroutine to complete
|
2016-07-30 17:17:29 +00:00
|
|
|
waitGroup.Add(1)
|
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
go c.runEventDemuxer(waitGroup, shutdownCh, redirectAddr, activeFunc, sealedFunc)
|
2016-04-28 18:04:49 +00:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc activeFunction, sealedFunc sealedFunction) {
|
2016-08-01 14:24:27 +00:00
|
|
|
// This defer statement should be executed last. So push it first.
|
|
|
|
defer waitGroup.Done()
|
|
|
|
|
2016-04-28 18:04:49 +00:00
|
|
|
// Fire the reconcileTimer immediately upon starting the event demuxer
|
|
|
|
reconcileTimer := time.NewTimer(0)
|
|
|
|
defer reconcileTimer.Stop()
|
|
|
|
|
|
|
|
// Schedule the first check. Consul TTL checks are passing by
|
|
|
|
// default, checkTimer does not need to be run immediately.
|
|
|
|
checkTimer := time.NewTimer(c.checkDuration())
|
|
|
|
defer checkTimer.Stop()
|
|
|
|
|
|
|
|
// Use a reactor pattern to handle and dispatch events to singleton
|
|
|
|
// goroutine handlers for execution. It is not acceptable to drop
|
|
|
|
// inbound events from Notify*().
|
|
|
|
//
|
|
|
|
// goroutines are dispatched if the demuxer can acquire a lock (via
|
|
|
|
// an atomic CAS incr) on the handler. Handlers are responsible for
|
|
|
|
// deregistering themselves (atomic CAS decr). Handlers and the
|
|
|
|
// demuxer share a lock to synchronize information at the beginning
|
|
|
|
// and end of a handler's life (or after a handler wakes up from
|
|
|
|
// sleeping during a back-off/retry).
|
|
|
|
var shutdown bool
|
|
|
|
var checkLock int64
|
|
|
|
var registeredServiceID string
|
|
|
|
var serviceRegLock int64
|
2016-08-01 15:58:45 +00:00
|
|
|
|
2016-07-31 14:09:16 +00:00
|
|
|
for !shutdown {
|
2016-04-28 18:04:49 +00:00
|
|
|
select {
|
|
|
|
case <-c.notifyActiveCh:
|
|
|
|
// Run reconcile immediately upon active state change notification
|
|
|
|
reconcileTimer.Reset(0)
|
|
|
|
case <-c.notifySealedCh:
|
|
|
|
// Run check timer immediately upon a seal state change notification
|
|
|
|
checkTimer.Reset(0)
|
|
|
|
case <-reconcileTimer.C:
|
|
|
|
// Unconditionally rearm the reconcileTimer
|
|
|
|
reconcileTimer.Reset(reconcileTimeout - lib.RandomStagger(reconcileTimeout/checkJitterFactor))
|
|
|
|
|
|
|
|
// Abort if service discovery is disabled or a
|
|
|
|
// reconcile handler is already active
|
|
|
|
if !c.disableRegistration && atomic.CompareAndSwapInt64(&serviceRegLock, 0, 1) {
|
|
|
|
// Enter handler with serviceRegLock held
|
|
|
|
go func() {
|
|
|
|
defer atomic.CompareAndSwapInt64(&serviceRegLock, 1, 0)
|
|
|
|
for !shutdown {
|
2016-07-22 12:44:16 +00:00
|
|
|
serviceID, err := c.reconcileConsul(registeredServiceID, activeFunc, sealedFunc)
|
2016-04-28 18:04:49 +00:00
|
|
|
if err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
if c.logger.IsWarn() {
|
|
|
|
c.logger.Warn("physical/consul: reconcile unable to talk with Consul backend", "error", err)
|
|
|
|
}
|
2016-04-28 18:04:49 +00:00
|
|
|
time.Sleep(consulRetryInterval)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
c.serviceLock.Lock()
|
|
|
|
defer c.serviceLock.Unlock()
|
|
|
|
|
|
|
|
registeredServiceID = serviceID
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
case <-checkTimer.C:
|
|
|
|
checkTimer.Reset(c.checkDuration())
|
|
|
|
// Abort if service discovery is disabled or a
|
|
|
|
// reconcile handler is active
|
|
|
|
if !c.disableRegistration && atomic.CompareAndSwapInt64(&checkLock, 0, 1) {
|
2016-06-03 23:00:31 +00:00
|
|
|
// Enter handler with checkLock held
|
2016-04-28 18:04:49 +00:00
|
|
|
go func() {
|
|
|
|
defer atomic.CompareAndSwapInt64(&checkLock, 1, 0)
|
|
|
|
for !shutdown {
|
2016-06-03 23:00:31 +00:00
|
|
|
sealed := sealedFunc()
|
|
|
|
if err := c.runCheck(sealed); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
if c.logger.IsWarn() {
|
|
|
|
c.logger.Warn("physical/consul: check unable to talk with Consul backend", "error", err)
|
|
|
|
}
|
2016-04-28 18:04:49 +00:00
|
|
|
time.Sleep(consulRetryInterval)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
case <-shutdownCh:
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Info("physical/consul: Shutting down consul backend")
|
2016-04-28 18:04:49 +00:00
|
|
|
shutdown = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
c.serviceLock.RLock()
|
|
|
|
defer c.serviceLock.RUnlock()
|
|
|
|
if err := c.client.Agent().ServiceDeregister(registeredServiceID); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
if c.logger.IsWarn() {
|
|
|
|
c.logger.Warn("physical/consul: service deregistration failed", "error", err)
|
|
|
|
}
|
2016-04-28 18:04:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// checkID returns the ID used for a Consul Check. Assume at least a read
|
|
|
|
// lock is held.
|
|
|
|
func (c *ConsulBackend) checkID() string {
|
2016-07-19 18:17:50 +00:00
|
|
|
return fmt.Sprintf("%s:vault-sealed-check", c.serviceID())
|
2016-07-19 11:05:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// serviceID returns the Vault ServiceID for use in Consul. Assume at least
|
|
|
|
// a read lock is held.
|
|
|
|
func (c *ConsulBackend) serviceID() string {
|
2016-08-15 13:42:42 +00:00
|
|
|
return fmt.Sprintf("%s:%s:%d", c.serviceName, c.redirectHost, c.redirectPort)
|
2016-04-28 18:04:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// reconcileConsul queries the state of Vault Core and Consul and fixes up
|
|
|
|
// Consul's state according to what's in Vault. reconcileConsul is called
|
|
|
|
// without any locks held and can be run concurrently, therefore no changes
|
|
|
|
// to ConsulBackend can be made in this method (i.e. wtb const receiver for
|
|
|
|
// compiler enforced safety).
|
2016-07-22 12:44:16 +00:00
|
|
|
func (c *ConsulBackend) reconcileConsul(registeredServiceID string, activeFunc activeFunction, sealedFunc sealedFunction) (serviceID string, err error) {
|
2016-04-28 18:04:49 +00:00
|
|
|
// Query vault Core for its current state
|
|
|
|
active := activeFunc()
|
|
|
|
sealed := sealedFunc()
|
|
|
|
|
|
|
|
agent := c.client.Agent()
|
2016-07-19 11:05:18 +00:00
|
|
|
catalog := c.client.Catalog()
|
|
|
|
|
|
|
|
serviceID = c.serviceID()
|
2016-04-28 18:04:49 +00:00
|
|
|
|
|
|
|
// Get the current state of Vault from Consul
|
2016-07-19 11:05:18 +00:00
|
|
|
var currentVaultService *api.CatalogService
|
2016-07-19 18:07:06 +00:00
|
|
|
if services, _, err := catalog.Service(c.serviceName, "", &api.QueryOptions{AllowStale: true}); err == nil {
|
2016-07-19 11:05:18 +00:00
|
|
|
for _, service := range services {
|
|
|
|
if serviceID == service.ServiceID {
|
|
|
|
currentVaultService = service
|
|
|
|
break
|
|
|
|
}
|
2016-04-28 18:04:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-22 08:37:46 +00:00
|
|
|
tags := c.fetchServiceTags(active)
|
2016-04-28 18:04:49 +00:00
|
|
|
|
2016-07-22 12:44:16 +00:00
|
|
|
var reregister bool
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case currentVaultService == nil, registeredServiceID == "":
|
|
|
|
reregister = true
|
|
|
|
default:
|
|
|
|
switch {
|
|
|
|
case !strutil.EquivalentSlices(currentVaultService.ServiceTags, tags):
|
|
|
|
reregister = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if !reregister {
|
2016-07-19 11:05:18 +00:00
|
|
|
// When re-registration is not required, return a valid serviceID
|
|
|
|
// to avoid registration in the next cycle.
|
|
|
|
return serviceID, nil
|
2016-04-28 18:04:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
service := &api.AgentServiceRegistration{
|
|
|
|
ID: serviceID,
|
|
|
|
Name: c.serviceName,
|
|
|
|
Tags: tags,
|
2016-08-15 13:42:42 +00:00
|
|
|
Port: int(c.redirectPort),
|
|
|
|
Address: c.redirectHost,
|
2016-04-28 18:04:49 +00:00
|
|
|
EnableTagOverride: false,
|
|
|
|
}
|
|
|
|
|
|
|
|
checkStatus := api.HealthCritical
|
|
|
|
if !sealed {
|
|
|
|
checkStatus = api.HealthPassing
|
|
|
|
}
|
|
|
|
|
|
|
|
sealedCheck := &api.AgentCheckRegistration{
|
|
|
|
ID: c.checkID(),
|
|
|
|
Name: "Vault Sealed Status",
|
|
|
|
Notes: "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
|
|
|
|
ServiceID: serviceID,
|
|
|
|
AgentServiceCheck: api.AgentServiceCheck{
|
|
|
|
TTL: c.checkTimeout.String(),
|
|
|
|
Status: checkStatus,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := agent.ServiceRegister(service); err != nil {
|
|
|
|
return "", errwrap.Wrapf(`service registration failed: {{err}}`, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := agent.CheckRegister(sealedCheck); err != nil {
|
|
|
|
return serviceID, errwrap.Wrapf(`service check registration failed: {{err}}`, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return serviceID, nil
|
|
|
|
}
|
|
|
|
|
2016-06-03 23:00:31 +00:00
|
|
|
// runCheck immediately pushes a TTL check.
|
|
|
|
func (c *ConsulBackend) runCheck(sealed bool) error {
|
2016-04-28 18:04:49 +00:00
|
|
|
// Run a TTL check
|
|
|
|
agent := c.client.Agent()
|
2016-06-03 23:00:31 +00:00
|
|
|
if !sealed {
|
2016-04-28 18:04:49 +00:00
|
|
|
return agent.PassTTL(c.checkID(), "Vault Unsealed")
|
|
|
|
} else {
|
|
|
|
return agent.FailTTL(c.checkID(), "Vault Sealed")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-22 08:37:46 +00:00
|
|
|
// fetchServiceTags returns all of the relevant tags for Consul.
|
|
|
|
func (c *ConsulBackend) fetchServiceTags(active bool) []string {
|
2016-04-28 18:04:49 +00:00
|
|
|
activeTag := "standby"
|
|
|
|
if active {
|
|
|
|
activeTag = "active"
|
|
|
|
}
|
2016-07-22 08:37:46 +00:00
|
|
|
return append(c.serviceTags, activeTag)
|
2016-04-28 18:04:49 +00:00
|
|
|
}
|
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
func (c *ConsulBackend) setRedirectAddr(addr string) (err error) {
|
2016-04-28 18:04:49 +00:00
|
|
|
if addr == "" {
|
2016-08-15 13:42:42 +00:00
|
|
|
return fmt.Errorf("redirect address must not be empty")
|
2016-04-28 18:04:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
url, err := url.Parse(addr)
|
|
|
|
if err != nil {
|
2016-08-15 13:42:42 +00:00
|
|
|
return errwrap.Wrapf(fmt.Sprintf(`failed to parse redirect URL "%v": {{err}}`, addr), err)
|
2016-04-28 18:04:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var portStr string
|
2016-08-15 13:42:42 +00:00
|
|
|
c.redirectHost, portStr, err = net.SplitHostPort(url.Host)
|
2016-04-28 18:04:49 +00:00
|
|
|
if err != nil {
|
|
|
|
if url.Scheme == "http" {
|
|
|
|
portStr = "80"
|
|
|
|
} else if url.Scheme == "https" {
|
|
|
|
portStr = "443"
|
|
|
|
} else if url.Scheme == "unix" {
|
|
|
|
portStr = "-1"
|
2016-08-15 13:42:42 +00:00
|
|
|
c.redirectHost = url.Path
|
2016-04-28 18:04:49 +00:00
|
|
|
} else {
|
2016-08-15 13:42:42 +00:00
|
|
|
return errwrap.Wrapf(fmt.Sprintf(`failed to find a host:port in redirect address "%v": {{err}}`, url.Host), err)
|
2016-04-28 18:04:49 +00:00
|
|
|
}
|
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
c.redirectPort, err = strconv.ParseInt(portStr, 10, 0)
|
|
|
|
if err != nil || c.redirectPort < -1 || c.redirectPort > 65535 {
|
2016-04-28 18:04:49 +00:00
|
|
|
return errwrap.Wrapf(fmt.Sprintf(`failed to parse valid port "%v": {{err}}`, portStr), err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|