package consul

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/net/http2"

	log "github.com/mgutz/logxi/v1"

	"crypto/tls"
	"crypto/x509"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/errwrap"
	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/vault/helper/consts"
	"github.com/hashicorp/vault/helper/parseutil"
	"github.com/hashicorp/vault/helper/strutil"
	"github.com/hashicorp/vault/helper/tlsutil"
	"github.com/hashicorp/vault/physical"
)

const (
	// checkJitterFactor specifies the jitter factor used to stagger checks
	checkJitterFactor = 16

	// checkMinBuffer provides a guarantee that a check will not
	// be executed too close to the TTL check timeout
	checkMinBuffer = 100 * time.Millisecond

	// consulRetryInterval specifies the retry duration to use when an
	// API call to the Consul agent fails.
	consulRetryInterval = 1 * time.Second

	// defaultCheckTimeout specifies the default timeout used for TTL checks
	defaultCheckTimeout = 5 * time.Second

	// DefaultServiceName is the default Consul service name used when
	// advertising a Vault instance.
	DefaultServiceName = "vault"

	// reconcileTimeout is how often Vault should query Consul to detect
	// and fix any state drift.
	reconcileTimeout = 60 * time.Second

	// consistencyModeDefault is the configuration value used to tell
	// consul to use default consistency.
	consistencyModeDefault = "default"

	// consistencyModeStrong is the configuration value used to tell
	// consul to use strong consistency.
	consistencyModeStrong = "strong"
)
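
// The check-related constants above are combined by the consul lib helper
// that NewConsulBackend uses below when validating a configured
// check_timeout. A rough, illustrative sketch (the "5s" value is only an
// example):
//
//	d, _ := time.ParseDuration("5s")
//	// min is the shortest period the jittered TTL-update schedule could
//	// produce; NewConsulBackend rejects timeouts where min < checkMinBuffer,
//	// so updates can never crowd the TTL itself.
//	min, _ := lib.DurationMinusBufferDomain(d, checkMinBuffer, checkJitterFactor)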

type notifyEvent struct{}

// Verify ConsulBackend satisfies the correct interfaces
var _ physical.Backend = (*ConsulBackend)(nil)
var _ physical.HABackend = (*ConsulBackend)(nil)
var _ physical.Lock = (*ConsulLock)(nil)
var _ physical.Transactional = (*ConsulBackend)(nil)

var (
	hostnameRegex = regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`)
)
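
// For illustration: hostnameRegex accepts RFC 1123-style names such as
// "vault", "vault-east-1", or "vault.service.consul", and rejects names
// containing underscores or other punctuation, e.g. "vault_primary".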

// ConsulBackend is a physical backend that stores data at a specific
// prefix within Consul. It is used for most production situations as
// it allows Vault to run on multiple machines in a highly-available manner.
type ConsulBackend struct {
	path                string
	logger              log.Logger
	client              *api.Client
	kv                  *api.KV
	permitPool          *physical.PermitPool
	serviceLock         sync.RWMutex
	redirectHost        string
	redirectPort        int64
	serviceName         string
	serviceTags         []string
	serviceAddress      *string
	disableRegistration bool
	checkTimeout        time.Duration
	consistencyMode     string

	notifyActiveCh chan notifyEvent
	notifySealedCh chan notifyEvent
}
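
// NewConsulBackend below reads its settings from a flat map[string]string.
// A minimal sketch of a configuration it would accept; every value here is
// an example rather than a default, except where noted:
//
//	conf := map[string]string{
//		"address":          "127.0.0.1:8500", // Consul agent address
//		"scheme":           "https",          // "https" triggers setupTLSConfig
//		"token":            "<consul ACL token>",
//		"path":             "vault/", // KV prefix (default "vault/")
//		"service":          "vault",  // DefaultServiceName
//		"service_tags":     "prod,us-east-1",
//		"check_timeout":    "5s",
//		"max_parallel":     "128",
//		"consistency_mode": "strong",
//	}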

// NewConsulBackend constructs a Consul backend using the given API client
// and the prefix in the KV store.
func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
	// Get the path in Consul
	path, ok := conf["path"]
	if !ok {
		path = "vault/"
	}
	if logger.IsDebug() {
		logger.Debug("physical/consul: config path set", "path", path)
	}

	// Ensure path is suffixed but not prefixed
	if !strings.HasSuffix(path, "/") {
		logger.Warn("physical/consul: appending trailing forward slash to path")
		path += "/"
	}
	if strings.HasPrefix(path, "/") {
		logger.Warn("physical/consul: trimming path of its forward slash")
		path = strings.TrimPrefix(path, "/")
	}

	// Allow admins to disable consul integration
	disableReg, ok := conf["disable_registration"]
	var disableRegistration bool
	if ok && disableReg != "" {
		b, err := parseutil.ParseBool(disableReg)
		if err != nil {
			return nil, errwrap.Wrapf("failed parsing disable_registration parameter: {{err}}", err)
		}
		disableRegistration = b
	}
	if logger.IsDebug() {
		logger.Debug("physical/consul: config disable_registration set", "disable_registration", disableRegistration)
	}

	// Get the service name to advertise in Consul
	service, ok := conf["service"]
	if !ok {
		service = DefaultServiceName
	}
	if !hostnameRegex.MatchString(service) {
		return nil, errors.New("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes")
	}
	if logger.IsDebug() {
		logger.Debug("physical/consul: config service set", "service", service)
	}

	// Get the additional tags to attach to the registered service name
	tags := conf["service_tags"]
	if logger.IsDebug() {
		logger.Debug("physical/consul: config service_tags set", "service_tags", tags)
	}

	// Get the service-specific address to override the use of the HA redirect address
	var serviceAddr *string
	serviceAddrStr, ok := conf["service_address"]
	if ok {
		serviceAddr = &serviceAddrStr
	}
	if logger.IsDebug() {
		logger.Debug("physical/consul: config service_address set", "service_address", serviceAddr)
	}

	checkTimeout := defaultCheckTimeout
	checkTimeoutStr, ok := conf["check_timeout"]
	if ok {
		d, err := time.ParseDuration(checkTimeoutStr)
		if err != nil {
			return nil, err
		}

		min, _ := lib.DurationMinusBufferDomain(d, checkMinBuffer, checkJitterFactor)
		if min < checkMinBuffer {
			return nil, fmt.Errorf("Consul check_timeout must be greater than %v", min)
		}

		checkTimeout = d
		if logger.IsDebug() {
			logger.Debug("physical/consul: config check_timeout set", "check_timeout", d)
		}
	}

	// Configure the client
	consulConf := api.DefaultConfig()
	// Set MaxIdleConnsPerHost to the number of processes used in expiration.Restore
	consulConf.Transport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount

	if addr, ok := conf["address"]; ok {
		consulConf.Address = addr
		if logger.IsDebug() {
			logger.Debug("physical/consul: config address set", "address", addr)
		}
	}
	if scheme, ok := conf["scheme"]; ok {
		consulConf.Scheme = scheme
		if logger.IsDebug() {
			logger.Debug("physical/consul: config scheme set", "scheme", scheme)
		}
	}
	if token, ok := conf["token"]; ok {
		consulConf.Token = token
		logger.Debug("physical/consul: config token set")
	}

	if consulConf.Scheme == "https" {
		tlsClientConfig, err := setupTLSConfig(conf)
		if err != nil {
			return nil, err
		}

		consulConf.Transport.TLSClientConfig = tlsClientConfig
		if err := http2.ConfigureTransport(consulConf.Transport); err != nil {
			return nil, err
		}
		logger.Debug("physical/consul: configured TLS")
	}

	consulConf.HttpClient = &http.Client{Transport: consulConf.Transport}
	client, err := api.NewClient(consulConf)
	if err != nil {
		return nil, errwrap.Wrapf("client setup failed: {{err}}", err)
	}

	maxParStr, ok := conf["max_parallel"]
	var maxParInt int
	if ok {
		maxParInt, err = strconv.Atoi(maxParStr)
		if err != nil {
			return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
		}
		if logger.IsDebug() {
			logger.Debug("physical/consul: max_parallel set", "max_parallel", maxParInt)
		}
	}

	consistencyMode, ok := conf["consistency_mode"]
	if ok {
		switch consistencyMode {
		case consistencyModeDefault, consistencyModeStrong:
		default:
			return nil, fmt.Errorf("invalid consistency_mode value: %s", consistencyMode)
		}
	} else {
		consistencyMode = consistencyModeDefault
	}

	// Setup the backend
	c := &ConsulBackend{
		path:                path,
		logger:              logger,
		client:              client,
		kv:                  client.KV(),
		permitPool:          physical.NewPermitPool(maxParInt),
		serviceName:         service,
		serviceTags:         strutil.ParseDedupLowercaseAndSortStrings(tags, ","),
		serviceAddress:      serviceAddr,
		checkTimeout:        checkTimeout,
		disableRegistration: disableRegistration,
		consistencyMode:     consistencyMode,
		notifyActiveCh:      make(chan notifyEvent),
		notifySealedCh:      make(chan notifyEvent),
	}
	return c, nil
}
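
// A hypothetical caller (Vault core normally performs this wiring) could
// construct the backend and reach the interfaces asserted near the top of
// the file roughly as follows; the address value is only an example:
//
//	b, err := NewConsulBackend(map[string]string{"address": "127.0.0.1:8500"}, logger)
//	if err != nil {
//		return nil, err
//	}
//	ha := b.(physical.HABackend)      // LockWith for leader election
//	txn := b.(physical.Transactional) // Transaction for batched writes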

func setupTLSConfig(conf map[string]string) (*tls.Config, error) {
	serverName, _, err := net.SplitHostPort(conf["address"])
	switch {
	case err == nil:
	case strings.Contains(err.Error(), "missing port"):
		serverName = conf["address"]
	default:
		return nil, err
	}

	insecureSkipVerify := false
	tlsSkipVerify, ok := conf["tls_skip_verify"]

	if ok && tlsSkipVerify != "" {
		b, err := parseutil.ParseBool(tlsSkipVerify)
		if err != nil {
			return nil, errwrap.Wrapf("failed parsing tls_skip_verify parameter: {{err}}", err)
		}
		insecureSkipVerify = b
	}

	tlsMinVersionStr, ok := conf["tls_min_version"]
	if !ok {
		// Set the default value
		tlsMinVersionStr = "tls12"
	}

	tlsMinVersion, ok := tlsutil.TLSLookup[tlsMinVersionStr]
	if !ok {
		return nil, fmt.Errorf("invalid 'tls_min_version'")
	}

	tlsClientConfig := &tls.Config{
		MinVersion:         tlsMinVersion,
		InsecureSkipVerify: insecureSkipVerify,
		ServerName:         serverName,
	}

	_, okCert := conf["tls_cert_file"]
	_, okKey := conf["tls_key_file"]

	if okCert && okKey {
		tlsCert, err := tls.LoadX509KeyPair(conf["tls_cert_file"], conf["tls_key_file"])
		if err != nil {
			return nil, fmt.Errorf("client tls setup failed: %v", err)
		}

		tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
	}

	if tlsCaFile, ok := conf["tls_ca_file"]; ok {
		caPool := x509.NewCertPool()

		data, err := ioutil.ReadFile(tlsCaFile)
		if err != nil {
			return nil, fmt.Errorf("failed to read CA file: %v", err)
		}

		if !caPool.AppendCertsFromPEM(data) {
			return nil, fmt.Errorf("failed to parse CA certificate")
		}

		tlsClientConfig.RootCAs = caPool
	}

	return tlsClientConfig, nil
}
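
// setupTLSConfig reads the TLS-related settings from the same flat conf
// map. A sketch of the keys involved; the file paths and values below are
// placeholders, not defaults:
//
//	conf["tls_min_version"] = "tls12" // the default when the key is unset
//	conf["tls_skip_verify"] = "false"
//	conf["tls_cert_file"] = "/etc/vault/consul-client.pem" // used only together with tls_key_file
//	conf["tls_key_file"] = "/etc/vault/consul-client-key.pem"
//	conf["tls_ca_file"] = "/etc/vault/consul-ca.pem"
//
// NewConsulBackend only calls this function when the configured scheme is
// "https".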

// Transaction is used to run multiple entries via a transaction
func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
	if len(txns) == 0 {
		return nil
	}

	ops := make([]*api.KVTxnOp, 0, len(txns))

	for _, op := range txns {
		cop := &api.KVTxnOp{
			Key: c.path + op.Entry.Key,
		}
		switch op.Operation {
		case physical.DeleteOperation:
			cop.Verb = api.KVDelete
		case physical.PutOperation:
			cop.Verb = api.KVSet
			cop.Value = op.Entry.Value
		default:
			return fmt.Errorf("%q is not a supported transaction operation", op.Operation)
		}

		ops = append(ops, cop)
	}

	c.permitPool.Acquire()
	defer c.permitPool.Release()

	ok, resp, _, err := c.kv.Txn(ops, nil)
	if err != nil {
		return err
	}
	if ok && len(resp.Errors) == 0 {
		return nil
	}

	var retErr *multierror.Error
	for _, res := range resp.Errors {
		retErr = multierror.Append(retErr, errors.New(res.What))
	}

	return retErr
}
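
// For illustration, a caller could batch a write and a delete into one
// atomic Consul transaction; the keys here are made-up examples:
//
//	err := c.Transaction(ctx, []*physical.TxnEntry{
//		{Operation: physical.PutOperation, Entry: &physical.Entry{Key: "core/example", Value: []byte("v1")}},
//		{Operation: physical.DeleteOperation, Entry: &physical.Entry{Key: "core/stale"}},
//	})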

// Put is used to insert or update an entry
func (c *ConsulBackend) Put(ctx context.Context, entry *physical.Entry) error {
	defer metrics.MeasureSince([]string{"consul", "put"}, time.Now())

	c.permitPool.Acquire()
	defer c.permitPool.Release()

	pair := &api.KVPair{
		Key:   c.path + entry.Key,
		Value: entry.Value,
	}

	_, err := c.kv.Put(pair, nil)
	return err
}
|
|
|
|
|
|
|
|

// Get is used to fetch an entry
func (c *ConsulBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
	defer metrics.MeasureSince([]string{"consul", "get"}, time.Now())

	c.permitPool.Acquire()
	defer c.permitPool.Release()

	var queryOptions *api.QueryOptions
	if c.consistencyMode == consistencyModeStrong {
		queryOptions = &api.QueryOptions{
			RequireConsistent: true,
		}
	}

	pair, _, err := c.kv.Get(c.path+key, queryOptions)
	if err != nil {
		return nil, err
	}
	if pair == nil {
		return nil, nil
	}
	ent := &physical.Entry{
		Key:   key,
		Value: pair.Value,
	}
	return ent, nil
}

// Delete is used to permanently delete an entry
func (c *ConsulBackend) Delete(ctx context.Context, key string) error {
	defer metrics.MeasureSince([]string{"consul", "delete"}, time.Now())

	c.permitPool.Acquire()
	defer c.permitPool.Release()

	_, err := c.kv.Delete(c.path+key, nil)
	return err
}

// List is used to list all the keys under a given
// prefix, up to the next prefix.
func (c *ConsulBackend) List(ctx context.Context, prefix string) ([]string, error) {
	defer metrics.MeasureSince([]string{"consul", "list"}, time.Now())
	scan := c.path + prefix

	// The TrimPrefix call below will not work correctly if we have "//" at the
	// end. This can happen in cases where you are e.g. listing the root of a
	// prefix in a logical backend via "/" instead of ""
	if strings.HasSuffix(scan, "//") {
		scan = scan[:len(scan)-1]
	}

	c.permitPool.Acquire()
	defer c.permitPool.Release()

	out, _, err := c.kv.Keys(scan, "/", nil)
	for idx, val := range out {
		out[idx] = strings.TrimPrefix(val, scan)
	}

	return out, err
}
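
// Illustrative behavior (path values assumed): with c.path set to "vault/",
// List(ctx, "logical/") scans "vault/logical/" with "/" as the separator, so a
// stored key "vault/logical/foo" comes back as "foo" and deeper keys surface
// only as their next prefix, e.g. "sub/".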

// LockWith is used for mutual exclusion based on the given key.
func (c *ConsulBackend) LockWith(key, value string) (physical.Lock, error) {
	// Create the lock
	opts := &api.LockOptions{
		Key:            c.path + key,
		Value:          []byte(value),
		SessionName:    "Vault Lock",
		MonitorRetries: 5,
	}
	lock, err := c.client.LockOpts(opts)
	if err != nil {
		return nil, fmt.Errorf("failed to create lock: %v", err)
	}
	cl := &ConsulLock{
		client:          c.client,
		key:             c.path + key,
		lock:            lock,
		consistencyMode: c.consistencyMode,
	}
	return cl, nil
}
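
// A minimal caller-side sketch (illustrative; key, value and stopCh assumed)
// of using the lock returned by LockWith:
//
//	lock, err := backend.LockWith("core/leader", "node-a")
//	if err != nil {
//		return err
//	}
//	leaderCh, err := lock.Lock(stopCh) // blocks until the lock is held or stopCh fires
//	if err != nil {
//		return err
//	}
//	defer lock.Unlock()
//	<-leaderCh // closed if leadership/lock is lost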

// HAEnabled indicates whether the HA functionality should be exposed.
// Currently always returns true.
func (c *ConsulBackend) HAEnabled() bool {
	return true
}

// DetectHostAddr is used to detect the host address by asking the Consul agent
func (c *ConsulBackend) DetectHostAddr() (string, error) {
	agent := c.client.Agent()
	self, err := agent.Self()
	if err != nil {
		return "", err
	}
	addr, ok := self["Member"]["Addr"].(string)
	if !ok {
		return "", fmt.Errorf("unable to convert the agent's address to a string")
	}
	return addr, nil
}

// ConsulLock is used to provide the Lock interface backed by Consul
type ConsulLock struct {
	client          *api.Client
	key             string
	lock            *api.Lock
	consistencyMode string
}

func (c *ConsulLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
	return c.lock.Lock(stopCh)
}

func (c *ConsulLock) Unlock() error {
	return c.lock.Unlock()
}

func (c *ConsulLock) Value() (bool, string, error) {
	kv := c.client.KV()

	var queryOptions *api.QueryOptions
	if c.consistencyMode == consistencyModeStrong {
		queryOptions = &api.QueryOptions{
			RequireConsistent: true,
		}
	}

	pair, _, err := kv.Get(c.key, queryOptions)
	if err != nil {
		return false, "", err
	}
	if pair == nil {
		return false, "", nil
	}
	held := pair.Session != ""
	value := string(pair.Value)
	return held, value, nil
}

func (c *ConsulBackend) NotifyActiveStateChange() error {
	select {
	case c.notifyActiveCh <- notifyEvent{}:
	default:
		// NOTE: If this occurs Vault's active status could be out of
		// sync with Consul until reconcileTimer expires.
		c.logger.Warn("physical/consul: Concurrent state change notify dropped")
	}

	return nil
}

func (c *ConsulBackend) NotifySealedStateChange() error {
	select {
	case c.notifySealedCh <- notifyEvent{}:
	default:
		// NOTE: If this occurs Vault's sealed status could be out of
		// sync with Consul until checkTimer expires.
		c.logger.Warn("physical/consul: Concurrent sealed state change notify dropped")
	}

	return nil
}

func (c *ConsulBackend) checkDuration() time.Duration {
	return lib.DurationMinusBuffer(c.checkTimeout, checkMinBuffer, checkJitterFactor)
}

func (c *ConsulBackend) RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh physical.ShutdownChannel, redirectAddr string, activeFunc physical.ActiveFunction, sealedFunc physical.SealedFunction) (err error) {
	if err := c.setRedirectAddr(redirectAddr); err != nil {
		return err
	}

	// 'server' command will wait for the below goroutine to complete
	waitGroup.Add(1)

	go c.runEventDemuxer(waitGroup, shutdownCh, redirectAddr, activeFunc, sealedFunc)

	return nil
}

func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh physical.ShutdownChannel, redirectAddr string, activeFunc physical.ActiveFunction, sealedFunc physical.SealedFunction) {
	// This defer statement should be executed last. So push it first.
	defer waitGroup.Done()

	// Fire the reconcileTimer immediately upon starting the event demuxer
	reconcileTimer := time.NewTimer(0)
	defer reconcileTimer.Stop()

	// Schedule the first check. Consul TTL checks are passing by
	// default, checkTimer does not need to be run immediately.
	checkTimer := time.NewTimer(c.checkDuration())
	defer checkTimer.Stop()

	// Use a reactor pattern to handle and dispatch events to singleton
	// goroutine handlers for execution. It is not acceptable to drop
	// inbound events from Notify*().
	//
	// Goroutines are dispatched if the demuxer can acquire a lock (via
	// an atomic CAS incr) on the handler. Handlers are responsible for
	// deregistering themselves (atomic CAS decr). Handlers and the
	// demuxer share a lock to synchronize information at the beginning
	// and end of a handler's life (or after a handler wakes up from
	// sleeping during a back-off/retry).
	var shutdown bool
	var checkLock int64
	var registeredServiceID string
	var serviceRegLock int64

	for !shutdown {
		select {
		case <-c.notifyActiveCh:
			// Run reconcile immediately upon active state change notification
			reconcileTimer.Reset(0)
		case <-c.notifySealedCh:
			// Run check timer immediately upon a seal state change notification
			checkTimer.Reset(0)
		case <-reconcileTimer.C:
			// Unconditionally rearm the reconcileTimer
			reconcileTimer.Reset(reconcileTimeout - lib.RandomStagger(reconcileTimeout/checkJitterFactor))

			// Abort if service discovery is disabled or a
			// reconcile handler is already active
			if !c.disableRegistration && atomic.CompareAndSwapInt64(&serviceRegLock, 0, 1) {
				// Enter handler with serviceRegLock held
				go func() {
					defer atomic.CompareAndSwapInt64(&serviceRegLock, 1, 0)
					for !shutdown {
						serviceID, err := c.reconcileConsul(registeredServiceID, activeFunc, sealedFunc)
						if err != nil {
							if c.logger.IsWarn() {
								c.logger.Warn("physical/consul: reconcile unable to talk with Consul backend", "error", err)
							}
							time.Sleep(consulRetryInterval)
							continue
						}

						c.serviceLock.Lock()
						defer c.serviceLock.Unlock()

						registeredServiceID = serviceID
						return
					}
				}()
			}
		case <-checkTimer.C:
			checkTimer.Reset(c.checkDuration())
			// Abort if service discovery is disabled or a
			// check handler is already active
			if !c.disableRegistration && atomic.CompareAndSwapInt64(&checkLock, 0, 1) {
				// Enter handler with checkLock held
				go func() {
					defer atomic.CompareAndSwapInt64(&checkLock, 1, 0)
					for !shutdown {
						sealed := sealedFunc()
						if err := c.runCheck(sealed); err != nil {
							if c.logger.IsWarn() {
								c.logger.Warn("physical/consul: check unable to talk with Consul backend", "error", err)
							}
							time.Sleep(consulRetryInterval)
							continue
						}
						return
					}
				}()
			}
		case <-shutdownCh:
			c.logger.Info("physical/consul: Shutting down consul backend")
			shutdown = true
		}
	}

	c.serviceLock.RLock()
	defer c.serviceLock.RUnlock()
	if err := c.client.Agent().ServiceDeregister(registeredServiceID); err != nil {
		if c.logger.IsWarn() {
			c.logger.Warn("physical/consul: service deregistration failed", "error", err)
		}
	}
}

// checkID returns the ID used for a Consul Check. Assume at least a read
// lock is held.
func (c *ConsulBackend) checkID() string {
	return fmt.Sprintf("%s:vault-sealed-check", c.serviceID())
}

// serviceID returns the Vault ServiceID for use in Consul. Assume at least
// a read lock is held.
func (c *ConsulBackend) serviceID() string {
	return fmt.Sprintf("%s:%s:%d", c.serviceName, c.redirectHost, c.redirectPort)
}

// reconcileConsul queries the state of Vault Core and Consul and fixes up
// Consul's state according to what's in Vault. reconcileConsul is called
// without any locks held and can be run concurrently, therefore no changes
// to ConsulBackend can be made in this method (a const receiver would give
// compiler-enforced safety here, but Go has no such thing).
func (c *ConsulBackend) reconcileConsul(registeredServiceID string, activeFunc physical.ActiveFunction, sealedFunc physical.SealedFunction) (serviceID string, err error) {
	// Query Vault Core for its current state
	active := activeFunc()
	sealed := sealedFunc()

	agent := c.client.Agent()
	catalog := c.client.Catalog()

	serviceID = c.serviceID()

	// Get the current state of Vault from Consul
	var currentVaultService *api.CatalogService
	if services, _, err := catalog.Service(c.serviceName, "", &api.QueryOptions{AllowStale: true}); err == nil {
		for _, service := range services {
			if serviceID == service.ServiceID {
				currentVaultService = service
				break
			}
		}
	}

	tags := c.fetchServiceTags(active)

	var reregister bool

	switch {
	case currentVaultService == nil, registeredServiceID == "":
		reregister = true
	default:
		switch {
		case !strutil.EquivalentSlices(currentVaultService.ServiceTags, tags):
			reregister = true
		}
	}

	if !reregister {
		// When re-registration is not required, return a valid serviceID
		// to avoid registration in the next cycle.
		return serviceID, nil
	}

	// If service address was set explicitly in configuration, use that
	// as the service-specific address instead of the HA redirect address.
	var serviceAddress string
	if c.serviceAddress == nil {
		serviceAddress = c.redirectHost
	} else {
		serviceAddress = *c.serviceAddress
	}

	service := &api.AgentServiceRegistration{
		ID:                serviceID,
		Name:              c.serviceName,
		Tags:              tags,
		Port:              int(c.redirectPort),
		Address:           serviceAddress,
		EnableTagOverride: false,
	}

	checkStatus := api.HealthCritical
	if !sealed {
		checkStatus = api.HealthPassing
	}

	sealedCheck := &api.AgentCheckRegistration{
		ID:        c.checkID(),
		Name:      "Vault Sealed Status",
		Notes:     "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
		ServiceID: serviceID,
		AgentServiceCheck: api.AgentServiceCheck{
			TTL:    c.checkTimeout.String(),
			Status: checkStatus,
		},
	}

	if err := agent.ServiceRegister(service); err != nil {
		return "", errwrap.Wrapf(`service registration failed: {{err}}`, err)
	}

	if err := agent.CheckRegister(sealedCheck); err != nil {
		return serviceID, errwrap.Wrapf(`service check registration failed: {{err}}`, err)
	}

	return serviceID, nil
}

// runCheck immediately pushes a TTL check.
func (c *ConsulBackend) runCheck(sealed bool) error {
	// Run a TTL check
	agent := c.client.Agent()
	if !sealed {
		return agent.PassTTL(c.checkID(), "Vault Unsealed")
	}
	return agent.FailTTL(c.checkID(), "Vault Sealed")
}

// fetchServiceTags returns all of the relevant tags for Consul.
func (c *ConsulBackend) fetchServiceTags(active bool) []string {
	activeTag := "standby"
	if active {
		activeTag = "active"
	}
	return append(c.serviceTags, activeTag)
}

func (c *ConsulBackend) setRedirectAddr(addr string) (err error) {
	if addr == "" {
		return fmt.Errorf("redirect address must not be empty")
	}

	url, err := url.Parse(addr)
	if err != nil {
		return errwrap.Wrapf(fmt.Sprintf(`failed to parse redirect URL "%v": {{err}}`, addr), err)
	}

	var portStr string
	c.redirectHost, portStr, err = net.SplitHostPort(url.Host)
	if err != nil {
		if url.Scheme == "http" {
			portStr = "80"
		} else if url.Scheme == "https" {
			portStr = "443"
		} else if url.Scheme == "unix" {
			portStr = "-1"
			c.redirectHost = url.Path
		} else {
			return errwrap.Wrapf(fmt.Sprintf(`failed to find a host:port in redirect address "%v": {{err}}`, url.Host), err)
		}
	}
	c.redirectPort, err = strconv.ParseInt(portStr, 10, 0)
	if err != nil || c.redirectPort < -1 || c.redirectPort > 65535 {
		return errwrap.Wrapf(fmt.Sprintf(`failed to parse valid port "%v": {{err}}`, portStr), err)
	}

	return nil
}
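
// Illustrative examples (address values assumed) of how setRedirectAddr parses
// its input: "https://10.0.0.10:8200" yields redirectHost "10.0.0.10" and
// redirectPort 8200; an address without an explicit port falls back to 80 or
// 443 by scheme; "unix:///var/run/vault.sock" stores the socket path as
// redirectHost with a redirectPort of -1.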