// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package consul

import (
    "context"
    "errors"
    "fmt"
    "net/http"
    "strconv"
    "strings"
    "sync"
    "sync/atomic"
    "time"

    "github.com/armon/go-metrics"
    "github.com/hashicorp/consul/api"
    log "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/go-multierror"
    "github.com/hashicorp/go-secure-stdlib/parseutil"
    "github.com/hashicorp/go-secure-stdlib/tlsutil"
    "github.com/hashicorp/vault/sdk/helper/consts"
    "github.com/hashicorp/vault/sdk/physical"
    "github.com/hashicorp/vault/vault/diagnose"
    "golang.org/x/net/http2"
)

const (
    // consistencyModeDefault is the configuration value used to tell
    // consul to use default consistency.
    consistencyModeDefault = "default"

    // consistencyModeStrong is the configuration value used to tell
    // consul to use strong consistency.
    consistencyModeStrong = "strong"

    // nonExistentKey is used as part of a capabilities check against Consul
    nonExistentKey = "F35C28E1-7035-40BB-B865-6BED9E3A1B28"
)

// Verify ConsulBackend satisfies the correct interfaces
var (
    _ physical.Backend          = (*ConsulBackend)(nil)
    _ physical.FencingHABackend = (*ConsulBackend)(nil)
    _ physical.Lock             = (*ConsulLock)(nil)
    _ physical.Transactional    = (*ConsulBackend)(nil)

    GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in consul backend")
)

// ConsulBackend is a physical backend that stores data at a specific
// prefix within Consul. It is used for most production situations as
// it allows Vault to run on multiple machines in a highly-available manner.
type ConsulBackend struct {
    logger          log.Logger
    client          *api.Client
    path            string
    kv              *api.KV
    txn             *api.Txn
    permitPool      *physical.PermitPool
    consistencyMode string
    sessionTTL      string
    lockWaitTime    time.Duration

    // failGetInTxn is only used in tests.
    failGetInTxn *uint32

    activeNodeLock atomic.Pointer[ConsulLock]
}
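
// A minimal storage stanza for this backend might look like the following
// (illustrative values only; every key shown is optional and is read from the
// conf map handled by NewConsulBackend and SetupSecureTLS below):
//
//	storage "consul" {
//	  address = "127.0.0.1:8500"
//	  path    = "vault/"
//	}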

// NewConsulBackend constructs a Consul backend using the given API client
// and the prefix in the KV store.
func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
    // Get the path in Consul
    path, ok := conf["path"]
    if !ok {
        path = "vault/"
    }
    if logger.IsDebug() {
        logger.Debug("config path set", "path", path)
    }

    // Ensure path is suffixed but not prefixed
    if !strings.HasSuffix(path, "/") {
        logger.Warn("appending trailing forward slash to path")
        path += "/"
    }
    if strings.HasPrefix(path, "/") {
        logger.Warn("trimming path of its forward slash")
        path = strings.TrimPrefix(path, "/")
    }

    sessionTTL := api.DefaultLockSessionTTL
    sessionTTLStr, ok := conf["session_ttl"]
    if ok {
        _, err := parseutil.ParseDurationSecond(sessionTTLStr)
        if err != nil {
            return nil, fmt.Errorf("invalid session_ttl: %w", err)
        }
        sessionTTL = sessionTTLStr
        if logger.IsDebug() {
            logger.Debug("config session_ttl set", "session_ttl", sessionTTL)
        }
    }

    lockWaitTime := api.DefaultLockWaitTime
    lockWaitTimeRaw, ok := conf["lock_wait_time"]
    if ok {
        d, err := parseutil.ParseDurationSecond(lockWaitTimeRaw)
        if err != nil {
            return nil, fmt.Errorf("invalid lock_wait_time: %w", err)
        }
        lockWaitTime = d
        if logger.IsDebug() {
            logger.Debug("config lock_wait_time set", "lock_wait_time", d)
        }
    }

    maxParStr, ok := conf["max_parallel"]
    var maxParInt int
    if ok {
        // Assign to the outer maxParInt (rather than shadowing it with :=) so
        // the configured value actually reaches the permit pool below.
        var err error
        maxParInt, err = strconv.Atoi(maxParStr)
        if err != nil {
            return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
        }
        if logger.IsDebug() {
            logger.Debug("max_parallel set", "max_parallel", maxParInt)
        }
    }

    consistencyMode, ok := conf["consistency_mode"]
    if ok {
        switch consistencyMode {
        case consistencyModeDefault, consistencyModeStrong:
        default:
            return nil, fmt.Errorf("invalid consistency_mode value: %q", consistencyMode)
        }
    } else {
        consistencyMode = consistencyModeDefault
    }

    // Configure the client
    consulConf := api.DefaultConfig()
    // Set MaxIdleConnsPerHost to the number of processes used in expiration.Restore
    consulConf.Transport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount

    if err := SetupSecureTLS(context.Background(), consulConf, conf, logger, false); err != nil {
        return nil, fmt.Errorf("client setup failed: %w", err)
    }

    consulConf.HttpClient = &http.Client{Transport: consulConf.Transport}
    client, err := api.NewClient(consulConf)
    if err != nil {
        return nil, fmt.Errorf("client setup failed: %w", err)
    }

    // Set up the backend
    c := &ConsulBackend{
        logger:          logger,
        path:            path,
        client:          client,
        kv:              client.KV(),
        txn:             client.Txn(),
        permitPool:      physical.NewPermitPool(maxParInt),
        consistencyMode: consistencyMode,
        sessionTTL:      sessionTTL,
        lockWaitTime:    lockWaitTime,
        failGetInTxn:    new(uint32),
    }

    return c, nil
}
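
// SetupSecureTLS applies the address, scheme, token and (for the https
// scheme) TLS settings from the given configuration map to the Consul API
// client config. In diagnose mode it only verifies the configured TLS
// certificate and key files instead of wiring them into the transport.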
func SetupSecureTLS(ctx context.Context, consulConf *api.Config, conf map[string]string, logger log.Logger, isDiagnose bool) error {
    if addr, ok := conf["address"]; ok {
        consulConf.Address = addr
        if logger.IsDebug() {
            logger.Debug("config address set", "address", addr)
        }

        // Copied from the Consul API module; set the Scheme based on
        // the protocol field if address looks like a URL.
        // This can enable the TLS configuration below.
        parts := strings.SplitN(addr, "://", 2)
        if len(parts) == 2 {
            if parts[0] == "http" || parts[0] == "https" {
                consulConf.Scheme = parts[0]
                consulConf.Address = parts[1]
                if logger.IsDebug() {
                    logger.Debug("config address parsed", "scheme", parts[0])
                    logger.Debug("config scheme parsed", "address", parts[1])
                }
            } // allow "unix:" or whatever else consul supports in the future
        }
    }
    if scheme, ok := conf["scheme"]; ok {
        consulConf.Scheme = scheme
        if logger.IsDebug() {
            logger.Debug("config scheme set", "scheme", scheme)
        }
    }
    if token, ok := conf["token"]; ok {
        consulConf.Token = token
        logger.Debug("config token set")
    }

    if consulConf.Scheme == "https" {
        if isDiagnose {
            certPath, okCert := conf["tls_cert_file"]
            keyPath, okKey := conf["tls_key_file"]
            if okCert && okKey {
                warnings, err := diagnose.TLSFileChecks(certPath, keyPath)
                for _, warning := range warnings {
                    diagnose.Warn(ctx, warning)
                }
                if err != nil {
                    return err
                }
                return nil
            }
            return fmt.Errorf("key or cert path: %s, %s, cannot be loaded from consul config file", certPath, keyPath)
        }

        // Use the parsed Address instead of the raw conf['address']
        tlsClientConfig, err := tlsutil.SetupTLSConfig(conf, consulConf.Address)
        if err != nil {
            return err
        }

        consulConf.Transport.TLSClientConfig = tlsClientConfig
        if err := http2.ConfigureTransport(consulConf.Transport); err != nil {
            return err
        }
        logger.Debug("configured TLS")
    } else {
        if isDiagnose {
            diagnose.Skipped(ctx, "HTTPS is not used, Skipping TLS verification.")
        }
    }
    return nil
}

// ExpandedCapabilitiesAvailable tests to see if Consul has KVGetOrEmpty and
// 128 entries per transaction available.
func (c *ConsulBackend) ExpandedCapabilitiesAvailable(ctx context.Context) bool {
    available := false

    maxEntries := 128
    ops := make([]*api.TxnOp, maxEntries)
    for i := 0; i < maxEntries; i++ {
        ops[i] = &api.TxnOp{KV: &api.KVTxnOp{
            Key:  c.path + nonExistentKey,
            Verb: api.KVGetOrEmpty,
        }}
    }

    c.permitPool.Acquire()
    defer c.permitPool.Release()

    queryOpts := &api.QueryOptions{}
    queryOpts = queryOpts.WithContext(ctx)

    ok, resp, _, err := c.txn.Txn(ops, queryOpts)
    if ok && len(resp.Errors) == 0 && err == nil {
        available = true
    }

    return available
}
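
// writeTxnOps returns a new slice of transaction operations with capacity for
// len additional entries. If an active node lock has been registered (and the
// write is not marked as unfenced), the slice is pre-populated with a
// KVCheckSession operation against the lock's session so the whole
// transaction fails if we no longer hold the lock; the session ID used for
// that check is returned alongside the ops.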
func (c *ConsulBackend) writeTxnOps(ctx context.Context, len int) ([]*api.TxnOp, string) {
    if len < 1 {
        len = 1
    }
    ops := make([]*api.TxnOp, 0, len+1)

    // If we don't have a lock yet, return a transaction with no session check. We
    // need to do this to allow writes during cluster initialization before there
    // is an active node.
    lock := c.activeNodeLock.Load()
    if lock == nil {
        return ops, ""
    }

    lockKey, lockSession := lock.Info()
    if lockKey == "" || lockSession == "" {
        return ops, ""
    }

    // If the context used to write has been marked as a special case write that
    // happens outside of a lock then don't add the session check.
    if physical.IsUnfencedWrite(ctx) {
        return ops, ""
    }

    // Insert the session check operation at index 0. This will allow us later to
    // work out easily if a write failure is because of the session check.
    ops = append(ops, &api.TxnOp{
        KV: &api.KVTxnOp{
            Verb:    api.KVCheckSession,
            Key:     lockKey,
            Session: lockSession,
        },
    })
    return ops, lockSession
}

// Transaction is used to run multiple entries via a transaction.
func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
    return c.txnInternal(ctx, txns, "transaction")
}
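
// txnInternal is the shared implementation behind Transaction, Put and
// Delete. It converts the given entries into a single Consul transaction,
// prefixed with a session check when an active node lock is registered, and
// populates Get entries from the results. apiOpName is only used as the
// metrics key.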
func (c *ConsulBackend) txnInternal(ctx context.Context, txns []*physical.TxnEntry, apiOpName string) error {
    if len(txns) == 0 {
        return nil
    }
    defer metrics.MeasureSince([]string{"consul", apiOpName}, time.Now())

    failGetInTxn := atomic.LoadUint32(c.failGetInTxn)
    for _, t := range txns {
        if t.Operation == physical.GetOperation && failGetInTxn != 0 {
            return GetInTxnDisabledError
        }
    }

    ops, sessionID := c.writeTxnOps(ctx, len(txns))
    for _, t := range txns {
        o, err := c.makeApiTxn(t)
        if err != nil {
            return fmt.Errorf("error converting physical transactions into api transactions: %w", err)
        }

        ops = append(ops, o)
    }

    c.permitPool.Acquire()
    defer c.permitPool.Release()

    var retErr *multierror.Error
    kvMap := make(map[string][]byte, 0)

    queryOpts := &api.QueryOptions{}
    queryOpts = queryOpts.WithContext(ctx)

    ok, resp, _, err := c.txn.Txn(ops, queryOpts)
    if err != nil {
        if strings.Contains(err.Error(), "is too large") {
            return fmt.Errorf("%s: %w", physical.ErrValueTooLarge, err)
        }
        return err
    }
    if ok && len(resp.Errors) == 0 {
        // Loop over results and cache them in a map. Note that we're only caching
        // the first time we see a key, which _should_ correspond to a Get
        // operation, since we expect those come first in our txns slice (though
        // after check-session).
        for _, txnr := range resp.Results {
            if len(txnr.KV.Value) > 0 {
                // We need to trim the Consul kv path (typically "vault/") from the key
                // otherwise it won't match the transaction entries we have.
                key := strings.TrimPrefix(txnr.KV.Key, c.path)
                if _, found := kvMap[key]; !found {
                    kvMap[key] = txnr.KV.Value
                }
            }
        }
    }

    if len(resp.Errors) > 0 {
        for _, res := range resp.Errors {
            retErr = multierror.Append(retErr, errors.New(res.What))
            if res.OpIndex == 0 && sessionID != "" {
                // We added a session check (sessionID not empty) so an error at OpIndex
                // 0 means that we failed that session check. We don't attempt to string
                // match because Consul can return at least three different errors here
                // with no common string. In all cases though failing this check means
                // we no longer hold the lock because it was released, modified or
                // deleted. Rather than just continuing to try writing until the
                // blocking query manages to notice we're no longer the lock holder
                // (which can take tens of seconds even in good network conditions in my
                // testing) we can now Unlock directly here. Our ConsulLock now has a
                // shortcut that will cause the lock to close the leaderCh immediately
                // when we call it, without waiting for the blocking query to return
                // (unlike Consul's current Lock implementation). But before we unlock,
                // we should re-load the lock and ensure it's still the same instance we
                // just tried to write with in case this goroutine is somehow really
                // delayed and we actually acquired a whole new lock in the meantime!
                lock := c.activeNodeLock.Load()
                if lock != nil {
                    _, lockSessionID := lock.Info()
                    if sessionID == lockSessionID {
                        c.logger.Warn("session check failed on write, we lost active node lock, stepping down", "err", res.What)
                        lock.Unlock()
                    }
                }
            }
        }
    }

    if retErr != nil {
        return retErr
    }

    // Loop over our get transactions and populate any values found in our map cache.
    for _, t := range txns {
        if val, ok := kvMap[t.Entry.Key]; ok && t.Operation == physical.GetOperation {
            newVal := make([]byte, len(val))
            copy(newVal, val)
            t.Entry.Value = newVal
        }
    }

    return nil
}
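
// makeApiTxn converts a physical.TxnEntry into the equivalent Consul KV
// transaction operation.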
func (c *ConsulBackend) makeApiTxn(txn *physical.TxnEntry) (*api.TxnOp, error) {
    op := &api.KVTxnOp{
        Key: c.path + txn.Entry.Key,
    }
    switch txn.Operation {
    case physical.GetOperation:
        op.Verb = api.KVGetOrEmpty
    case physical.DeleteOperation:
        op.Verb = api.KVDelete
    case physical.PutOperation:
        op.Verb = api.KVSet
        op.Value = txn.Entry.Value
    default:
        return nil, fmt.Errorf("%q is not a supported transaction operation", txn.Operation)
    }

    return &api.TxnOp{KV: op}, nil
}

// Put is used to insert or update an entry
func (c *ConsulBackend) Put(ctx context.Context, entry *physical.Entry) error {
    txns := []*physical.TxnEntry{
        {
            Operation: physical.PutOperation,
            Entry:     entry,
        },
    }
    return c.txnInternal(ctx, txns, "put")
}

// Get is used to fetch an entry
func (c *ConsulBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
    defer metrics.MeasureSince([]string{"consul", "get"}, time.Now())

    c.permitPool.Acquire()
    defer c.permitPool.Release()

    queryOpts := &api.QueryOptions{}
    queryOpts = queryOpts.WithContext(ctx)

    if c.consistencyMode == consistencyModeStrong {
        queryOpts.RequireConsistent = true
    }

    pair, _, err := c.kv.Get(c.path+key, queryOpts)
    if err != nil {
        return nil, err
    }
    if pair == nil {
        return nil, nil
    }
    ent := &physical.Entry{
        Key:   key,
        Value: pair.Value,
    }
    return ent, nil
}

// Delete is used to permanently delete an entry
func (c *ConsulBackend) Delete(ctx context.Context, key string) error {
    txns := []*physical.TxnEntry{
        {
            Operation: physical.DeleteOperation,
            Entry: &physical.Entry{
                Key: key,
            },
        },
    }
    return c.txnInternal(ctx, txns, "delete")
}

// List is used to list all the keys under a given
// prefix, up to the next prefix.
func (c *ConsulBackend) List(ctx context.Context, prefix string) ([]string, error) {
    defer metrics.MeasureSince([]string{"consul", "list"}, time.Now())
    scan := c.path + prefix

    // The TrimPrefix call below will not work correctly if we have "//" at the
    // end. This can happen in cases where you are e.g. listing the root of a
    // prefix in a logical backend via "/" instead of ""
    if strings.HasSuffix(scan, "//") {
        scan = scan[:len(scan)-1]
    }

    c.permitPool.Acquire()
    defer c.permitPool.Release()

    queryOpts := &api.QueryOptions{}
    queryOpts = queryOpts.WithContext(ctx)

    out, _, err := c.kv.Keys(scan, "/", queryOpts)
    for idx, val := range out {
        out[idx] = strings.TrimPrefix(val, scan)
    }

    return out, err
}
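
// FailGetInTxn is a testing helper: when enabled, any transaction containing
// a Get operation fails with GetInTxnDisabledError.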
func (c *ConsulBackend) FailGetInTxn(fail bool) {
    var val uint32
    if fail {
        val = 1
    }
    atomic.StoreUint32(c.failGetInTxn, val)
}

// LockWith is used for mutual exclusion based on the given key.
func (c *ConsulBackend) LockWith(key, value string) (physical.Lock, error) {
    cl := &ConsulLock{
        logger:          c.logger,
        client:          c.client,
        key:             c.path + key,
        value:           value,
        consistencyMode: c.consistencyMode,
        sessionTTL:      c.sessionTTL,
        lockWaitTime:    c.lockWaitTime,
    }
    return cl, nil
}

// HAEnabled indicates whether the HA functionality should be exposed.
// Currently always returns true.
func (c *ConsulBackend) HAEnabled() bool {
    return true
}

// DetectHostAddr is used to detect the host address by asking the Consul agent
func (c *ConsulBackend) DetectHostAddr() (string, error) {
    agent := c.client.Agent()
    self, err := agent.Self()
    if err != nil {
        return "", err
    }
    addr, ok := self["Member"]["Addr"].(string)
    if !ok {
        return "", fmt.Errorf("unable to convert an address to string")
    }
    return addr, nil
}

// RegisterActiveNodeLock is called after the active node lock is obtained to
// allow us to fence future writes.
func (c *ConsulBackend) RegisterActiveNodeLock(l physical.Lock) error {
    cl, ok := l.(*ConsulLock)
    if !ok {
        return fmt.Errorf("invalid Lock type")
    }
    c.activeNodeLock.Store(cl)
    key, sessionID := cl.Info()
    c.logger.Info("registered active node lock", "key", key, "sessionID", sessionID)
    return nil
}

// ConsulLock is used to provide the Lock interface backed by Consul. We work
// around some limitations of Consul's api.Lock noted in
// https://github.com/hashicorp/consul/issues/18271 by creating and managing the
// session ourselves, while using Consul's Lock to do the heavy lifting.
type ConsulLock struct {
    logger          log.Logger
    client          *api.Client
    key             string
    value           string
    consistencyMode string
    sessionTTL      string
    lockWaitTime    time.Duration

    mu      sync.Mutex // protects session state
    session *lockSession
    // sessionID is a copy of the value from session.id. We use a separate field
    // because `Info` needs to keep returning the same sessionID after Unlock has
    // cleaned up the session state so that we continue to fence any writes still
    // in flight after the lock is Unlocked. It's easier to reason about that as a
    // separate field rather than keeping an already-terminated session object
    // around. Once Lock is called again this will be replaced (while mu is
    // locked) with the new session ID. Must hold mu to read or write this.
    sessionID string
}
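
// lockSession bundles a single Consul session with the api.Lock held under it
// so the pair can be acquired, monitored and cleaned up together.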
type lockSession struct {
    // id is immutable after the session is created so does not need mu held
    id string

    // mu protects the lock and unlockCh to ensure they are only cleaned up once
    mu       sync.Mutex
    lock     *api.Lock
    unlockCh chan struct{}
}
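
// Lock attempts to acquire the underlying Consul lock. On success it returns
// a channel that is closed when the lock is lost; if the lock could not be
// acquired within the lock wait time it returns nil, nil and cleans up the
// session state.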
func (s *lockSession) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
    s.mu.Lock()
    defer s.mu.Unlock()

    lockHeld := false
    defer func() {
        if !lockHeld {
            s.cleanupLocked()
        }
    }()

    consulLeaderCh, err := s.lock.Lock(stopCh)
    if err != nil {
        return nil, err
    }
    if consulLeaderCh == nil {
        // If both leaderCh and err are nil from Consul's Lock then it means we
        // waited for the lockWait without grabbing it.
        return nil, nil
    }
    // We got the Lock, monitor it!
    lockHeld = true
    leaderCh := make(chan struct{})
    go s.monitorLock(leaderCh, s.unlockCh, consulLeaderCh)
    return leaderCh, nil
}

// monitorLock waits for either unlockCh or consulLeaderCh to close and then
// closes leaderCh. It's designed to be run in a separate goroutine. Note that
// we pass unlockCh rather than accessing it via the member variable because it
// is mutated under the lock during Unlock so reading it from s could be racy.
// We just need the chan created at the call site here so we pass it instead of
// locking and unlocking in here.
func (s *lockSession) monitorLock(leaderCh chan struct{}, unlockCh, consulLeaderCh <-chan struct{}) {
    select {
    case <-unlockCh:
    case <-consulLeaderCh:
    }
    // We lost the lock. Close the leaderCh
    close(leaderCh)

    // Whichever chan closed, cleanup to unwind all the state. If we were
    // triggered by a cleanup call this will be a no-op, but if not it ensures all
    // state is cleaned up correctly.
    s.cleanup()
}
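
// cleanup releases the session's lock state, taking mu first so it is safe to
// call from multiple goroutines.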
func (s *lockSession) cleanup() {
    s.mu.Lock()
    defer s.mu.Unlock()

    s.cleanupLocked()
}
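
// cleanupLocked releases the Consul lock and closes unlockCh at most once.
// Callers must hold mu.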
func (s *lockSession) cleanupLocked() {
    if s.lock != nil {
        s.lock.Unlock()
        s.lock = nil
    }
    if s.unlockCh != nil {
        close(s.unlockCh)
        s.unlockCh = nil
    }
    // Don't bother destroying sessions as they will be destroyed after TTL
    // anyway.
}
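
// createSession creates a new Consul session with the configured TTL, builds
// an api.Lock for the key bound to that session, and starts a goroutine that
// renews the session until unlockCh is closed.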
func (c *ConsulLock) createSession() (*lockSession, error) {
    se := &api.SessionEntry{
        Name: "Vault Lock",
        TTL:  c.sessionTTL,
        // We use Consul's default LockDelay of 15s by not specifying it
    }
    session, _, err := c.client.Session().Create(se, nil)
    if err != nil {
        return nil, err
    }

    opts := &api.LockOptions{
        Key:            c.key,
        Value:          []byte(c.value),
        Session:        session,
        MonitorRetries: 5,
        LockWaitTime:   c.lockWaitTime,
        SessionTTL:     c.sessionTTL,
    }
    lock, err := c.client.LockOpts(opts)
    if err != nil {
        // Don't bother destroying sessions as they will be destroyed after TTL
        // anyway.
        return nil, fmt.Errorf("failed to create lock: %w", err)
    }

    unlockCh := make(chan struct{})

    s := &lockSession{
        id:       session,
        lock:     lock,
        unlockCh: unlockCh,
    }

    // Start renewals of the session
    go func() {
        // Note we capture unlockCh here rather than s.unlockCh because s.unlockCh
        // is mutated on cleanup which is racy since we don't hold a lock here.
        // unlockCh will never be mutated though.
        err := c.client.Session().RenewPeriodic(c.sessionTTL, session, nil, unlockCh)
        if err != nil {
            c.logger.Error("failed to renew consul session for more than the TTL, lock lost", "err", err)
        }
        // Release other resources for this session only, i.e. don't call c.Unlock
        // as the lock might now be held under a different session.
        s.cleanup()
    }()
    return s, nil
}
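
// Lock acquires the lock by creating a fresh session and locking the key
// under it, returning a channel that is closed if leadership is lost. It
// errors if this lock instance is already held.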
func (c *ConsulLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
    c.mu.Lock()
    defer c.mu.Unlock()

    if c.session != nil {
        return nil, fmt.Errorf("lock instance already locked")
    }

    session, err := c.createSession()
    if err != nil {
        return nil, err
    }
    leaderCh, err := session.Lock(stopCh)
    if leaderCh != nil && err == nil {
        // We hold the lock, store the session
        c.session = session
        c.sessionID = session.id
    }
    return leaderCh, err
}
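
// Unlock releases the lock and tears down the current session, while leaving
// sessionID in place so Info keeps fencing any writes still in flight.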
func (c *ConsulLock) Unlock() error {
    c.mu.Lock()
    defer c.mu.Unlock()

    if c.session != nil {
        c.session.cleanup()
        c.session = nil
        // Don't clear c.sessionID since we rely on returning the same old ID after
        // Unlock until the next Lock.
    }
    return nil
}
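
// Value returns whether any session currently holds the lock in Consul and
// the value stored at the lock key.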
func (c *ConsulLock) Value() (bool, string, error) {
    kv := c.client.KV()

    var queryOptions *api.QueryOptions
    if c.consistencyMode == consistencyModeStrong {
        queryOptions = &api.QueryOptions{
            RequireConsistent: true,
        }
    }

    pair, _, err := kv.Get(c.key, queryOptions)
    if err != nil {
        return false, "", err
    }
    if pair == nil {
        return false, "", nil
    }
    // Note that held is expected to mean "does _any_ node hold the lock" not
    // "does this current instance hold the lock" so although we know what our own
    // session ID is, we don't check it matches here only that there is _some_
    // session in Consul holding the lock right now.
    held := pair.Session != ""
    value := string(pair.Value)
    return held, value, nil
}
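
// Info returns the lock key and the ID of the session most recently used to
// hold it; together these are used to fence writes.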
func (c *ConsulLock) Info() (key, sessionid string) {
    c.mu.Lock()
    defer c.mu.Unlock()

    return c.key, c.sessionID
}