package vault

import (
	"context"
	"errors"
	"os"
	"strings"
	"time"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/vault/helper/metricsutil"
	"github.com/hashicorp/vault/helper/namespace"
	"github.com/hashicorp/vault/physical/raft"
	"github.com/hashicorp/vault/sdk/helper/consts"
	"github.com/hashicorp/vault/sdk/logical"
)
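
// metricsLoop emits periodic gauge metrics until stopCh is closed: seal,
// HA, and replication status every second; barrier encryption counts every
// thirty seconds; and identity entity counts every ten minutes.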
func (c *Core) metricsLoop(stopCh chan struct{}) {
	emitTimer := time.Tick(time.Second)

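	// stopOrHAState acquires the state read lock, or gives up if stopCh is
	// closed first; it reports whether the loop should exit, along with the
	// current HA state read under the lock.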
stopOrHAState := func() (bool, consts.HAState) {
		l := newLockGrabber(c.stateLock.RLock, c.stateLock.RUnlock, stopCh)
		go l.grab()
		if stopped := l.lockOrStop(); stopped {
			return true, 0
		}
		defer c.stateLock.RUnlock()
		return false, c.HAState()
	}

	identityCountTimer := time.Tick(time.Minute * 10)
	// Only emit on the active node of a cluster that is not a DR secondary.
	if stopped, haState := stopOrHAState(); stopped {
		return
	} else if haState == consts.Standby || c.IsDRSecondary() {
		identityCountTimer = nil
	}

	writeTimer := time.Tick(time.Second * 30)
	// Do not process the writeTimer on DR secondary nodes
	if c.IsDRSecondary() {
		writeTimer = nil
	}

	// This loop covers
	// vault.expire.num_leases
	// vault.core.unsealed
	// vault.identity.num_entities
	// and the non-telemetry request counters shown in the UI.
	for {
		select {
		case <-emitTimer:
			stopped, haState := stopOrHAState()
			if stopped {
				return
			}
			if haState == consts.Active {
				c.metricsMutex.Lock()
				// Emit on active node only
				if c.expiration != nil {
					c.expiration.emitMetrics()
				}
				c.metricsMutex.Unlock()
			}

			// Refresh the sealed gauge, on all nodes
			if c.Sealed() {
				c.metricSink.SetGaugeWithLabels([]string{"core", "unsealed"}, 0, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "unsealed"}, 1, nil)
			}

			if c.UndoLogsEnabled() {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "write_undo_logs"}, 1, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "write_undo_logs"}, 0, nil)
			}

			// Refresh the standby gauge, on all nodes
			if haState != consts.Active {
				c.metricSink.SetGaugeWithLabels([]string{"core", "active"}, 0, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "active"}, 1, nil)
			}

			if haState == consts.PerfStandby {
				c.metricSink.SetGaugeWithLabels([]string{"core", "performance_standby"}, 1, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "performance_standby"}, 0, nil)
			}

			if c.ReplicationState().HasState(consts.ReplicationPerformancePrimary) {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "performance", "primary"}, 1, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "performance", "primary"}, 0, nil)
			}

			if c.IsPerfSecondary() {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "performance", "secondary"}, 1, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "performance", "secondary"}, 0, nil)
			}

			if c.ReplicationState().HasState(consts.ReplicationDRPrimary) {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "dr", "primary"}, 1, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "dr", "primary"}, 0, nil)
			}

			if c.IsDRSecondary() {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "dr", "secondary"}, 1, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "dr", "secondary"}, 0, nil)
			}

			// If we're using a raft backend, emit raft metrics
			if rb, ok := c.underlyingPhysical.(*raft.RaftBackend); ok {
				rb.CollectMetrics(c.MetricSink())
			}

			// Capture the total number of in-flight requests
			c.inFlightReqGaugeMetric()

			// Refresh gauge metrics that are looped
			c.cachedGaugeMetricsEmitter()

		case <-writeTimer:
			l := newLockGrabber(c.stateLock.RLock, c.stateLock.RUnlock, stopCh)
			go l.grab()
			if stopped := l.lockOrStop(); stopped {
				return
			}
			// Ship barrier encryption counts if a perf standby or the active node
			// on a performance secondary cluster
			if c.perfStandby || c.IsPerfSecondary() { // already have lock here, do not re-acquire
				err := syncBarrierEncryptionCounter(c)
				if err != nil {
					c.logger.Error("error syncing barrier encryption counters", "err", err)
				}
			}
			c.stateLock.RUnlock()

		case <-identityCountTimer:
			// TODO: this can be replaced by the identity gauge counter; we need to
			// sum across all namespaces.
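			// Count in a separate goroutine with a five-second timeout so a
			// slow entity count does not block the metrics loop.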
			go func() {
				ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
				defer cancel()
				entities, err := c.countActiveEntities(ctx)
				if err != nil {
					c.logger.Error("error counting identity entities", "err", err)
				} else {
					metrics.SetGauge([]string{"identity", "num_entities"}, float32(entities.Entities.Total))
				}
			}()

		case <-stopCh:
			return
		}
	}
}

// These wrappers redirect to the current instance of TokenStore; there is
// one wrapper per method because an additional level of abstraction seems
// confusing.
func (c *Core) tokenGaugeCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	if c.IsDRSecondary() {
		// there is no expiration manager on DR Secondaries
		return []metricsutil.GaugeLabelValues{}, nil
	}

	// stateLock or authLock protects the tokenStore pointer
	c.stateLock.RLock()
	ts := c.tokenStore
	c.stateLock.RUnlock()
	if ts == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil token store")
	}
	return ts.gaugeCollector(ctx)
}

func (c *Core) tokenGaugePolicyCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	if c.IsDRSecondary() {
		// there is no expiration manager on DR Secondaries
		return []metricsutil.GaugeLabelValues{}, nil
	}

	c.stateLock.RLock()
	ts := c.tokenStore
	c.stateLock.RUnlock()
	if ts == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil token store")
	}
	return ts.gaugeCollectorByPolicy(ctx)
}
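
// leaseExpiryGaugeCollector reports lease counts by expiration time, via the
// expiration manager's lease aggregation metrics.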
func (c *Core) leaseExpiryGaugeCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	c.stateLock.RLock()
	e := c.expiration
	metricsConsts := c.MetricSink().TelemetryConsts
	c.stateLock.RUnlock()
	if e == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil expiration manager")
	}
	return e.leaseAggregationMetrics(ctx, metricsConsts)
}

func (c *Core) tokenGaugeMethodCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	if c.IsDRSecondary() {
		// there is no expiration manager on DR Secondaries
		return []metricsutil.GaugeLabelValues{}, nil
	}

	c.stateLock.RLock()
	ts := c.tokenStore
	c.stateLock.RUnlock()
	if ts == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil token store")
	}
	return ts.gaugeCollectorByMethod(ctx)
}

func (c *Core) tokenGaugeTtlCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	if c.IsDRSecondary() {
		// there is no expiration manager on DR Secondaries
		return []metricsutil.GaugeLabelValues{}, nil
	}

	c.stateLock.RLock()
	ts := c.tokenStore
	c.stateLock.RUnlock()
	if ts == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil token store")
	}
	return ts.gaugeCollectorByTtl(ctx)
}

// emitMetricsActiveNode is used to start all the periodic metrics; all of them
// should be shut down when stopCh is closed. This code runs on the active node
// only.
func (c *Core) emitMetricsActiveNode(stopCh chan struct{}) {
	// The gauge collection processes are started and stopped here
	// because there's more than one TokenManager created during startup,
	// but we only want one set of gauges.
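	// Each entry below specifies a gauge metric name, a metadata label
	// identifying the gauge, the collector function that computes its values,
	// and an optional environment variable that disables its collection.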
metricsInit := []struct {
		MetricName    []string
		MetadataLabel []metrics.Label
		CollectorFunc metricsutil.GaugeCollector
		DisableEnvVar string
	}{
		{
			[]string{"token", "count"},
			[]metrics.Label{{"gauge", "token_by_namespace"}},
			c.tokenGaugeCollector,
			"",
		},
		{
			[]string{"token", "count", "by_policy"},
			[]metrics.Label{{"gauge", "token_by_policy"}},
			c.tokenGaugePolicyCollector,
			"",
		},
		{
			[]string{"expire", "leases", "by_expiration"},
			[]metrics.Label{{"gauge", "leases_by_expiration"}},
			c.leaseExpiryGaugeCollector,
			"",
		},
		{
			[]string{"token", "count", "by_auth"},
			[]metrics.Label{{"gauge", "token_by_auth"}},
			c.tokenGaugeMethodCollector,
			"",
		},
		{
			[]string{"token", "count", "by_ttl"},
			[]metrics.Label{{"gauge", "token_by_ttl"}},
			c.tokenGaugeTtlCollector,
			"",
		},
		{
			[]string{"secret", "kv", "count"},
			[]metrics.Label{{"gauge", "kv_secrets_by_mountpoint"}},
			c.kvSecretGaugeCollector,
			"VAULT_DISABLE_KV_GAUGE",
		},
		{
			[]string{"identity", "entity", "count"},
			[]metrics.Label{{"gauge", "identity_by_namespace"}},
			c.entityGaugeCollector,
			"",
		},
		{
			[]string{"identity", "entity", "alias", "count"},
			[]metrics.Label{{"gauge", "identity_by_mountpoint"}},
			c.entityGaugeCollectorByMount,
			"",
		},
		{
			[]string{"identity", "entity", "active", "partial_month"},
			[]metrics.Label{{"gauge", "identity_active_month"}},
			c.activeEntityGaugeCollector,
			"",
		},
	}

	// Disable collection if configured, or if we're a performance standby
	// node or DR secondary cluster.
	if c.MetricSink().GaugeInterval == time.Duration(0) {
		c.logger.Info("usage gauge collection is disabled")
	} else if standby, _ := c.Standby(); !standby && !c.IsDRSecondary() {
		for _, init := range metricsInit {
			if init.DisableEnvVar != "" {
				if os.Getenv(init.DisableEnvVar) != "" {
					c.logger.Info("usage gauge collection is disabled for",
						"metric", init.MetricName)
					continue
				}
			}

			proc, err := c.MetricSink().NewGaugeCollectionProcess(
				init.MetricName,
				init.MetadataLabel,
				init.CollectorFunc,
				c.logger,
			)
			if err != nil {
				c.logger.Error("failed to start collector", "metric", init.MetricName, "error", err)
			} else {
				go proc.Run()
				defer proc.Stop()
			}
		}
	}

	// When this returns, all the defers set up above will fire.
	c.metricsLoop(stopCh)
}
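
// kvMount records a KV secrets engine mount and accumulates the number of
// secrets found under it.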
type kvMount struct {
	Namespace  *namespace.Namespace
	MountPoint string
	Version    string
	NumSecrets int
}
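
// findKvMounts returns a kvMount entry for every KV ("kv" or legacy
// "generic") secrets engine currently mounted; mounts without an explicit
// version option are treated as version 1.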
func (c *Core) findKvMounts() []*kvMount {
	mounts := make([]*kvMount, 0)

	c.mountsLock.RLock()
	defer c.mountsLock.RUnlock()

	// We don't grab the stateLock, so this code might run during or after the
	// seal process. Therefore we need to check that c.mounts is not nil;
	// otherwise this will panic when run after seal.
	if c.mounts == nil {
		return mounts
	}

	for _, entry := range c.mounts.Entries {
		if entry.Type == "kv" || entry.Type == "generic" {
			version, ok := entry.Options["version"]
			if !ok {
				version = "1"
			}
			mounts = append(mounts, &kvMount{
				Namespace:  entry.namespace,
				MountPoint: entry.Path,
				Version:    version,
				NumSecrets: 0,
			})
		}
	}
	return mounts
}

func (c *Core) kvCollectionErrorCount() {
	c.MetricSink().IncrCounterWithLabels(
		[]string{"metrics", "collection", "error"},
		1,
		[]metrics.Label{{"gauge", "kv_secrets_by_mountpoint"}},
	)
}
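
// walkKvMountSecrets performs a breadth-first traversal of the given KV
// mount via internal LIST requests, incrementing m.NumSecrets for every leaf
// key found. KV v2 mounts are listed under their metadata/ path.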
func (c *Core) walkKvMountSecrets(ctx context.Context, m *kvMount) {
	var subdirectories []string
	if m.Version == "1" {
		subdirectories = []string{m.Namespace.Path + m.MountPoint}
	} else {
		subdirectories = []string{m.Namespace.Path + m.MountPoint + "metadata/"}
	}

	for len(subdirectories) > 0 {
		// Check for cancellation
		select {
		case <-ctx.Done():
			return
		default:
			break
		}

		currentDirectory := subdirectories[0]
		subdirectories = subdirectories[1:]

		listRequest := &logical.Request{
			Operation: logical.ListOperation,
			Path:      currentDirectory,
		}
		resp, err := c.router.Route(ctx, listRequest)
		if err != nil {
			c.kvCollectionErrorCount()
			// ErrUnsupportedPath probably means that the mount is not there any more,
			// don't log those cases.
			if !strings.Contains(err.Error(), logical.ErrUnsupportedPath.Error()) {
				c.logger.Error("failed to perform internal KV list", "mount_point", m.MountPoint, "error", err)
				break
			}
			// Quit handling this mount point (but it'll still appear in the list)
			return
		}
		if resp == nil {
			continue
		}
		rawKeys, ok := resp.Data["keys"]
		if !ok {
			continue
		}
		keys, ok := rawKeys.([]string)
		if !ok {
			c.kvCollectionErrorCount()
			c.logger.Error("KV list keys are not a []string", "mount_point", m.MountPoint, "rawKeys", rawKeys)
			// Quit handling this mount point (but it'll still appear in the list)
			return
		}
		for _, path := range keys {
			if len(path) > 0 && path[len(path)-1] == '/' {
				subdirectories = append(subdirectories, currentDirectory+path)
			} else {
				m.NumSecrets += 1
			}
		}
	}
}
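
// kvSecretGaugeCollector reports the number of secrets stored in each KV
// mount, labeled by namespace and mount point.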
func (c *Core) kvSecretGaugeCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	// Find all KV mounts
	mounts := c.findKvMounts()
	results := make([]metricsutil.GaugeLabelValues, len(mounts))

	// We use the root namespace, so the namespace path must be included in
	// any queries.
	ctx = namespace.RootContext(ctx)

	// Route list requests to all the identified mounts.
	// (All of these will show up as activity in the vault.route metric.)
	// Then we have to explore each subdirectory.
	for i, m := range mounts {
		// Check for cancellation, return empty array
		select {
		case <-ctx.Done():
			return []metricsutil.GaugeLabelValues{}, nil
		default:
			break
		}

		results[i].Labels = []metrics.Label{
			metricsutil.NamespaceLabel(m.Namespace),
			{"mount_point", m.MountPoint},
		}

		c.walkKvMountSecrets(ctx, m)
		results[i].Value = float32(m.NumSecrets)
	}

	return results, nil
}
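
// entityGaugeCollector reports identity entity counts per namespace.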
func (c *Core) entityGaugeCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	// Protect against concurrent changes during seal
	c.stateLock.RLock()
	identityStore := c.identityStore
	c.stateLock.RUnlock()
	if identityStore == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil identity store")
	}

	byNamespace, err := identityStore.countEntitiesByNamespace(ctx)
	if err != nil {
		return []metricsutil.GaugeLabelValues{}, err
	}

	// No check for expiration here; the bulk of the work should be in
	// counting the entities.
	allNamespaces := c.collectNamespaces()
	values := make([]metricsutil.GaugeLabelValues, len(allNamespaces))
	for i := range values {
		values[i].Labels = []metrics.Label{
			metricsutil.NamespaceLabel(allNamespaces[i]),
		}
		values[i].Value = float32(byNamespace[allNamespaces[i].ID])
	}

	return values, nil
}
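
// entityGaugeCollectorByMount reports identity entity counts grouped by the
// auth mount that created each entity's alias, labeled by namespace, auth
// method, and mount point.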
func (c *Core) entityGaugeCollectorByMount(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	c.stateLock.RLock()
	identityStore := c.identityStore
	c.stateLock.RUnlock()
	if identityStore == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil identity store")
	}

	byAccessor, err := identityStore.countEntitiesByMountAccessor(ctx)
	if err != nil {
		return []metricsutil.GaugeLabelValues{}, err
	}

	values := make([]metricsutil.GaugeLabelValues, 0)
	for accessor, count := range byAccessor {
		// Terminate if taking too long to do the translation
		select {
		case <-ctx.Done():
			return values, errors.New("context cancelled")
		default:
			break
		}

		c.stateLock.RLock()
		mountEntry := c.router.MatchingMountByAccessor(accessor)
		c.stateLock.RUnlock()
		if mountEntry == nil {
			continue
		}
		values = append(values, metricsutil.GaugeLabelValues{
			Labels: []metrics.Label{
				metricsutil.NamespaceLabel(mountEntry.namespace),
				{"auth_method", mountEntry.Type},
				{"mount_point", "auth/" + mountEntry.Path},
			},
			Value: float32(count),
		})
	}

	return values, nil
}
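
// cachedGaugeMetricsEmitter re-emits the gauge values cached in the metrics
// helper's LoopMetrics map, so values computed elsewhere stay fresh in the
// sink.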
func (c *Core) cachedGaugeMetricsEmitter() {
	if c.metricsHelper == nil {
		return
	}

	loopMetrics := &c.metricsHelper.LoopMetrics.Metrics

	emit := func(key interface{}, value interface{}) bool {
		metricValue := value.(metricsutil.GaugeMetric)
		c.metricSink.SetGaugeWithLabels(metricValue.Key, metricValue.Value, metricValue.Labels)
		return true
	}

	loopMetrics.Range(emit)
}
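
// inFlightReqGaugeMetric emits the current number of in-flight requests as
// the core.in_flight_requests gauge.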
func (c *Core) inFlightReqGaugeMetric() {
	totalInFlightReq := c.inFlightReqData.InFlightReqCount.Load()
	// Emit a gauge metric capturing the total number of in-flight requests
	c.metricSink.SetGaugeWithLabels([]string{"core", "in_flight_requests"}, float32(totalInFlightReq), nil)
}