ed14061578
* Work on raft backend * Add logstore locally * Add encryptor and unsealable interfaces * Add clustering support to raft * Remove client and handler * Bootstrap raft on init * Cleanup raft logic a bit * More raft work * Work on TLS config * More work on bootstrapping * Fix build * More work on bootstrapping * More bootstrapping work * fix build * Remove consul dep * Fix build * merged oss/master into raft-storage * Work on bootstrapping * Get bootstrapping to work * Clean up FMS and node-id * Update local node ID logic * Cleanup node-id change * Work on snapshotting * Raft: Add remove peer API (#906) * Add remove peer API * Add some comments * Fix existing snapshotting (#909) * Raft get peers API (#912) * Read raft configuration * address review feedback * Use the Leadership Transfer API to step-down the active node (#918) * Raft join and unseal using Shamir keys (#917) * Raft join using shamir * Store AEAD instead of master key * Split the raft join process to answer the challenge after a successful unseal * get the follower to standby state * Make unseal work * minor changes * Some input checks * reuse the shamir seal access instead of new default seal access * refactor joinRaftSendAnswer function * Synchronously send answer in auto-unseal case * Address review feedback * Raft snapshots (#910) * Fix existing snapshotting * implement the noop snapshotting * Add comments and switch log libraries * add some snapshot tests * add snapshot test file * add TODO * More work on raft snapshotting * progress on the ConfigStore strategy * Don't use two buckets * Update the snapshot store logic to hide the file logic * Add more backend tests * Cleanup code a bit * [WIP] Raft recovery (#938) * Add recovery functionality * remove fmt.Printfs * Fix a few fsm bugs * Add max size value for raft backend (#942) * Add max size value for raft backend * Include physical.ErrValueTooLarge in the message * Raft snapshot Take/Restore API (#926) * Inital work on raft snapshot APIs * Always 
redirect snapshot install/download requests * More work on the snapshot APIs * Cleanup code a bit * On restore handle special cases * Use the seal to encrypt the sha sum file * Add sealer mechanism and fix some bugs * Call restore while state lock is held * Send restore cb trigger through raft log * Make error messages nicer * Add test helpers * Add snapshot test * Add shamir unseal test * Add more raft snapshot API tests * Fix locking * Change working to initalize * Add underlying raw object to test cluster core * Move leaderUUID to core * Add raft TLS rotation logic (#950) * Add TLS rotation logic * Cleanup logic a bit * Add/Remove from follower state on add/remove peer * add comments * Update more comments * Update request_forwarding_service.proto * Make sure we populate all nodes in the followerstate obj * Update times * Apply review feedback * Add more raft config setting (#947) * Add performance config setting * Add more config options and fix tests * Test Raft Recovery (#944) * Test raft recovery * Leave out a node during recovery * remove unused struct * Update physical/raft/snapshot_test.go * Update physical/raft/snapshot_test.go * fix vendoring * Switch to new raft interface * Remove unused files * Switch a gogo -> proto instance * Remove unneeded vault dep in go.sum * Update helper/testhelpers/testhelpers.go Co-Authored-By: Calvin Leung Huang <cleung2010@gmail.com> * Update vault/cluster/cluster.go * track active key within the keyring itself (#6915) * track active key within the keyring itself * lookup and store using the active key ID * update docstring * minor refactor * Small text fixes (#6912) * Update physical/raft/raft.go Co-Authored-By: Calvin Leung Huang <cleung2010@gmail.com> * review feedback * Move raft logical system into separate file * Update help text a bit * Enforce cluster addr is set and use it for raft bootstrapping * Fix tests * fix http test panic * Pull in latest raft-snapshot library * Add comment
132 lines
3.3 KiB
Go
132 lines
3.3 KiB
Go
package metrics
|
|
|
|
import (
|
|
"fmt"
|
|
"net/http"
|
|
"sort"
|
|
"time"
|
|
)
|
|
|
|
// MetricsSummary holds a roll-up of metrics info for a given interval
type MetricsSummary struct {
	// Timestamp is the interval start time, rounded to the second and
	// rendered in UTC string form (see DisplayMetrics).
	Timestamp string
	// Gauges and Points are sorted for deterministic output.
	Gauges []GaugeValue
	Points []PointValue
	// Counters and Samples are produced by formatSamples and are
	// sorted by hash.
	Counters []SampledValue
	Samples  []SampledValue
}
|
|
|
|
// GaugeValue is the display form of a single gauge: its name, its most
// recent value, and its labels.
type GaugeValue struct {
	Name string
	// Hash is the map key the gauge was stored under; excluded from JSON.
	Hash  string `json:"-"`
	Value float32

	// Labels holds the raw label pairs. DisplayMetrics copies them into
	// DisplayLabels (which marshals under the JSON key "Labels") and then
	// nils Labels, so JSON consumers only ever see DisplayLabels.
	Labels        []Label           `json:"-"`
	DisplayLabels map[string]string `json:"Labels"`
}
|
|
|
|
// PointValue is the display form of a point metric: the metric name and
// every point emitted for it during the interval.
type PointValue struct {
	Name   string
	Points []float32
}
|
|
|
|
// SampledValue is the display form of a sample stream (counter or
// sample/timer): the embedded aggregate plus precomputed Mean and
// Stddev for convenience.
type SampledValue struct {
	Name string
	// Hash is the map key the sample was stored under; excluded from JSON.
	Hash string `json:"-"`
	// AggregateSample is embedded; deepCopy duplicates it so copies do
	// not share the pointer.
	*AggregateSample
	Mean   float64
	Stddev float64

	// Labels holds the raw label pairs; formatSamples exposes them via
	// DisplayLabels (marshaled under the JSON key "Labels") instead.
	Labels        []Label           `json:"-"`
	DisplayLabels map[string]string `json:"Labels"`
}
|
|
|
|
// deepCopy allocates a new instance of AggregateSample
|
|
func (source *SampledValue) deepCopy() SampledValue {
|
|
dest := *source
|
|
if source.AggregateSample != nil {
|
|
dest.AggregateSample = &AggregateSample{}
|
|
*dest.AggregateSample = *source.AggregateSample
|
|
}
|
|
return dest
|
|
}
|
|
|
|
// DisplayMetrics returns a summary of the metrics from the most recent finished interval.
|
|
func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
|
data := i.Data()
|
|
|
|
var interval *IntervalMetrics
|
|
n := len(data)
|
|
switch {
|
|
case n == 0:
|
|
return nil, fmt.Errorf("no metric intervals have been initialized yet")
|
|
case n == 1:
|
|
// Show the current interval if it's all we have
|
|
interval = data[0]
|
|
default:
|
|
// Show the most recent finished interval if we have one
|
|
interval = data[n-2]
|
|
}
|
|
|
|
interval.RLock()
|
|
defer interval.RUnlock()
|
|
|
|
summary := MetricsSummary{
|
|
Timestamp: interval.Interval.Round(time.Second).UTC().String(),
|
|
Gauges: make([]GaugeValue, 0, len(interval.Gauges)),
|
|
Points: make([]PointValue, 0, len(interval.Points)),
|
|
}
|
|
|
|
// Format and sort the output of each metric type, so it gets displayed in a
|
|
// deterministic order.
|
|
for name, points := range interval.Points {
|
|
summary.Points = append(summary.Points, PointValue{name, points})
|
|
}
|
|
sort.Slice(summary.Points, func(i, j int) bool {
|
|
return summary.Points[i].Name < summary.Points[j].Name
|
|
})
|
|
|
|
for hash, value := range interval.Gauges {
|
|
value.Hash = hash
|
|
value.DisplayLabels = make(map[string]string)
|
|
for _, label := range value.Labels {
|
|
value.DisplayLabels[label.Name] = label.Value
|
|
}
|
|
value.Labels = nil
|
|
|
|
summary.Gauges = append(summary.Gauges, value)
|
|
}
|
|
sort.Slice(summary.Gauges, func(i, j int) bool {
|
|
return summary.Gauges[i].Hash < summary.Gauges[j].Hash
|
|
})
|
|
|
|
summary.Counters = formatSamples(interval.Counters)
|
|
summary.Samples = formatSamples(interval.Samples)
|
|
|
|
return summary, nil
|
|
}
|
|
|
|
func formatSamples(source map[string]SampledValue) []SampledValue {
|
|
output := make([]SampledValue, 0, len(source))
|
|
for hash, sample := range source {
|
|
displayLabels := make(map[string]string)
|
|
for _, label := range sample.Labels {
|
|
displayLabels[label.Name] = label.Value
|
|
}
|
|
|
|
output = append(output, SampledValue{
|
|
Name: sample.Name,
|
|
Hash: hash,
|
|
AggregateSample: sample.AggregateSample,
|
|
Mean: sample.AggregateSample.Mean(),
|
|
Stddev: sample.AggregateSample.Stddev(),
|
|
DisplayLabels: displayLabels,
|
|
})
|
|
}
|
|
sort.Slice(output, func(i, j int) bool {
|
|
return output[i].Hash < output[j].Hash
|
|
})
|
|
|
|
return output
|
|
}
|