package middleware

import (
	"net"
	"reflect"
	"strconv"
	"strings"
	"time"

	"github.com/armon/go-metrics"
	"github.com/armon/go-metrics/prometheus"
	"github.com/hashicorp/consul-net-rpc/net/rpc"
	rpcRate "github.com/hashicorp/consul/agent/consul/rate"
	"github.com/hashicorp/go-hclog"
)

// RPCTypeInternal identifies the "RPC" request as coming from some internal
// operation that runs on the cluster leader. Technically this is not an RPC
// request, but these raft.Apply operations have the same impact on blocking
// queries and streaming subscriptions, so they need to be tracked by the same
// metric and logs.
//
// Really what we are measuring here is a "cluster operation". The term we
// have used for this historically is "RPC", so we continue to use that here.
const RPCTypeInternal = "internal"

const RPCTypeNetRPC = "net/rpc"
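
// metricRPCRequest is the key under which RPC call duration samples are
// emitted; requestLogName is the same key joined with underscores, used as
// the message for the per-request trace log.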
var metricRPCRequest = []string{"rpc", "server", "call"}
var requestLogName = strings.Join(metricRPCRequest, "_")
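
// OneTwelveRPCSummary is the prometheus summary definition for the
// rpc.server.call metric (the name presumably refers to the Consul 1.12
// release cycle in which the metric was introduced).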
var OneTwelveRPCSummary = []prometheus.SummaryDefinition{
	{
		Name: metricRPCRequest,
		Help: "Measures the time an RPC service call takes to complete, in milliseconds. Labels mark which RPC method was called and metadata about the call.",
	},
}
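
// RequestRecorder records a duration sample and a trace log entry for each
// RPC call made against a server.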
type RequestRecorder struct {
	Logger         hclog.Logger
	RecorderFunc   func(key []string, val float32, labels []metrics.Label)
	serverIsLeader func() bool
	localDC        string
}
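
// NewRequestRecorder returns a RequestRecorder that reports samples through
// the global go-metrics sink via metrics.AddSampleWithLabels.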
func NewRequestRecorder(logger hclog.Logger, isLeader func() bool, localDC string) *RequestRecorder {
	return &RequestRecorder{
		Logger:         logger,
		RecorderFunc:   metrics.AddSampleWithLabels,
		serverIsLeader: isLeader,
		localDC:        localDC,
	}
}
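
// Record measures the elapsed time of a call in milliseconds, tags it with
// method, outcome, request type, RPC type, and leadership labels, emits it
// on the rpc.server.call metric, and traces the same labels to the logger.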
func (r *RequestRecorder) Record(requestName string, rpcType string, start time.Time, request interface{}, respErrored bool) {
	elapsed := time.Since(start).Microseconds()
	elapsedMs := float32(elapsed) / 1000
	reqType := requestType(request)
	isLeader := r.getServerLeadership()

	labels := []metrics.Label{
		{Name: "method", Value: requestName},
		{Name: "errored", Value: strconv.FormatBool(respErrored)},
		{Name: "request_type", Value: reqType},
		{Name: "rpc_type", Value: rpcType},
		{Name: "leader", Value: isLeader},
	}

	labels = r.addOptionalLabels(request, labels)

	// math.MaxInt64 < math.MaxFloat32 is true so we should be good!
	r.RecorderFunc(metricRPCRequest, elapsedMs, labels)

	labelsArr := flattenLabels(labels)
	r.Logger.Trace(requestLogName, labelsArr...)
}
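
// flattenLabels converts metric labels into the flat, alternating key/value
// argument list expected by hclog.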
func flattenLabels(labels []metrics.Label) []interface{} {
	var labelArr []interface{}
	for _, label := range labels {
		labelArr = append(labelArr, label.Name, label.Value)
	}

	return labelArr
}
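
// addOptionalLabels appends labels that apply only to certain request types:
// staleness and blocking behavior for read queries, and the target
// datacenter plus locality for requests addressed to a datacenter.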
func (r *RequestRecorder) addOptionalLabels(request interface{}, labels []metrics.Label) []metrics.Label {
	if rq, ok := request.(readQuery); ok {
		labels = append(labels,
			metrics.Label{
				Name:  "allow_stale",
				Value: strconv.FormatBool(rq.AllowStaleRead()),
			},
			metrics.Label{
				Name:  "blocking",
				Value: strconv.FormatBool(rq.GetMinQueryIndex() > 0),
			})
	}

	if td, ok := request.(targetDC); ok {
		requestDC := td.RequestDatacenter()
		labels = append(labels, metrics.Label{Name: "target_datacenter", Value: requestDC})

		if r.localDC == requestDC {
			labels = append(labels, metrics.Label{Name: "locality", Value: "local"})
		} else {
			labels = append(labels, metrics.Label{Name: "locality", Value: "forwarded"})
		}
	}

	return labels
}
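
// requestType classifies a request as a "read" or a "write" via its IsRead
// method, or "unreported" when the request does not implement it.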
func requestType(req interface{}) string {
	if r, ok := req.(interface{ IsRead() bool }); ok {
		if r.IsRead() {
			return "read"
		}
		return "write"
	}

	// This branch should not be reached: it means an underlying request type
	// does not implement the interface. Rather than misclassifying it as a
	// "read" or a "write", surface it so we become aware of it.
	return "unreported"
}
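
// getServerLeadership renders the server's leadership status as a label
// value, or "unreported" when no leadership check was plumbed through.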
func (r *RequestRecorder) getServerLeadership() string {
	if r.serverIsLeader != nil {
		if r.serverIsLeader() {
			return "true"
		}
		return "false"
	}

	// This branch should not be reached: it means we have not plumbed down a
	// way to verify whether the server handling the request was the leader.
	return "unreported"
}
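
// readQuery and targetDC are the optional interfaces a request may implement
// to receive the extra labels added by addOptionalLabels.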
type readQuery interface {
	GetMinQueryIndex() uint64
	AllowStaleRead() bool
}

type targetDC interface {
	RequestDatacenter() string
}
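
// GetNetRPCInterceptor wraps a net/rpc service call so that its duration and
// outcome are recorded via the given RequestRecorder once the handler
// returns.
//
// A minimal wiring sketch, assuming the consul-net-rpc server option API;
// the logger, isLeader check, and "dc1" datacenter are stand-ins:
//
//	recorder := NewRequestRecorder(logger, isLeader, "dc1")
//	server := rpc.NewServerWithOpts(
//		rpc.WithServerServiceCallInterceptor(GetNetRPCInterceptor(recorder)),
//	)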
func GetNetRPCInterceptor(recorder *RequestRecorder) rpc.ServerServiceCallInterceptor {
	return func(reqServiceMethod string, argv, replyv reflect.Value, handler func() error) {
		reqStart := time.Now()

		err := handler()

		recorder.Record(reqServiceMethod, RPCTypeNetRPC, reqStart, argv.Interface(), err != nil)
	}
}
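
// GetNetRPCRateLimitingInterceptor returns a pre-body interceptor that asks
// the rate limiter whether an operation is allowed before the request body
// is decoded, recovering from limiter panics through panicHandler. The
// operation's type and category come from the package-level
// rpcRateLimitSpecs table.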
func GetNetRPCRateLimitingInterceptor(requestLimitsHandler rpcRate.RequestLimitsHandler, panicHandler RecoveryHandlerFunc) rpc.PreBodyInterceptor {
	return func(reqServiceMethod string, sourceAddr net.Addr) (retErr error) {
		defer func() {
			if r := recover(); r != nil {
				retErr = panicHandler(r)
			}
		}()

		op := rpcRate.Operation{
			Name:       reqServiceMethod,
			SourceAddr: sourceAddr,
			Type:       rpcRateLimitSpecs[reqServiceMethod].Type,
			Category:   rpcRateLimitSpecs[reqServiceMethod].Category,
		}

		// net/rpc does not provide a way to encode the nuances of the
		// error response (retry or retry elsewhere) so the error string
		// from the rate limiter is all that we have.
		return requestLimitsHandler.Allow(op)
	}
}