// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
#pragma once
#include "monitoring/perf_step_timer.h"
#include "rocksdb/perf_context.h"
#include "util/stop_watch.h"
namespace ROCKSDB_NAMESPACE {
#if defined(NPERF_CONTEXT)
extern PerfContext perf_context;
#else
#if defined(OS_SOLARIS)
extern thread_local PerfContext perf_context_;
#define perf_context (*get_perf_context())
#else
extern thread_local PerfContext perf_context;
#endif
#endif
#if defined(NPERF_CONTEXT)
#define PERF_TIMER_STOP(metric)
#define PERF_TIMER_START(metric)
#define PERF_TIMER_GUARD(metric)
#define PERF_TIMER_GUARD_WITH_CLOCK(metric, clock)
#define PERF_CPU_TIMER_GUARD(metric, clock)
#define PERF_CONDITIONAL_TIMER_FOR_MUTEX_GUARD(metric, condition, stats, \
                                               ticker_type)
#define PERF_TIMER_FOR_WAIT_GUARD(metric)
#define PERF_TIMER_MEASURE(metric)
#define PERF_COUNTER_ADD(metric, value)
#define PERF_COUNTER_BY_LEVEL_ADD(metric, value, level)
#else
// Stop the timer and update the metric
#define PERF_TIMER_STOP(metric) perf_step_timer_##metric.Stop();
#define PERF_TIMER_START(metric) perf_step_timer_##metric.Start();
// Declare and set start time of the timer
#define PERF_TIMER_GUARD(metric)                                  \
  PerfStepTimer perf_step_timer_##metric(&(perf_context.metric)); \
  perf_step_timer_##metric.Start();
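
// Usage sketch (illustrative only; `get_snapshot_time` stands in for any
// uint64_t time member of PerfContext). PERF_TIMER_STOP and PERF_TIMER_START
// operate on the PerfStepTimer that PERF_TIMER_GUARD declared in the same
// scope:
//
//   PERF_TIMER_GUARD(get_snapshot_time);  // declare the timer and start it
//   // ... timed work ...
//   PERF_TIMER_STOP(get_snapshot_time);   // add elapsed time and pause
//   // ... untimed work ...
//   PERF_TIMER_START(get_snapshot_time);  // resume; the destructor adds the
//                                         // remaining time when scope ends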
// Declare and set start time of the timer
#define PERF_TIMER_GUARD_WITH_CLOCK(metric, clock)                       \
  PerfStepTimer perf_step_timer_##metric(&(perf_context.metric), clock); \
  perf_step_timer_##metric.Start();
// Declare and set start time of the timer
#define PERF_CPU_TIMER_GUARD(metric, clock)            \
  PerfStepTimer perf_step_timer_##metric(              \
      &(perf_context.metric), clock, true,             \
      PerfLevel::kEnableTimeAndCPUTimeExceptForMutex); \
  perf_step_timer_##metric.Start();
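
// Usage sketch (illustrative only; `get_cpu_nanos` stands in for a CPU-time
// member of PerfContext and `clock_ptr` is a hypothetical SystemClock*; a
// null clock is assumed to fall back to the default system clock):
//
//   PERF_CPU_TIMER_GUARD(get_cpu_nanos, clock_ptr);
//   // ... work whose CPU time is charged to get_cpu_nanos ...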
#define PERF_CONDITIONAL_TIMER_FOR_MUTEX_GUARD(metric, condition, stats,       \
                                               ticker_type)                    \
  PerfStepTimer perf_step_timer_##metric(&(perf_context.metric), nullptr,      \
                                         false, PerfLevel::kEnableTime, stats, \
                                         ticker_type);                         \
  if (condition) {                                                             \
    perf_step_timer_##metric.Start();                                          \
  }
#define PERF_TIMER_FOR_WAIT_GUARD(metric)                                 \
  PerfStepTimer perf_step_timer_##metric(&(perf_context.metric), nullptr, \
                                         false, PerfLevel::kEnableWait);  \
  perf_step_timer_##metric.Start();
// Update metric with time elapsed since last START. Start time is reset
// to the current timestamp.
#define PERF_TIMER_MEASURE(metric) perf_step_timer_##metric.Measure();
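
// Usage sketch (illustrative only; `block_read_time` stands in for the
// PerfContext member being timed and the helper calls are hypothetical):
//
//   PERF_TIMER_GUARD(block_read_time);
//   ReadFooter();                         // hypothetical first step
//   PERF_TIMER_MEASURE(block_read_time);  // add time so far, restart timer
//   ReadIndexBlock();                     // hypothetical second step; its
//                                         // time is added when the guard
//                                         // goes out of scope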
// Increase metric value
#define PERF_COUNTER_ADD(metric, value)        \
  if (perf_level >= PerfLevel::kEnableCount) { \
    perf_context.metric += value;              \
  }                                            \
  static_assert(true, "semicolon required")
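
// The trailing static_assert requires a terminating semicolon at the call
// site, so an invocation reads like an ordinary statement. Usage sketch
// (illustrative only; `block_read_count` stands in for any PerfContext
// counter):
//
//   PERF_COUNTER_ADD(block_read_count, 1);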
// Increase metric value
#define PERF_COUNTER_BY_LEVEL_ADD(metric, value, level)               \
  if (perf_level >= PerfLevel::kEnableCount &&                        \
      perf_context.per_level_perf_context_enabled &&                  \
      perf_context.level_to_perf_context) {                           \
    if ((*(perf_context.level_to_perf_context)).find(level) !=        \
        (*(perf_context.level_to_perf_context)).end()) {              \
      (*(perf_context.level_to_perf_context))[level].metric += value; \
    } else {                                                          \
      PerfContextByLevel empty_context;                               \
      (*(perf_context.level_to_perf_context))[level] = empty_context; \
      (*(perf_context.level_to_perf_context))[level].metric += value; \
    }                                                                 \
  }
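
// Usage sketch (illustrative only; `bloom_filter_useful` stands in for a
// PerfContextByLevel counter and 0 is an arbitrary level number):
//
//   PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 0);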
#endif
} // namespace ROCKSDB_NAMESPACE