// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
#pragma once
#include "monitoring/statistics_impl.h"
#include "rocksdb/system_clock.h"
namespace ROCKSDB_NAMESPACE {
// Auto-scoped.
// When statistics is not nullptr, records the measured time into any enabled
// histograms supplied to the constructor. A histogram argument may be omitted
// by setting it to Histograms::HISTOGRAM_ENUM_MAX. The measured time is also
// written to *elapsed when that pointer is not nullptr: it overwrites
// *elapsed when overwrite is true, and is added to *elapsed when overwrite is
// false.
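//
// A minimal usage sketch (hedged): assumes `clock` and `stats` are valid,
// non-owning pointers, and uses Histograms::DB_GET purely for illustration;
// any enabled histogram works the same way.
//
//   uint64_t elapsed_micros = 0;
//   {
//     StopWatch sw(clock, stats, Histograms::DB_GET,
//                  Histograms::HISTOGRAM_ENUM_MAX, &elapsed_micros);
//     // ... timed work ...
//   }  // ~StopWatch() reports to DB_GET and stores the time in elapsed_micros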
class StopWatch {
 public:
  StopWatch(SystemClock* clock, Statistics* statistics,
            const uint32_t hist_type_1,
            const uint32_t hist_type_2 = Histograms::HISTOGRAM_ENUM_MAX,
            uint64_t* elapsed = nullptr, bool overwrite = true,
            bool delay_enabled = false)
      : clock_(clock),
        statistics_(statistics),
        hist_type_1_(statistics && statistics->HistEnabledForType(hist_type_1)
                         ? hist_type_1
                         : Histograms::HISTOGRAM_ENUM_MAX),
        hist_type_2_(statistics && statistics->HistEnabledForType(hist_type_2)
                         ? hist_type_2
                         : Histograms::HISTOGRAM_ENUM_MAX),
        elapsed_(elapsed),
        overwrite_(overwrite),
        stats_enabled_(statistics &&
                       statistics->get_stats_level() >=
                           StatsLevel::kExceptTimers &&
                       (hist_type_1_ != Histograms::HISTOGRAM_ENUM_MAX ||
                        hist_type_2_ != Histograms::HISTOGRAM_ENUM_MAX)),
        delay_enabled_(delay_enabled),
        total_delay_(0),
        delay_start_time_(0),
        start_time_((stats_enabled_ || elapsed != nullptr) ? clock->NowMicros()
                                                           : 0) {}
  ~StopWatch() {
    if (elapsed_) {
      if (overwrite_) {
        *elapsed_ = clock_->NowMicros() - start_time_;
      } else {
        *elapsed_ += clock_->NowMicros() - start_time_;
      }
    }
    if (elapsed_ && delay_enabled_) {
      *elapsed_ -= total_delay_;
    }
    if (stats_enabled_) {
      const auto time = (elapsed_ != nullptr)
                            ? *elapsed_
                            : (clock_->NowMicros() - start_time_);
      if (hist_type_1_ != Histograms::HISTOGRAM_ENUM_MAX) {
        statistics_->reportTimeToHistogram(hist_type_1_, time);
      }
      if (hist_type_2_ != Histograms::HISTOGRAM_ENUM_MAX) {
        statistics_->reportTimeToHistogram(hist_type_2_, time);
      }
    }
  }
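  // DelayStart()/DelayStop() let the caller exclude externally imposed waits
  // (e.g. write stalls) from the measured time. A hedged sketch, assuming the
  // StopWatch was built with a non-null elapsed pointer and delay_enabled set
  // to true, and with SimulateStall() standing in for the caller's wait:
  //
  //   sw.DelayStart();
  //   SimulateStall();
  //   sw.DelayStop();  // the wait is subtracted from *elapsed in ~StopWatch()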
  void DelayStart() {
    // A non-zero delay_start_time_ means a delay is already being tracked,
    // so it must not be overwritten here.
    if (elapsed_ && delay_enabled_ && delay_start_time_ == 0) {
      delay_start_time_ = clock_->NowMicros();
    }
  }
  void DelayStop() {
    if (elapsed_ && delay_enabled_ && delay_start_time_ != 0) {
      total_delay_ += clock_->NowMicros() - delay_start_time_;
    }
    // Resetting to 0 marks that no delay is currently being tracked, so two
    // consecutive calls to DelayStop() will not increase total_delay_.
    delay_start_time_ = 0;
  }

  uint64_t GetDelay() const { return delay_enabled_ ? total_delay_ : 0; }

  uint64_t start_time() const { return start_time_; }

 private:
  SystemClock* clock_;
  Statistics* statistics_;
  const uint32_t hist_type_1_;
  const uint32_t hist_type_2_;
  uint64_t* elapsed_;
  bool overwrite_;
  bool stats_enabled_;
  bool delay_enabled_;
  uint64_t total_delay_;
  uint64_t delay_start_time_;
  const uint64_t start_time_;
};
// A nanosecond-precision stopwatch.
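//
// A brief usage sketch (hedged): `clock` is assumed to be a valid SystemClock
// pointer and DoWork() merely stands in for the code being timed.
//
//   StopWatchNano timer(clock, true /* auto_start */);
//   DoWork();
//   uint64_t nanos = timer.ElapsedNanosSafe();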
class StopWatchNano {
 public:
  explicit StopWatchNano(SystemClock* clock, bool auto_start = false)
      : clock_(clock), start_(0) {
    if (auto_start) {
      Start();
    }
  }
  void Start() { start_ = clock_->NowNanos(); }
  uint64_t ElapsedNanos(bool reset = false) {
    auto now = clock_->NowNanos();
    auto elapsed = now - start_;
    if (reset) {
      start_ = now;
    }
    return elapsed;
  }
  uint64_t ElapsedNanosSafe(bool reset = false) {
    return (clock_ != nullptr) ? ElapsedNanos(reset) : 0U;
  }
  bool IsStarted() { return start_ != 0; }
 private:
  SystemClock* clock_;
  uint64_t start_;
};
} // namespace ROCKSDB_NAMESPACE