mirror of https://github.com/facebook/rocksdb.git
Add statistics `COMPACTION_CPU_TOTAL_TIME` for total compaction time (#11741)
Summary: The existing compaction statistics are `COMPACTION_TIME` and `COMPACTION_CPU_TIME`, which are histograms logged once at the end of a compaction. The new statistic `COMPACTION_CPU_TOTAL_TIME` is a cumulative total of compaction CPU time that is updated regularly while a compaction runs, allowing users to track compaction CPU usage more closely.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11741

Test Plan:
* new unit test `DBTestWithParam.CompactionTotalTimeTest`

Reviewed By: ajkr

Differential Revision: D48608094

Pulled By: cbi42

fbshipit-source-id: b597109f3e4bf2237fb5a216b6fd036e5363b4c0
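For context, a minimal sketch of how an application can read the new ticker, assuming a statistics object is attached to the DB. The path "/tmp/rocksdb_demo" is a placeholder; `CreateDBStatistics()`, `getTickerCount()`, and `DB::Open()` are existing RocksDB APIs.

    #include <cstdint>
    #include <iostream>

    #include "rocksdb/db.h"
    #include "rocksdb/options.h"
    #include "rocksdb/statistics.h"

    int main() {
      rocksdb::Options options;
      options.create_if_missing = true;
      options.statistics = rocksdb::CreateDBStatistics();

      rocksdb::DB* db = nullptr;
      rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/rocksdb_demo", &db);
      if (!s.ok()) {
        return 1;
      }

      // ... write data; compactions may run in the background ...

      // Cumulative compaction CPU time in microseconds. Unlike the
      // COMPACTION_CPU_TIME histogram, which is only recorded when a
      // compaction finishes, this ticker is also updated while a
      // compaction is still running.
      uint64_t cpu_us =
          options.statistics->getTickerCount(rocksdb::COMPACTION_CPU_TOTAL_TIME);
      std::cout << "compaction cpu micros so far: " << cpu_us << std::endl;

      delete db;
      return 0;
    }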
This commit is contained in:
parent 1e63fc9925
commit 9d71682d1b
@@ -1318,6 +1318,7 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
       "CompactionJob::ProcessKeyValueCompaction()::Processing",
       reinterpret_cast<void*>(
           const_cast<Compaction*>(sub_compact->compaction)));
+  uint64_t last_cpu_micros = prev_cpu_micros;
   while (status.ok() && !cfd->IsDropped() && c_iter->Valid()) {
     // Invariant: c_iter.status() is guaranteed to be OK if c_iter->Valid()
     // returns true.
@@ -1329,6 +1330,12 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
       RecordDroppedKeys(c_iter_stats, &sub_compact->compaction_job_stats);
       c_iter->ResetRecordCounts();
       RecordCompactionIOStats();
+
+      uint64_t cur_cpu_micros = db_options_.clock->CPUMicros();
+      assert(cur_cpu_micros >= last_cpu_micros);
+      RecordTick(stats_, COMPACTION_CPU_TOTAL_TIME,
+                 cur_cpu_micros - last_cpu_micros);
+      last_cpu_micros = cur_cpu_micros;
     }

     // Add current compaction_iterator key to target compaction output, if the
@@ -1436,8 +1443,11 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
     sub_compact->Current().UpdateBlobStats();
   }

+  uint64_t cur_cpu_micros = db_options_.clock->CPUMicros();
   sub_compact->compaction_job_stats.cpu_micros =
-      db_options_.clock->CPUMicros() - prev_cpu_micros;
+      cur_cpu_micros - prev_cpu_micros;
+  RecordTick(stats_, COMPACTION_CPU_TOTAL_TIME,
+             cur_cpu_micros - last_cpu_micros);

   if (measure_io_stats_) {
     sub_compact->compaction_job_stats.file_write_nanos +=
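Taken together, the hunks above implement a simple delta-accounting scheme: record the CPU-time delta since the last sample at each periodic checkpoint inside the key-processing loop, then record the remaining tail once at the end, so the ticker's lifetime sum equals the subcompaction's total CPU time. A minimal standalone sketch of the pattern; CpuMicros() and RecordTick() here are stand-ins for db_options_.clock->CPUMicros() and RocksDB's RecordTick(), not real APIs in this form.

    #include <cassert>
    #include <cstdint>

    // Fake monotonic CPU clock and ticker, for illustration only.
    static uint64_t g_cpu = 0;
    static uint64_t g_ticker = 0;
    uint64_t CpuMicros() { return g_cpu += 5; }
    void RecordTick(uint64_t delta) { g_ticker += delta; }

    void CompactionSketch(int num_keys) {
      const uint64_t prev_cpu_micros = CpuMicros();
      uint64_t last_cpu_micros = prev_cpu_micros;
      for (int i = 1; i <= num_keys; ++i) {
        // ... process one key ...
        if (i % 1000 == 0) {  // mirrors kRecordStatsEvery in the real loop
          uint64_t cur = CpuMicros();
          assert(cur >= last_cpu_micros);
          RecordTick(cur - last_cpu_micros);  // periodic delta keeps ticker current
          last_cpu_micros = cur;
        }
      }
      uint64_t cur = CpuMicros();
      RecordTick(cur - last_cpu_micros);  // final tail since the last checkpoint
      // The deltas telescope to cur - prev_cpu_micros, i.e. the ticker's
      // sum matches the same total that cpu_micros reports.
    }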
@@ -6040,6 +6040,64 @@ TEST_P(DBTestWithParam, FilterCompactionTimeTest) {
   delete itr;
 }

+#ifndef OS_WIN
+// CPUMicros() is not supported. See WinClock::CPUMicros().
+TEST_P(DBTestWithParam, CompactionTotalTimeTest) {
+  int record_count = 0;
+  class TestStatistics : public StatisticsImpl {
+   public:
+    explicit TestStatistics(int* record_count)
+        : StatisticsImpl(nullptr), record_count_(record_count) {}
+    void recordTick(uint32_t ticker_type, uint64_t count) override {
+      if (ticker_type == COMPACTION_CPU_TOTAL_TIME) {
+        ASSERT_GT(count, 0);
+        (*record_count_)++;
+      }
+      StatisticsImpl::recordTick(ticker_type, count);
+    }
+
+    int* record_count_;
+  };
+
+  Options options = CurrentOptions();
+  options.disable_auto_compactions = true;
+  options.create_if_missing = true;
+  options.statistics = std::make_shared<TestStatistics>(&record_count);
+  options.statistics->set_stats_level(kExceptTimeForMutex);
+  options.max_subcompactions = max_subcompactions_;
+  DestroyAndReopen(options);
+
+  int n = 0;
+  for (int table = 0; table < 4; ++table) {
+    for (int i = 0; i < 1000; ++i) {
+      ASSERT_OK(Put(std::to_string(table * 1000 + i), "val"));
+      ++n;
+    }
+    // Overlapping tables
+    ASSERT_OK(Put(std::to_string(0), "val"));
+    ++n;
+    ASSERT_OK(Flush());
+  }
+
+  CompactRangeOptions cro;
+  cro.exclusive_manual_compaction = exclusive_manual_compaction_;
+  ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
+
+  // Hard-coded number in CompactionJob::ProcessKeyValueCompaction().
+  const int kRecordStatsEvery = 1000;
+  // The stat COMPACTION_CPU_TOTAL_TIME should be recorded
+  // during compaction and once more after compaction.
+  ASSERT_EQ(n / kRecordStatsEvery + 1, record_count);
+
+  // Check that COMPACTION_CPU_TOTAL_TIME correctly
+  // records compaction time after a compaction.
+  HistogramData h;
+  options.statistics->histogramData(COMPACTION_CPU_TIME, &h);
+  ASSERT_EQ(1, h.count);
+  ASSERT_EQ(h.max, TestGetTickerCount(options, COMPACTION_CPU_TOTAL_TIME));
+}
+#endif
+
 TEST_F(DBTest, TestLogCleanup) {
   Options options = CurrentOptions();
   options.write_buffer_size = 64 * 1024;  // very small
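The expected count in the test's ASSERT_EQ works out as follows for the single-subcompaction case (illustrative arithmetic only, not additional test code):

    // n = 4 tables * (1000 sequential keys + 1 overlapping key) = 4004
    // ticks during compaction = n / kRecordStatsEvery = 4004 / 1000 = 4
    // plus one final tick after the processing loop              = 1
    // expected record_count = 4 + 1                              = 5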
@@ -208,8 +208,11 @@ enum Tickers : uint32_t {

   // DEPRECATED / unused (see NUMBER_BLOCK_COMPRESSION_*)
   NUMBER_BLOCK_NOT_COMPRESSED,
+
+  // Tickers that record cumulative time.
   MERGE_OPERATION_TOTAL_TIME,
   FILTER_OPERATION_TOTAL_TIME,
+  COMPACTION_CPU_TOTAL_TIME,

   // Row cache.
   ROW_CACHE_HIT,
@@ -111,6 +111,7 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
     {NUMBER_BLOCK_NOT_COMPRESSED, "rocksdb.number.block.not_compressed"},
     {MERGE_OPERATION_TOTAL_TIME, "rocksdb.merge.operation.time.nanos"},
     {FILTER_OPERATION_TOTAL_TIME, "rocksdb.filter.operation.time.nanos"},
+    {COMPACTION_CPU_TOTAL_TIME, "rocksdb.compaction.total.time.cpu_micros"},
     {ROW_CACHE_HIT, "rocksdb.row.cache.hit"},
     {ROW_CACHE_MISS, "rocksdb.row.cache.miss"},
     {READ_AMP_ESTIMATE_USEFUL_BYTES, "rocksdb.read.amp.estimate.useful.bytes"},
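The TickersNameMap entry above gives the ticker the string label that appears in textual statistics dumps. A small sketch, assuming the stats object from CreateDBStatistics(); Statistics::ToString() is a real API, and the dump format is roughly one "name COUNT : value" line per ticker:

    #include <iostream>
    #include <memory>

    #include "rocksdb/statistics.h"

    int main() {
      std::shared_ptr<rocksdb::Statistics> stats = rocksdb::CreateDBStatistics();
      // ToString() prints one line per ticker, keyed by the TickersNameMap
      // label, e.g. "rocksdb.compaction.total.time.cpu_micros COUNT : 0".
      std::cout << stats->ToString() << std::endl;
      return 0;
    }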
@@ -0,0 +1 @@
+* Add a new statistic `COMPACTION_CPU_TOTAL_TIME` that records cumulative compaction cpu time. This ticker is updated regularly while a compaction is running.