Added RocksDB stats GET_HIT_L0 and GET_HIT_L1
Summary:
- In statistics.h, added the GET_HIT_L0, GET_HIT_L1, and GET_HIT_L2_AND_UP tickers (see the usage sketch below).
- In version_set.cc:
-- Added a getter for hit_file_level_ in the FilePicker class.
-- In Version::Get(), when the lookup finds the key, increment the counter that corresponds to the level of the file that served the hit.
Corresponding task: https://our.intern.facebook.com/intern/tasks/?s=506100481&t=5952818
Personal fork: 0c3f2e3600
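For context, here is a minimal sketch (not part of this diff) of how an application could read the new tickers through the existing Statistics API once this change lands. The database path, keys, and values are illustrative only:

```cpp
#include <cassert>
#include <iostream>

#include "rocksdb/db.h"
#include "rocksdb/statistics.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Attach a statistics object so the GET_HIT_* tickers are recorded.
  options.statistics = rocksdb::CreateDBStatistics();

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/rocksdb_get_hit_demo", &db);
  assert(s.ok());

  db->Put(rocksdb::WriteOptions(), "key", "val");
  // Flush so the subsequent Get is served from an SST file
  // rather than the memtable.
  db->Flush(rocksdb::FlushOptions());

  std::string value;
  db->Get(rocksdb::ReadOptions(), "key", &value);

  // With this change, Get() hits served from the LSM levels are broken
  // down into three tickers (memtable hits are still counted separately).
  std::cout << "L0 hits:  "
            << options.statistics->getTickerCount(rocksdb::GET_HIT_L0) << "\n"
            << "L1 hits:  "
            << options.statistics->getTickerCount(rocksdb::GET_HIT_L1) << "\n"
            << "L2+ hits: "
            << options.statistics->getTickerCount(rocksdb::GET_HIT_L2_AND_UP)
            << std::endl;

  delete db;
  return 0;
}
```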
Test Plan:
In a terminal:
```
make -j32 db_test
ROCKSDB_TESTS=L0L1L2AndUpHitCounter ./db_test
```
Or, to use the debugger:
```
make -j32 db_test
export ROCKSDB_TESTS=L0L1L2AndUpHitCounter
gdb db_test
```
Reviewers: rven, sdong
Reviewed By: sdong
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D32205
Parent: 91ac3b2067
Commit: 1851f977c2
db_test.cc:
```diff
@@ -10330,6 +10330,44 @@ TEST(DBTest, DeleteMovedFileAfterCompaction) {
   }
 }
 
+TEST(DBTest, L0L1L2AndUpHitCounter) {
+  Options options = CurrentOptions();
+  options.write_buffer_size = 32 * 1024;
+  options.target_file_size_base = 32 * 1024;
+  options.level0_file_num_compaction_trigger = 2;
+  options.level0_slowdown_writes_trigger = 2;
+  options.level0_stop_writes_trigger = 4;
+  options.max_bytes_for_level_base = 64 * 1024;
+  options.max_write_buffer_number = 2;
+  options.max_background_compactions = 8;
+  options.max_background_flushes = 8;
+  options.statistics = rocksdb::CreateDBStatistics();
+  CreateAndReopenWithCF({"mypikachu"}, options);
+
+  int numkeys = 20000;
+  for (int i = 0; i < numkeys; i++) {
+    ASSERT_OK(Put(1, Key(i), "val"));
+  }
+  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L0));
+  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L1));
+  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L2_AND_UP));
+
+  ASSERT_OK(Flush(1));
+  dbfull()->TEST_WaitForCompact();
+
+  for (int i = 0; i < numkeys; i++) {
+    ASSERT_EQ(Get(1, Key(i)), "val");
+  }
+
+  ASSERT_GT(TestGetTickerCount(options, GET_HIT_L0), 100);
+  ASSERT_GT(TestGetTickerCount(options, GET_HIT_L1), 100);
+  ASSERT_GT(TestGetTickerCount(options, GET_HIT_L2_AND_UP), 100);
+
+  ASSERT_EQ(numkeys, TestGetTickerCount(options, GET_HIT_L0) +
+                     TestGetTickerCount(options, GET_HIT_L1) +
+                     TestGetTickerCount(options, GET_HIT_L2_AND_UP));
+}
+
 TEST(DBTest, EncodeDecompressedBlockSizeTest) {
   // iter 0 -- zlib
   // iter 1 -- bzip2
```
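The assertions above go through the TestGetTickerCount helper that db_test.cc already defines; it is not touched by this diff. Its assumed shape is roughly this thin wrapper over the Statistics interface (sketch only, the exact return type in the test file may differ):

```cpp
// Sketch of the existing db_test.cc helper the new test relies on (assumed
// shape, not added by this change): it forwards to the statistics object
// attached to the Options under test.
static uint64_t TestGetTickerCount(const Options& options,
                                   Tickers ticker_type) {
  return options.statistics->getTickerCount(ticker_type);
}
```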
version_set.cc:
```diff
@@ -91,6 +91,7 @@ class FilePicker {
       const InternalKeyComparator* internal_comparator)
       : num_levels_(num_levels),
         curr_level_(-1),
+        hit_file_level_(-1),
         search_left_bound_(0),
         search_right_bound_(FileIndexer::kLevelMaxIndex),
 #ifndef NDEBUG
@@ -120,6 +121,7 @@ class FilePicker {
     while (curr_index_in_curr_level_ < curr_file_level_->num_files) {
       // Loops over all files in current level.
       FdWithKeyRange* f = &curr_file_level_->files[curr_index_in_curr_level_];
+      hit_file_level_ = curr_level_;
       int cmp_largest = -1;
 
       // Do key range filtering of files or/and fractional cascading if:
@@ -199,9 +201,14 @@ class FilePicker {
     return nullptr;
   }
 
+  // getter for current file level
+  // for GET_HIT_L0, GET_HIT_L1 & GET_HIT_L2_AND_UP counts
+  unsigned int GetHitFileLevel() { return hit_file_level_; }
+
  private:
   unsigned int num_levels_;
   unsigned int curr_level_;
+  unsigned int hit_file_level_;
   int32_t search_left_bound_;
   int32_t search_right_bound_;
 #ifndef NDEBUG
@@ -800,6 +807,13 @@ void Version::Get(const ReadOptions& read_options,
         // Keep searching in other files
         break;
       case GetContext::kFound:
+        if (fp.GetHitFileLevel() == 0) {
+          RecordTick(db_statistics_, GET_HIT_L0);
+        } else if (fp.GetHitFileLevel() == 1) {
+          RecordTick(db_statistics_, GET_HIT_L1);
+        } else if (fp.GetHitFileLevel() >= 2) {
+          RecordTick(db_statistics_, GET_HIT_L2_AND_UP);
+        }
         return;
       case GetContext::kDeleted:
         // Use empty error message for speed
```
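One note on the recording path: RecordTick is the usual statistics helper, so when no statistics object is configured the new code is effectively a no-op. A minimal sketch of the guard being relied on (assumed shape of the existing helper, not part of this diff):

```cpp
// Assumed shape of the existing RecordTick helper: ticking is skipped when
// the DB was opened without a statistics object, so the new GET_HIT_*
// accounting adds no work in that case.
inline void RecordTick(Statistics* statistics, uint32_t ticker_type,
                       uint64_t count = 1) {
  if (statistics != nullptr) {
    statistics->recordTick(ticker_type, count);
  }
}
```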
statistics.h:
```diff
@@ -53,6 +53,13 @@ enum Tickers : uint32_t {
   // # of memtable misses.
   MEMTABLE_MISS,
 
+  // # of Get() queries served by L0
+  GET_HIT_L0,
+  // # of Get() queries served by L1
+  GET_HIT_L1,
+  // # of Get() queries served by L2 and up
+  GET_HIT_L2_AND_UP,
+
   /**
    * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction
    * There are 3 reasons currently.
@@ -150,6 +157,9 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
     {BLOOM_FILTER_USEFUL, "rocksdb.bloom.filter.useful"},
     {MEMTABLE_HIT, "rocksdb.memtable.hit"},
     {MEMTABLE_MISS, "rocksdb.memtable.miss"},
+    {GET_HIT_L0, "rocksdb.l0.hit"},
+    {GET_HIT_L1, "rocksdb.l1.hit"},
+    {GET_HIT_L2_AND_UP, "rocksdb.l2andup.hit"},
     {COMPACTION_KEY_DROP_NEWER_ENTRY, "rocksdb.compaction.key.drop.new"},
     {COMPACTION_KEY_DROP_OBSOLETE, "rocksdb.compaction.key.drop.obsolete"},
     {COMPACTION_KEY_DROP_USER, "rocksdb.compaction.key.drop.user"},
@@ -194,7 +204,8 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
     {NUMBER_SUPERVERSION_ACQUIRES, "rocksdb.number.superversion_acquires"},
     {NUMBER_SUPERVERSION_RELEASES, "rocksdb.number.superversion_releases"},
     {NUMBER_SUPERVERSION_CLEANUPS, "rocksdb.number.superversion_cleanups"},
-    {NUMBER_BLOCK_NOT_COMPRESSED, "rocksdb.number.block.not_compressed"}, };
+    {NUMBER_BLOCK_NOT_COMPRESSED, "rocksdb.number.block.not_compressed"},
+};
 
 /**
  * Keep adding histogram's here.
```
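Because the new tickers are also registered in TickersNameMap, they appear under their string names ("rocksdb.l0.hit", "rocksdb.l1.hit", "rocksdb.l2andup.hit") in any name-based stats dump. A small illustrative sketch of pulling them out via Statistics::ToString(); DumpStats is a hypothetical helper name, not part of this diff:

```cpp
#include <iostream>

#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

// Illustrative only: print all ticker and histogram values, including the
// newly named "rocksdb.l0.hit" / "rocksdb.l1.hit" / "rocksdb.l2andup.hit"
// entries, for a database opened with these options.
void DumpStats(const rocksdb::Options& options) {
  if (options.statistics != nullptr) {
    std::cout << options.statistics->ToString() << std::endl;
  }
}
```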