Mirror of https://github.com/facebook/rocksdb.git (synced 2024-11-26 16:30:56 +00:00)

Commit 39f5846ec7
Summary: We want to know more about opportunities for better range filters, and the effectiveness of our own range filters. Currently the stats are very limited, essentially logging just hits and misses against prefix filters for range scans in BLOOM_FILTER_PREFIX_*, without tracking the false positive rate. Perhaps confusingly, when prefix filters are used for point queries, the stats currently go into the non-PREFIX tickers.

This change does several things:

* Introduces new stat tickers for seeks and related filtering, *LEVEL_SEEK*.
* Most importantly, lets us see opportunities for range filtering. Specifically, we can count how many times a seek in an SST file accesses at least one data block, and how many times at least one value() is then accessed. If a data block was accessed but no value(), we can generally assume that the key(s) seen was (were) not of interest, and so could have been filtered out with the right kind of filter, avoiding the data block access.
* Gives us the same level of detail whether or not a filter (for now, prefix Bloom/ribbon) is used. Specifically, we can infer a false positive rate for prefix filters (not available before) from the seek "false positive" rate: a data block is accessed but no value() is called. (There can be other explanations for a seek false positive, but in typical iterator usage it would indicate a filter false positive.)
* For efficiency, avoids making additional calls to the prefix extractor (or key comparisons, etc.), which would be required if we wanted to detect filter false positives more precisely. I believe that instrumenting value() is the best balance of efficiency vs. accurately measuring what we are often interested in.
* Divides the stats between the last level and the non-last levels, to help understand potential tiered storage use cases.
* Gives the old BLOOM_FILTER_PREFIX_* stats a different meaning: they no longer refer to iterators but to point queries using prefix filters. BLOOM_FILTER_PREFIX_TRUE_POSITIVE is added for computing the prefix false positive rate on point queries, which can be due to filter false positives as well as to different keys with the same prefix.
* Similarly, restricts the non-PREFIX BLOOM_FILTER stats to whole key filtering only.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11460

Test Plan: unit tests updated, including updating many to pop the stat value since the last read, to improve test readability and maintainability.

Performance test shows a consistent small improvement with these changes, both with clang and with gcc. A CPU profile indicates that RecordTick is using less CPU, and this makes sense at least for a high filter miss rate: before, we were recording two ticks per filter miss in iterators (CHECKED & USEFUL); now we record just one (FILTERED).

Create the DB with

```
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=fillrandom -num=10000000 -disable_wal=1 -write_buffer_size=30000000 -bloom_bits=8 -compaction_style=2 -fifo_compaction_max_table_files_size_mb=10000 -fifo_compaction_allow_compaction=0 -prefix_size=8
```

and run simultaneous before & after with

```
TEST_TMPDIR=/dev/shm ./db_bench -readonly -benchmarks=seekrandom[-X1000] -num=10000000 -bloom_bits=8 -compaction_style=2 -fifo_compaction_max_table_files_size_mb=10000 -fifo_compaction_allow_compaction=0 -prefix_size=8 -seek_nexts=1 -duration=20 -seed=43 -threads=8 -cache_size=1000000000 -statistics
```

Before: seekrandom [AVG 275 runs] : 189680 (± 222) ops/sec; 18.4 (± 0.0) MB/sec
After:  seekrandom [AVG 275 runs] : 197110 (± 208) ops/sec; 19.1 (± 0.0) MB/sec

Reviewed By: ajkr

Differential Revision: D46029177

Pulled By: pdillinger

fbshipit-source-id: cdace79a2ea548d46c5900b068c5b7c3a02e5822
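To make the new stats concrete, the sketch below shows one way the new tickers might be consumed after an iterator workload, assuming a DB opened with statistics enabled. The *_SEEK_FILTERED, *_SEEK_FILTER_MATCH, and *_SEEK_DATA ticker names appear in the source file below; the *_SEEK_DATA_USEFUL_* names are assumptions here, since they are recorded where values are used rather than in this file.

```
// Minimal sketch: interpreting the new seek tickers (assumptions noted above).
#include <cstdint>
#include <iostream>

#include "rocksdb/statistics.h"

using namespace ROCKSDB_NAMESPACE;

// Sum the last-level and non-last-level variants of a ticker.
static uint64_t Sum2(const Statistics& stats, Tickers last, Tickers non_last) {
  return stats.getTickerCount(last) + stats.getTickerCount(non_last);
}

void ReportSeekFiltering(const Statistics& stats) {
  // Seeks stopped by a prefix filter, and seeks where the filter matched.
  const uint64_t filtered =
      Sum2(stats, LAST_LEVEL_SEEK_FILTERED, NON_LAST_LEVEL_SEEK_FILTERED);
  const uint64_t matched = Sum2(stats, LAST_LEVEL_SEEK_FILTER_MATCH,
                                NON_LAST_LEVEL_SEEK_FILTER_MATCH);
  // Seeks that read at least one data block...
  const uint64_t data =
      Sum2(stats, LAST_LEVEL_SEEK_DATA, NON_LAST_LEVEL_SEEK_DATA);
  // ...and those where at least one value() was then used (assumed names).
  const uint64_t useful =
      Sum2(stats, LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH,
           NON_LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH) +
      Sum2(stats, LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER,
           NON_LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER);
  std::cout << "seeks filtered out: " << filtered
            << ", filter matches: " << matched << "\n";
  if (data > 0) {
    // A data block read with no value() used is, in typical iterator usage,
    // a likely filter false positive / an opportunity for range filtering.
    std::cout << "seek false positive rate: "
              << static_cast<double>(data - useful) / data << "\n";
  }
}
```

For point queries, one plausible estimate of the prefix filter false positive rate under the redefined tickers above is (BLOOM_FILTER_PREFIX_CHECKED - BLOOM_FILTER_PREFIX_USEFUL - BLOOM_FILTER_PREFIX_TRUE_POSITIVE) / BLOOM_FILTER_PREFIX_CHECKED; note that this also counts different keys sharing the target's prefix, not only filter false positives.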
501 lines · 17 KiB · C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/block_based/block_based_table_iterator.h"

namespace ROCKSDB_NAMESPACE {

void BlockBasedTableIterator::SeekToFirst() { SeekImpl(nullptr, false); }

void BlockBasedTableIterator::Seek(const Slice& target) {
  SeekImpl(&target, true);
}

void BlockBasedTableIterator::SeekImpl(const Slice* target,
                                       bool async_prefetch) {
  bool is_first_pass = true;
  if (async_read_in_progress_) {
    AsyncInitDataBlock(false);
    is_first_pass = false;
  }

  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  seek_stat_state_ = kNone;
  bool filter_checked = false;
  if (target &&
      !CheckPrefixMayMatch(*target, IterDirection::kForward, &filter_checked)) {
    ResetDataIter();
    RecordTick(table_->GetStatistics(), is_last_level_
                                            ? LAST_LEVEL_SEEK_FILTERED
                                            : NON_LAST_LEVEL_SEEK_FILTERED);
    return;
  }
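  // The seek was not filtered out. If a filter was consulted, remember that;
  // per the summary above, a filter match that never leads to a value() use
  // can later be interpreted as a likely filter false positive.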
  if (filter_checked) {
    seek_stat_state_ = kFilterUsed;
    RecordTick(table_->GetStatistics(), is_last_level_
                                            ? LAST_LEVEL_SEEK_FILTER_MATCH
                                            : NON_LAST_LEVEL_SEEK_FILTER_MATCH);
  }

  bool need_seek_index = true;
  if (block_iter_points_to_real_block_ && block_iter_.Valid()) {
    // Reseek.
    prev_block_offset_ = index_iter_->value().handle.offset();

    if (target) {
      // We can avoid an index seek if:
      // 1. The new seek key is larger than the current key
      // 2. The new seek key is within the upper bound of the block
      // Since we don't necessarily know the internal key for either
      // the current key or the upper bound, we check user keys and
      // exclude the equality case. Considering internal keys can
      // improve for the boundary cases, but it would complicate the
      // code.
      if (user_comparator_.Compare(ExtractUserKey(*target),
                                   block_iter_.user_key()) > 0 &&
          user_comparator_.Compare(ExtractUserKey(*target),
                                   index_iter_->user_key()) < 0) {
        need_seek_index = false;
      }
    }
  }

  if (need_seek_index) {
    if (target) {
      index_iter_->Seek(*target);
    } else {
      index_iter_->SeekToFirst();
    }

    if (!index_iter_->Valid()) {
      ResetDataIter();
      return;
    }
  }

  IndexValue v = index_iter_->value();
  const bool same_block = block_iter_points_to_real_block_ &&
                          v.handle.offset() == prev_block_offset_;

  if (!v.first_internal_key.empty() && !same_block &&
      (!target || icomp_.Compare(*target, v.first_internal_key) <= 0) &&
      allow_unprepared_value_) {
    // Index contains the first key of the block, and it's >= target.
    // We can defer reading the block.
    is_at_first_key_from_index_ = true;
    // ResetDataIter() will invalidate block_iter_. Thus, there is no need to
    // call CheckDataBlockWithinUpperBound() to check for iterate_upper_bound
    // as that will be done later when the data block is actually read.
    ResetDataIter();
  } else {
    // Need to use the data block.
    if (!same_block) {
      if (read_options_.async_io && async_prefetch) {
        if (is_first_pass) {
          AsyncInitDataBlock(is_first_pass);
        }
        if (async_read_in_progress_) {
          // Status::TryAgain indicates that an asynchronous request for
          // retrieval of the data block has been submitted. So we return at
          // this point, and Seek should be called again to retrieve the
          // requested block and execute the remaining code.
          return;
        }
      } else {
        InitDataBlock();
      }
    } else {
      // When the user does a reseek, the iterate_upper_bound might have
      // changed. CheckDataBlockWithinUpperBound() needs to be called
      // explicitly if the reseek ends up in the same data block.
      // If the reseek ends up in a different block, InitDataBlock() will do
      // the iterator upper bound check.
      CheckDataBlockWithinUpperBound();
    }

    if (target) {
      block_iter_.Seek(*target);
    } else {
      block_iter_.SeekToFirst();
    }
    FindKeyForward();
  }

  CheckOutOfBound();

  if (target) {
    assert(!Valid() || icomp_.Compare(*target, key()) <= 0);
  }
}

void BlockBasedTableIterator::SeekForPrev(const Slice& target) {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  seek_stat_state_ = kNone;
  bool filter_checked = false;
  // For now, auto prefix mode is disabled entirely for SeekForPrev because we
  // don't have the logic for a backward prefix seek.
  if (!CheckPrefixMayMatch(target, IterDirection::kBackward,
                           &filter_checked)) {
|
|
ResetDataIter();
|
|
RecordTick(table_->GetStatistics(), is_last_level_
|
|
? LAST_LEVEL_SEEK_FILTERED
|
|
: NON_LAST_LEVEL_SEEK_FILTERED);
|
|
return;
|
|
}
|
|
if (filter_checked) {
|
|
seek_stat_state_ = kFilterUsed;
|
|
RecordTick(table_->GetStatistics(), is_last_level_
|
|
? LAST_LEVEL_SEEK_FILTER_MATCH
|
|
: NON_LAST_LEVEL_SEEK_FILTER_MATCH);
|
|
}
|
|
|
|
SavePrevIndexValue();
|
|
|
|
  // Call Seek() rather than SeekForPrev() on the index block, because the
  // target data block will likely contain the position for `target`, the
  // same as for Seek(), rather than the one before it.
  // For example, if we have three data blocks, each containing two keys:
  //   [2, 4]  [6, 8] [10, 12]
  //   (the keys in the index block would be [4, 8, 12])
  // and the user calls SeekForPrev(7), we need to go to the second block,
  // just like if they call Seek(7).
  // The only case where the block is different is when the seek lands on a
  // boundary. For example, on SeekForPrev(5) we should go to the first
  // block, rather than the second. However, we don't have the information to
  // distinguish the two cases unless we read the second block. In that case,
  // we'll end up reading two blocks.
  index_iter_->Seek(target);

  if (!index_iter_->Valid()) {
    auto seek_status = index_iter_->status();
    // Check for IO error
    if (!seek_status.IsNotFound() && !seek_status.ok()) {
      ResetDataIter();
      return;
    }

    // With prefix index, Seek() returns NotFound if the prefix doesn't exist
    if (seek_status.IsNotFound()) {
      // Any key less than the target is fine for prefix seek
      ResetDataIter();
      return;
    } else {
      index_iter_->SeekToLast();
    }
    // Check for IO error
    if (!index_iter_->Valid()) {
      ResetDataIter();
      return;
    }
  }

  InitDataBlock();

  block_iter_.SeekForPrev(target);

  FindKeyBackward();
  CheckDataBlockWithinUpperBound();
  assert(!block_iter_.Valid() ||
         icomp_.Compare(target, block_iter_.key()) >= 0);
}

void BlockBasedTableIterator::SeekToLast() {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  seek_stat_state_ = kNone;
  SavePrevIndexValue();
  index_iter_->SeekToLast();
  if (!index_iter_->Valid()) {
    ResetDataIter();
    return;
  }
  InitDataBlock();
  block_iter_.SeekToLast();
  FindKeyBackward();
  CheckDataBlockWithinUpperBound();
}

void BlockBasedTableIterator::Next() {
  if (is_at_first_key_from_index_ && !MaterializeCurrentBlock()) {
    return;
  }
  assert(block_iter_points_to_real_block_);
  block_iter_.Next();
  FindKeyForward();
  CheckOutOfBound();
}

bool BlockBasedTableIterator::NextAndGetResult(IterateResult* result) {
  Next();
  bool is_valid = Valid();
  if (is_valid) {
    result->key = key();
    result->bound_check_result = UpperBoundCheckResult();
    result->value_prepared = !is_at_first_key_from_index_;
  }
  return is_valid;
}

void BlockBasedTableIterator::Prev() {
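  // If we are lazily positioned at the current block's first key taken from
  // the index, without having read the block, stepping backward lands on the
  // previous block's last key, so move the index iterator back instead.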
  if (is_at_first_key_from_index_) {
    is_at_first_key_from_index_ = false;

    index_iter_->Prev();
    if (!index_iter_->Valid()) {
      return;
    }

    InitDataBlock();
    block_iter_.SeekToLast();
  } else {
    assert(block_iter_points_to_real_block_);
    block_iter_.Prev();
  }

  FindKeyBackward();
}

void BlockBasedTableIterator::InitDataBlock() {
  BlockHandle data_block_handle = index_iter_->value().handle;
  if (!block_iter_points_to_real_block_ ||
      data_block_handle.offset() != prev_block_offset_ ||
      // if previous attempt of reading the block missed cache, try again
      block_iter_.status().IsIncomplete()) {
    if (block_iter_points_to_real_block_) {
      ResetDataIter();
    }
    auto* rep = table_->get_rep();

    bool is_for_compaction =
        lookup_context_.caller == TableReaderCaller::kCompaction;
    // Prefetch additional data for range scans (iterators).
    // Implicit auto readahead:
    //   Enabled after 2 sequential IOs when ReadOptions.readahead_size == 0.
    // Explicit user requested readahead:
    //   Enabled from the very first IO when ReadOptions.readahead_size is set.
    block_prefetcher_.PrefetchIfNeeded(
        rep, data_block_handle, read_options_.readahead_size, is_for_compaction,
        /*no_sequential_checking=*/false, read_options_.rate_limiter_priority);
    Status s;
    table_->NewDataBlockIterator<DataBlockIter>(
        read_options_, data_block_handle, &block_iter_, BlockType::kData,
        /*get_context=*/nullptr, &lookup_context_,
        block_prefetcher_.prefetch_buffer(),
        /*for_compaction=*/is_for_compaction, /*async_read=*/false, s);
    block_iter_points_to_real_block_ = true;
    CheckDataBlockWithinUpperBound();
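    // Record at most one SEEK_DATA tick per seek: only the first data block
    // read since the last seek counts. kReportOnUseful arms the later
    // "useful" accounting that fires when a value() is actually used.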
    if (!is_for_compaction &&
        (seek_stat_state_ & kDataBlockReadSinceLastSeek) == 0) {
      RecordTick(table_->GetStatistics(), is_last_level_
                                              ? LAST_LEVEL_SEEK_DATA
                                              : NON_LAST_LEVEL_SEEK_DATA);
      seek_stat_state_ = static_cast<SeekStatState>(
          seek_stat_state_ | kDataBlockReadSinceLastSeek | kReportOnUseful);
    }
  }
}

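// Two-pass protocol: the first pass submits an asynchronous read for the data
// block and returns early while it is in flight (Status::TryAgain); the
// second pass retrieves the block that was requested asynchronously.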
void BlockBasedTableIterator::AsyncInitDataBlock(bool is_first_pass) {
  BlockHandle data_block_handle = index_iter_->value().handle;
  bool is_for_compaction =
      lookup_context_.caller == TableReaderCaller::kCompaction;
  if (is_first_pass) {
    if (!block_iter_points_to_real_block_ ||
        data_block_handle.offset() != prev_block_offset_ ||
        // if previous attempt of reading the block missed cache, try again
        block_iter_.status().IsIncomplete()) {
      if (block_iter_points_to_real_block_) {
        ResetDataIter();
      }
      auto* rep = table_->get_rep();
      // Prefetch additional data for range scans (iterators).
      // Implicit auto readahead:
      //   Enabled after 2 sequential IOs when ReadOptions.readahead_size == 0.
      // Explicit user requested readahead:
      //   Enabled from the very first IO when ReadOptions.readahead_size is
      //   set.
      // In case of async_io with implicit readahead, block_prefetcher_ will
      // always create the prefetch buffer by setting no_sequential_checking =
      // true.
      block_prefetcher_.PrefetchIfNeeded(
          rep, data_block_handle, read_options_.readahead_size,
          is_for_compaction, /*no_sequential_checking=*/read_options_.async_io,
          read_options_.rate_limiter_priority);

      Status s;
      table_->NewDataBlockIterator<DataBlockIter>(
          read_options_, data_block_handle, &block_iter_, BlockType::kData,
          /*get_context=*/nullptr, &lookup_context_,
          block_prefetcher_.prefetch_buffer(),
          /*for_compaction=*/is_for_compaction, /*async_read=*/true, s);

      if (s.IsTryAgain()) {
        async_read_in_progress_ = true;
        return;
      }
    }
  } else {
    // The second pass calls Poll to retrieve the data block that was
    // requested asynchronously.
    Status s;
    table_->NewDataBlockIterator<DataBlockIter>(
        read_options_, data_block_handle, &block_iter_, BlockType::kData,
        /*get_context=*/nullptr, &lookup_context_,
        block_prefetcher_.prefetch_buffer(),
        /*for_compaction=*/is_for_compaction, /*async_read=*/false, s);
  }
  block_iter_points_to_real_block_ = true;
  CheckDataBlockWithinUpperBound();

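  // Same one-tick-per-seek SEEK_DATA accounting as in InitDataBlock().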
  if (!is_for_compaction &&
      (seek_stat_state_ & kDataBlockReadSinceLastSeek) == 0) {
    RecordTick(table_->GetStatistics(), is_last_level_
                                            ? LAST_LEVEL_SEEK_DATA
                                            : NON_LAST_LEVEL_SEEK_DATA);
    seek_stat_state_ = static_cast<SeekStatState>(
        seek_stat_state_ | kDataBlockReadSinceLastSeek | kReportOnUseful);
  }
  async_read_in_progress_ = false;
}

bool BlockBasedTableIterator::MaterializeCurrentBlock() {
  assert(is_at_first_key_from_index_);
  assert(!block_iter_points_to_real_block_);
  assert(index_iter_->Valid());

  is_at_first_key_from_index_ = false;
  InitDataBlock();
  assert(block_iter_points_to_real_block_);

  if (!block_iter_.status().ok()) {
    return false;
  }

  block_iter_.SeekToFirst();

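  // Sanity check: the block's actual first key must match the first key
  // recorded in the index entry; a mismatch indicates corruption.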
  if (!block_iter_.Valid() ||
      icomp_.Compare(block_iter_.key(),
                     index_iter_->value().first_internal_key) != 0) {
    block_iter_.Invalidate(Status::Corruption(
        "first key in index doesn't match first key in block"));
    return false;
  }

  return true;
}

void BlockBasedTableIterator::FindKeyForward() {
  // This method's code is kept short to make it likely to be inlined.

  assert(!is_out_of_bound_);
  assert(block_iter_points_to_real_block_);

  if (!block_iter_.Valid()) {
    // This is the only call site of FindBlockForward(), but it's extracted
    // into a separate method to keep FindKeyForward() short and likely to be
    // inlined. When transitioning to a different block, we call
    // FindBlockForward(), which is much longer and is probably not inlined.
    FindBlockForward();
  } else {
    // This is the fast path that avoids a function call.
  }
}

void BlockBasedTableIterator::FindBlockForward() {
  // TODO: this while loop is inherited from two-level-iterator. We don't know
  // whether a data block can be empty, so it is unclear whether the loop can
  // be replaced by an "if".
  do {
    if (!block_iter_.status().ok()) {
      return;
    }
    // Whether the next data block is out of the upper bound, if there is one.
    const bool next_block_is_out_of_bound =
        read_options_.iterate_upper_bound != nullptr &&
        block_iter_points_to_real_block_ &&
        block_upper_bound_check_ == BlockUpperBound::kUpperBoundInCurBlock;
    assert(!next_block_is_out_of_bound ||
           user_comparator_.CompareWithoutTimestamp(
               *read_options_.iterate_upper_bound, /*a_has_ts=*/false,
               index_iter_->user_key(), /*b_has_ts=*/true) <= 0);
    ResetDataIter();
    index_iter_->Next();
    if (next_block_is_out_of_bound) {
      // The next block is out of bound. No need to read it.
      TEST_SYNC_POINT_CALLBACK("BlockBasedTableIterator:out_of_bound", nullptr);
      // We need to make sure this is not the last data block before setting
      // is_out_of_bound_, since the index key for the last data block can be
      // larger than the smallest key of the next file on the same level.
      if (index_iter_->Valid()) {
        is_out_of_bound_ = true;
      }
      return;
    }

    if (!index_iter_->Valid()) {
      return;
    }

    IndexValue v = index_iter_->value();

    if (!v.first_internal_key.empty() && allow_unprepared_value_) {
      // Index contains the first key of the block. Defer reading the block.
      is_at_first_key_from_index_ = true;
      return;
    }

    InitDataBlock();
    block_iter_.SeekToFirst();
  } while (!block_iter_.Valid());
}

void BlockBasedTableIterator::FindKeyBackward() {
  while (!block_iter_.Valid()) {
    if (!block_iter_.status().ok()) {
      return;
    }

    ResetDataIter();
    index_iter_->Prev();

    if (index_iter_->Valid()) {
      InitDataBlock();
      block_iter_.SeekToLast();
    } else {
      return;
    }
  }

  // We could have checked the lower bound here too, but we opt not to, for
  // code simplicity.
}

void BlockBasedTableIterator::CheckOutOfBound() {
  if (read_options_.iterate_upper_bound != nullptr &&
      block_upper_bound_check_ != BlockUpperBound::kUpperBoundBeyondCurBlock &&
      Valid()) {
    is_out_of_bound_ =
        user_comparator_.CompareWithoutTimestamp(
            *read_options_.iterate_upper_bound, /*a_has_ts=*/false, user_key(),
            /*b_has_ts=*/true) <= 0;
  }
}

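// Classifies whether the current data block lies entirely below
// iterate_upper_bound (bound beyond the block) or may contain it, based on
// the index key, which is an upper bound for the keys in the block.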
void BlockBasedTableIterator::CheckDataBlockWithinUpperBound() {
  if (read_options_.iterate_upper_bound != nullptr &&
      block_iter_points_to_real_block_) {
    block_upper_bound_check_ = (user_comparator_.CompareWithoutTimestamp(
                                    *read_options_.iterate_upper_bound,
                                    /*a_has_ts=*/false, index_iter_->user_key(),
                                    /*b_has_ts=*/true) > 0)
                                   ? BlockUpperBound::kUpperBoundBeyondCurBlock
                                   : BlockUpperBound::kUpperBoundInCurBlock;
  }
}

}  // namespace ROCKSDB_NAMESPACE