// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once
#include "table/block_based/block_based_table_reader.h"

#include "table/block_based/block_based_table_reader_impl.h"
#include "table/block_based/block_prefetcher.h"
#include "table/block_based/reader_common.h"

namespace ROCKSDB_NAMESPACE {
// Iterates over the contents of BlockBasedTable.
class BlockBasedTableIterator : public InternalIteratorBase<Slice> {
  // compaction_readahead_size: its value will only be used if the iterator is
  // used for compaction reads (see `caller`).
  // @param read_options Must outlive this iterator.
public:
  BlockBasedTableIterator(
      const BlockBasedTable* table, const ReadOptions& read_options,
      const InternalKeyComparator& icomp,
      std::unique_ptr<InternalIteratorBase<IndexValue>>&& index_iter,
      bool check_filter, bool need_upper_bound_check,
      const SliceTransform* prefix_extractor, TableReaderCaller caller,
      size_t compaction_readahead_size = 0, bool allow_unprepared_value = false)
      : index_iter_(std::move(index_iter)),
        table_(table),
        read_options_(read_options),
        icomp_(icomp),
        user_comparator_(icomp.user_comparator()),
        pinned_iters_mgr_(nullptr),
        prefix_extractor_(prefix_extractor),
        lookup_context_(caller),
        block_prefetcher_(
            compaction_readahead_size,
            table_->get_rep()->table_options.initial_auto_readahead_size),
        allow_unprepared_value_(allow_unprepared_value),
        block_iter_points_to_real_block_(false),
        check_filter_(check_filter),
        need_upper_bound_check_(need_upper_bound_check),
        async_read_in_progress_(false) {}

  ~BlockBasedTableIterator() {}
  void Seek(const Slice& target) override;
  void SeekForPrev(const Slice& target) override;
  void SeekToFirst() override;
  void SeekToLast() override;
  void Next() final override;
  bool NextAndGetResult(IterateResult* result) override;
  void Prev() override;
  bool Valid() const override {
    return !is_out_of_bound_ &&
           (is_at_first_key_from_index_ ||
            (block_iter_points_to_real_block_ && block_iter_.Valid()));
  }
  Slice key() const override {
    assert(Valid());
    if (is_at_first_key_from_index_) {
      return index_iter_->value().first_internal_key;
    } else {
      return block_iter_.key();
    }
  }
  Slice user_key() const override {
    assert(Valid());
    if (is_at_first_key_from_index_) {
      return ExtractUserKey(index_iter_->value().first_internal_key);
    } else {
      return block_iter_.user_key();
    }
  }
  bool PrepareValue() override {
    assert(Valid());

    if (!is_at_first_key_from_index_) {
      return true;
    }

    return const_cast<BlockBasedTableIterator*>(this)
        ->MaterializeCurrentBlock();
  }
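  // Usage sketch (hypothetical caller code, assuming the iterator was created
  // with allow_unprepared_value=true): PrepareValue() must succeed before
  // value() may be called, e.g.
  //
  //   for (iter->Seek(target); iter->Valid(); iter->Next()) {
  //     if (!iter->PrepareValue()) {
  //       // Loading the data block failed; the error is in iter->status().
  //       break;
  //     }
  //     Slice v = iter->value();
  //   }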
  Slice value() const override {
    // PrepareValue() must have been called.
    assert(!is_at_first_key_from_index_);
    assert(Valid());

    return block_iter_.value();
  }
  Status status() const override {
    // Prefix index sets status to NotFound when the prefix does not exist.
    if (!index_iter_->status().ok() && !index_iter_->status().IsNotFound()) {
      return index_iter_->status();
    } else if (block_iter_points_to_real_block_) {
      return block_iter_.status();
    } else if (async_read_in_progress_) {
      return Status::TryAgain();
    } else {
      return Status::OK();
    }
  }
  inline IterBoundCheck UpperBoundCheckResult() override {
    if (is_out_of_bound_) {
      return IterBoundCheck::kOutOfBound;
    } else if (block_upper_bound_check_ ==
               BlockUpperBound::kUpperBoundBeyondCurBlock) {
      assert(!is_out_of_bound_);
      return IterBoundCheck::kInbound;
    } else {
      return IterBoundCheck::kUnknown;
    }
  }
  void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) override {
    pinned_iters_mgr_ = pinned_iters_mgr;
  }
  bool IsKeyPinned() const override {
    // Our key comes either from block_iter_'s current key
    // or index_iter_'s current *value*.
    return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
           ((is_at_first_key_from_index_ && index_iter_->IsValuePinned()) ||
            (block_iter_points_to_real_block_ && block_iter_.IsKeyPinned()));
  }
  bool IsValuePinned() const override {
    assert(!is_at_first_key_from_index_);
    assert(Valid());

    // BlockIter::IsValuePinned() is always true, so there is no need to
    // check it here.
    return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
           block_iter_points_to_real_block_;
  }
  void ResetDataIter() {
    if (block_iter_points_to_real_block_) {
      if (pinned_iters_mgr_ != nullptr && pinned_iters_mgr_->PinningEnabled()) {
        block_iter_.DelegateCleanupsTo(pinned_iters_mgr_);
      }
      block_iter_.Invalidate(Status::OK());
      block_iter_points_to_real_block_ = false;
    }
    block_upper_bound_check_ = BlockUpperBound::kUnknown;
  }
  void SavePrevIndexValue() {
    if (block_iter_points_to_real_block_) {
      // This is a reseek. If the reseek lands in the same data block, we
      // shouldn't re-fetch that block.
      prev_block_offset_ = index_iter_->value().handle.offset();
    }
  }
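  // The two methods below exchange adaptive-readahead state through
  // ReadaheadFileInfo. In broad strokes (a summary, not a full
  // specification): when read_options_.adaptive_readahead is set, the
  // readahead size learned while iterating one file is carried over to the
  // iterator of the next file, instead of warming up again from the initial
  // readahead size.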
  void GetReadaheadState(ReadaheadFileInfo* readahead_file_info) override {
    if (block_prefetcher_.prefetch_buffer() != nullptr &&
        read_options_.adaptive_readahead) {
      block_prefetcher_.prefetch_buffer()->GetReadaheadState(
          &(readahead_file_info->data_block_readahead_info));
      if (index_iter_) {
        index_iter_->GetReadaheadState(readahead_file_info);
      }
    }
  }
  void SetReadaheadState(ReadaheadFileInfo* readahead_file_info) override {
    if (read_options_.adaptive_readahead) {
      block_prefetcher_.SetReadaheadState(
          &(readahead_file_info->data_block_readahead_info));
      if (index_iter_) {
        index_iter_->SetReadaheadState(readahead_file_info);
      }
    }
  }
  std::unique_ptr<InternalIteratorBase<IndexValue>> index_iter_;

 private:
  enum class IterDirection {
    kForward,
    kBackward,
  };
  // This enum indicates whether the upper bound falls into the current block
  // or beyond.
  //   +-------------+
  //   |  cur block  | <-- (1)
  //   +-------------+
  //                   <-- (2)
  //  --- <boundary key> ---
  //                   <-- (3)
  //   +-------------+
  //   | next block  | <-- (4)
  //        ......
  //
  // When the upper bound is smaller than <boundary key>, kUpperBoundInCurBlock
  // is the value to use. The examples are (1) and (2) in the graph. It means
  // all keys in the next block or beyond will be out of bound. Keys within
  // the current block may or may not be out of bound.
  // When the upper bound is larger than or equal to <boundary key>,
  // kUpperBoundBeyondCurBlock is to be used. The examples are (3) and (4)
  // in the graph. It means that all keys in the current block are within the
  // upper bound, and keys in the next block may or may not be within the
  // upper bound.
  // If the boundary key hasn't been checked against the upper bound,
  // kUnknown can be used.
  enum class BlockUpperBound {
    kUpperBoundInCurBlock,
    kUpperBoundBeyondCurBlock,
    kUnknown,
  };
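  // Illustrative example (hypothetical keys): suppose the current block holds
  // ["k01".."k05"], the boundary key with the next block is "k07", and
  // iterate_upper_bound is "k03". The upper bound falls inside the current
  // block, so kUpperBoundInCurBlock applies and the next block can be skipped
  // entirely. If iterate_upper_bound were "k09" instead,
  // kUpperBoundBeyondCurBlock would apply: every key in the current block is
  // in bound, while keys in the next block may or may not be.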
  const BlockBasedTable* table_;
  const ReadOptions& read_options_;
  const InternalKeyComparator& icomp_;
  UserComparatorWrapper user_comparator_;
  PinnedIteratorsManager* pinned_iters_mgr_;
  DataBlockIter block_iter_;
  const SliceTransform* prefix_extractor_;
  uint64_t prev_block_offset_ = std::numeric_limits<uint64_t>::max();
  BlockCacheLookupContext lookup_context_;

  BlockPrefetcher block_prefetcher_;
  const bool allow_unprepared_value_;
  // True if block_iter_ is initialized and points to the same block
  // as index iterator.
  bool block_iter_points_to_real_block_;
  // See InternalIteratorBase::IsOutOfBound().
  bool is_out_of_bound_ = false;
  // How the boundary key between the current data block and the next block
  // compares against the iterate upper bound.
  BlockUpperBound block_upper_bound_check_ = BlockUpperBound::kUnknown;
  // True if we're standing at the first key of a block, and we haven't loaded
  // that block yet. A call to PrepareValue() will trigger loading the block.
  bool is_at_first_key_from_index_ = false;
  bool check_filter_;
  // TODO(Zhongyi): pick a better name
  bool need_upper_bound_check_;

  bool async_read_in_progress_;
  // If `target` is null, seek to first.
  void SeekImpl(const Slice* target, bool async_prefetch);
  void InitDataBlock();
  void AsyncInitDataBlock(bool is_first_pass);
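  // Note on the async path (a rough sketch of the intended flow, not a full
  // specification): with asynchronous IO enabled, SeekImpl() may kick off the
  // data block read through AsyncInitDataBlock(true). While the read is
  // outstanding, async_read_in_progress_ stays true and status() reports
  // Status::TryAgain(); the caller is expected to retry the seek, and the
  // second pass (AsyncInitDataBlock(false)) picks up the completed read.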
  bool MaterializeCurrentBlock();
  void FindKeyForward();
  void FindBlockForward();
  void FindKeyBackward();
  void CheckOutOfBound();

  // Check if data block is fully within iterate_upper_bound.
  //
  // Note MyRocks may update iterate bounds between seeks. To work around it,
  // we need to check and update block_upper_bound_check_ accordingly.
  void CheckDataBlockWithinUpperBound();
  bool CheckPrefixMayMatch(const Slice& ikey, IterDirection direction) {
    if (need_upper_bound_check_ && direction == IterDirection::kBackward) {
      // Upper bound check isn't sufficient for backward direction to
      // guarantee the same result as total order, so disable prefix
      // check.
      return true;
    }
    if (check_filter_ && !table_->PrefixRangeMayMatch(
                             ikey, read_options_, prefix_extractor_,
                             need_upper_bound_check_, &lookup_context_)) {
      // TODO: remember that the iterator is invalidated because of a failed
      // prefix match. This can prevent the upper-level file iterator from
      // falsely believing the position is the end of the SST file and moving
      // to the first key of the next file.
      ResetDataIter();
      return false;
    }
    return true;
  }
};
} // namespace ROCKSDB_NAMESPACE