Mirror of https://github.com/facebook/rocksdb.git (synced 2024-11-25 22:44:05 +00:00)
Fix corruption error in stress test for auto_readahead_size enabled (#11961)
Summary: Fix the corruption error "Corruption: first key in index doesn't match first key in block" seen when auto_readahead_size is enabled. The error is caused by a bug where, when index_iter_ moves forward, the first_internal_key of that index entry is not copied, so the Slice ends up pointing at a different key and the key comparison yields the wrong result.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11961

Test Plan: Ran the stress test which reproduced this error.

Reviewed By: anand1976

Differential Revision: D50310589

Pulled By: akankshamahajan15

fbshipit-source-id: 95d8320b8388f1e3822c32024f84754f3a20a631
This commit is contained in:
parent 2296c624fa
commit 9135a61ec6
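The root cause described in the summary is a lifetime problem: a Slice is only a pointer plus a length into memory owned by something else (here, the index iterator), so once index_iter_ advances, a remembered Slice silently refers to different bytes. The patch fixes this by deep-copying the key into a buffer owned by BlockHandleInfo (see SetFirstInternalKey in the header hunk below). The following is a minimal, self-contained sketch of the hazard and of the copy-into-owned-buffer fix; the Slice and OwningKey types here are simplified stand-ins written for this illustration, not RocksDB's actual classes.

```cpp
#include <cstddef>
#include <cstring>
#include <iostream>
#include <memory>
#include <string>

// Simplified stand-in for rocksdb::Slice: a non-owning pointer + length.
struct Slice {
  const char* data = nullptr;
  size_t size = 0;
  Slice() = default;
  Slice(const char* d, size_t n) : data(d), size(n) {}
  std::string ToString() const { return std::string(data, size); }
};

// What SetFirstInternalKey does in the patch: copy the key bytes into a
// buffer owned by the struct and point the Slice at that private copy.
struct OwningKey {
  Slice key;
  std::unique_ptr<char[]> buf;
  void Set(const Slice& src) {
    buf.reset(new char[src.size]);
    std::memcpy(buf.get(), src.data, src.size);
    key = Slice(buf.get(), src.size);
  }
};

int main() {
  char iter_buf[] = "key_block_1";                 // pretend this is the iterator's reusable buffer
  Slice dangling(iter_buf, sizeof(iter_buf) - 1);  // non-owning view (the bug)

  OwningKey copied;
  copied.Set(dangling);                            // deep copy (the fix)

  // "index_iter_ moves forward": the underlying buffer now holds another key.
  std::memcpy(iter_buf, "key_block_2", sizeof(iter_buf) - 1);

  std::cout << "dangling view: " << dangling.ToString() << "\n";   // key_block_2 (wrong key)
  std::cout << "owned copy:    " << copied.key.ToString() << "\n"; // key_block_1 (still correct)
  return 0;
}
```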
@@ -318,7 +318,7 @@ void BlockBasedTableIterator::InitDataBlock() {
   bool use_block_cache_for_lookup = true;

   if (DoesContainBlockHandles()) {
-    data_block_handle = block_handles_.front().index_val_.handle;
+    data_block_handle = block_handles_.front().handle_;
     is_in_cache = block_handles_.front().is_cache_hit_;
     use_block_cache_for_lookup = false;
   } else {
@@ -483,15 +483,15 @@ bool BlockBasedTableIterator::MaterializeCurrentBlock() {
   // handles placed in blockhandle. So index_ will be pointing to current block.
   // After InitDataBlock, index_iter_ can point to different block if
   // BlockCacheLookupForReadAheadSize is called.
-  IndexValue index_val;
+  Slice first_internal_key;
   if (DoesContainBlockHandles()) {
-    index_val = block_handles_.front().index_val_;
+    first_internal_key = block_handles_.front().first_internal_key_;
   } else {
-    index_val = index_iter_->value();
+    first_internal_key = index_iter_->value().first_internal_key;
   }

   if (!block_iter_.Valid() ||
-      icomp_.Compare(block_iter_.key(), index_val.first_internal_key) != 0) {
+      icomp_.Compare(block_iter_.key(), first_internal_key) != 0) {
     block_iter_.Invalidate(Status::Corruption(
         "first key in index doesn't match first key in block"));
     return false;
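The comparison updated above is a consistency guard: the first internal key the index recorded for a block must equal the first key actually found in the block, otherwise the iterator is invalidated with a corruption status. Before the fix, the guard compared against index_val.first_internal_key, a Slice that could already be dangling; after the fix it compares against the copied first_internal_key. Below is a minimal sketch of that guard, using plain std::string instead of RocksDB's Slice and internal key comparator; CheckFirstKey and the surrounding code are made up for illustration.

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Sanity check in the spirit of MaterializeCurrentBlock: the first key the
// index recorded for a block must match the first key the block itself holds.
// Returns an error message on mismatch, std::nullopt on success.
std::optional<std::string> CheckFirstKey(const std::string& key_from_index,
                                         const std::vector<std::string>& block_keys) {
  if (block_keys.empty() || block_keys.front() != key_from_index) {
    return "Corruption: first key in index doesn't match first key in block";
  }
  return std::nullopt;
}

int main() {
  std::vector<std::string> block = {"apple", "banana", "cherry"};

  // Correct case: the index remembered the block's real first key.
  auto ok = CheckFirstKey("apple", block);
  std::cout << (ok ? *ok : "ok") << "\n";

  // Buggy case from the commit: a stale key that now names a different
  // block's first key trips the corruption check.
  auto bad = CheckFirstKey("banana", block);
  std::cout << (bad ? *bad : "ok") << "\n";
  return 0;
}
```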
@@ -701,7 +701,9 @@ void BlockBasedTableIterator::BlockCacheLookupForReadAheadSize(
   // Add the current block to block_handles_.
   {
     BlockHandleInfo block_handle_info;
-    block_handle_info.index_val_ = index_iter_->value();
+    block_handle_info.handle_ = index_iter_->value().handle;
+    block_handle_info.SetFirstInternalKey(
+        index_iter_->value().first_internal_key);
     block_handles_.emplace_back(std::move(block_handle_info));
   }
@@ -726,7 +728,9 @@ void BlockBasedTableIterator::BlockCacheLookupForReadAheadSize(
   // For current data block, do the lookup in the cache. Lookup should pin the
   // data block and add the placeholder for cache.
   BlockHandleInfo block_handle_info;
-  block_handle_info.index_val_ = index_iter_->value();
+  block_handle_info.handle_ = index_iter_->value().handle;
+  block_handle_info.SetFirstInternalKey(
+      index_iter_->value().first_internal_key);

   Status s = table_->LookupAndPinBlocksInCache<Block_kData>(
       read_options_, block_handle,
@@ -758,7 +762,7 @@ void BlockBasedTableIterator::BlockCacheLookupForReadAheadSize(
   // update the readahead_size.
   for (auto it = block_handles_.rbegin();
        it != block_handles_.rend() && (*it).is_cache_hit_ == true; ++it) {
-    current_readahead_size -= (*it).index_val_.handle.size();
+    current_readahead_size -= (*it).handle_.size();
     current_readahead_size -= footer;
   }
   updated_readahead_size = current_readahead_size;
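The loop above walks block_handles_ from the back and, for every trailing block that was already found in the block cache, subtracts that block's size plus its footer overhead from the prospective readahead, since those bytes won't need to be read from the file. A rough standalone sketch of that accounting, with made-up block sizes and a hypothetical kFooter constant (the real value comes from the table format, not from this code):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical per-block footer/overhead size, used only for this illustration.
constexpr size_t kFooter = 5;

struct HandleInfo {
  size_t block_size;
  bool is_cache_hit;
};

// Mirror of the trimming idea: walk the queued handles from the back and
// drop cache-resident trailing blocks from the planned readahead.
size_t TrimReadahead(size_t readahead, const std::vector<HandleInfo>& handles) {
  for (auto it = handles.rbegin();
       it != handles.rend() && it->is_cache_hit; ++it) {
    readahead -= it->block_size + kFooter;
  }
  return readahead;
}

int main() {
  // 64 KiB planned readahead; the last two 8 KiB blocks are already cached.
  std::vector<HandleInfo> handles = {
      {8192, false}, {8192, false}, {8192, true}, {8192, true}};
  std::cout << TrimReadahead(64 * 1024, handles) << "\n";  // 65536 - 2*(8192+5)
  return 0;
}
```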
@@ -250,11 +250,21 @@ class BlockBasedTableIterator : public InternalIteratorBase<Slice> {
   // BlockHandleInfo is used to store the info needed when block cache lookup
   // ahead is enabled to tune readahead_size.
   struct BlockHandleInfo {
     BlockHandleInfo() {}

+    void SetFirstInternalKey(const Slice& key) {
+      if (key.empty()) {
+        return;
+      }
+      size_t size = key.size();
+      buf_ = std::unique_ptr<char[]>(new char[size]);
+      memcpy(buf_.get(), key.data(), size);
+      first_internal_key_ = Slice(buf_.get(), size);
+    }
+
-    IndexValue index_val_;
+    BlockHandle handle_;
     bool is_cache_hit_ = false;
     CachableEntry<Block> cachable_entry_;
+    Slice first_internal_key_;
+    std::unique_ptr<char[]> buf_;
   };

   bool IsIndexAtCurr() const { return is_index_at_curr_block_; }
unreleased_history/bug_fixes/auto_readahead.md (new file, 1 line)
@@ -0,0 +1 @@
+Fix a bug in auto_readahead_size where the first_internal_key of index blocks wasn't copied properly, resulting in a corruption error when first_internal_key was used for comparison.