mirror of
https://github.com/facebook/rocksdb.git
synced 2024-11-25 22:44:05 +00:00
Return error if Get() fails in Prefetching Filter blocks (#7463)
Summary: Right now all I/O failures under PartitionFilterBlock::CacheDependencies() are swallowed. Return error in case prefetch fails. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7463 Test Plan: make check -j64 Reviewed By: anand1976 Differential Revision: D24008226 Pulled By: akankshamahajan15 fbshipit-source-id: b65d63b2d01465db92500b78de7ad58650ec9b3b
This commit is contained in:
parent
668ee08915
commit
7d503e66a9
|
@ -91,17 +91,19 @@ Status FilePrefetchBuffer::Prefetch(const IOOptions& opts,
|
|||
size_t read_len = static_cast<size_t>(roundup_len - chunk_len);
|
||||
s = reader->Read(opts, rounddown_offset + chunk_len, read_len, &result,
|
||||
buffer_.BufferStart() + chunk_len, nullptr, for_compaction);
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
|
||||
#ifndef NDEBUG
|
||||
if (!s.ok() || result.size() < read_len) {
|
||||
if (result.size() < read_len) {
|
||||
// Fake an IO error to force db_stress fault injection to ignore
|
||||
// truncated read errors
|
||||
IGNORE_STATUS_IF_ERROR(Status::IOError());
|
||||
}
|
||||
#endif
|
||||
if (s.ok()) {
|
||||
buffer_offset_ = rounddown_offset;
|
||||
buffer_.Size(static_cast<size_t>(chunk_len) + result.size());
|
||||
}
|
||||
buffer_offset_ = rounddown_offset;
|
||||
buffer_.Size(static_cast<size_t>(chunk_len) + result.size());
|
||||
return s;
|
||||
}
|
||||
|
||||
|
|
|
@ -1038,13 +1038,16 @@ Status BlockBasedTable::PrefetchIndexAndFilterBlocks(
|
|||
auto filter = new_table->CreateFilterBlockReader(
|
||||
ro, prefetch_buffer, use_cache, prefetch_filter, pin_filter,
|
||||
lookup_context);
|
||||
|
||||
if (filter) {
|
||||
rep_->filter = std::move(filter);
|
||||
// Refer to the comment above about partitioned indexes always being cached
|
||||
if (prefetch_all) {
|
||||
filter->CacheDependencies(ro, pin_all);
|
||||
s = rep_->filter->CacheDependencies(ro, pin_all);
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
}
|
||||
|
||||
rep_->filter = std::move(filter);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -153,7 +153,9 @@ class FilterBlockReader {
|
|||
return error_msg;
|
||||
}
|
||||
|
||||
virtual void CacheDependencies(const ReadOptions& /*ro*/, bool /*pin*/) {}
|
||||
virtual Status CacheDependencies(const ReadOptions& /*ro*/, bool /*pin*/) {
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
virtual bool RangeMayExist(const Slice* /*iterate_upper_bound*/,
|
||||
const Slice& user_key,
|
||||
|
|
|
@ -412,8 +412,8 @@ size_t PartitionedFilterBlockReader::ApproximateMemoryUsage() const {
|
|||
}
|
||||
|
||||
// TODO(myabandeh): merge this with the same function in IndexReader
|
||||
void PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
|
||||
bool pin) {
|
||||
Status PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
|
||||
bool pin) {
|
||||
assert(table());
|
||||
|
||||
const BlockBasedTable::Rep* const rep = table()->get_rep();
|
||||
|
@ -426,12 +426,11 @@ void PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
|
|||
Status s = GetOrReadFilterBlock(false /* no_io */, nullptr /* get_context */,
|
||||
&lookup_context, &filter_block);
|
||||
if (!s.ok()) {
|
||||
ROCKS_LOG_WARN(rep->ioptions.info_log,
|
||||
"Error retrieving top-level filter block while trying to "
|
||||
"cache filter partitions: %s",
|
||||
s.ToString().c_str());
|
||||
IGNORE_STATUS_IF_ERROR(s);
|
||||
return;
|
||||
ROCKS_LOG_ERROR(rep->ioptions.info_log,
|
||||
"Error retrieving top-level filter block while trying to "
|
||||
"cache filter partitions: %s",
|
||||
s.ToString().c_str());
|
||||
return s;
|
||||
}
|
||||
|
||||
// Before read partitions, prefetch them to avoid lots of IOs
|
||||
|
@ -465,6 +464,9 @@ void PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
|
|||
s = prefetch_buffer->Prefetch(opts, rep->file.get(), prefetch_off,
|
||||
static_cast<size_t>(prefetch_len));
|
||||
}
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
|
||||
// After prefetch, read the partitions one by one
|
||||
for (biter.SeekToFirst(); biter.Valid(); biter.Next()) {
|
||||
|
@ -477,17 +479,20 @@ void PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
|
|||
prefetch_buffer.get(), ro, handle, UncompressionDict::GetEmptyDict(),
|
||||
&block, BlockType::kFilter, nullptr /* get_context */, &lookup_context,
|
||||
nullptr /* contents */);
|
||||
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
assert(s.ok() || block.GetValue() == nullptr);
|
||||
if (s.ok() && block.GetValue() != nullptr) {
|
||||
|
||||
if (block.GetValue() != nullptr) {
|
||||
if (block.IsCached()) {
|
||||
if (pin) {
|
||||
filter_map_[handle.offset()] = std::move(block);
|
||||
}
|
||||
}
|
||||
}
|
||||
IGNORE_STATUS_IF_ERROR(s);
|
||||
}
|
||||
return biter.status();
|
||||
}
|
||||
|
||||
const InternalKeyComparator* PartitionedFilterBlockReader::internal_comparator()
|
||||
|
|
|
@ -130,7 +130,7 @@ class PartitionedFilterBlockReader : public FilterBlockReaderCommon<Block> {
|
|||
uint64_t block_offset, BlockHandle filter_handle,
|
||||
bool no_io, BlockCacheLookupContext* lookup_context,
|
||||
FilterManyFunction filter_function) const;
|
||||
void CacheDependencies(const ReadOptions& ro, bool pin) override;
|
||||
Status CacheDependencies(const ReadOptions& ro, bool pin) override;
|
||||
|
||||
const InternalKeyComparator* internal_comparator() const;
|
||||
bool index_key_includes_seq() const;
|
||||
|
|
Loading…
Reference in a new issue