mirror of https://github.com/facebook/rocksdb.git
Eliminate some parameters redundant with ReadOptions (#12761)
Summary: ... in Index and CompressionDict readers (Filters in another PR). no_io and verify_checksums should be inferred from ReadOptions rather than specified redundantly. Fixes incomplete propagation of ReadOptions in UncompressionDictReader::GetOrReadUncompressionDictionary, so this is technically a functional change. (Related to https://github.com/facebook/rocksdb/issues/12757) Also, verify_checksums was hardcoded to false in DumpTable, but only for the UncompressionDict read, which doesn't make sense. Now a consistent ReadOptions is used, and verify_checksums can be controlled for more reads together. Pull Request resolved: https://github.com/facebook/rocksdb/pull/12761 Test Plan: existing tests Reviewed By: hx235 Differential Revision: D58450392 Pulled By: pdillinger fbshipit-source-id: 0faed22832d664cb3b04a4c03ee77119977c200b
This commit is contained in:
parent
a2f772910e
commit
5cf3bed00f
|
@ -44,9 +44,8 @@ InternalIteratorBase<IndexValue>* BinarySearchIndexReader::NewIterator(
|
|||
IndexBlockIter* iter, GetContext* get_context,
|
||||
BlockCacheLookupContext* lookup_context) {
|
||||
const BlockBasedTable::Rep* rep = table()->get_rep();
|
||||
const bool no_io = (read_options.read_tier == kBlockCacheTier);
|
||||
CachableEntry<Block> index_block;
|
||||
const Status s = GetOrReadIndexBlock(no_io, get_context, lookup_context,
|
||||
const Status s = GetOrReadIndexBlock(get_context, lookup_context,
|
||||
&index_block, read_options);
|
||||
if (!s.ok()) {
|
||||
if (iter != nullptr) {
|
||||
|
|
|
@ -1565,9 +1565,8 @@ Status BlockBasedTable::LookupAndPinBlocksInCache(
|
|||
Status s;
|
||||
CachableEntry<UncompressionDict> uncompression_dict;
|
||||
if (rep_->uncompression_dict_reader) {
|
||||
const bool no_io = (ro.read_tier == kBlockCacheTier);
|
||||
s = rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
|
||||
/* prefetch_buffer= */ nullptr, ro, no_io, ro.verify_checksums,
|
||||
/* prefetch_buffer= */ nullptr, ro,
|
||||
/* get_context= */ nullptr, /* lookup_context= */ nullptr,
|
||||
&uncompression_dict);
|
||||
if (!s.ok()) {
|
||||
|
@ -3085,10 +3084,8 @@ Status BlockBasedTable::DumpTable(WritableFile* out_file) {
|
|||
if (rep_->uncompression_dict_reader) {
|
||||
CachableEntry<UncompressionDict> uncompression_dict;
|
||||
s = rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
|
||||
nullptr /* prefetch_buffer */, ro, false /* no_io */,
|
||||
false, /* verify_checksums */
|
||||
nullptr /* get_context */, nullptr /* lookup_context */,
|
||||
&uncompression_dict);
|
||||
nullptr /* prefetch_buffer */, ro, nullptr /* get_context */,
|
||||
nullptr /* lookup_context */, &uncompression_dict);
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
|
|
|
@ -62,7 +62,6 @@ TBlockIter* BlockBasedTable::NewDataBlockIterator(
|
|||
CachableEntry<Block> block;
|
||||
if (rep_->uncompression_dict_reader && block_type == BlockType::kData) {
|
||||
CachableEntry<UncompressionDict> uncompression_dict;
|
||||
const bool no_io = (ro.read_tier == kBlockCacheTier);
|
||||
// For async scans, don't use the prefetch buffer since an async prefetch
|
||||
// might already be under way and this would invalidate it. Also, the
|
||||
// uncompression dict is typically at the end of the file and would
|
||||
|
@ -72,8 +71,7 @@ TBlockIter* BlockBasedTable::NewDataBlockIterator(
|
|||
// pattern.
|
||||
s = rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
|
||||
((ro.async_io || ro.auto_readahead_size) ? nullptr : prefetch_buffer),
|
||||
ro, no_io, ro.verify_checksums, get_context, lookup_context,
|
||||
&uncompression_dict);
|
||||
ro, get_context, lookup_context, &uncompression_dict);
|
||||
if (!s.ok()) {
|
||||
iter->Invalidate(s);
|
||||
return iter;
|
||||
|
|
|
@ -461,9 +461,9 @@ DEFINE_SYNC_AND_ASYNC(void, BlockBasedTable::MultiGet)
|
|||
uncompression_dict_status =
|
||||
rep_->uncompression_dict_reader
|
||||
->GetOrReadUncompressionDictionary(
|
||||
nullptr /* prefetch_buffer */, read_options, no_io,
|
||||
read_options.verify_checksums, get_context,
|
||||
&metadata_lookup_context, &uncompression_dict);
|
||||
nullptr /* prefetch_buffer */, read_options,
|
||||
get_context, &metadata_lookup_context,
|
||||
&uncompression_dict);
|
||||
uncompression_dict_inited = true;
|
||||
}
|
||||
|
||||
|
|
|
@ -114,9 +114,8 @@ InternalIteratorBase<IndexValue>* HashIndexReader::NewIterator(
|
|||
IndexBlockIter* iter, GetContext* get_context,
|
||||
BlockCacheLookupContext* lookup_context) {
|
||||
const BlockBasedTable::Rep* rep = table()->get_rep();
|
||||
const bool no_io = (read_options.read_tier == kBlockCacheTier);
|
||||
CachableEntry<Block> index_block;
|
||||
const Status s = GetOrReadIndexBlock(no_io, get_context, lookup_context,
|
||||
const Status s = GetOrReadIndexBlock(get_context, lookup_context,
|
||||
&index_block, read_options);
|
||||
if (!s.ok()) {
|
||||
if (iter != nullptr) {
|
||||
|
|
|
@ -35,9 +35,8 @@ Status BlockBasedTable::IndexReaderCommon::ReadIndexBlock(
|
|||
}
|
||||
|
||||
Status BlockBasedTable::IndexReaderCommon::GetOrReadIndexBlock(
|
||||
bool no_io, GetContext* get_context,
|
||||
BlockCacheLookupContext* lookup_context, CachableEntry<Block>* index_block,
|
||||
const ReadOptions& ro) const {
|
||||
GetContext* get_context, BlockCacheLookupContext* lookup_context,
|
||||
CachableEntry<Block>* index_block, const ReadOptions& ro) const {
|
||||
assert(index_block != nullptr);
|
||||
|
||||
if (!index_block_.IsEmpty()) {
|
||||
|
@ -45,12 +44,7 @@ Status BlockBasedTable::IndexReaderCommon::GetOrReadIndexBlock(
|
|||
return Status::OK();
|
||||
}
|
||||
|
||||
ReadOptions read_options = ro;
|
||||
if (no_io) {
|
||||
read_options.read_tier = kBlockCacheTier;
|
||||
}
|
||||
|
||||
return ReadIndexBlock(table_, /*prefetch_buffer=*/nullptr, read_options,
|
||||
return ReadIndexBlock(table_, /*prefetch_buffer=*/nullptr, ro,
|
||||
cache_index_blocks(), get_context, lookup_context,
|
||||
index_block);
|
||||
}
|
||||
|
|
|
@ -74,7 +74,7 @@ class BlockBasedTable::IndexReaderCommon : public BlockBasedTable::IndexReader {
|
|||
return table_->get_rep()->user_defined_timestamps_persisted;
|
||||
}
|
||||
|
||||
Status GetOrReadIndexBlock(bool no_io, GetContext* get_context,
|
||||
Status GetOrReadIndexBlock(GetContext* get_context,
|
||||
BlockCacheLookupContext* lookup_context,
|
||||
CachableEntry<Block>* index_block,
|
||||
const ReadOptions& read_options) const;
|
||||
|
|
|
@ -47,9 +47,8 @@ InternalIteratorBase<IndexValue>* PartitionIndexReader::NewIterator(
|
|||
const ReadOptions& read_options, bool /* disable_prefix_seek */,
|
||||
IndexBlockIter* iter, GetContext* get_context,
|
||||
BlockCacheLookupContext* lookup_context) {
|
||||
const bool no_io = (read_options.read_tier == kBlockCacheTier);
|
||||
CachableEntry<Block> index_block;
|
||||
const Status s = GetOrReadIndexBlock(no_io, get_context, lookup_context,
|
||||
const Status s = GetOrReadIndexBlock(get_context, lookup_context,
|
||||
&index_block, read_options);
|
||||
if (!s.ok()) {
|
||||
if (iter != nullptr) {
|
||||
|
@ -125,8 +124,8 @@ Status PartitionIndexReader::CacheDependencies(
|
|||
|
||||
CachableEntry<Block> index_block;
|
||||
{
|
||||
Status s = GetOrReadIndexBlock(false /* no_io */, nullptr /* get_context */,
|
||||
&lookup_context, &index_block, ro);
|
||||
Status s = GetOrReadIndexBlock(nullptr /* get_context */, &lookup_context,
|
||||
&index_block, ro);
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
|
@ -225,9 +224,10 @@ void PartitionIndexReader::EraseFromCacheBeforeDestruction(
|
|||
if (uncache_aggressiveness > 0) {
|
||||
CachableEntry<Block> top_level_block;
|
||||
|
||||
GetOrReadIndexBlock(/*no_io=*/true, /*get_context=*/nullptr,
|
||||
/*lookup_context=*/nullptr, &top_level_block,
|
||||
ReadOptions{})
|
||||
ReadOptions ro_no_io;
|
||||
ro_no_io.read_tier = ReadTier::kBlockCacheTier;
|
||||
GetOrReadIndexBlock(/*get_context=*/nullptr,
|
||||
/*lookup_context=*/nullptr, &top_level_block, ro_no_io)
|
||||
.PermitUncheckedError();
|
||||
|
||||
if (!partition_map_.empty()) {
|
||||
|
|
|
@ -77,9 +77,8 @@ Status UncompressionDictReader::ReadUncompressionDictionary(
|
|||
}
|
||||
|
||||
Status UncompressionDictReader::GetOrReadUncompressionDictionary(
|
||||
FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro, bool no_io,
|
||||
bool verify_checksums, GetContext* get_context,
|
||||
BlockCacheLookupContext* lookup_context,
|
||||
FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
|
||||
GetContext* get_context, BlockCacheLookupContext* lookup_context,
|
||||
CachableEntry<UncompressionDict>* uncompression_dict) const {
|
||||
assert(uncompression_dict);
|
||||
|
||||
|
@ -88,14 +87,7 @@ Status UncompressionDictReader::GetOrReadUncompressionDictionary(
|
|||
return Status::OK();
|
||||
}
|
||||
|
||||
ReadOptions read_options;
|
||||
if (no_io) {
|
||||
read_options.read_tier = kBlockCacheTier;
|
||||
}
|
||||
read_options.verify_checksums = verify_checksums;
|
||||
read_options.io_activity = ro.io_activity;
|
||||
|
||||
return ReadUncompressionDictionary(table_, prefetch_buffer, read_options,
|
||||
return ReadUncompressionDictionary(table_, prefetch_buffer, ro,
|
||||
cache_dictionary_blocks(), get_context,
|
||||
lookup_context, uncompression_dict);
|
||||
}
|
||||
|
|
|
@ -32,9 +32,8 @@ class UncompressionDictReader {
|
|||
std::unique_ptr<UncompressionDictReader>* uncompression_dict_reader);
|
||||
|
||||
Status GetOrReadUncompressionDictionary(
|
||||
FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro, bool no_io,
|
||||
bool verify_checksums, GetContext* get_context,
|
||||
BlockCacheLookupContext* lookup_context,
|
||||
FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
|
||||
GetContext* get_context, BlockCacheLookupContext* lookup_context,
|
||||
CachableEntry<UncompressionDict>* uncompression_dict) const;
|
||||
|
||||
size_t ApproximateMemoryUsage() const;
|
||||
|
|
Loading…
Reference in New Issue