mirror of
https://github.com/facebook/rocksdb.git
synced 2024-11-28 05:43:50 +00:00
8ea0a2c1bd
Summary: Implement the ```WaitAll()``` interface in ```LRUCache``` to allow callers to issue multiple lookups in parallel and wait for all of them to complete. Modify ```MultiGet``` to use this to parallelize the secondary cache lookups in order to reduce the overall latency. A call to ```cache->Lookup()``` returns a handle that has an incomplete value (nullptr), and the caller can call ```cache->IsReady()``` to check whether the lookup is complete, and pass a vector of handles to ```WaitAll``` to wait for completion. If any of the lookups fail, ```MultiGet``` will read the block from the SST file. Another change in this PR is to rename ```SecondaryCacheHandle``` to ```SecondaryCacheResultHandle``` as it more accurately describes the return result of the secondary cache lookup, which is more like a future. Tests: 1. Add unit tests in lru_cache_test 2. Benchmark results with no secondary cache configured Master - ``` readrandom : 41.175 micros/op 388562 ops/sec; 106.7 MB/s (7277999 of 7277999 found) readrandom : 41.217 micros/op 388160 ops/sec; 106.6 MB/s (7274999 of 7274999 found) multireadrandom : 10.309 micros/op 1552082 ops/sec; (28908992 of 28908992 found) multireadrandom : 10.321 micros/op 1550218 ops/sec; (29081984 of 29081984 found) ``` This PR - ``` readrandom : 41.158 micros/op 388723 ops/sec; 106.8 MB/s (7290999 of 7290999 found) readrandom : 41.185 micros/op 388463 ops/sec; 106.7 MB/s (7287999 of 7287999 found) multireadrandom : 10.277 micros/op 1556801 ops/sec; (29346944 of 29346944 found) multireadrandom : 10.253 micros/op 1560539 ops/sec; (29274944 of 29274944 found) ``` Pull Request resolved: https://github.com/facebook/rocksdb/pull/8405 Reviewed By: zhichao-cao Differential Revision: D29190509 Pulled By: anand1976 fbshipit-source-id: 6f8eff6246712af8a297cfe22ea0d1c3b2a01bb0
121 lines
3.9 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
// (found in the LICENSE.Apache file in the root directory).
|
|
//
|
|
|
|
#include "table/block_based/uncompression_dict_reader.h"
|
|
#include "monitoring/perf_context_imp.h"
|
|
#include "table/block_based/block_based_table_reader.h"
|
|
#include "util/compression.h"
|
|
|
|
namespace ROCKSDB_NAMESPACE {
|
|
|
|
// Factory: constructs an UncompressionDictReader for `table`, optionally
// reading the compression dictionary block eagerly (and keeping a pinned
// reference to it) so later lookups can be served without I/O.
Status UncompressionDictReader::Create(
    const BlockBasedTable* table, const ReadOptions& ro,
    FilePrefetchBuffer* prefetch_buffer, bool use_cache, bool prefetch,
    bool pin, BlockCacheLookupContext* lookup_context,
    std::unique_ptr<UncompressionDictReader>* uncompression_dict_reader) {
  assert(table);
  assert(table->get_rep());
  assert(!pin || prefetch);  // pinning is only meaningful when prefetching
  assert(uncompression_dict_reader);

  CachableEntry<UncompressionDict> dict;

  // Read the dictionary up front when prefetching was requested, or when
  // there is no block cache to serve it from lazily later on.
  const bool read_eagerly = prefetch || !use_cache;
  if (read_eagerly) {
    const Status read_status = ReadUncompressionDictionary(
        table, prefetch_buffer, ro, use_cache, nullptr /* get_context */,
        lookup_context, &dict);
    if (!read_status.ok()) {
      return read_status;
    }

    // The block went into the cache but we are not pinning it: release our
    // reference now; it can be fetched from the cache again on demand.
    if (use_cache && !pin) {
      dict.Reset();
    }
  }

  uncompression_dict_reader->reset(
      new UncompressionDictReader(table, std::move(dict)));

  return Status::OK();
}
|
|
|
|
// Retrieves the compression dictionary block for `table` into
// `uncompression_dict`, going through the block cache when `use_cache` is
// set. Logs a warning and propagates the status on failure.
Status UncompressionDictReader::ReadUncompressionDictionary(
    const BlockBasedTable* table, FilePrefetchBuffer* prefetch_buffer,
    const ReadOptions& read_options, bool use_cache, GetContext* get_context,
    BlockCacheLookupContext* lookup_context,
    CachableEntry<UncompressionDict>* uncompression_dict) {
  // TODO: add perf counter for compression dictionary read time

  assert(table);
  assert(uncompression_dict);
  assert(uncompression_dict->IsEmpty());

  const BlockBasedTable::Rep* const table_rep = table->get_rep();
  assert(table_rep);
  assert(!table_rep->compression_dict_handle.IsNull());

  const Status retrieve_status = table->RetrieveBlock(
      prefetch_buffer, read_options, table_rep->compression_dict_handle,
      UncompressionDict::GetEmptyDict(), uncompression_dict,
      BlockType::kCompressionDictionary, get_context, lookup_context,
      /* for_compaction */ false, use_cache, /* wait_for_cache */ true);

  if (retrieve_status.ok()) {
    return retrieve_status;
  }

  ROCKS_LOG_WARN(table_rep->ioptions.logger,
                 "Encountered error while reading data from compression "
                 "dictionary block %s",
                 retrieve_status.ToString().c_str());

  return retrieve_status;
}
|
|
|
|
// Returns the dictionary: serves it from the pinned member copy when one
// was retained at construction time, otherwise reads it (from the block
// cache, or from the file unless `no_io` forbids that).
Status UncompressionDictReader::GetOrReadUncompressionDictionary(
    FilePrefetchBuffer* prefetch_buffer, bool no_io, GetContext* get_context,
    BlockCacheLookupContext* lookup_context,
    CachableEntry<UncompressionDict>* uncompression_dict) const {
  assert(uncompression_dict);

  // Fast path: hand out a non-owning reference to the pinned dictionary.
  if (!uncompression_dict_.IsEmpty()) {
    uncompression_dict->SetUnownedValue(uncompression_dict_.GetValue());
    return Status::OK();
  }

  ReadOptions ro;
  if (no_io) {
    // Restrict the lookup to the block cache; never touch the file.
    ro.read_tier = kBlockCacheTier;
  }

  return ReadUncompressionDictionary(table_, prefetch_buffer, ro,
                                     cache_dictionary_blocks(), get_context,
                                     lookup_context, uncompression_dict);
}
|
|
|
|
// Approximates this reader's heap footprint: the owned dictionary (if any)
// plus the allocation backing the reader object itself.
size_t UncompressionDictReader::ApproximateMemoryUsage() const {
  assert(!uncompression_dict_.GetOwnValue() ||
         uncompression_dict_.GetValue() != nullptr);

  // Only charge for the dictionary when we own it; an unowned (cached)
  // dictionary is accounted for by the block cache.
  size_t usage = 0;
  if (uncompression_dict_.GetOwnValue()) {
    usage = uncompression_dict_.GetValue()->ApproximateMemoryUsage();
  }

#ifdef ROCKSDB_MALLOC_USABLE_SIZE
  // Prefer the allocator-reported size of this object over sizeof.
  usage += malloc_usable_size(const_cast<UncompressionDictReader*>(this));
#else
  usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE

  return usage;
}
|
|
|
|
// Whether dictionary blocks should go through the block cache; they follow
// the table's index/filter caching policy.
bool UncompressionDictReader::cache_dictionary_blocks() const {
  assert(table_);
  assert(table_->get_rep());

  const auto& table_options = table_->get_rep()->table_options;
  return table_options.cache_index_and_filter_blocks;
}
|
|
|
|
} // namespace ROCKSDB_NAMESPACE
|