mirror of https://github.com/facebook/rocksdb.git
Updated NewDataBlockIterator to not fetch compression dict for non-data blocks (#10310)
Summary: During MyShadow testing, ajkr helped me find out that with partitioned index and dictionary compression enabled, `PartitionedIndexIterator::InitPartitionedIndexBlock()` spent a considerable amount of time (1-2% CPU) on fetching the uncompression dictionary. Fetching the uncompression dict was not needed since the index blocks were not compressed (and even if they were, they use the empty dictionary). This should only affect use cases with partitioned index, dictionary compression, and without the uncompression dictionary pinned. This PR updates NewDataBlockIterator to not fetch the uncompression dictionary when it is not for data blocks. Pull Request resolved: https://github.com/facebook/rocksdb/pull/10310 Test Plan: 1. `make check` 2. Perf benchmark: 1.5% (143950 -> 146176) improvement in ops/sec for the partitioned index + dict compression benchmark. For the default config without partitioned index and without dict compression, there is no regression in readrandom perf across multiple runs of db_bench. 
``` # Set up for partitioned index with dictionary compression TEST_TMPDIR=/dev/shm ./db_bench_main -benchmarks=filluniquerandom,compact -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -partition_index=true -compression_max_dict_bytes=16384 -compression_zstd_max_train_bytes=1638400 # Pre PR TEST_TMPDIR=/dev/shm ./db_bench_main -use_existing_db=true -benchmarks=readrandom[-X50] -partition_index=true readrandom [AVG 50 runs] : 143950 (± 1108) ops/sec; 15.9 (± 0.1) MB/sec readrandom [MEDIAN 50 runs] : 144406 ops/sec; 16.0 MB/sec # Post PR TEST_TMPDIR=/dev/shm ./db_bench_opt -use_existing_db=true -benchmarks=readrandom[-X50] -partition_index=true readrandom [AVG 50 runs] : 146176 (± 1121) ops/sec; 16.2 (± 0.1) MB/sec readrandom [MEDIAN 50 runs] : 146014 ops/sec; 16.2 MB/sec # Set up for no partitioned index and no dictionary compression TEST_TMPDIR=/dev/shm/baseline ./db_bench_main -benchmarks=filluniquerandom,compact -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false # Pre PR TEST_TMPDIR=/dev/shm/baseline/ ./db_bench_main --use_existing_db=true "--benchmarks=readrandom[-X50]" readrandom [AVG 50 runs] : 158546 (± 1000) ops/sec; 17.5 (± 0.1) MB/sec readrandom [MEDIAN 50 runs] : 158280 ops/sec; 17.5 MB/sec # Post PR TEST_TMPDIR=/dev/shm/baseline/ ./db_bench_opt --use_existing_db=true "--benchmarks=readrandom[-X50]" readrandom [AVG 50 runs] : 161061 (± 1520) ops/sec; 17.8 (± 0.2) MB/sec readrandom [MEDIAN 50 runs] : 161596 ops/sec; 17.9 MB/sec ``` Reviewed By: ajkr Differential Revision: D37631358 Pulled By: cbi42 fbshipit-source-id: 6ca2665e270e63871968e061ba4a99d3136785d9
This commit is contained in:
parent
0ff7713112
commit
f9cfc6a808
|
@ -36,8 +36,9 @@ TBlockIter* BlockBasedTable::NewDataBlockIterator(
|
|||
return iter;
|
||||
}
|
||||
|
||||
CachableEntry<UncompressionDict> uncompression_dict;
|
||||
if (rep_->uncompression_dict_reader) {
|
||||
CachableEntry<Block> block;
|
||||
if (rep_->uncompression_dict_reader && block_type == BlockType::kData) {
|
||||
CachableEntry<UncompressionDict> uncompression_dict;
|
||||
const bool no_io = (ro.read_tier == kBlockCacheTier);
|
||||
s = rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
|
||||
prefetch_buffer, no_io, ro.verify_checksums, get_context,
|
||||
|
@ -46,18 +47,20 @@ TBlockIter* BlockBasedTable::NewDataBlockIterator(
|
|||
iter->Invalidate(s);
|
||||
return iter;
|
||||
}
|
||||
const UncompressionDict& dict = uncompression_dict.GetValue()
|
||||
? *uncompression_dict.GetValue()
|
||||
: UncompressionDict::GetEmptyDict();
|
||||
s = RetrieveBlock(prefetch_buffer, ro, handle, dict, &block, block_type,
|
||||
get_context, lookup_context, for_compaction,
|
||||
/* use_cache */ true, /* wait_for_cache */ true,
|
||||
async_read);
|
||||
} else {
|
||||
s = RetrieveBlock(
|
||||
prefetch_buffer, ro, handle, UncompressionDict::GetEmptyDict(), &block,
|
||||
block_type, get_context, lookup_context, for_compaction,
|
||||
/* use_cache */ true, /* wait_for_cache */ true, async_read);
|
||||
}
|
||||
|
||||
const UncompressionDict& dict = uncompression_dict.GetValue()
|
||||
? *uncompression_dict.GetValue()
|
||||
: UncompressionDict::GetEmptyDict();
|
||||
|
||||
CachableEntry<Block> block;
|
||||
s = RetrieveBlock(prefetch_buffer, ro, handle, dict, &block, block_type,
|
||||
get_context, lookup_context, for_compaction,
|
||||
/* use_cache */ true, /* wait_for_cache */ true,
|
||||
async_read);
|
||||
|
||||
if (s.IsTryAgain() && async_read) {
|
||||
return iter;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue