// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//

#include <sstream>

#include "monitoring/perf_context_imp.h"

namespace ROCKSDB_NAMESPACE {

#if defined(NPERF_CONTEXT)
// Should not be used because the counters are not thread-safe.
// Put here just to make get_perf_context() simple without ifdef.
PerfContext perf_context;
#else
thread_local PerfContext perf_context;
#endif

PerfContext* get_perf_context() {
  return &perf_context;
}
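
// Typical caller-side usage, sketched only as an illustration (SetPerfLevel(),
// PerfLevel, and the counter names are taken from the public perf_level.h /
// perf_context.h headers; `db` and `value` below are placeholders):
//
//   SetPerfLevel(PerfLevel::kEnableTimeExceptForMutex);
//   get_perf_context()->Reset();
//   db->Get(ReadOptions(), "some_key", &value);
//   uint64_t blocks_read = get_perf_context()->block_read_count;
//   uint64_t block_read_nanos = get_perf_context()->block_read_time;
//
// Which counters are populated depends on the chosen perf level and on
// whether the library was built with NPERF_CONTEXT defined.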

PerfContext::~PerfContext() {
#if !defined(NPERF_CONTEXT) && !defined(OS_SOLARIS)
  ClearPerLevelPerfContext();
#endif
}
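
// Per-level counters are opt-in. A minimal sketch of the intended pattern,
// assuming the Enable/Disable/Clear helpers declared in the public
// perf_context.h header (`db` and `value` are placeholders):
//
//   get_perf_context()->EnablePerLevelPerfContext();
//   db->Get(ReadOptions(), "some_key", &value);
//   // inspect (*get_perf_context()->level_to_perf_context), keyed by level
//   get_perf_context()->DisablePerLevelPerfContext();
//   get_perf_context()->ClearPerLevelPerfContext();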

PerfContext::PerfContext(const PerfContext& other) {
#ifdef NPERF_CONTEXT
  (void)other;
#else
  user_key_comparison_count = other.user_key_comparison_count;
  block_cache_hit_count = other.block_cache_hit_count;
  block_read_count = other.block_read_count;
  block_read_byte = other.block_read_byte;
  block_read_time = other.block_read_time;
  block_cache_index_hit_count = other.block_cache_index_hit_count;
  block_cache_standalone_handle_count =
      other.block_cache_standalone_handle_count;
  block_cache_real_handle_count = other.block_cache_real_handle_count;
  index_block_read_count = other.index_block_read_count;
  block_cache_filter_hit_count = other.block_cache_filter_hit_count;
  filter_block_read_count = other.filter_block_read_count;
  compression_dict_block_read_count = other.compression_dict_block_read_count;
  secondary_cache_hit_count = other.secondary_cache_hit_count;
  compressed_sec_cache_insert_real_count =
      other.compressed_sec_cache_insert_real_count;
  compressed_sec_cache_insert_dummy_count =
      other.compressed_sec_cache_insert_dummy_count;
  compressed_sec_cache_uncompressed_bytes =
      other.compressed_sec_cache_uncompressed_bytes;
  compressed_sec_cache_compressed_bytes =
      other.compressed_sec_cache_compressed_bytes;
  block_checksum_time = other.block_checksum_time;
  block_decompress_time = other.block_decompress_time;
  get_read_bytes = other.get_read_bytes;
  multiget_read_bytes = other.multiget_read_bytes;
  iter_read_bytes = other.iter_read_bytes;

  blob_cache_hit_count = other.blob_cache_hit_count;
  blob_read_count = other.blob_read_count;
  blob_read_byte = other.blob_read_byte;
  blob_read_time = other.blob_read_time;
  blob_checksum_time = other.blob_checksum_time;
  blob_decompress_time = other.blob_decompress_time;

  internal_key_skipped_count = other.internal_key_skipped_count;
  internal_delete_skipped_count = other.internal_delete_skipped_count;
  internal_recent_skipped_count = other.internal_recent_skipped_count;
  internal_merge_count = other.internal_merge_count;
  internal_range_del_reseek_count = other.internal_range_del_reseek_count;
  write_wal_time = other.write_wal_time;
  get_snapshot_time = other.get_snapshot_time;
  get_from_memtable_time = other.get_from_memtable_time;
  get_from_memtable_count = other.get_from_memtable_count;
  get_post_process_time = other.get_post_process_time;
  get_from_output_files_time = other.get_from_output_files_time;
  seek_on_memtable_time = other.seek_on_memtable_time;
  seek_on_memtable_count = other.seek_on_memtable_count;
  next_on_memtable_count = other.next_on_memtable_count;
  prev_on_memtable_count = other.prev_on_memtable_count;
  seek_child_seek_time = other.seek_child_seek_time;
  seek_child_seek_count = other.seek_child_seek_count;
  seek_min_heap_time = other.seek_min_heap_time;
  seek_internal_seek_time = other.seek_internal_seek_time;
  find_next_user_entry_time = other.find_next_user_entry_time;
  write_pre_and_post_process_time = other.write_pre_and_post_process_time;
  write_memtable_time = other.write_memtable_time;
  write_delay_time = other.write_delay_time;
  write_thread_wait_nanos = other.write_thread_wait_nanos;
  write_scheduling_flushes_compactions_time =
      other.write_scheduling_flushes_compactions_time;
  db_mutex_lock_nanos = other.db_mutex_lock_nanos;
  db_condition_wait_nanos = other.db_condition_wait_nanos;
  merge_operator_time_nanos = other.merge_operator_time_nanos;
  read_index_block_nanos = other.read_index_block_nanos;
  read_filter_block_nanos = other.read_filter_block_nanos;
  new_table_block_iter_nanos = other.new_table_block_iter_nanos;
  new_table_iterator_nanos = other.new_table_iterator_nanos;
  block_seek_nanos = other.block_seek_nanos;
  find_table_nanos = other.find_table_nanos;
  bloom_memtable_hit_count = other.bloom_memtable_hit_count;
  bloom_memtable_miss_count = other.bloom_memtable_miss_count;
  bloom_sst_hit_count = other.bloom_sst_hit_count;
  bloom_sst_miss_count = other.bloom_sst_miss_count;
  key_lock_wait_time = other.key_lock_wait_time;
  key_lock_wait_count = other.key_lock_wait_count;

  env_new_sequential_file_nanos = other.env_new_sequential_file_nanos;
  env_new_random_access_file_nanos = other.env_new_random_access_file_nanos;
  env_new_writable_file_nanos = other.env_new_writable_file_nanos;
  env_reuse_writable_file_nanos = other.env_reuse_writable_file_nanos;
  env_new_random_rw_file_nanos = other.env_new_random_rw_file_nanos;
  env_new_directory_nanos = other.env_new_directory_nanos;
  env_file_exists_nanos = other.env_file_exists_nanos;
  env_get_children_nanos = other.env_get_children_nanos;
  env_get_children_file_attributes_nanos =
      other.env_get_children_file_attributes_nanos;
  env_delete_file_nanos = other.env_delete_file_nanos;
  env_create_dir_nanos = other.env_create_dir_nanos;
  env_create_dir_if_missing_nanos = other.env_create_dir_if_missing_nanos;
  env_delete_dir_nanos = other.env_delete_dir_nanos;
  env_get_file_size_nanos = other.env_get_file_size_nanos;
  env_get_file_modification_time_nanos =
      other.env_get_file_modification_time_nanos;
  env_rename_file_nanos = other.env_rename_file_nanos;
  env_link_file_nanos = other.env_link_file_nanos;
  env_lock_file_nanos = other.env_lock_file_nanos;
  env_unlock_file_nanos = other.env_unlock_file_nanos;
  env_new_logger_nanos = other.env_new_logger_nanos;
  get_cpu_nanos = other.get_cpu_nanos;
  iter_next_cpu_nanos = other.iter_next_cpu_nanos;
  iter_prev_cpu_nanos = other.iter_prev_cpu_nanos;
  iter_seek_cpu_nanos = other.iter_seek_cpu_nanos;
  number_async_seek = other.number_async_seek;
  if (per_level_perf_context_enabled && level_to_perf_context != nullptr) {
    ClearPerLevelPerfContext();
  }
  if (other.level_to_perf_context != nullptr) {
    level_to_perf_context = new std::map<uint32_t, PerfContextByLevel>();
    *level_to_perf_context = *other.level_to_perf_context;
  }
  per_level_perf_context_enabled = other.per_level_perf_context_enabled;
#endif
}

PerfContext::PerfContext(PerfContext&& other) noexcept {
#ifdef NPERF_CONTEXT
  (void)other;
#else
  user_key_comparison_count = other.user_key_comparison_count;
  block_cache_hit_count = other.block_cache_hit_count;
  block_read_count = other.block_read_count;
  block_read_byte = other.block_read_byte;
  block_read_time = other.block_read_time;
  block_cache_index_hit_count = other.block_cache_index_hit_count;
  block_cache_standalone_handle_count =
      other.block_cache_standalone_handle_count;
  block_cache_real_handle_count = other.block_cache_real_handle_count;
  index_block_read_count = other.index_block_read_count;
  block_cache_filter_hit_count = other.block_cache_filter_hit_count;
  filter_block_read_count = other.filter_block_read_count;
  compression_dict_block_read_count = other.compression_dict_block_read_count;
  secondary_cache_hit_count = other.secondary_cache_hit_count;
  compressed_sec_cache_insert_real_count =
      other.compressed_sec_cache_insert_real_count;
  compressed_sec_cache_insert_dummy_count =
      other.compressed_sec_cache_insert_dummy_count;
  compressed_sec_cache_uncompressed_bytes =
      other.compressed_sec_cache_uncompressed_bytes;
  compressed_sec_cache_compressed_bytes =
      other.compressed_sec_cache_compressed_bytes;
  block_checksum_time = other.block_checksum_time;
  block_decompress_time = other.block_decompress_time;
  get_read_bytes = other.get_read_bytes;
  multiget_read_bytes = other.multiget_read_bytes;
  iter_read_bytes = other.iter_read_bytes;

  blob_cache_hit_count = other.blob_cache_hit_count;
  blob_read_count = other.blob_read_count;
  blob_read_byte = other.blob_read_byte;
  blob_read_time = other.blob_read_time;
  blob_checksum_time = other.blob_checksum_time;
  blob_decompress_time = other.blob_decompress_time;

  internal_key_skipped_count = other.internal_key_skipped_count;
  internal_delete_skipped_count = other.internal_delete_skipped_count;
  internal_recent_skipped_count = other.internal_recent_skipped_count;
  internal_merge_count = other.internal_merge_count;
  internal_range_del_reseek_count = other.internal_range_del_reseek_count;
  write_wal_time = other.write_wal_time;
  get_snapshot_time = other.get_snapshot_time;
  get_from_memtable_time = other.get_from_memtable_time;
  get_from_memtable_count = other.get_from_memtable_count;
  get_post_process_time = other.get_post_process_time;
  get_from_output_files_time = other.get_from_output_files_time;
  seek_on_memtable_time = other.seek_on_memtable_time;
  seek_on_memtable_count = other.seek_on_memtable_count;
  next_on_memtable_count = other.next_on_memtable_count;
  prev_on_memtable_count = other.prev_on_memtable_count;
  seek_child_seek_time = other.seek_child_seek_time;
  seek_child_seek_count = other.seek_child_seek_count;
  seek_min_heap_time = other.seek_min_heap_time;
  seek_internal_seek_time = other.seek_internal_seek_time;
  find_next_user_entry_time = other.find_next_user_entry_time;
  write_pre_and_post_process_time = other.write_pre_and_post_process_time;
  write_memtable_time = other.write_memtable_time;
  write_delay_time = other.write_delay_time;
  write_thread_wait_nanos = other.write_thread_wait_nanos;
  write_scheduling_flushes_compactions_time =
      other.write_scheduling_flushes_compactions_time;
  db_mutex_lock_nanos = other.db_mutex_lock_nanos;
  db_condition_wait_nanos = other.db_condition_wait_nanos;
  merge_operator_time_nanos = other.merge_operator_time_nanos;
  read_index_block_nanos = other.read_index_block_nanos;
  read_filter_block_nanos = other.read_filter_block_nanos;
  new_table_block_iter_nanos = other.new_table_block_iter_nanos;
  new_table_iterator_nanos = other.new_table_iterator_nanos;
  block_seek_nanos = other.block_seek_nanos;
  find_table_nanos = other.find_table_nanos;
  bloom_memtable_hit_count = other.bloom_memtable_hit_count;
  bloom_memtable_miss_count = other.bloom_memtable_miss_count;
  bloom_sst_hit_count = other.bloom_sst_hit_count;
  bloom_sst_miss_count = other.bloom_sst_miss_count;
  key_lock_wait_time = other.key_lock_wait_time;
  key_lock_wait_count = other.key_lock_wait_count;

  env_new_sequential_file_nanos = other.env_new_sequential_file_nanos;
  env_new_random_access_file_nanos = other.env_new_random_access_file_nanos;
  env_new_writable_file_nanos = other.env_new_writable_file_nanos;
  env_reuse_writable_file_nanos = other.env_reuse_writable_file_nanos;
  env_new_random_rw_file_nanos = other.env_new_random_rw_file_nanos;
  env_new_directory_nanos = other.env_new_directory_nanos;
  env_file_exists_nanos = other.env_file_exists_nanos;
  env_get_children_nanos = other.env_get_children_nanos;
  env_get_children_file_attributes_nanos =
      other.env_get_children_file_attributes_nanos;
  env_delete_file_nanos = other.env_delete_file_nanos;
  env_create_dir_nanos = other.env_create_dir_nanos;
  env_create_dir_if_missing_nanos = other.env_create_dir_if_missing_nanos;
  env_delete_dir_nanos = other.env_delete_dir_nanos;
  env_get_file_size_nanos = other.env_get_file_size_nanos;
  env_get_file_modification_time_nanos =
      other.env_get_file_modification_time_nanos;
  env_rename_file_nanos = other.env_rename_file_nanos;
  env_link_file_nanos = other.env_link_file_nanos;
  env_lock_file_nanos = other.env_lock_file_nanos;
  env_unlock_file_nanos = other.env_unlock_file_nanos;
  env_new_logger_nanos = other.env_new_logger_nanos;
  get_cpu_nanos = other.get_cpu_nanos;
  iter_next_cpu_nanos = other.iter_next_cpu_nanos;
  iter_prev_cpu_nanos = other.iter_prev_cpu_nanos;
  iter_seek_cpu_nanos = other.iter_seek_cpu_nanos;
  number_async_seek = other.number_async_seek;
  if (per_level_perf_context_enabled && level_to_perf_context != nullptr) {
    ClearPerLevelPerfContext();
  }
  if (other.level_to_perf_context != nullptr) {
    level_to_perf_context = other.level_to_perf_context;
    other.level_to_perf_context = nullptr;
  }
  per_level_perf_context_enabled = other.per_level_perf_context_enabled;
#endif
}

// TODO(Zhongyi): reduce code duplication between copy constructor and
// assignment operator
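// (One possible shape for that, sketched purely as an illustration with a
// hypothetical private helper named CopyFrom(); the code below does not do
// this and instead assigns each member explicitly:
//
//   void PerfContext::CopyFrom(const PerfContext& other) { /* assignments */ }
//   PerfContext::PerfContext(const PerfContext& other) { CopyFrom(other); }
//   PerfContext& PerfContext::operator=(const PerfContext& other) {
//     CopyFrom(other);
//     return *this;
//   }
// )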
PerfContext& PerfContext::operator=(const PerfContext& other) {
#ifdef NPERF_CONTEXT
  (void)other;
#else
  user_key_comparison_count = other.user_key_comparison_count;
  block_cache_hit_count = other.block_cache_hit_count;
  block_read_count = other.block_read_count;
  block_read_byte = other.block_read_byte;
  block_read_time = other.block_read_time;
  block_cache_index_hit_count = other.block_cache_index_hit_count;
  block_cache_standalone_handle_count =
      other.block_cache_standalone_handle_count;
  block_cache_real_handle_count = other.block_cache_real_handle_count;
  index_block_read_count = other.index_block_read_count;
  block_cache_filter_hit_count = other.block_cache_filter_hit_count;
  filter_block_read_count = other.filter_block_read_count;
  compression_dict_block_read_count = other.compression_dict_block_read_count;
  secondary_cache_hit_count = other.secondary_cache_hit_count;
  compressed_sec_cache_insert_real_count =
      other.compressed_sec_cache_insert_real_count;
  compressed_sec_cache_insert_dummy_count =
      other.compressed_sec_cache_insert_dummy_count;
  compressed_sec_cache_uncompressed_bytes =
      other.compressed_sec_cache_uncompressed_bytes;
  compressed_sec_cache_compressed_bytes =
      other.compressed_sec_cache_compressed_bytes;
  block_checksum_time = other.block_checksum_time;
  block_decompress_time = other.block_decompress_time;
  get_read_bytes = other.get_read_bytes;
  multiget_read_bytes = other.multiget_read_bytes;
  iter_read_bytes = other.iter_read_bytes;

  blob_cache_hit_count = other.blob_cache_hit_count;
  blob_read_count = other.blob_read_count;
  blob_read_byte = other.blob_read_byte;
  blob_read_time = other.blob_read_time;
  blob_checksum_time = other.blob_checksum_time;
  blob_decompress_time = other.blob_decompress_time;

  internal_key_skipped_count = other.internal_key_skipped_count;
  internal_delete_skipped_count = other.internal_delete_skipped_count;
  internal_recent_skipped_count = other.internal_recent_skipped_count;
  internal_merge_count = other.internal_merge_count;
  internal_range_del_reseek_count = other.internal_range_del_reseek_count;
  write_wal_time = other.write_wal_time;
  get_snapshot_time = other.get_snapshot_time;
  get_from_memtable_time = other.get_from_memtable_time;
  get_from_memtable_count = other.get_from_memtable_count;
  get_post_process_time = other.get_post_process_time;
  get_from_output_files_time = other.get_from_output_files_time;
  seek_on_memtable_time = other.seek_on_memtable_time;
  seek_on_memtable_count = other.seek_on_memtable_count;
  next_on_memtable_count = other.next_on_memtable_count;
  prev_on_memtable_count = other.prev_on_memtable_count;
  seek_child_seek_time = other.seek_child_seek_time;
  seek_child_seek_count = other.seek_child_seek_count;
  seek_min_heap_time = other.seek_min_heap_time;
  seek_internal_seek_time = other.seek_internal_seek_time;
  find_next_user_entry_time = other.find_next_user_entry_time;
  write_pre_and_post_process_time = other.write_pre_and_post_process_time;
  write_memtable_time = other.write_memtable_time;
  write_delay_time = other.write_delay_time;
  write_thread_wait_nanos = other.write_thread_wait_nanos;
  write_scheduling_flushes_compactions_time =
      other.write_scheduling_flushes_compactions_time;
  db_mutex_lock_nanos = other.db_mutex_lock_nanos;
  db_condition_wait_nanos = other.db_condition_wait_nanos;
  merge_operator_time_nanos = other.merge_operator_time_nanos;
  read_index_block_nanos = other.read_index_block_nanos;
  read_filter_block_nanos = other.read_filter_block_nanos;
  new_table_block_iter_nanos = other.new_table_block_iter_nanos;
  new_table_iterator_nanos = other.new_table_iterator_nanos;
  block_seek_nanos = other.block_seek_nanos;
  find_table_nanos = other.find_table_nanos;
  bloom_memtable_hit_count = other.bloom_memtable_hit_count;
  bloom_memtable_miss_count = other.bloom_memtable_miss_count;
  bloom_sst_hit_count = other.bloom_sst_hit_count;
  bloom_sst_miss_count = other.bloom_sst_miss_count;
  key_lock_wait_time = other.key_lock_wait_time;
  key_lock_wait_count = other.key_lock_wait_count;

  env_new_sequential_file_nanos = other.env_new_sequential_file_nanos;
  env_new_random_access_file_nanos = other.env_new_random_access_file_nanos;
  env_new_writable_file_nanos = other.env_new_writable_file_nanos;
  env_reuse_writable_file_nanos = other.env_reuse_writable_file_nanos;
  env_new_random_rw_file_nanos = other.env_new_random_rw_file_nanos;
  env_new_directory_nanos = other.env_new_directory_nanos;
  env_file_exists_nanos = other.env_file_exists_nanos;
  env_get_children_nanos = other.env_get_children_nanos;
  env_get_children_file_attributes_nanos =
      other.env_get_children_file_attributes_nanos;
  env_delete_file_nanos = other.env_delete_file_nanos;
  env_create_dir_nanos = other.env_create_dir_nanos;
  env_create_dir_if_missing_nanos = other.env_create_dir_if_missing_nanos;
  env_delete_dir_nanos = other.env_delete_dir_nanos;
  env_get_file_size_nanos = other.env_get_file_size_nanos;
  env_get_file_modification_time_nanos =
      other.env_get_file_modification_time_nanos;
  env_rename_file_nanos = other.env_rename_file_nanos;
  env_link_file_nanos = other.env_link_file_nanos;
  env_lock_file_nanos = other.env_lock_file_nanos;
  env_unlock_file_nanos = other.env_unlock_file_nanos;
  env_new_logger_nanos = other.env_new_logger_nanos;
  get_cpu_nanos = other.get_cpu_nanos;
  iter_next_cpu_nanos = other.iter_next_cpu_nanos;
  iter_prev_cpu_nanos = other.iter_prev_cpu_nanos;
  iter_seek_cpu_nanos = other.iter_seek_cpu_nanos;
  number_async_seek = other.number_async_seek;
  if (per_level_perf_context_enabled && level_to_perf_context != nullptr) {
    ClearPerLevelPerfContext();
  }
  if (other.level_to_perf_context != nullptr) {
    level_to_perf_context = new std::map<uint32_t, PerfContextByLevel>();
    *level_to_perf_context = *other.level_to_perf_context;
  }
  per_level_perf_context_enabled = other.per_level_perf_context_enabled;
#endif
  return *this;
}
|
|
|
|
|
[RocksDB] Added nano second stopwatch and new perf counters to track block read cost
Summary: The pupose of this diff is to expose per user-call level precise timing of block read, so that we can answer questions like: a Get() costs me 100ms, is that somehow related to loading blocks from file system, or sth else? We will answer that with EXACTLY how many blocks have been read, how much time was spent on transfering the bytes from os, how much time was spent on checksum verification and how much time was spent on block decompression, just for that one Get. A nano second stopwatch was introduced to track time with higher precision. The cost/precision of the stopwatch is also measured in unit-test. On my dev box, retrieving one time instance costs about 30ns, on average. The deviation of timing results is good enough to track 100ns-1us level events. And the overhead could be safely ignored for 100us level events (10000 instances/s), for example, a viewstate thrift call.
Test Plan: perf_context_test, also testing with viewstate shadow traffic.
Reviewers: dhruba
Reviewed By: dhruba
CC: leveldb, xjin
Differential Revision: https://reviews.facebook.net/D12351
2013-06-04 06:09:15 +00:00
|
|
|
void PerfContext::Reset() {
|
2017-06-03 00:12:39 +00:00
|
|
|
#ifndef NPERF_CONTEXT
|
[RocksDB] Added nano second stopwatch and new perf counters to track block read cost
Summary: The pupose of this diff is to expose per user-call level precise timing of block read, so that we can answer questions like: a Get() costs me 100ms, is that somehow related to loading blocks from file system, or sth else? We will answer that with EXACTLY how many blocks have been read, how much time was spent on transfering the bytes from os, how much time was spent on checksum verification and how much time was spent on block decompression, just for that one Get. A nano second stopwatch was introduced to track time with higher precision. The cost/precision of the stopwatch is also measured in unit-test. On my dev box, retrieving one time instance costs about 30ns, on average. The deviation of timing results is good enough to track 100ns-1us level events. And the overhead could be safely ignored for 100us level events (10000 instances/s), for example, a viewstate thrift call.
Test Plan: perf_context_test, also testing with viewstate shadow traffic.
Reviewers: dhruba
Reviewed By: dhruba
CC: leveldb, xjin
Differential Revision: https://reviews.facebook.net/D12351
2013-06-04 06:09:15 +00:00
|
|
|
user_key_comparison_count = 0;
|
|
|
|
block_cache_hit_count = 0;
|
|
|
|
block_read_count = 0;
|
|
|
|
block_read_byte = 0;
|
|
|
|
block_read_time = 0;
|
2018-12-07 23:04:20 +00:00
|
|
|
block_cache_index_hit_count = 0;
|
2022-09-08 23:35:57 +00:00
|
|
|
block_cache_standalone_handle_count = 0;
|
|
|
|
block_cache_real_handle_count = 0;
|
2018-12-07 23:04:20 +00:00
|
|
|
index_block_read_count = 0;
|
|
|
|
block_cache_filter_hit_count = 0;
|
|
|
|
filter_block_read_count = 0;
|
2019-01-24 02:11:08 +00:00
|
|
|
compression_dict_block_read_count = 0;
|
2021-08-20 22:16:33 +00:00
|
|
|
secondary_cache_hit_count = 0;
|
2022-09-08 23:35:57 +00:00
|
|
|
compressed_sec_cache_insert_real_count = 0;
|
|
|
|
compressed_sec_cache_insert_dummy_count = 0;
|
|
|
|
compressed_sec_cache_uncompressed_bytes = 0;
|
|
|
|
compressed_sec_cache_compressed_bytes = 0;
|
[RocksDB] Added nano second stopwatch and new perf counters to track block read cost
Summary: The pupose of this diff is to expose per user-call level precise timing of block read, so that we can answer questions like: a Get() costs me 100ms, is that somehow related to loading blocks from file system, or sth else? We will answer that with EXACTLY how many blocks have been read, how much time was spent on transfering the bytes from os, how much time was spent on checksum verification and how much time was spent on block decompression, just for that one Get. A nano second stopwatch was introduced to track time with higher precision. The cost/precision of the stopwatch is also measured in unit-test. On my dev box, retrieving one time instance costs about 30ns, on average. The deviation of timing results is good enough to track 100ns-1us level events. And the overhead could be safely ignored for 100us level events (10000 instances/s), for example, a viewstate thrift call.
Test Plan: perf_context_test, also testing with viewstate shadow traffic.
Reviewers: dhruba
Reviewed By: dhruba
CC: leveldb, xjin
Differential Revision: https://reviews.facebook.net/D12351
2013-06-04 06:09:15 +00:00
|
|
|
block_checksum_time = 0;
|
|
|
|
block_decompress_time = 0;
|
2017-08-18 18:40:36 +00:00
|
|
|
get_read_bytes = 0;
|
|
|
|
multiget_read_bytes = 0;
|
|
|
|
iter_read_bytes = 0;
|
2022-06-28 20:52:35 +00:00
|
|
|
|
|
|
|
blob_cache_hit_count = 0;
|
|
|
|
blob_read_count = 0;
|
|
|
|
blob_read_byte = 0;
|
|
|
|
blob_read_time = 0;
|
|
|
|
blob_checksum_time = 0;
|
|
|
|
blob_decompress_time = 0;
|
|
|
|
|
2013-10-02 17:28:25 +00:00
|
|
|
internal_key_skipped_count = 0;
|
|
|
|
internal_delete_skipped_count = 0;
|
2016-11-28 18:12:28 +00:00
|
|
|
internal_recent_skipped_count = 0;
|
|
|
|
internal_merge_count = 0;
|
Skip swaths of range tombstone covered keys in merging iterator (2022 edition) (#10449)
Summary:
Delete range logic is moved from `DBIter` to `MergingIterator`, and `MergingIterator` will seek to the end of a range deletion if possible instead of scanning through each key and check with `RangeDelAggregator`.
With the invariant that a key in level L (consider memtable as the first level, each immutable and L0 as a separate level) has a larger sequence number than all keys in any level >L, a range tombstone `[start, end)` from level L covers all keys in its range in any level >L. This property motivates optimizations in iterator:
- in `Seek(target)`, if level L has a range tombstone `[start, end)` that covers `target.UserKey`, then for all levels > L, we can do Seek() on `end` instead of `target` to skip some range tombstone covered keys.
- in `Next()/Prev()`, if the current key is covered by a range tombstone `[start, end)` from level L, we can do `Seek` to `end` for all levels > L.
This PR implements the above optimizations in `MergingIterator`. As all range tombstone covered keys are now skipped in `MergingIterator`, the range tombstone logic is removed from `DBIter`. The idea in this PR is similar to https://github.com/facebook/rocksdb/issues/7317, but this PR leaves `InternalIterator` interface mostly unchanged. **Credit**: the cascading seek optimization and the sentinel key (discussed below) are inspired by [Pebble](https://github.com/cockroachdb/pebble/blob/master/merging_iter.go) and suggested by ajkr in https://github.com/facebook/rocksdb/issues/7317. The two optimizations are mostly implemented in `SeekImpl()/SeekForPrevImpl()` and `IsNextDeleted()/IsPrevDeleted()` in `merging_iterator.cc`. See comments for each method for more detail.
One notable change is that the minHeap/maxHeap used by `MergingIterator` now contains range tombstone end keys besides point key iterators. This helps to reduce the number of key comparisons. For example, for a range tombstone `[start, end)`, a `start` and an `end` `HeapItem` are inserted into the heap. When a `HeapItem` for range tombstone start key is popped from the minHeap, we know this range tombstone becomes "active" in the sense that, before the range tombstone's end key is popped from the minHeap, all the keys popped from this heap is covered by the range tombstone's internal key range `[start, end)`.
Another major change, *delete range sentinel key*, is made to `LevelIterator`. Before this PR, when all point keys in an SST file are iterated through in `MergingIterator`, a level iterator would advance to the next SST file in its level. In the case when an SST file has a range tombstone that covers keys beyond the SST file's last point key, advancing to the next SST file would lose this range tombstone. Consequently, `MergingIterator` could return keys that should have been deleted by some range tombstone. We prevent this by pretending that file boundaries in each SST file are sentinel keys. A `LevelIterator` now only advance the file iterator once the sentinel key is processed.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10449
Test Plan:
- Added many unit tests in db_range_del_test
- Stress test: `./db_stress --readpercent=5 --prefixpercent=19 --writepercent=20 -delpercent=10 --iterpercent=44 --delrangepercent=2`
- Additional iterator stress test is added to verify against iterators against expected state: https://github.com/facebook/rocksdb/issues/10538. This is based on ajkr's previous attempt https://github.com/facebook/rocksdb/pull/5506#issuecomment-506021913.
```
python3 ./tools/db_crashtest.py blackbox --simple --write_buffer_size=524288 --target_file_size_base=524288 --max_bytes_for_level_base=2097152 --compression_type=none --max_background_compactions=8 --value_size_mult=33 --max_key=5000000 --interval=10 --duration=7200 --delrangepercent=3 --delpercent=9 --iterpercent=25 --writepercent=60 --readpercent=3 --prefixpercent=0 --num_iterations=1000 --range_deletion_width=100 --verify_iterator_with_expected_state_one_in=1
```
- Performance benchmark: I used a similar setup as in the blog [post](http://rocksdb.org/blog/2018/11/21/delete-range.html) that introduced DeleteRange, "a database with 5 million data keys, and 10000 range tombstones (ignoring those dropped during compaction) that were written in regular intervals after 4.5 million data keys were written". As expected, the performance with this PR depends on the range tombstone width.
```
# Setup:
TEST_TMPDIR=/dev/shm ./db_bench_main --benchmarks=fillrandom --writes=4500000 --num=5000000
TEST_TMPDIR=/dev/shm ./db_bench_main --benchmarks=overwrite --writes=500000 --num=5000000 --use_existing_db=true --writes_per_range_tombstone=50
# Scan entire DB
TEST_TMPDIR=/dev/shm ./db_bench_main --benchmarks=readseq[-X5] --use_existing_db=true --num=5000000 --disable_auto_compactions=true
# Short range scan (10 Next())
TEST_TMPDIR=/dev/shm/width-100/ ./db_bench_main --benchmarks=seekrandom[-X5] --use_existing_db=true --num=500000 --reads=100000 --seek_nexts=10 --disable_auto_compactions=true
# Long range scan (1000 Next())
TEST_TMPDIR=/dev/shm/width-100/ ./db_bench_main --benchmarks=seekrandom[-X5] --use_existing_db=true --num=500000 --reads=2500 --seek_nexts=1000 --disable_auto_compactions=true
```
Average over 10 runs (some of the slower tests had fewer runs):
For the first column (tombstone width), 0 means no range tombstone, 100-10000 is the width of each of the 10k range tombstones, and 1 means there is a single range tombstone in the entire DB (width 1000). The single-tombstone case checks for regression when there are very few range tombstones in the DB, since the no-tombstone case likely takes a different code path than runs with range tombstones.
- Scan entire DB
| tombstone width | Pre-PR ops/sec | Post-PR ops/sec | ±% |
| ------------- | ------------- | ------------- | ------------- |
| 0 range tombstone |2525600 (± 43564) |2486917 (± 33698) |-1.53% |
| 100 |1853835 (± 24736) |2073884 (± 32176) |+11.87% |
| 1000 |422415 (± 7466) |1115801 (± 22781) |+164.15% |
| 10000 |22384 (± 227) |227919 (± 6647) |+918.22% |
| 1 range tombstone |2176540 (± 39050) |2434954 (± 24563) |+11.87% |
- Short range scan
| tombstone width | Pre-PR ops/sec | Post-PR ops/sec | ±% |
| ------------- | ------------- | ------------- | ------------- |
| 0 range tombstone |35398 (± 533) |35338 (± 569) |-0.17% |
| 100 |28276 (± 664) |31684 (± 331) |+12.05% |
| 1000 |7637 (± 77) |25422 (± 277) |+232.88% |
| 10000 |1367 |28667 |+1997.07% |
| 1 range tombstone |32618 (± 581) |32748 (± 506) |+0.4% |
- Long range scan
| tombstone width | Pre-PR ops/sec | Post-PR ops/sec | ±% |
| ------------- | ------------- | ------------- | ------------- |
| 0 range tombstone |2262 (± 33) |2353 (± 20) |+4.02% |
| 100 |1696 (± 26) |1926 (± 18) |+13.56% |
| 1000 |410 (± 6) |1255 (± 29) |+206.1% |
| 10000 |25 |414 |+1556.0% |
| 1 range tombstone |1957 (± 30) |2185 (± 44) |+11.65% |
- Microbench does not show significant regression: https://gist.github.com/cbi42/59f280f85a59b678e7e5d8561e693b61
Reviewed By: ajkr
Differential Revision: D38450331
Pulled By: cbi42
fbshipit-source-id: b5ef12e8d8c289ed2e163ccdf277f5039b511fca
2022-09-02 16:51:19 +00:00
|
|
|
internal_range_del_reseek_count = 0;
|
2013-11-18 19:32:54 +00:00
|
|
|
write_wal_time = 0;
|
|
|
|
|
|
|
|
get_snapshot_time = 0;
|
|
|
|
get_from_memtable_time = 0;
|
|
|
|
get_from_memtable_count = 0;
|
|
|
|
get_post_process_time = 0;
|
|
|
|
get_from_output_files_time = 0;
|
2015-02-28 01:06:06 +00:00
|
|
|
seek_on_memtable_time = 0;
|
|
|
|
seek_on_memtable_count = 0;
|
2016-11-28 18:12:28 +00:00
|
|
|
next_on_memtable_count = 0;
|
|
|
|
prev_on_memtable_count = 0;
|
2013-11-18 19:32:54 +00:00
|
|
|
seek_child_seek_time = 0;
|
|
|
|
seek_child_seek_count = 0;
|
|
|
|
seek_min_heap_time = 0;
|
|
|
|
seek_internal_seek_time = 0;
|
|
|
|
find_next_user_entry_time = 0;
|
|
|
|
write_pre_and_post_process_time = 0;
|
|
|
|
write_memtable_time = 0;
|
2015-06-02 09:07:58 +00:00
|
|
|
write_delay_time = 0;
|
2018-04-24 00:53:27 +00:00
|
|
|
write_thread_wait_nanos = 0;
|
|
|
|
write_scheduling_flushes_compactions_time = 0;
|
2015-02-10 00:12:31 +00:00
|
|
|
db_mutex_lock_nanos = 0;
|
|
|
|
db_condition_wait_nanos = 0;
|
2015-03-03 18:59:36 +00:00
|
|
|
merge_operator_time_nanos = 0;
|
2015-07-08 23:34:48 +00:00
|
|
|
read_index_block_nanos = 0;
|
|
|
|
read_filter_block_nanos = 0;
|
|
|
|
new_table_block_iter_nanos = 0;
|
2015-07-10 23:09:10 +00:00
|
|
|
new_table_iterator_nanos = 0;
|
2015-07-08 23:34:48 +00:00
|
|
|
block_seek_nanos = 0;
|
|
|
|
find_table_nanos = 0;
|
2015-10-07 18:23:20 +00:00
|
|
|
bloom_memtable_hit_count = 0;
|
|
|
|
bloom_memtable_miss_count = 0;
|
|
|
|
bloom_sst_hit_count = 0;
|
|
|
|
bloom_sst_miss_count = 0;
|
2017-11-06 18:52:17 +00:00
|
|
|
key_lock_wait_time = 0;
|
|
|
|
key_lock_wait_count = 0;
|
2017-04-04 18:12:47 +00:00
|
|
|
|
|
|
|
env_new_sequential_file_nanos = 0;
|
|
|
|
env_new_random_access_file_nanos = 0;
|
|
|
|
env_new_writable_file_nanos = 0;
|
|
|
|
env_reuse_writable_file_nanos = 0;
|
|
|
|
env_new_random_rw_file_nanos = 0;
|
|
|
|
env_new_directory_nanos = 0;
|
|
|
|
env_file_exists_nanos = 0;
|
|
|
|
env_get_children_nanos = 0;
|
|
|
|
env_get_children_file_attributes_nanos = 0;
|
|
|
|
env_delete_file_nanos = 0;
|
|
|
|
env_create_dir_nanos = 0;
|
|
|
|
env_create_dir_if_missing_nanos = 0;
|
|
|
|
env_delete_dir_nanos = 0;
|
|
|
|
env_get_file_size_nanos = 0;
|
|
|
|
env_get_file_modification_time_nanos = 0;
|
|
|
|
env_rename_file_nanos = 0;
|
|
|
|
env_link_file_nanos = 0;
|
|
|
|
env_lock_file_nanos = 0;
|
|
|
|
env_unlock_file_nanos = 0;
|
|
|
|
env_new_logger_nanos = 0;
|
2018-12-20 20:00:40 +00:00
|
|
|
get_cpu_nanos = 0;
|
2019-03-26 23:20:52 +00:00
|
|
|
iter_next_cpu_nanos = 0;
|
|
|
|
iter_prev_cpu_nanos = 0;
|
|
|
|
iter_seek_cpu_nanos = 0;
|
2022-05-20 23:09:33 +00:00
|
|
|
number_async_seek = 0;
|
2018-10-17 18:18:00 +00:00
|
|
|
if (per_level_perf_context_enabled && level_to_perf_context) {
|
|
|
|
for (auto& kv : *level_to_perf_context) {
|
|
|
|
kv.second.Reset();
|
|
|
|
}
|
|
|
|
}
|
2014-04-08 17:58:07 +00:00
|
|
|
#endif
|
2013-06-04 06:09:15 +00:00
|
|
|
}
|
|
|
|
|
2016-02-04 00:04:10 +00:00
|
|
|
#define PERF_CONTEXT_OUTPUT(counter) \
|
2016-02-01 21:41:13 +00:00
|
|
|
if (!exclude_zero_counters || (counter > 0)) { \
|
|
|
|
ss << #counter << " = " << counter << ", "; \
|
|
|
|
}
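// Illustrative expansion (assumption: `ss` and `exclude_zero_counters` are in
// scope, as they are inside PerfContext::ToString() below); for example,
// PERF_CONTEXT_OUTPUT(block_read_count) becomes:
//   if (!exclude_zero_counters || (block_read_count > 0)) {
//     ss << "block_read_count" << " = " << block_read_count << ", ";
//   }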
|
2014-03-05 18:32:54 +00:00
|
|
|
|
2018-10-17 18:18:00 +00:00
|
|
|
#define PERF_CONTEXT_BY_LEVEL_OUTPUT_ONE_COUNTER(counter) \
|
|
|
|
if (per_level_perf_context_enabled && \
|
|
|
|
level_to_perf_context) { \
|
|
|
|
ss << #counter << " = "; \
|
|
|
|
for (auto& kv : *level_to_perf_context) { \
|
|
|
|
if (!exclude_zero_counters || (kv.second.counter > 0)) { \
|
|
|
|
ss << kv.second.counter << "@level" << kv.first << ", "; \
|
|
|
|
} \
|
|
|
|
} \
|
|
|
|
}
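// Illustrative expansion, again assuming the surrounding ToString() scope; for
// example, PERF_CONTEXT_BY_LEVEL_OUTPUT_ONE_COUNTER(bloom_filter_useful)
// becomes:
//   if (per_level_perf_context_enabled && level_to_perf_context) {
//     ss << "bloom_filter_useful" << " = ";
//     for (auto& kv : *level_to_perf_context) {
//       if (!exclude_zero_counters || (kv.second.bloom_filter_useful > 0)) {
//         ss << kv.second.bloom_filter_useful << "@level" << kv.first << ", ";
//       }
//     }
//   }
// producing output of the form "bloom_filter_useful = <count>@level<N>, ...".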
|
|
|
|
|
|
|
|
void PerfContextByLevel::Reset() {
|
|
|
|
#ifndef NPERF_CONTEXT
|
|
|
|
bloom_filter_useful = 0;
|
|
|
|
bloom_filter_full_positive = 0;
|
|
|
|
bloom_filter_full_true_positive = 0;
|
2018-12-21 21:15:47 +00:00
|
|
|
block_cache_hit_count = 0;
|
|
|
|
block_cache_miss_count = 0;
|
2018-10-17 18:18:00 +00:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2016-02-01 21:41:13 +00:00
|
|
|
std::string PerfContext::ToString(bool exclude_zero_counters) const {
|
2017-06-03 00:12:39 +00:00
|
|
|
#ifdef NPERF_CONTEXT
|
2020-04-03 20:21:53 +00:00
|
|
|
(void)exclude_zero_counters;
|
2014-04-08 17:58:07 +00:00
|
|
|
return "";
|
|
|
|
#else
|
2014-03-05 18:32:54 +00:00
|
|
|
std::ostringstream ss;
|
2016-02-04 00:04:10 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(user_key_comparison_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(block_cache_hit_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(block_read_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(block_read_byte);
|
|
|
|
PERF_CONTEXT_OUTPUT(block_read_time);
|
2018-12-07 23:04:20 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(block_cache_index_hit_count);
|
2022-09-08 23:35:57 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(block_cache_standalone_handle_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(block_cache_real_handle_count);
|
2018-12-07 23:04:20 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(index_block_read_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(block_cache_filter_hit_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(filter_block_read_count);
|
2019-01-24 02:11:08 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(compression_dict_block_read_count);
|
2021-08-20 22:16:33 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(secondary_cache_hit_count);
|
2022-09-08 23:35:57 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(compressed_sec_cache_insert_real_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(compressed_sec_cache_insert_dummy_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(compressed_sec_cache_uncompressed_bytes);
|
|
|
|
PERF_CONTEXT_OUTPUT(compressed_sec_cache_compressed_bytes);
|
2016-02-04 00:04:10 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(block_checksum_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(block_decompress_time);
|
2017-08-18 18:40:36 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(get_read_bytes);
|
|
|
|
PERF_CONTEXT_OUTPUT(multiget_read_bytes);
|
|
|
|
PERF_CONTEXT_OUTPUT(iter_read_bytes);
|
2022-06-28 20:52:35 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(blob_cache_hit_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(blob_read_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(blob_read_byte);
|
|
|
|
PERF_CONTEXT_OUTPUT(blob_read_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(blob_checksum_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(blob_decompress_time);
|
2016-02-04 00:04:10 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(internal_key_skipped_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(internal_delete_skipped_count);
|
2016-11-28 18:12:28 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(internal_recent_skipped_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(internal_merge_count);
|
Skip swaths of range tombstone covered keys in merging iterator (2022 edition) (#10449)
Summary:
Delete range logic is moved from `DBIter` to `MergingIterator`, and `MergingIterator` will seek to the end of a range deletion if possible, instead of scanning through each key and checking it against `RangeDelAggregator`.
Reviewed By: ajkr
Differential Revision: D38450331
Pulled By: cbi42
fbshipit-source-id: b5ef12e8d8c289ed2e163ccdf277f5039b511fca
2022-09-02 16:51:19 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(internal_range_del_reseek_count);
|
2016-02-04 00:04:10 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(write_wal_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(get_snapshot_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(get_from_memtable_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(get_from_memtable_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(get_post_process_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(get_from_output_files_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(seek_on_memtable_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(seek_on_memtable_count);
|
2016-11-28 18:12:28 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(next_on_memtable_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(prev_on_memtable_count);
|
2016-02-04 00:04:10 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(seek_child_seek_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(seek_child_seek_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(seek_min_heap_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(seek_internal_seek_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(find_next_user_entry_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(write_pre_and_post_process_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(write_memtable_time);
|
2018-04-24 00:53:27 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(write_thread_wait_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(write_scheduling_flushes_compactions_time);
|
2016-02-04 00:04:10 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(db_mutex_lock_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(db_condition_wait_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(merge_operator_time_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(write_delay_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(read_index_block_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(read_filter_block_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(new_table_block_iter_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(new_table_iterator_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(block_seek_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(find_table_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(bloom_memtable_hit_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(bloom_memtable_miss_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(bloom_sst_hit_count);
|
|
|
|
PERF_CONTEXT_OUTPUT(bloom_sst_miss_count);
|
2017-11-06 18:52:17 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(key_lock_wait_time);
|
|
|
|
PERF_CONTEXT_OUTPUT(key_lock_wait_count);
|
2017-04-04 18:12:47 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(env_new_sequential_file_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_new_random_access_file_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_new_writable_file_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_reuse_writable_file_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_new_random_rw_file_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_new_directory_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_file_exists_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_get_children_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_get_children_file_attributes_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_delete_file_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_create_dir_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_create_dir_if_missing_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_delete_dir_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_get_file_size_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_get_file_modification_time_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_rename_file_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_link_file_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_lock_file_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_unlock_file_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(env_new_logger_nanos);
|
2018-12-20 20:00:40 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(get_cpu_nanos);
|
2019-03-26 23:20:52 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(iter_next_cpu_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(iter_prev_cpu_nanos);
|
|
|
|
PERF_CONTEXT_OUTPUT(iter_seek_cpu_nanos);
|
2022-05-20 23:09:33 +00:00
|
|
|
PERF_CONTEXT_OUTPUT(number_async_seek);
|
2018-10-17 18:18:00 +00:00
|
|
|
PERF_CONTEXT_BY_LEVEL_OUTPUT_ONE_COUNTER(bloom_filter_useful);
|
|
|
|
PERF_CONTEXT_BY_LEVEL_OUTPUT_ONE_COUNTER(bloom_filter_full_positive);
|
|
|
|
PERF_CONTEXT_BY_LEVEL_OUTPUT_ONE_COUNTER(bloom_filter_full_true_positive);
|
2018-12-21 21:15:47 +00:00
|
|
|
PERF_CONTEXT_BY_LEVEL_OUTPUT_ONE_COUNTER(block_cache_hit_count);
|
|
|
|
PERF_CONTEXT_BY_LEVEL_OUTPUT_ONE_COUNTER(block_cache_miss_count);
|
2019-09-03 18:21:47 +00:00
|
|
|
|
|
|
|
std::string str = ss.str();
|
|
|
|
str.erase(str.find_last_not_of(", ") + 1);
|
|
|
|
return str;
|
2014-04-08 17:58:07 +00:00
|
|
|
#endif
|
2014-03-05 18:32:54 +00:00
|
|
|
}
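For context, a minimal usage sketch showing how an application might collect and print these counters (the DB path, keys, and options are illustrative assumptions, not part of this file):
```
#include <iostream>
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/perf_context.h"
#include "rocksdb/perf_level.h"

int main() {
  rocksdb::DB* db = nullptr;
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/perf_context_demo", &db);
  if (!s.ok()) {
    std::cerr << s.ToString() << std::endl;
    return 1;
  }

  // Enable timing counters, then zero the context so the dump below reflects
  // only the operations that follow.
  rocksdb::SetPerfLevel(rocksdb::PerfLevel::kEnableTimeExceptForMutex);
  rocksdb::get_perf_context()->Reset();

  db->Put(rocksdb::WriteOptions(), "key", "value");
  std::string value;
  db->Get(rocksdb::ReadOptions(), "key", &value);

  // exclude_zero_counters=true keeps the output short.
  std::cout << rocksdb::get_perf_context()->ToString(
                   /*exclude_zero_counters=*/true)
            << std::endl;

  rocksdb::SetPerfLevel(rocksdb::PerfLevel::kDisable);
  delete db;
  return 0;
}
```
Counters are collected per thread, so each thread should reset and read its own context.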
|
|
|
|
|
2018-10-17 18:18:00 +00:00
|
|
|
void PerfContext::EnablePerLevelPerfContext() {
|
2019-02-05 22:12:08 +00:00
|
|
|
if (level_to_perf_context == nullptr) {
|
2018-10-17 18:18:00 +00:00
|
|
|
level_to_perf_context = new std::map<uint32_t, PerfContextByLevel>();
|
|
|
|
}
|
|
|
|
per_level_perf_context_enabled = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
void PerfContext::DisablePerLevelPerfContext() {
|
|
|
|
per_level_perf_context_enabled = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
void PerfContext::ClearPerLevelPerfContext() {
|
2019-02-05 22:12:08 +00:00
|
|
|
if (level_to_perf_context != nullptr) {
|
|
|
|
level_to_perf_context->clear();
|
2018-10-17 18:18:00 +00:00
|
|
|
delete level_to_perf_context;
|
|
|
|
level_to_perf_context = nullptr;
|
|
|
|
}
|
|
|
|
per_level_perf_context_enabled = false;
|
|
|
|
}
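Similarly, a minimal sketch of the per-level counters (the `DumpPerLevelCounters` helper, the key, and the assumption that `db` is already open are all illustrative):
```
#include <iostream>
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/perf_context.h"

// EnablePerLevelPerfContext() allocates the per-level map, ToString() then
// prints entries like "bloom_filter_useful = <count>@level<N>, ", and
// ClearPerLevelPerfContext() frees the map and disables collection again.
void DumpPerLevelCounters(rocksdb::DB* db) {
  auto* ctx = rocksdb::get_perf_context();
  ctx->EnablePerLevelPerfContext();
  ctx->Reset();

  std::string value;
  rocksdb::Status s = db->Get(rocksdb::ReadOptions(), "some_key", &value);
  (void)s;  // a NotFound status is fine for this illustration

  std::cout << ctx->ToString(/*exclude_zero_counters=*/true) << std::endl;

  ctx->ClearPerLevelPerfContext();
}
```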
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|