// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <cstdint>
#include <string>

#include "db/db_impl/db_impl.h"
#include "db/range_del_aggregator.h"
#include "memory/arena.h"
#include "options/cf_options.h"
#include "rocksdb/db.h"
#include "rocksdb/iterator.h"
#include "rocksdb/wide_columns.h"
#include "table/iterator_wrapper.h"
|
In DB::NewIterator(), try to allocate the whole iterator tree in an arena
Summary:
In this patch, try to allocate the whole iterator tree starting from DBIter from an arena
1. ArenaWrappedDBIter is created when serves as the entry point of an iterator tree, with an arena in it.
2. Add an option to create iterator from arena for following iterators: DBIter, MergingIterator, MemtableIterator, all mem table's iterators, all table reader's iterators and two level iterator.
3. MergeIteratorBuilder is created to incrementally build the tree of internal iterators. It is passed to mem table list and version set and add iterators to it.
Limitations:
(1) Only DB::NewIterator() without tailing uses the arena. Other cases, including readonly DB and compactions are still from malloc
(2) Two level iterator itself is allocated in arena, but not iterators inside it.
Test Plan: make all check
Reviewers: ljin, haobo
Reviewed By: haobo
Subscribers: leveldb, dhruba, yhchiang, igor
Differential Revision: https://reviews.facebook.net/D18513
2014-06-02 23:38:00 +00:00
|
|
|
#include "util/autovector.h"

namespace ROCKSDB_NAMESPACE {

class Version;

// This file declares the factory functions of DBIter, in its original form
// or in a wrapped form with class ArenaWrappedDBIter, which is defined here.
// Class DBIter, which is declared and implemented inside db_iter.cc, is
// an iterator that converts internal keys (yielded by an InternalIterator)
// that were live at the specified sequence number into appropriate user
// keys.
// Each internal key consists of a user key, a sequence number, and a value
// type. DBIter deals with multiple key versions, tombstones, merge operands,
// etc., and exposes an Iterator.
// For example, DBIter may wrap the following InternalIterator:
//    user key: AAA  value: v3   seqno: 100  type: Put
//    user key: AAA  value: v2   seqno: 97   type: Put
//    user key: AAA  value: v1   seqno: 95   type: Put
//    user key: BBB  value: v1   seqno: 90   type: Put
//    user key: BBC  value: N/A  seqno: 98   type: Delete
//    user key: BBC  value: v1   seqno: 95   type: Put
// If the snapshot passed in is 102, then the DBIter is expected to
// expose the following iterator:
//    key: AAA  value: v3
//    key: BBB  value: v1
// If the snapshot passed in is 96, then it should expose:
//    key: AAA  value: v1
//    key: BBB  value: v1
//    key: BBC  value: v1
//
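//
// As an illustrative sketch (not part of this header; `my_db` is a
// hypothetical, already-opened DB*), this snapshot-visibility behavior is
// what a caller observes through the public API:
//
//   const Snapshot* snap = my_db->GetSnapshot();  // pins a sequence number
//   ReadOptions read_opts;
//   read_opts.snapshot = snap;
//   std::unique_ptr<Iterator> it(my_db->NewIterator(read_opts));
//   for (it->SeekToFirst(); it->Valid(); it->Next()) {
//     // Exposes only the newest version of each user key that was live at
//     // the snapshot's sequence number; deleted keys are skipped entirely.
//   }
//   my_db->ReleaseSnapshot(snap);
//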
|
In DB::NewIterator(), try to allocate the whole iterator tree in an arena
Summary:
In this patch, try to allocate the whole iterator tree starting from DBIter from an arena
1. ArenaWrappedDBIter is created when serves as the entry point of an iterator tree, with an arena in it.
2. Add an option to create iterator from arena for following iterators: DBIter, MergingIterator, MemtableIterator, all mem table's iterators, all table reader's iterators and two level iterator.
3. MergeIteratorBuilder is created to incrementally build the tree of internal iterators. It is passed to mem table list and version set and add iterators to it.
Limitations:
(1) Only DB::NewIterator() without tailing uses the arena. Other cases, including readonly DB and compactions are still from malloc
(2) Two level iterator itself is allocated in arena, but not iterators inside it.
Test Plan: make all check
Reviewers: ljin, haobo
Reviewed By: haobo
Subscribers: leveldb, dhruba, yhchiang, igor
Differential Revision: https://reviews.facebook.net/D18513
2014-06-02 23:38:00 +00:00
|
|
|
|
2019-09-13 20:48:04 +00:00
|
|
|

// Memtables and sstables that make the DB representation contain
// (userkey, seq, type) => uservalue entries. DBIter
// combines multiple entries for the same userkey found in the DB
// representation into a single entry while accounting for sequence
// numbers, deletion markers, overwrites, etc.
class DBIter final : public Iterator {
 public:
  // The following is grossly complicated. TODO: clean it up
  // Which direction is the iterator currently moving?
  // (1) When moving forward:
  //   (1a) if current_entry_is_merged_ = false, the internal iterator is
  //        positioned at the exact entry that yields this->key(),
  //        this->value()
  //   (1b) if current_entry_is_merged_ = true, the internal iterator is
  //        positioned immediately after the last entry that contributed to
  //        the current this->value(). That entry may or may not have key
  //        equal to this->key().
  // (2) When moving backwards, the internal iterator is positioned
  //     just before all entries whose user key == this->key().
  enum Direction : uint8_t { kForward, kReverse };

  // LocalStatistics contains Statistics counters that are aggregated per
  // iterator instance and then sent to the global statistics when the
  // iterator is destroyed.
  //
  // The purpose of this approach is to avoid the perf regression that happens
  // when multiple threads bump the atomic counters from DBIter::Next().
  struct LocalStatistics {
    explicit LocalStatistics() { ResetCounters(); }

    void ResetCounters() {
      next_count_ = 0;
      next_found_count_ = 0;
      prev_count_ = 0;
      prev_found_count_ = 0;
      bytes_read_ = 0;
      skip_count_ = 0;
    }

    void BumpGlobalStatistics(Statistics* global_statistics) {
      RecordTick(global_statistics, NUMBER_DB_NEXT, next_count_);
      RecordTick(global_statistics, NUMBER_DB_NEXT_FOUND, next_found_count_);
      RecordTick(global_statistics, NUMBER_DB_PREV, prev_count_);
      RecordTick(global_statistics, NUMBER_DB_PREV_FOUND, prev_found_count_);
      RecordTick(global_statistics, ITER_BYTES_READ, bytes_read_);
      RecordTick(global_statistics, NUMBER_ITER_SKIP, skip_count_);
      PERF_COUNTER_ADD(iter_read_bytes, bytes_read_);
      ResetCounters();
    }

    // Maps to Tickers::NUMBER_DB_NEXT
    uint64_t next_count_;
    // Maps to Tickers::NUMBER_DB_NEXT_FOUND
    uint64_t next_found_count_;
    // Maps to Tickers::NUMBER_DB_PREV
    uint64_t prev_count_;
    // Maps to Tickers::NUMBER_DB_PREV_FOUND
    uint64_t prev_found_count_;
    // Maps to Tickers::ITER_BYTES_READ
    uint64_t bytes_read_;
    // Maps to Tickers::NUMBER_ITER_SKIP
    uint64_t skip_count_;
  };
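
  // Illustrative note on the design: hot paths such as Next() are expected to
  // bump the plain (non-atomic) counters of local_stats_, e.g.
  //
  //   local_stats_.next_count_++;
  //
  // and the accumulated values are flushed once via BumpGlobalStatistics()
  // in ~DBIter() below, so the shared atomic tickers are touched once per
  // iterator lifetime rather than once per operation.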

  DBIter(Env* _env, const ReadOptions& read_options,
         const ImmutableOptions& ioptions,
         const MutableCFOptions& mutable_cf_options, const Comparator* cmp,
         InternalIterator* iter, const Version* version, SequenceNumber s,
         bool arena_mode, uint64_t max_sequential_skip_in_iterations,
         ReadCallback* read_callback, DBImpl* db_impl, ColumnFamilyData* cfd,
         bool expose_blob_index);

  // No copying allowed
  DBIter(const DBIter&) = delete;
  void operator=(const DBIter&) = delete;

  ~DBIter() override {
    ThreadStatus::OperationType cur_op_type =
        ThreadStatusUtil::GetThreadOperation();
    ThreadStatusUtil::SetThreadOperation(
        ThreadStatus::OperationType::OP_UNKNOWN);
    // Release pinned data if any
    if (pinned_iters_mgr_.PinningEnabled()) {
      pinned_iters_mgr_.ReleasePinnedData();
    }
    RecordTick(statistics_, NO_ITERATOR_DELETED);
    ResetInternalKeysSkippedCounter();
    local_stats_.BumpGlobalStatistics(statistics_);
    iter_.DeleteIter(arena_mode_);
    ThreadStatusUtil::SetThreadOperation(cur_op_type);
  }
  void SetIter(InternalIterator* iter) {
    assert(iter_.iter() == nullptr);
    iter_.Set(iter);
    iter_.iter()->SetPinnedItersMgr(&pinned_iters_mgr_);
  }

  bool Valid() const override {
#ifdef ROCKSDB_ASSERT_STATUS_CHECKED
    if (valid_) {
      status_.PermitUncheckedError();
    }
#endif  // ROCKSDB_ASSERT_STATUS_CHECKED
    return valid_;
  }
  Slice key() const override {
    assert(valid_);
    if (timestamp_lb_) {
      return saved_key_.GetInternalKey();
    } else {
      const Slice ukey_and_ts = saved_key_.GetUserKey();
      return Slice(ukey_and_ts.data(), ukey_and_ts.size() - timestamp_size_);
    }
  }
  Slice value() const override {
    assert(valid_);

    return value_;
  }

  const WideColumns& columns() const override {
    assert(valid_);

    return wide_columns_;
  }
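
  // Illustrative sketch (hypothetical `my_db` and `cf_handle`, not part of
  // this header): entities written with PutEntity() surface through
  // columns(), while plain values appear as a single default column:
  //
  //   WideColumns cols{{"name", "alice"}, {"balance", "42"}};
  //   my_db->PutEntity(WriteOptions(), cf_handle, "user1", cols);
  //   std::unique_ptr<Iterator> it(
  //       my_db->NewIterator(ReadOptions(), cf_handle));
  //   for (it->SeekToFirst(); it->Valid(); it->Next()) {
  //     for (const WideColumn& col : it->columns()) {
  //       // col.name() / col.value() for each stored column
  //     }
  //   }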

  Status status() const override {
    if (status_.ok()) {
      return iter_.status();
    } else {
      assert(!valid_);
      return status_;
    }
  }
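
  // Usage note: because status() surfaces errors from the wrapped internal
  // iterator, the standard read loop checks it after Valid() turns false
  // (sketch; `it` is a hypothetical iterator obtained from DB::NewIterator):
  //
  //   for (it->SeekToFirst(); it->Valid(); it->Next()) {
  //     // consume it->key() / it->value()
  //   }
  //   if (!it->status().ok()) {
  //     // iteration stopped due to an I/O or corruption error, not because
  //     // the key range was exhausted
  //   }
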
  Slice timestamp() const override {
    assert(valid_);
    assert(timestamp_size_ > 0);
    if (direction_ == kReverse) {
      return saved_timestamp_;
    }
    const Slice ukey_and_ts = saved_key_.GetUserKey();
    assert(timestamp_size_ < ukey_and_ts.size());
    return ExtractTimestampFromUserKey(ukey_and_ts, timestamp_size_);
  }
  bool IsBlob() const {
    assert(valid_);
    return is_blob_;
  }

  Status GetProperty(std::string prop_name, std::string* prop) override;

  void Next() final override;
  void Prev() final override;
  // 'target' does not contain timestamp, even if user timestamp feature is
  // enabled.
  void Seek(const Slice& target) final override;
  void SeekForPrev(const Slice& target) final override;
  void SeekToFirst() final override;
  void SeekToLast() final override;
  Env* env() const { return env_; }
  void set_sequence(uint64_t s) {
    sequence_ = s;
    if (read_callback_) {
      read_callback_->Refresh(s);
    }
    iter_.SetRangeDelReadSeqno(s);
  }
  void set_valid(bool v) { valid_ = v; }

 private:
  // For all methods in this block:
  // PRE: iter_->Valid() && status_.ok()
  // Returns false if there was an error; in that case status() is non-ok and
  // valid_ is false, and callers would usually stop what they were doing and
  // return.
  bool ReverseToForward();
  bool ReverseToBackward();
  // Set saved_key_ to the seek target, with the proper sequence number set.
  // It might get adjusted if the seek key is smaller than the iterator's
  // lower bound. `target` does not have timestamp.
  void SetSavedKeyToSeekTarget(const Slice& target);
  // Set saved_key_ to the seek target, with the proper sequence number set.
  // It might get adjusted if the seek key is larger than the iterator's
  // upper bound. `target` does not have timestamp.
  void SetSavedKeyToSeekForPrevTarget(const Slice& target);
  bool FindValueForCurrentKey();
  bool FindValueForCurrentKeyUsingSeek();
  bool FindUserKeyBeforeSavedKey();
  // If `skipping_saved_key` is true, the function will keep iterating until it
  // finds a user key that is larger than `saved_key_`.
  // If `prefix` is not null, the iterator needs to stop when all keys for the
  // prefix are exhausted, and the iterator is set to invalid.
  bool FindNextUserEntry(bool skipping_saved_key, const Slice* prefix);
  // Internal implementation of FindNextUserEntry().
  bool FindNextUserEntryInternal(bool skipping_saved_key, const Slice* prefix);
  bool ParseKey(ParsedInternalKey* key);
  bool MergeValuesNewToOld();

  // If prefix is not null, we need to set the iterator to invalid if no more
  // entry can be found within the prefix.
  void PrevInternal(const Slice* prefix);
  bool TooManyInternalKeysSkipped(bool increment = true);
  bool IsVisible(SequenceNumber sequence, const Slice& ts,
                 bool* more_recent = nullptr);

  // Temporarily pin the blocks that we encounter until ReleaseTempPinnedData()
  // is called
  void TempPinData() {
    if (!pin_thru_lifetime_) {
      pinned_iters_mgr_.StartPinning();
    }
  }

  // Release blocks pinned by TempPinData()
  void ReleaseTempPinnedData() {
    if (!pin_thru_lifetime_ && pinned_iters_mgr_.PinningEnabled()) {
      pinned_iters_mgr_.ReleasePinnedData();
    }
  }
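
  // Illustrative note: pin_thru_lifetime_ corresponds to
  // ReadOptions::pin_data, e.g.
  //
  //   ReadOptions read_opts;
  //   read_opts.pin_data = true;  // pin data blocks for the iterator's
  //                               // entire lifetime
  //
  // in which case the temporary pin/release pair above is a no-op, since
  // pinning is already permanent for the iterator's lifetime.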

  inline void ClearSavedValue() {
    if (saved_value_.capacity() > 1048576) {
      std::string empty;
      swap(empty, saved_value_);
    } else {
      saved_value_.clear();
    }
  }

  inline void ResetInternalKeysSkippedCounter() {
    local_stats_.skip_count_ += num_internal_keys_skipped_;
    if (valid_) {
      local_stats_.skip_count_--;
    }
    num_internal_keys_skipped_ = 0;
  }

  bool expect_total_order_inner_iter() {
    assert(expect_total_order_inner_iter_ || prefix_extractor_ != nullptr);
    return expect_total_order_inner_iter_;
  }

  // If a lower bound on the timestamp is given via ReadOptions.iter_start_ts,
  // we need to return all versions of the same key whose timestamps fall in
  // the requested range, so we cannot skip an entry merely because its user
  // key matches the previous one while the timestamps differ.
  inline int CompareKeyForSkip(const Slice& a, const Slice& b) {
    return timestamp_lb_ != nullptr
               ? user_comparator_.Compare(a, b)
               : user_comparator_.CompareWithoutTimestamp(a, b);
  }
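
  // Illustrative sketch, assuming a DB opened with a timestamp-aware
  // comparator such as BytewiseComparatorWithU64Ts() (`my_db` is
  // hypothetical): setting ReadOptions::iter_start_ts in addition to
  // ReadOptions::timestamp asks the iterator for all versions of a key in
  // the range [iter_start_ts, timestamp] rather than only the newest visible
  // one, which is why the full Compare() is used above in that mode:
  //
  //   std::string hi_buf, lo_buf;
  //   Slice ts_hi = EncodeU64Ts(200, &hi_buf);
  //   Slice ts_lo = EncodeU64Ts(100, &lo_buf);
  //   ReadOptions read_opts;
  //   read_opts.timestamp = &ts_hi;      // read as of timestamp 200...
  //   read_opts.iter_start_ts = &ts_lo;  // ...keeping versions back to 100
  //   std::unique_ptr<Iterator> it(my_db->NewIterator(read_opts));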

  // Retrieves the blob value for the specified user key using the given blob
  // index when using the integrated BlobDB implementation.
  bool SetBlobValueIfNeeded(const Slice& user_key, const Slice& blob_index);

  void ResetBlobValue() {
    is_blob_ = false;
    blob_value_.Reset();
  }

  void SetValueAndColumnsFromPlain(const Slice& slice) {
    assert(value_.empty());
    assert(wide_columns_.empty());

    value_ = slice;
    wide_columns_.emplace_back(kDefaultWideColumnName, slice);
  }

  bool SetValueAndColumnsFromEntity(Slice slice);

  bool SetValueAndColumnsFromMergeResult(const Status& merge_status,
                                         ValueType result_type);

  void ResetValueAndColumns() {
    value_.clear();
    wide_columns_.clear();
  }

  // The following methods perform the actual merge operation for the
  // no base value/plain base value/wide-column base value cases.
  // If user-defined timestamp is enabled, `user_key` includes timestamp.
  bool MergeWithNoBaseValue(const Slice& user_key);
  bool MergeWithPlainBaseValue(const Slice& value, const Slice& user_key);
  bool MergeWithWideColumnBaseValue(const Slice& entity, const Slice& user_key);
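
  // Illustrative sketch of the merge flow these methods implement, using a
  // hypothetical counter DB whose merge operator adds encoded integers (for
  // example, MergeOperators::CreateUInt64AddOperator()): internal entries
  // such as
  //
  //   user key: CNT  operand: +2  seqno: 110  type: Merge
  //   user key: CNT  operand: +3  seqno: 105  type: Merge
  //   user key: CNT  value:   5   seqno: 100  type: Put
  //
  // are collapsed by the iterator into the single visible entry
  // key: CNT, value: 10. The Put supplies the plain base value
  // (MergeWithPlainBaseValue); if there is no Put/Delete beneath the
  // operands, MergeWithNoBaseValue is used instead.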

  bool PrepareValue() {
    if (!iter_.PrepareValue()) {
      assert(!iter_.status().ok());
      valid_ = false;
      return false;
    }
    // ikey_ could change because BlockBasedTableIterator does a block cache
    // lookup and index_iter_ could end up pointing to a different block,
    // leaving ikey_ pointing at the wrong key. So ikey_ needs to be updated
    // on Seek/Next calls to point to the right key again.
    if (!ParseKey(&ikey_)) {
      return false;
    }
    return true;
  }
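
  // Illustrative sketch, assuming a RocksDB version whose public API exposes
  // ReadOptions::allow_unprepared_value and Iterator::PrepareValue()
  // (`my_db`, `WantValue()` and `Use()` are hypothetical): deferring value
  // preparation lets key-only scans skip loading large (e.g. blob) values:
  //
  //   ReadOptions read_opts;
  //   read_opts.allow_unprepared_value = true;
  //   std::unique_ptr<Iterator> it(my_db->NewIterator(read_opts));
  //   for (it->SeekToFirst(); it->Valid(); it->Next()) {
  //     if (WantValue(it->key()) && it->PrepareValue()) {  // load on demand
  //       Use(it->value());
  //     }
  //   }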

  const SliceTransform* prefix_extractor_;
  Env* const env_;
  SystemClock* clock_;
  Logger* logger_;
  UserComparatorWrapper user_comparator_;
  const MergeOperator* const merge_operator_;
  IteratorWrapper iter_;
  const Version* version_;
  ReadCallback* read_callback_;
  // Max visible sequence number. It is normally the snapshot seq unless we
  // have uncommitted data in the db, as in WriteUnCommitted.
  SequenceNumber sequence_;

  IterKey saved_key_;
  // Reusable internal key data structure. This is only used inside one
  // function and should not be used across functions. Reusing this object
  // can reduce the overhead of constructing it each time the function is
  // called.
  ParsedInternalKey ikey_;
  std::string saved_value_;
  Slice pinned_value_;
  // for prefix seek mode to support prev()
  PinnableSlice blob_value_;
  // Value of the default column
  Slice value_;
  // All columns (i.e. name-value pairs)
  WideColumns wide_columns_;
  Statistics* statistics_;
  uint64_t max_skip_;
  uint64_t max_skippable_internal_keys_;
  uint64_t num_internal_keys_skipped_;
  const Slice* iterate_lower_bound_;
  const Slice* iterate_upper_bound_;

  // The prefix of the seek key. It is only used when prefix_same_as_start_
  // is true and the prefix extractor is not null. In Next() or Prev(), the
  // current keys will be checked against this prefix, so that the iterator
  // can be invalidated once the keys within this prefix have been exhausted.
  // Set it using SetUserKey() and read it using GetUserKey().
  IterKey prefix_;
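
  // Illustrative sketch of the configuration under which prefix_ is used
  // (hypothetical `options` and `my_db`):
  //
  //   options.prefix_extractor.reset(NewFixedPrefixTransform(4));
  //   ReadOptions read_opts;
  //   read_opts.prefix_same_as_start = true;
  //   std::unique_ptr<Iterator> it(my_db->NewIterator(read_opts));
  //   for (it->Seek("user0001"); it->Valid(); it->Next()) {
  //     // Valid() turns false once keys stop sharing the 4-byte
  //     // prefix "user"
  //   }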

  Status status_;
  Direction direction_;
  bool valid_;
  bool current_entry_is_merged_;
  // True if we know that the current entry's seqnum is 0.
  // This information is used to infer that the next entry will be for
  // another user key.
  bool is_key_seqnum_zero_;
  const bool prefix_same_as_start_;
  // Means that we will pin all data blocks we read as long as the Iterator
  // is not deleted; will be true if ReadOptions::pin_data is true
  const bool pin_thru_lifetime_;
  // Expect the inner iterator to maintain a total order.
  // prefix_extractor_ must be non-NULL if the value is false.
  const bool expect_total_order_inner_iter_;
  ReadTier read_tier_;
  bool fill_cache_;
  bool verify_checksums_;
  // Whether the iterator is allowed to expose blob references. Set to true
  // when the stacked BlobDB implementation is used, false otherwise.
  bool expose_blob_index_;
  bool is_blob_;
  bool arena_mode_;
  const Env::IOActivity io_activity_;
  // List of operands for merge operator.
  MergeContext merge_context_;
  LocalStatistics local_stats_;
  PinnedIteratorsManager pinned_iters_mgr_;
  DBImpl* db_impl_;
  ColumnFamilyData* cfd_;
  const Slice* const timestamp_ub_;
  const Slice* const timestamp_lb_;
  const size_t timestamp_size_;
  std::string saved_timestamp_;
};

// Return a new iterator that converts internal keys (yielded by
// "*internal_iter") that were live at the specified `sequence` number
// into appropriate user keys.
extern Iterator* NewDBIterator(
    Env* env, const ReadOptions& read_options, const ImmutableOptions& ioptions,
    const MutableCFOptions& mutable_cf_options,
    const Comparator* user_key_comparator, InternalIterator* internal_iter,
    const Version* version, const SequenceNumber& sequence,
    uint64_t max_sequential_skip_in_iterations, ReadCallback* read_callback,
    DBImpl* db_impl = nullptr, ColumnFamilyData* cfd = nullptr,
    bool expose_blob_index = false);

}  // namespace ROCKSDB_NAMESPACE