// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <cstdint>
#include <string>

#include "db/db_impl/db_impl.h"
#include "db/range_del_aggregator.h"
#include "memory/arena.h"
#include "options/cf_options.h"
#include "rocksdb/db.h"
#include "rocksdb/iterator.h"
#include "table/iterator_wrapper.h"
#include "util/autovector.h"

namespace ROCKSDB_NAMESPACE {

class Version;

// This file declares the factory functions of DBIter, in its original form
// or a wrapped form with class ArenaWrappedDBIter, which is defined here.
// Class DBIter, which is declared and implemented inside db_iter.cc, is
// an iterator that converts internal keys (yielded by an InternalIterator)
// that were live at the specified sequence number into appropriate user
// keys.
// Each internal key consists of a user key, a sequence number, and a value
// type. DBIter deals with multiple key versions, tombstones, merge operands,
// etc., and exposes an Iterator.
// For example, DBIter may wrap the following InternalIterator:
//   user key: AAA  value: v3   seqno: 100  type: Put
//   user key: AAA  value: v2   seqno: 97   type: Put
//   user key: AAA  value: v1   seqno: 95   type: Put
//   user key: BBB  value: v1   seqno: 90   type: Put
//   user key: BBC  value: N/A  seqno: 98   type: Delete
//   user key: BBC  value: v1   seqno: 95   type: Put
// If the snapshot passed in is 102, then the DBIter is expected to
// expose the following iterator:
//   key: AAA  value: v3
//   key: BBB  value: v1
// If the snapshot passed in is 96, then it should expose:
//   key: AAA  value: v1
//   key: BBB  value: v1
//   key: BBC  value: v1
//
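// A minimal usage sketch (illustrative only; it assumes a DB* named `db`
// that already contains the entries listed above) of how this collapsed
// view is consumed through the public API; DBIter itself is created
// internally by DB::NewIterator():
//
//   ReadOptions read_opts;
//   read_opts.snapshot = db->GetSnapshot();  // e.g. a snapshot at seqno 102
//   std::unique_ptr<Iterator> it(db->NewIterator(read_opts));
//   for (it->SeekToFirst(); it->Valid(); it->Next()) {
//     // Visits AAA -> v3, then BBB -> v1; BBC is hidden by the Delete at 98.
//   }
//   db->ReleaseSnapshot(read_opts.snapshot);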

// Memtables and sstables that make the DB representation contain
// (userkey,seq,type) => uservalue entries. DBIter
// combines multiple entries for the same userkey found in the DB
// representation into a single entry while accounting for sequence
// numbers, deletion markers, overwrites, etc.
class DBIter final : public Iterator {
 public:
  // The following is grossly complicated. TODO: clean it up
  // Which direction is the iterator currently moving?
  // (1) When moving forward:
  //   (1a) if current_entry_is_merged_ = false, the internal iterator is
  //        positioned at the exact entry that yields this->key(),
  //        this->value()
  //   (1b) if current_entry_is_merged_ = true, the internal iterator is
  //        positioned immediately after the last entry that contributed to
  //        the current this->value(). That entry may or may not have key
  //        equal to this->key().
  // (2) When moving backwards, the internal iterator is positioned
  //     just before all entries whose user key == this->key().
  enum Direction : uint8_t { kForward, kReverse };

  // LocalStatistics contains Statistics counters that are aggregated per
  // iterator instance and then sent to the global statistics when the
  // iterator is destroyed.
  //
  // The purpose of this approach is to avoid the perf regression caused by
  // multiple threads bumping the atomic counters from DBIter::Next().
  struct LocalStatistics {
    explicit LocalStatistics() { ResetCounters(); }

    void ResetCounters() {
      next_count_ = 0;
      next_found_count_ = 0;
      prev_count_ = 0;
      prev_found_count_ = 0;
      bytes_read_ = 0;
      skip_count_ = 0;
    }

    void BumpGlobalStatistics(Statistics* global_statistics) {
      RecordTick(global_statistics, NUMBER_DB_NEXT, next_count_);
      RecordTick(global_statistics, NUMBER_DB_NEXT_FOUND, next_found_count_);
      RecordTick(global_statistics, NUMBER_DB_PREV, prev_count_);
      RecordTick(global_statistics, NUMBER_DB_PREV_FOUND, prev_found_count_);
      RecordTick(global_statistics, ITER_BYTES_READ, bytes_read_);
      RecordTick(global_statistics, NUMBER_ITER_SKIP, skip_count_);
      PERF_COUNTER_ADD(iter_read_bytes, bytes_read_);
      ResetCounters();
    }

    // Maps to Tickers::NUMBER_DB_NEXT
    uint64_t next_count_;
    // Maps to Tickers::NUMBER_DB_NEXT_FOUND
    uint64_t next_found_count_;
    // Maps to Tickers::NUMBER_DB_PREV
    uint64_t prev_count_;
    // Maps to Tickers::NUMBER_DB_PREV_FOUND
    uint64_t prev_found_count_;
    // Maps to Tickers::ITER_BYTES_READ
    uint64_t bytes_read_;
    // Maps to Tickers::NUMBER_ITER_SKIP
    uint64_t skip_count_;
  };
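
  // A hedged sketch of how these counters are meant to be used (the Next()
  // body shown here is hypothetical; the real implementation lives in
  // db_iter.cc): hot paths bump plain members with no atomic traffic, and the
  // totals reach the shared Statistics object exactly once, from the
  // destructor via BumpGlobalStatistics().
  //
  //   // inside a Next() implementation:
  //   local_stats_.next_count_++;
  //   if (valid_) {
  //     local_stats_.next_found_count_++;
  //     local_stats_.bytes_read_ += key().size() + value().size();
  //   }
  //   // inside ~DBIter():
  //   local_stats_.BumpGlobalStatistics(statistics_);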

  DBIter(Env* _env, const ReadOptions& read_options,
         const ImmutableOptions& ioptions,
         const MutableCFOptions& mutable_cf_options, const Comparator* cmp,
         InternalIterator* iter, const Version* version, SequenceNumber s,
         bool arena_mode, uint64_t max_sequential_skip_in_iterations,
         ReadCallback* read_callback, DBImpl* db_impl, ColumnFamilyData* cfd,
         bool expose_blob_index);

  // No copying allowed
  DBIter(const DBIter&) = delete;
  void operator=(const DBIter&) = delete;

  ~DBIter() override {
    // Release pinned data if any
    if (pinned_iters_mgr_.PinningEnabled()) {
      pinned_iters_mgr_.ReleasePinnedData();
    }
    RecordTick(statistics_, NO_ITERATOR_DELETED);
    ResetInternalKeysSkippedCounter();
    local_stats_.BumpGlobalStatistics(statistics_);
    iter_.DeleteIter(arena_mode_);
  }

  void SetIter(InternalIterator* iter) {
    assert(iter_.iter() == nullptr);
    iter_.Set(iter);
    iter_.iter()->SetPinnedItersMgr(&pinned_iters_mgr_);
  }

  ReadRangeDelAggregator* GetRangeDelAggregator() { return &range_del_agg_; }

  bool Valid() const override {
#ifdef ROCKSDB_ASSERT_STATUS_CHECKED
    if (valid_) {
      status_.PermitUncheckedError();
    }
#endif  // ROCKSDB_ASSERT_STATUS_CHECKED
    return valid_;
  }

  Slice key() const override {
    assert(valid_);
    if (timestamp_lb_) {
      return saved_key_.GetInternalKey();
    } else {
      const Slice ukey_and_ts = saved_key_.GetUserKey();
      return Slice(ukey_and_ts.data(), ukey_and_ts.size() - timestamp_size_);
    }
  }

  Slice value() const override {
    assert(valid_);

    if (!expose_blob_index_ && is_blob_) {
      return blob_value_;
    } else if (current_entry_is_merged_) {
      // If pinned_value_ is set then the result of the merge operator is one
      // of the merge operands and we should return it.
      return pinned_value_.data() ? pinned_value_ : saved_value_;
    } else if (direction_ == kReverse) {
      return pinned_value_;
    } else {
      return iter_.value();
    }
  }

  Status status() const override {
    if (status_.ok()) {
      return iter_.status();
    } else {
      assert(!valid_);
      return status_;
    }
  }

  Slice timestamp() const override {
    assert(valid_);
    assert(timestamp_size_ > 0);
    if (direction_ == kReverse) {
      return saved_timestamp_;
    }
    const Slice ukey_and_ts = saved_key_.GetUserKey();
    assert(timestamp_size_ < ukey_and_ts.size());
    return ExtractTimestampFromUserKey(ukey_and_ts, timestamp_size_);
  }

  bool IsBlob() const {
    assert(valid_);
    return is_blob_;
  }

  Status GetProperty(std::string prop_name, std::string* prop) override;

  void Next() final override;
  void Prev() final override;
  // `target` does not contain a timestamp, even if the user-defined
  // timestamp feature is enabled.
  void Seek(const Slice& target) final override;
  void SeekForPrev(const Slice& target) final override;
  void SeekToFirst() final override;
  void SeekToLast() final override;
  Env* env() const { return env_; }
  void set_sequence(uint64_t s) {
    sequence_ = s;
    if (read_callback_) {
      read_callback_->Refresh(s);
    }
  }
  void set_valid(bool v) { valid_ = v; }

 private:
  // For all methods in this block:
  // PRE: iter_.Valid() && status_.ok()
  // Return false if there was an error; in that case status() is non-ok,
  // valid_ is false, and callers would usually stop what they were doing and
  // return.
  bool ReverseToForward();
  bool ReverseToBackward();
  // Set saved_key_ to the seek key to target, with the proper sequence
  // number set. It might get adjusted if the seek key is smaller than the
  // iterator lower bound.
  // target does not have a timestamp.
  void SetSavedKeyToSeekTarget(const Slice& target);
  // Set saved_key_ to the seek key to target, with the proper sequence
  // number set. It might get adjusted if the seek key is larger than the
  // iterator upper bound.
  // target does not have a timestamp.
  void SetSavedKeyToSeekForPrevTarget(const Slice& target);

  bool FindValueForCurrentKey();
  bool FindValueForCurrentKeyUsingSeek();
  bool FindUserKeyBeforeSavedKey();
  // If `skipping_saved_key` is true, the function will keep iterating until
  // it finds a user key that is larger than `saved_key_`.
  // If `prefix` is not null, the iterator stops once all keys for the prefix
  // have been exhausted, and it is set to invalid.
  bool FindNextUserEntry(bool skipping_saved_key, const Slice* prefix);
  // Internal implementation of FindNextUserEntry().
  bool FindNextUserEntryInternal(bool skipping_saved_key, const Slice* prefix);
  bool ParseKey(ParsedInternalKey* key);
  bool MergeValuesNewToOld();

  // If prefix is not null, we need to set the iterator to invalid if no more
  // entries can be found within the prefix.
  void PrevInternal(const Slice* prefix);
  bool TooManyInternalKeysSkipped(bool increment = true);
  bool IsVisible(SequenceNumber sequence, const Slice& ts,
                 bool* more_recent = nullptr);

  // Temporarily pin the blocks that we encounter until ReleaseTempPinnedData()
  // is called
  void TempPinData() {
    if (!pin_thru_lifetime_) {
      pinned_iters_mgr_.StartPinning();
    }
  }

  // Release blocks pinned by TempPinData()
  void ReleaseTempPinnedData() {
    if (!pin_thru_lifetime_ && pinned_iters_mgr_.PinningEnabled()) {
      pinned_iters_mgr_.ReleasePinnedData();
    }
  }

  inline void ClearSavedValue() {
    if (saved_value_.capacity() > 1048576) {
      std::string empty;
      swap(empty, saved_value_);
    } else {
      saved_value_.clear();
    }
  }

  inline void ResetInternalKeysSkippedCounter() {
    local_stats_.skip_count_ += num_internal_keys_skipped_;
    if (valid_) {
      local_stats_.skip_count_--;
    }
    num_internal_keys_skipped_ = 0;
  }

  bool expect_total_order_inner_iter() {
    assert(expect_total_order_inner_iter_ || prefix_extractor_ != nullptr);
    return expect_total_order_inner_iter_;
  }

  // If a lower bound on the timestamp is given by ReadOptions.iter_start_ts,
  // we need to return multiple versions of the same user key. In that case we
  // cannot skip an entry just because its user key matches, since versions
  // with different timestamps may both fall within the timestamp range.
  inline int CompareKeyForSkip(const Slice& a, const Slice& b) {
    return timestamp_lb_ != nullptr
               ? user_comparator_.Compare(a, b)
               : user_comparator_.CompareWithoutTimestamp(a, b);
  }

  // Retrieves the blob value for the specified user key using the given blob
  // index when using the integrated BlobDB implementation.
  bool SetBlobValueIfNeeded(const Slice& user_key, const Slice& blob_index);

  bool SetWideColumnValueIfNeeded(const Slice& wide_columns_slice);

  Status Merge(const Slice* val, const Slice& user_key);

  const SliceTransform* prefix_extractor_;
  Env* const env_;
  SystemClock* clock_;
  Logger* logger_;
  UserComparatorWrapper user_comparator_;
  const MergeOperator* const merge_operator_;
  IteratorWrapper iter_;
  const Version* version_;
  ReadCallback* read_callback_;
  // Max visible sequence number. It is normally the snapshot seq unless we
  // have uncommitted data in the db, as in WriteUnCommitted.
  SequenceNumber sequence_;

  IterKey saved_key_;
  // Reusable internal key data structure. This is only used inside one
  // function and should not be used across functions. Reusing this object
  // avoids the overhead of constructing it on every call.
  ParsedInternalKey ikey_;
  std::string saved_value_;
  Slice pinned_value_;
  // for prefix seek mode to support prev()
  PinnableSlice blob_value_;
  Statistics* statistics_;
  uint64_t max_skip_;
  uint64_t max_skippable_internal_keys_;
  uint64_t num_internal_keys_skipped_;
  const Slice* iterate_lower_bound_;
  const Slice* iterate_upper_bound_;

  // The prefix of the seek key. It is only used when prefix_same_as_start_
  // is true and the prefix extractor is not null. In Next() or Prev(),
  // current keys will be checked against this prefix, so that the iterator
  // can be invalidated if the keys in this prefix have been exhausted. Set it
  // with SetUserKey() and read it with GetUserKey().
  IterKey prefix_;

  Status status_;
  Direction direction_;
  bool valid_;
  bool current_entry_is_merged_;
  // True if we know that the current entry's seqnum is 0.
  // This information is used to infer that the next entry will be for another
  // user key.
  bool is_key_seqnum_zero_;
  const bool prefix_same_as_start_;
  // Means that we will pin all data blocks we read as long as the Iterator
  // is not deleted; will be true if ReadOptions::pin_data is true
  const bool pin_thru_lifetime_;

  // Expect the inner iterator to maintain a total order.
  // prefix_extractor_ must be non-NULL if the value is false.
  const bool expect_total_order_inner_iter_;
  ReadTier read_tier_;
  bool fill_cache_;
  bool verify_checksums_;
  // Whether the iterator is allowed to expose blob references. Set to true
  // when the stacked BlobDB implementation is used, false otherwise.
  bool expose_blob_index_;

  bool is_blob_;
  bool arena_mode_;
  // List of operands for merge operator.
  MergeContext merge_context_;
  ReadRangeDelAggregator range_del_agg_;
  LocalStatistics local_stats_;
  PinnedIteratorsManager pinned_iters_mgr_;
#ifdef ROCKSDB_LITE
  ROCKSDB_FIELD_UNUSED
#endif
  DBImpl* db_impl_;
#ifdef ROCKSDB_LITE
  ROCKSDB_FIELD_UNUSED
#endif
  ColumnFamilyData* cfd_;
  const Slice* const timestamp_ub_;
  const Slice* const timestamp_lb_;
  const size_t timestamp_size_;
  std::string saved_timestamp_;

  // Used only if timestamp_lb_ is not nullptr.
  std::string saved_ikey_;
};

// Return a new iterator that converts internal keys (yielded by
// "*internal_iter") that were live at the specified `sequence` number
// into appropriate user keys.
extern Iterator* NewDBIterator(
    Env* env, const ReadOptions& read_options,
    const ImmutableOptions& ioptions,
    const MutableCFOptions& mutable_cf_options,
    const Comparator* user_key_comparator, InternalIterator* internal_iter,
    const Version* version, const SequenceNumber& sequence,
    uint64_t max_sequential_skip_in_iterations, ReadCallback* read_callback,
    DBImpl* db_impl = nullptr, ColumnFamilyData* cfd = nullptr,
    bool expose_blob_index = false);
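
// A hedged sketch of how this factory is typically wired up (the locals
// `cfd`, `sv`, `internal_iter`, `snapshot_seq` and `read_callback` are
// hypothetical stand-ins for objects the caller already owns; in RocksDB
// proper the call is made from DBImpl's iterator-construction path):
//
//   Iterator* db_iter = NewDBIterator(
//       env, read_options, *cfd->ioptions(), sv->mutable_cf_options,
//       cfd->user_comparator(), internal_iter, sv->current, snapshot_seq,
//       sv->mutable_cf_options.max_sequential_skip_in_iterations,
//       read_callback, db_impl, cfd);
//   // The caller owns db_iter and must delete it (or hand ownership to an
//   // arena-backed wrapper such as ArenaWrappedDBIter).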

}  // namespace ROCKSDB_NAMESPACE