7528130e38
Summary: This allows tombstone fragmenting to only be performed when the table is opened, and cached for subsequent accesses. On the same DB used in #4449, running `readrandom` results in the following:

```
readrandom : 0.983 micros/op 1017076 ops/sec; 78.3 MB/s (63103 of 100000 found)
```

Now that Get performance in the presence of range tombstones is reasonable, I also compared the performance between a DB with range tombstones, "expanded" range tombstones (several point tombstones that cover the same keys the equivalent range tombstone would cover, a common workaround for DeleteRange), and no range tombstones. The created DBs had 5 million keys each, and DeleteRange was called at regular intervals (depending on the total number of range tombstones being written) after 4.5 million Puts. The table below summarizes the results of a `readwhilewriting` benchmark (in order to provide somewhat more realistic results):

```
Tombstones?       | avg micros/op | stddev micros/op | avg ops/s    | stddev ops/s
----------------- | ------------- | ---------------- | ------------ | ------------
None              | 0.6186        | 0.04637          | 1,625,252.90 | 124,679.41
500 Expanded      | 0.6019        | 0.03628          | 1,666,670.40 | 101,142.65
500 Unexpanded    | 0.6435        | 0.03994          | 1,559,979.40 | 104,090.52
1k Expanded       | 0.6034        | 0.04349          | 1,665,128.10 | 125,144.57
1k Unexpanded     | 0.6261        | 0.03093          | 1,600,457.50 | 79,024.94
5k Expanded       | 0.6163        | 0.05926          | 1,636,668.80 | 154,888.85
5k Unexpanded     | 0.6402        | 0.04002          | 1,567,804.70 | 100,965.55
10k Expanded      | 0.6036        | 0.05105          | 1,667,237.70 | 142,830.36
10k Unexpanded    | 0.6128        | 0.02598          | 1,634,633.40 | 72,161.82
25k Expanded      | 0.6198        | 0.04542          | 1,620,980.50 | 116,662.93
25k Unexpanded    | 0.5478        | 0.0362           | 1,833,059.10 | 121,233.81
50k Expanded      | 0.5104        | 0.04347          | 1,973,107.90 | 184,073.49
50k Unexpanded    | 0.4528        | 0.03387          | 2,219,034.50 | 170,984.32
```

After a large enough quantity of range tombstones are written, range tombstone Gets can become faster than reading from an equivalent DB with several point tombstones.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/4493
Differential Revision: D10842844
Pulled By: abhimadan
fbshipit-source-id: a7d44534f8120e6aabb65779d26c6b9df954c509
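For context on the "expanded" workaround mentioned above, the sketch below contrasts a single DeleteRange call with the equivalent expanded point tombstones. It is illustrative only: the DB path and key range are hypothetical, default options are used, and this is not the db_bench harness behind the numbers above.

```
#include <cstdio>
#include "rocksdb/db.h"

int main() {
  rocksdb::DB* db = nullptr;
  rocksdb::Options options;
  options.create_if_missing = true;
  // Hypothetical path for the sketch.
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/range_del_example", &db);
  if (!s.ok()) return 1;

  // Unexpanded: a single range tombstone covering ["key000100", "key000200").
  s = db->DeleteRange(rocksdb::WriteOptions(), db->DefaultColumnFamily(),
                      "key000100", "key000200");

  // Expanded: the workaround of writing one point tombstone per covered key.
  char key[16];
  for (int i = 100; i < 200 && s.ok(); ++i) {
    std::snprintf(key, sizeof(key), "key%06d", i);
    s = db->Delete(rocksdb::WriteOptions(), key);
  }

  delete db;
  return s.ok() ? 0 : 1;
}
```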
131 lines · 4.5 KiB · C++
// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#pragma once

#include <list>
#include <memory>
#include <string>
#include <vector>

#include "db/dbformat.h"
#include "db/pinned_iterators_manager.h"
#include "rocksdb/status.h"
#include "table/internal_iterator.h"

namespace rocksdb {

struct FragmentedRangeTombstoneList {
 public:
  FragmentedRangeTombstoneList(
      std::unique_ptr<InternalIterator> unfragmented_tombstones,
      const InternalKeyComparator& icmp, bool one_time_use,
      SequenceNumber snapshot = kMaxSequenceNumber);

  std::vector<RangeTombstone>::const_iterator begin() const {
    return tombstones_.begin();
  }

  std::vector<RangeTombstone>::const_iterator end() const {
    return tombstones_.end();
  }

  bool empty() const { return tombstones_.size() == 0; }

 private:
  // Given an ordered range tombstone iterator unfragmented_tombstones,
  // "fragment" the tombstones into non-overlapping pieces, and store them in
  // tombstones_.
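  // For example (illustrative only): the overlapping tombstones [a, e) @ 4 and
  // [c, g) @ 7 fragment into the non-overlapping pieces [a, c), [c, e), and
  // [e, g). The middle piece is covered by both sequence numbers 7 and 4; how
  // many of those sequence numbers are materialized per fragment depends on
  // one_time_use and snapshot.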
  void FragmentTombstones(
      std::unique_ptr<InternalIterator> unfragmented_tombstones,
      const InternalKeyComparator& icmp, bool one_time_use,
      SequenceNumber snapshot = kMaxSequenceNumber);

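  // tombstones_ holds the resulting fragments, sorted by start key (ties
  // broken by decreasing sequence number); pinned_slices_ owns key memory that
  // had to be copied out of the input iterator, while pinned_iters_mgr_ keeps
  // the input iterator pinned so copies can be avoided when its keys are
  // already pinned (see the .cc file for the exact ownership rules).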
  std::vector<RangeTombstone> tombstones_;
  std::list<std::string> pinned_slices_;
  PinnedIteratorsManager pinned_iters_mgr_;
};

// FragmentedRangeTombstoneIterator converts an InternalIterator of a range-del
// meta block into an iterator over non-overlapping tombstone fragments. The
// tombstone fragmentation process should be more efficient than the range
// tombstone collapsing algorithm in RangeDelAggregator because this leverages
// the internal key ordering already provided by the input iterator, if
// applicable (when the iterator is unsorted, a new sorted iterator is created
// before proceeding). If there are few overlaps, creating a
// FragmentedRangeTombstoneIterator should be O(n), while the RangeDelAggregator
// tombstone collapsing is always O(n log n).
class FragmentedRangeTombstoneIterator : public InternalIterator {
 public:
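  // The raw-pointer overload assumes the caller keeps the tombstone list alive
  // for the iterator's lifetime; the shared_ptr overload additionally retains
  // a reference (tombstones_ref_ below), which is what lets a fragmented list
  // cached at table-open time be shared across subsequent accesses.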
  FragmentedRangeTombstoneIterator(
      const FragmentedRangeTombstoneList* tombstones,
      const InternalKeyComparator& icmp);
  FragmentedRangeTombstoneIterator(
      const std::shared_ptr<const FragmentedRangeTombstoneList>& tombstones,
      const InternalKeyComparator& icmp);
  void SeekToFirst() override;
  void SeekToLast() override;
  void Seek(const Slice& target) override;
  void SeekForPrev(const Slice& target) override;
  void Next() override;
  void Prev() override;
  bool Valid() const override;
  Slice key() const override {
    MaybePinKey();
    return current_start_key_.Encode();
  }
  Slice value() const override { return pos_->end_key_; }
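  // key() points into current_start_key_, which is re-encoded whenever the
  // position changes, so it is not pinned; value() points into memory owned by
  // the tombstone list, which stays valid for the iterator's lifetime, so it
  // is reported as pinned.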
  bool IsKeyPinned() const override { return false; }
  bool IsValuePinned() const override { return true; }
  Status status() const override { return Status::OK(); }

  Slice user_key() const { return pos_->start_key_; }
  SequenceNumber seq() const { return pos_->seq_; }

 private:
  struct FragmentedRangeTombstoneComparator {
    explicit FragmentedRangeTombstoneComparator(const Comparator* c) : cmp(c) {}

    bool operator()(const RangeTombstone& a, const RangeTombstone& b) const {
      int user_key_cmp = cmp->Compare(a.start_key_, b.start_key_);
      if (user_key_cmp != 0) {
        return user_key_cmp < 0;
      }
      return a.seq_ > b.seq_;
    }

    const Comparator* cmp;
  };

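  // Re-encode current_start_key_ only when the position has changed since the
  // last call, so repeated key() calls at the same position do not pay for
  // re-encoding the internal key.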
  void MaybePinKey() const {
    if (pos_ != tombstones_->end() && pinned_pos_ != pos_) {
      current_start_key_.Set(pos_->start_key_, pos_->seq_, kTypeRangeDeletion);
      pinned_pos_ = pos_;
    }
  }

  void ParseKey(ParsedInternalKey* parsed) const {
    parsed->user_key = pos_->start_key_;
    parsed->sequence = pos_->seq_;
    parsed->type = kTypeRangeDeletion;
  }

  const FragmentedRangeTombstoneComparator tombstone_cmp_;
  const InternalKeyComparator* icmp_;
  const Comparator* ucmp_;
  std::shared_ptr<const FragmentedRangeTombstoneList> tombstones_ref_;
  const FragmentedRangeTombstoneList* tombstones_;
  std::vector<RangeTombstone>::const_iterator pos_;
  mutable std::vector<RangeTombstone>::const_iterator pinned_pos_;
  mutable InternalKey current_start_key_;
  PinnedIteratorsManager pinned_iters_mgr_;
};

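// Returns the sequence number of the most recent tombstone fragment reachable
// through tombstone_iter that covers `key` (under ucmp), or 0 if no fragment
// covers it.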
SequenceNumber MaxCoveringTombstoneSeqnum(
    FragmentedRangeTombstoneIterator* tombstone_iter, const Slice& key,
    const Comparator* ucmp);

}  // namespace rocksdb
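To make the fragmentation idea in the comments above concrete, here is a small standalone sketch using plain structs rather than the RocksDB types declared in this header. It is a deliberately simple O(n²) illustration, whereas FragmentTombstones performs a single sweep over sorted input, and it keeps only the largest covering sequence number per fragment.

```
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <set>
#include <string>
#include <vector>

// Each tombstone deletes keys in [start, end) at sequence number seq.
struct SimpleTombstone {
  std::string start, end;
  uint64_t seq;
};

// Split overlapping tombstones into non-overlapping fragments, tagging each
// fragment with the largest sequence number that covers it (a simplification:
// the real list can keep one entry per covering sequence number).
std::vector<SimpleTombstone> Fragment(const std::vector<SimpleTombstone>& input) {
  std::set<std::string> bounds;  // every start/end key delimits a fragment
  for (const auto& t : input) {
    bounds.insert(t.start);
    bounds.insert(t.end);
  }
  std::vector<SimpleTombstone> out;
  for (auto it = bounds.begin(); it != bounds.end(); ++it) {
    auto next = std::next(it);
    if (next == bounds.end()) break;
    uint64_t max_seq = 0;
    bool covered = false;
    for (const auto& t : input) {
      if (t.start <= *it && *next <= t.end) {  // t fully covers [*it, *next)
        covered = true;
        max_seq = std::max(max_seq, t.seq);
      }
    }
    if (covered) out.push_back({*it, *next, max_seq});
  }
  return out;
}

int main() {
  // [a, e) @ 4 and [c, g) @ 7 fragment into [a, c) @ 4, [c, e) @ 7, [e, g) @ 7.
  for (const auto& f : Fragment({{"a", "e", 4}, {"c", "g", 7}})) {
    std::cout << "[" << f.start << ", " << f.end << ") @ " << f.seq << "\n";
  }
  return 0;
}
```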