// File: db/blob/blob_counting_iterator.h

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#pragma once
#include <cassert>
#include "db/blob/blob_garbage_meter.h"
#include "rocksdb/rocksdb_namespace.h"
#include "rocksdb/status.h"
#include "table/internal_iterator.h"
#include "test_util/sync_point.h"
namespace ROCKSDB_NAMESPACE {
// An internal iterator that passes each key-value encountered to
// BlobGarbageMeter as inflow in order to measure the total number and size of
// blobs in the compaction input on a per-blob file basis.
class BlobCountingIterator : public InternalIterator {
public:
BlobCountingIterator(InternalIterator* iter,
BlobGarbageMeter* blob_garbage_meter)
: iter_(iter), blob_garbage_meter_(blob_garbage_meter) {
assert(iter_);
assert(blob_garbage_meter_);
UpdateAndCountBlobIfNeeded();
}
bool Valid() const override { return iter_->Valid() && status_.ok(); }
void SeekToFirst() override {
iter_->SeekToFirst();
UpdateAndCountBlobIfNeeded();
}
void SeekToLast() override {
iter_->SeekToLast();
UpdateAndCountBlobIfNeeded();
}
void Seek(const Slice& target) override {
iter_->Seek(target);
UpdateAndCountBlobIfNeeded();
}
void SeekForPrev(const Slice& target) override {
iter_->SeekForPrev(target);
UpdateAndCountBlobIfNeeded();
}
void Next() override {
assert(Valid());
iter_->Next();
UpdateAndCountBlobIfNeeded();
}
bool NextAndGetResult(IterateResult* result) override {
assert(Valid());
const bool res = iter_->NextAndGetResult(result);
UpdateAndCountBlobIfNeeded();
return res;
}
void Prev() override {
assert(Valid());
iter_->Prev();
UpdateAndCountBlobIfNeeded();
}
Slice key() const override {
assert(Valid());
return iter_->key();
}
Slice user_key() const override {
assert(Valid());
return iter_->user_key();
}
Slice value() const override {
assert(Valid());
return iter_->value();
}
Status status() const override { return status_; }
bool PrepareValue() override {
assert(Valid());
return iter_->PrepareValue();
}
bool MayBeOutOfLowerBound() override {
assert(Valid());
return iter_->MayBeOutOfLowerBound();
}
IterBoundCheck UpperBoundCheckResult() override {
assert(Valid());
return iter_->UpperBoundCheckResult();
}
void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) override {
iter_->SetPinnedItersMgr(pinned_iters_mgr);
}
bool IsKeyPinned() const override {
assert(Valid());
return iter_->IsKeyPinned();
}
bool IsValuePinned() const override {
assert(Valid());
return iter_->IsValuePinned();
}
Status GetProperty(std::string prop_name, std::string* prop) override {
return iter_->GetProperty(prop_name, prop);
}
Refactor AddRangeDels() + consider range tombstone during compaction file cutting (#11113) Summary: A second attempt after https://github.com/facebook/rocksdb/issues/10802, with bug fixes and refactoring. This PR updates compaction logic to take range tombstones into account when determining whether to cut the current compaction output file (https://github.com/facebook/rocksdb/issues/4811). Before this change, only point keys were considered, and range tombstones could cause large compactions. For example, if the current compaction outputs is a range tombstone [a, b) and 2 point keys y, z, they would be added to the same file, and may overlap with too many files in the next level and cause a large compaction in the future. This PR also includes ajkr's effort to simplify the logic to add range tombstones to compaction output files in `AddRangeDels()` ([https://github.com/facebook/rocksdb/issues/11078](https://github.com/facebook/rocksdb/pull/11078#issuecomment-1386078861)). The main change is for `CompactionIterator` to emit range tombstone start keys to be processed by `CompactionOutputs`. A new class `CompactionMergingIterator` is introduced to replace `MergingIterator` under `CompactionIterator` to enable emitting of range tombstone start keys. Further improvement after this PR include cutting compaction output at some grandparent boundary key (instead of the next output key) when cutting within a range tombstone to reduce overlap with grandparents. 
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11113 Test Plan: * added unit test in db_range_del_test * crash test with a small key range: `python3 tools/db_crashtest.py blackbox --simple --max_key=100 --interval=600 --write_buffer_size=262144 --target_file_size_base=256 --max_bytes_for_level_base=262144 --block_size=128 --value_size_mult=33 --subcompactions=10 --use_multiget=1 --delpercent=3 --delrangepercent=2 --verify_iterator_with_expected_state_one_in=2 --num_iterations=10` Reviewed By: ajkr Differential Revision: D42655709 Pulled By: cbi42 fbshipit-source-id: 8367e36ef5640e8f21c14a3855d4a8d6e360a34c
2023-02-22 20:28:18 +00:00
bool IsDeleteRangeSentinelKey() const override {
return iter_->IsDeleteRangeSentinelKey();
}
private:
void UpdateAndCountBlobIfNeeded() {
assert(!iter_->Valid() || iter_->status().ok());
if (!iter_->Valid()) {
status_ = iter_->status();
return;
}
TEST_SYNC_POINT(
"BlobCountingIterator::UpdateAndCountBlobIfNeeded:ProcessInFlow");
status_ = blob_garbage_meter_->ProcessInFlow(key(), value());
}
InternalIterator* iter_;
BlobGarbageMeter* blob_garbage_meter_;
Status status_;
};
} // namespace ROCKSDB_NAMESPACE