mirror of
https://github.com/facebook/rocksdb.git
68a8e6b8fa
Summary:
This diff updates the code to pin the merge operator operands while the merge operation is running, so that we can eliminate the memcpy cost. To do that we need a new public API for FullMerge that replaces std::deque<std::string> with std::vector<Slice>.

This diff is stacked on top of D56493 and D56511.

In this diff we
- Update FullMergeV2 arguments to be encapsulated in MergeOperationInput and MergeOperationOutput, which will make it easier to add new arguments in the future
- Replace std::deque<std::string> with std::vector<Slice> to pass operands
- Replace the MergeContext std::deque with std::vector (based on a simple benchmark I ran: https://gist.github.com/IslamAbdelRahman/78fc86c9ab9f52b1df791e58943fb187)
- Allow FullMergeV2 output to be an existing operand

```
[Everything in Memtable | 10K operands | 10 KB each | 1 operand per key]

DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="mergerandom,readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --merge_keys=10000 --num=10000 --disable_auto_compactions --value_size=10240 --write_buffer_size=1000000000

[FullMergeV2]
readseq      :       0.607 micros/op 1648235 ops/sec; 16121.2 MB/s
readseq      :       0.478 micros/op 2091546 ops/sec; 20457.2 MB/s
readseq      :       0.252 micros/op 3972081 ops/sec; 38850.5 MB/s
readseq      :       0.237 micros/op 4218328 ops/sec; 41259.0 MB/s
readseq      :       0.247 micros/op 4043927 ops/sec; 39553.2 MB/s

[master]
readseq      :       3.935 micros/op 254140 ops/sec;  2485.7 MB/s
readseq      :       3.722 micros/op 268657 ops/sec;  2627.7 MB/s
readseq      :       3.149 micros/op 317605 ops/sec;  3106.5 MB/s
readseq      :       3.125 micros/op 320024 ops/sec;  3130.1 MB/s
readseq      :       4.075 micros/op 245374 ops/sec;  2400.0 MB/s
```

```
[Everything in Memtable | 10K operands | 10 KB each | 10 operands per key]

DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="mergerandom,readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --merge_keys=1000 --num=10000 --disable_auto_compactions --value_size=10240 --write_buffer_size=1000000000

[FullMergeV2]
readseq      :       3.472 micros/op 288018 ops/sec;  2817.1 MB/s
readseq      :       2.304 micros/op 434027 ops/sec;  4245.2 MB/s
readseq      :       1.163 micros/op 859845 ops/sec;  8410.0 MB/s
readseq      :       1.192 micros/op 838926 ops/sec;  8205.4 MB/s
readseq      :       1.250 micros/op 800000 ops/sec;  7824.7 MB/s

[master]
readseq      :      24.025 micros/op  41623 ops/sec;   407.1 MB/s
readseq      :      18.489 micros/op  54086 ops/sec;   529.0 MB/s
readseq      :      18.693 micros/op  53495 ops/sec;   523.2 MB/s
readseq      :      23.621 micros/op  42335 ops/sec;   414.1 MB/s
readseq      :      18.775 micros/op  53262 ops/sec;   521.0 MB/s
```

```
[Everything in Block cache | 10K operands | 10 KB each | 1 operand per key]

[FullMergeV2]
$ DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --num=100000 --db="/dev/shm/merge-random-10K-10KB" --cache_size=1000000000 --use_existing_db --disable_auto_compactions

readseq      :      14.741 micros/op  67837 ops/sec;   663.5 MB/s
readseq      :       1.029 micros/op 971446 ops/sec;  9501.6 MB/s
readseq      :       0.974 micros/op 1026229 ops/sec; 10037.4 MB/s
readseq      :       0.965 micros/op 1036080 ops/sec; 10133.8 MB/s
readseq      :       0.943 micros/op 1060657 ops/sec; 10374.2 MB/s

[master]
readseq      :      16.735 micros/op  59755 ops/sec;   584.5 MB/s
readseq      :       3.029 micros/op 330151 ops/sec;  3229.2 MB/s
readseq      :       3.136 micros/op 318883 ops/sec;  3119.0 MB/s
readseq      :       3.065 micros/op 326245 ops/sec;  3191.0 MB/s
readseq      :       3.014 micros/op 331813 ops/sec;  3245.4 MB/s
```

```
[Everything in Block cache | 10K operands | 10 KB each | 10 operands per key]

DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --num=100000 --db="/dev/shm/merge-random-10-operands-10K-10KB" --cache_size=1000000000 --use_existing_db --disable_auto_compactions

[FullMergeV2]
readseq      :      24.325 micros/op  41109 ops/sec;   402.1 MB/s
readseq      :       1.470 micros/op 680272 ops/sec;  6653.7 MB/s
readseq      :       1.231 micros/op 812347 ops/sec;  7945.5 MB/s
readseq      :       1.091 micros/op 916590 ops/sec;  8965.1 MB/s
readseq      :       1.109 micros/op 901713 ops/sec;  8819.6 MB/s

[master]
readseq      :      27.257 micros/op  36687 ops/sec;   358.8 MB/s
readseq      :       4.443 micros/op 225073 ops/sec;  2201.4 MB/s
readseq      :       5.830 micros/op 171526 ops/sec;  1677.7 MB/s
readseq      :       4.173 micros/op 239635 ops/sec;  2343.8 MB/s
readseq      :       4.150 micros/op 240963 ops/sec;  2356.8 MB/s
```

Test Plan: COMPILE_WITH_ASAN=1 make check -j64

Reviewers: yhchiang, andrewkr, sdong

Reviewed By: sdong

Subscribers: lovro, andrewkr, dhruba

Differential Revision: https://reviews.facebook.net/D57075
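To make the new contract concrete, here is a minimal sketch (illustrative, not part of the diff) of a max-style operator written against the FullMergeV2 interface this diff introduces. It relies on the documented MergeOperationInput/MergeOperationOutput members (key, existing_value, operand_list, logger / new_value, existing_operand); the class name is invented. Pointing existing_operand at a pinned operand instead of copying into new_value is what lets the read path skip the memcpy:

```
#include <vector>

#include "rocksdb/merge_operator.h"
#include "rocksdb/slice.h"

// Sketch of a "max" operator on FullMergeV2. Instead of copying the winning
// bytes into merge_out->new_value, it aliases an existing (pinned) operand
// via merge_out->existing_operand, so no memcpy happens on the read path.
class MaxOperatorSketch : public rocksdb::MergeOperator {
 public:
  bool FullMergeV2(const MergeOperationInput& merge_in,
                   MergeOperationOutput* merge_out) const override {
    // Start from the existing value (if any), then scan the pinned operands.
    rocksdb::Slice max;
    if (merge_in.existing_value != nullptr) {
      max = *merge_in.existing_value;
    }
    for (const rocksdb::Slice& op : merge_in.operand_list) {
      if (max.compare(op) < 0) {
        max = op;
      }
    }
    // The result aliases data that already exists; nothing is copied.
    merge_out->existing_operand = max;
    return true;
  }

  const char* Name() const override { return "MaxOperatorSketch"; }
};
```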
327 lines
12 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

#include "db/merge_helper.h"
|
|
|
|
#include <stdio.h>
|
|
#include <string>
|
|
|
|
#include "db/dbformat.h"
|
|
#include "rocksdb/comparator.h"
|
|
#include "rocksdb/db.h"
|
|
#include "rocksdb/merge_operator.h"
|
|
#include "table/internal_iterator.h"
|
|
#include "util/perf_context_imp.h"
|
|
#include "util/statistics.h"
|
|
|
|

namespace rocksdb {

Status MergeHelper::TimedFullMerge(const MergeOperator* merge_operator,
                                   const Slice& key, const Slice* value,
                                   const std::vector<Slice>& operands,
                                   std::string* result, Logger* logger,
                                   Statistics* statistics, Env* env,
                                   Slice* result_operand) {
  assert(merge_operator != nullptr);

  if (operands.size() == 0) {
    assert(value != nullptr && result != nullptr);
    result->assign(value->data(), value->size());
    return Status::OK();
  }

  bool success;
  Slice tmp_result_operand(nullptr, 0);
  const MergeOperator::MergeOperationInput merge_in(key, value, operands,
                                                    logger);
  MergeOperator::MergeOperationOutput merge_out(*result, tmp_result_operand);
  {
    // Setup to time the merge
    StopWatchNano timer(env, statistics != nullptr);
    PERF_TIMER_GUARD(merge_operator_time_nanos);

    // Do the merge
    success = merge_operator->FullMergeV2(merge_in, &merge_out);

    if (tmp_result_operand.data()) {
      // FullMergeV2 result is an existing operand
      if (result_operand != nullptr) {
        *result_operand = tmp_result_operand;
      } else {
        result->assign(tmp_result_operand.data(), tmp_result_operand.size());
      }
    } else if (result_operand) {
      *result_operand = Slice(nullptr, 0);
    }

    RecordTick(statistics, MERGE_OPERATION_TOTAL_TIME,
               statistics ? timer.ElapsedNanos() : 0);
  }

  if (!success) {
    RecordTick(statistics, NUMBER_MERGE_FAILURES);
    return Status::Corruption("Error: Could not perform merge.");
  }

  return Status::OK();
}
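
A hypothetical caller-side sketch of how the result_operand out-parameter avoids a copy. It assumes TimedFullMerge is callable as a static member (per its declaration in db/merge_helper.h); the wrapper function and the Consume sink are invented for illustration. The returned Slice stays valid only while the underlying operands remain pinned:

```
#include <string>
#include <vector>

#include "db/merge_helper.h"
#include "rocksdb/env.h"
#include "rocksdb/statistics.h"

// Hypothetical caller: prefer the zero-copy result_operand when FullMergeV2
// returned an existing operand; otherwise fall back to the result string.
rocksdb::Status MergeAndConsume(const rocksdb::MergeOperator* merge_operator,
                                const rocksdb::Slice& key,
                                const rocksdb::Slice* existing_value,
                                const std::vector<rocksdb::Slice>& operands,
                                rocksdb::Logger* logger,
                                rocksdb::Statistics* statistics,
                                rocksdb::Env* env) {
  std::string result;
  rocksdb::Slice result_operand(nullptr, 0);
  rocksdb::Status s = rocksdb::MergeHelper::TimedFullMerge(
      merge_operator, key, existing_value, operands, &result, logger,
      statistics, env, &result_operand);
  if (s.ok()) {
    // Non-null data() means the merge picked an existing (pinned) operand,
    // so we can use it directly and skip the copy into `result`.
    rocksdb::Slice merged = (result_operand.data() != nullptr)
                                ? result_operand
                                : rocksdb::Slice(result);
    Consume(merged);  // hypothetical sink; `merged` may alias pinned operands
  }
  return s;
}
```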

// PRE:  iter points to the first merge type entry
// POST: iter points to the first entry beyond the merge process (or the end)
//       keys_, operands_ are updated to reflect the merge result.
//       keys_ stores the list of keys encountered while merging.
//       operands_ stores the list of merge operands encountered while merging.
//       keys_[i] corresponds to operands_[i] for each i.
Status MergeHelper::MergeUntil(InternalIterator* iter,
                               const SequenceNumber stop_before,
                               const bool at_bottom) {
  // Get a copy of the internal key, before it's invalidated by iter->Next()
  // Also maintain the list of merge operands seen.
  assert(HasOperator());
  keys_.clear();
  merge_context_.Clear();
  assert(user_merge_operator_);
  bool first_key = true;

  // We need to parse the internal key again as the parsed key is
  // backed by the internal key!
  // Assume no internal key corruption as it has been successfully parsed
  // by the caller.
  // original_key_is_iter variable is just caching the information:
  // original_key_is_iter == (iter->key().ToString() == original_key)
  bool original_key_is_iter = true;
  std::string original_key = iter->key().ToString();
  // Important:
  // orig_ikey is backed by original_key if keys_.empty()
  // orig_ikey is backed by keys_.back() if !keys_.empty()
  ParsedInternalKey orig_ikey;
  ParseInternalKey(original_key, &orig_ikey);

  Status s;
  bool hit_the_next_user_key = false;
  for (; iter->Valid(); iter->Next(), original_key_is_iter = false) {
    ParsedInternalKey ikey;
    assert(keys_.size() == merge_context_.GetNumOperands());

    if (!ParseInternalKey(iter->key(), &ikey)) {
      // stop at corrupted key
      if (assert_valid_internal_key_) {
        assert(!"Corrupted internal key not expected.");
        return Status::Corruption("Corrupted internal key not expected.");
      }
      break;
    } else if (first_key) {
      assert(user_comparator_->Equal(ikey.user_key, orig_ikey.user_key));
      first_key = false;
    } else if (!user_comparator_->Equal(ikey.user_key, orig_ikey.user_key)) {
      // hit a different user key, stop right here
      hit_the_next_user_key = true;
      break;
    } else if (stop_before && ikey.sequence <= stop_before) {
      // hit an entry that's visible by the previous snapshot, can't touch that
      break;
    }

    // At this point we are guaranteed that we need to process this key.

    assert(IsValueType(ikey.type));
    if (ikey.type != kTypeMerge) {
      if (ikey.type != kTypeValue && ikey.type != kTypeDeletion) {
        // Merge operands can only be used with puts and deletions; single
        // deletions are not supported.
        assert(false);
        // release build doesn't have asserts, so we return error status
        return Status::InvalidArgument(
            "Merge operands can only be used with puts and deletions; single "
            "deletions are not supported.");
      }

      // hit a put/delete
      //   => merge the put value or a nullptr with operands_
      //   => store result in operands_.back() (and update keys_.back())
      //   => change the entry type to kTypeValue for keys_.back()
      // We are done! Success!

      // If there are no operands, just return the Status::OK(). That will
      // cause the compaction iterator to write out the key we're currently
      // at, which is the put/delete we just encountered.
      if (keys_.empty()) {
        return Status::OK();
      }

      // TODO(noetzli) If the merge operator returns false, we are currently
      // (almost) silently dropping the put/delete. That's probably not what we
      // want.
      const Slice val = iter->value();
      const Slice* val_ptr = (kTypeValue == ikey.type) ? &val : nullptr;
      std::string merge_result;
      s = TimedFullMerge(user_merge_operator_, ikey.user_key, val_ptr,
                         merge_context_.GetOperands(), &merge_result, logger_,
                         stats_, env_);

      // We store the result in keys_.back() and operands_.back()
      // if nothing went wrong (i.e.: no operand corruption on disk)
      if (s.ok()) {
        // The original key encountered
        original_key = std::move(keys_.back());
        orig_ikey.type = kTypeValue;
        UpdateInternalKey(&original_key, orig_ikey.sequence, orig_ikey.type);
        keys_.clear();
        merge_context_.Clear();
        keys_.emplace_front(std::move(original_key));
        merge_context_.PushOperand(merge_result);
      }

      // move iter to the next entry
      iter->Next();
      return s;
    } else {
      // hit a merge
      //   => if there is a compaction filter, apply it.
      //   => merge the operand into the front of the operands_ list
      //      if not filtered
      //   => then continue because we haven't yet seen a Put/Delete.
      //
      // Keep queuing keys and operands until we either meet a put / delete
      // request or later did a partial merge.

      Slice value_slice = iter->value();
      // add an operand to the list if:
      // 1) it's included in one of the snapshots. in that case we *must* write
      //    it out, no matter what compaction filter says
      // 2) it's not filtered by a compaction filter
      if (ikey.sequence <= latest_snapshot_ ||
          !FilterMerge(orig_ikey.user_key, value_slice)) {
        if (original_key_is_iter) {
          // this is just an optimization that saves us one memcpy
          keys_.push_front(std::move(original_key));
        } else {
          keys_.push_front(iter->key().ToString());
        }
        if (keys_.size() == 1) {
          // we need to re-anchor the orig_ikey because it was anchored by
          // original_key before
          ParseInternalKey(keys_.back(), &orig_ikey);
        }
        merge_context_.PushOperand(value_slice,
                                   iter->IsValuePinned() /* operand_pinned */);
      }
    }
  }

  if (merge_context_.GetNumOperands() == 0) {
    // we filtered out all the merge operands
    return Status::OK();
  }

  // We are sure we have seen this key's entire history if we are at the
  // last level and exhausted all internal keys of this user key.
  // NOTE: !iter->Valid() does not necessarily mean we hit the
  // beginning of a user key, as versions of a user key might be
  // split into multiple files (even files on the same level)
  // and some files might not be included in the compaction/merge.
  //
  // There are also cases where we have seen the root of history of this
  // key without being sure of it. Then, we simply miss the opportunity
  // to combine the keys. Since VersionSet::SetupOtherInputs() always makes
  // sure that all merge-operands on the same level get compacted together,
  // this will simply lead to these merge operands moving to the next level.
  //
  // So, we only perform the following logic (to merge all operands together
  // without a Put/Delete) if we are certain that we have seen the end of key.
  bool surely_seen_the_beginning = hit_the_next_user_key && at_bottom;
  if (surely_seen_the_beginning) {
    // do a final merge with nullptr as the existing value and say
    // bye to the merge type (it's now converted to a Put)
    assert(kTypeMerge == orig_ikey.type);
    assert(merge_context_.GetNumOperands() >= 1);
    assert(merge_context_.GetNumOperands() == keys_.size());
    std::string merge_result;
    s = TimedFullMerge(user_merge_operator_, orig_ikey.user_key, nullptr,
                       merge_context_.GetOperands(), &merge_result, logger_,
                       stats_, env_);
    if (s.ok()) {
      // The original key encountered
      // We are certain that keys_ is not empty here (see assertions a couple
      // of lines before).
      original_key = std::move(keys_.back());
      orig_ikey.type = kTypeValue;
      UpdateInternalKey(&original_key, orig_ikey.sequence, orig_ikey.type);
      keys_.clear();
      merge_context_.Clear();
      keys_.emplace_front(std::move(original_key));
      merge_context_.PushOperand(merge_result);
    }
  } else {
    // We haven't seen the beginning of the key nor a Put/Delete.
    // Attempt to use the user's associative merge function to
    // merge the stacked merge operands into a single operand.
    //
    // TODO(noetzli) The docblock of MergeUntil suggests that a successful
    // partial merge returns Status::OK(). Should we change the status code
    // after a successful partial merge?
    s = Status::MergeInProgress();
    if (merge_context_.GetNumOperands() >= 2 &&
        merge_context_.GetNumOperands() >= min_partial_merge_operands_) {
      bool merge_success = false;
      std::string merge_result;
      {
        StopWatchNano timer(env_, stats_ != nullptr);
        PERF_TIMER_GUARD(merge_operator_time_nanos);
        merge_success = user_merge_operator_->PartialMergeMulti(
            orig_ikey.user_key,
            std::deque<Slice>(merge_context_.GetOperands().begin(),
                              merge_context_.GetOperands().end()),
            &merge_result, logger_);
        RecordTick(stats_, MERGE_OPERATION_TOTAL_TIME,
                   stats_ ? timer.ElapsedNanosSafe() : 0);
      }
      if (merge_success) {
        // Merging of operands (associative merge) was successful.
        // Replace operands with the merge result
        merge_context_.Clear();
        merge_context_.PushOperand(merge_result);
        keys_.erase(keys_.begin(), keys_.end() - 1);
      }
    }
  }

  return s;
}
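
The associative path above hands the whole operand stack to PartialMergeMulti at once. Below is a minimal sketch of an operator that supports it, assuming operands encode counters as decimal ASCII; the class name and encoding are illustrative, not from this diff. Returning true replaces the stacked operands with the single combined operand, while returning false leaves the stack untouched and MergeUntil returns MergeInProgress:

```
#include <cstdint>
#include <cstdlib>
#include <deque>
#include <string>

#include "rocksdb/merge_operator.h"
#include "rocksdb/slice.h"

// Illustrative sketch: a counter operator whose operands are decimal ASCII.
class CounterOperatorSketch : public rocksdb::MergeOperator {
 public:
  bool FullMergeV2(const MergeOperationInput& merge_in,
                   MergeOperationOutput* merge_out) const override {
    uint64_t total =
        merge_in.existing_value
            ? std::strtoull(merge_in.existing_value->ToString().c_str(),
                            nullptr, 10)
            : 0;
    for (const rocksdb::Slice& op : merge_in.operand_list) {
      total += std::strtoull(op.ToString().c_str(), nullptr, 10);
    }
    merge_out->new_value = std::to_string(total);
    return true;
  }

  bool PartialMergeMulti(const rocksdb::Slice& key,
                         const std::deque<rocksdb::Slice>& operand_list,
                         std::string* new_value,
                         rocksdb::Logger* logger) const override {
    // Addition is associative, so any contiguous run of operands can be
    // collapsed into one operand without seeing the base value.
    uint64_t total = 0;
    for (const rocksdb::Slice& op : operand_list) {
      total += std::strtoull(op.ToString().c_str(), nullptr, 10);
    }
    *new_value = std::to_string(total);
    return true;
  }

  const char* Name() const override { return "CounterOperatorSketch"; }
};
```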

MergeOutputIterator::MergeOutputIterator(const MergeHelper* merge_helper)
    : merge_helper_(merge_helper) {
  it_keys_ = merge_helper_->keys().rend();
  it_values_ = merge_helper_->values().rend();
}

void MergeOutputIterator::SeekToFirst() {
  const auto& keys = merge_helper_->keys();
  const auto& values = merge_helper_->values();
  assert(keys.size() == values.size());
  it_keys_ = keys.rbegin();
  it_values_ = values.rbegin();
}

void MergeOutputIterator::Next() {
  ++it_keys_;
  ++it_values_;
}
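
A hypothetical sketch of how a consumer might drain the merge results through this iterator; it assumes the Valid()/key()/value() accessors declared alongside SeekToFirst()/Next() in db/merge_helper.h, and the EmitEntry sink is invented:

```
#include "db/merge_helper.h"

// Hypothetical consumer: walk the merge results in output order.
void WriteOutMergeResults(const rocksdb::MergeHelper& merge_helper) {
  rocksdb::MergeOutputIterator it(&merge_helper);
  for (it.SeekToFirst(); it.Valid(); it.Next()) {
    EmitEntry(it.key(), it.value());  // hypothetical sink
  }
}
```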

bool MergeHelper::FilterMerge(const Slice& user_key, const Slice& value_slice) {
  if (compaction_filter_ == nullptr) {
    return false;
  }
  if (stats_ != nullptr) {
    filter_timer_.Start();
  }
  bool to_delete =
      compaction_filter_->FilterMergeOperand(level_, user_key, value_slice);
  total_filter_time_ += filter_timer_.ElapsedNanosSafe();
  return to_delete;
}
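
FilterMerge delegates the keep/drop decision to the compaction filter's FilterMergeOperand hook, where returning true drops the operand. A minimal sketch of a filter using that hook, assuming a FilterMergeOperand(int, const Slice&, const Slice&) override point matching the call above; the class and its drop policy are invented for illustration:

```
#include <string>

#include "rocksdb/compaction_filter.h"
#include "rocksdb/slice.h"

// Illustrative sketch: drop empty merge operands during compaction.
class DropEmptyOperandsFilter : public rocksdb::CompactionFilter {
 public:
  // Returning true tells MergeHelper::FilterMerge to discard this operand
  // (unless a snapshot forces it to be kept).
  bool FilterMergeOperand(int level, const rocksdb::Slice& key,
                          const rocksdb::Slice& operand) const override {
    (void)level;
    (void)key;
    return operand.empty();
  }

  // Keep all non-merge entries untouched.
  bool Filter(int level, const rocksdb::Slice& key,
              const rocksdb::Slice& existing_value, std::string* new_value,
              bool* value_changed) const override {
    return false;
  }

  const char* Name() const override { return "DropEmptyOperandsFilter"; }
};
```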

}  // namespace rocksdb