mirror of https://github.com/facebook/rocksdb.git
synced 2024-11-26 16:30:56 +00:00
68a8e6b8fa
Summary:
This diff updates the code to pin the merge operator operands while the merge operation is running, so that we can eliminate the memcpy cost. To do that we need a new public API for FullMerge that replaces the std::deque<std::string> with std::vector<Slice>.

This diff is stacked on top of D56493 and D56511.

In this diff we:

- Update FullMergeV2 arguments to be encapsulated in MergeOperationInput and MergeOperationOutput, which will make it easier to add new arguments in the future
- Replace std::deque<std::string> with std::vector<Slice> to pass operands
- Replace the MergeContext std::deque with std::vector (based on a simple benchmark I ran: https://gist.github.com/IslamAbdelRahman/78fc86c9ab9f52b1df791e58943fb187)
- Allow FullMergeV2 output to be an existing operand

```
[Everything in Memtable | 10K operands | 10 KB each | 1 operand per key]
DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="mergerandom,readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --merge_keys=10000 --num=10000 --disable_auto_compactions --value_size=10240 --write_buffer_size=1000000000

[FullMergeV2]
readseq : 0.607 micros/op 1648235 ops/sec; 16121.2 MB/s
readseq : 0.478 micros/op 2091546 ops/sec; 20457.2 MB/s
readseq : 0.252 micros/op 3972081 ops/sec; 38850.5 MB/s
readseq : 0.237 micros/op 4218328 ops/sec; 41259.0 MB/s
readseq : 0.247 micros/op 4043927 ops/sec; 39553.2 MB/s

[master]
readseq : 3.935 micros/op 254140 ops/sec; 2485.7 MB/s
readseq : 3.722 micros/op 268657 ops/sec; 2627.7 MB/s
readseq : 3.149 micros/op 317605 ops/sec; 3106.5 MB/s
readseq : 3.125 micros/op 320024 ops/sec; 3130.1 MB/s
readseq : 4.075 micros/op 245374 ops/sec; 2400.0 MB/s
```

```
[Everything in Memtable | 10K operands | 10 KB each | 10 operands per key]
DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="mergerandom,readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --merge_keys=1000 --num=10000 --disable_auto_compactions --value_size=10240 --write_buffer_size=1000000000

[FullMergeV2]
readseq : 3.472 micros/op 288018 ops/sec; 2817.1 MB/s
readseq : 2.304 micros/op 434027 ops/sec; 4245.2 MB/s
readseq : 1.163 micros/op 859845 ops/sec; 8410.0 MB/s
readseq : 1.192 micros/op 838926 ops/sec; 8205.4 MB/s
readseq : 1.250 micros/op 800000 ops/sec; 7824.7 MB/s

[master]
readseq : 24.025 micros/op 41623 ops/sec; 407.1 MB/s
readseq : 18.489 micros/op 54086 ops/sec; 529.0 MB/s
readseq : 18.693 micros/op 53495 ops/sec; 523.2 MB/s
readseq : 23.621 micros/op 42335 ops/sec; 414.1 MB/s
readseq : 18.775 micros/op 53262 ops/sec; 521.0 MB/s
```

```
[Everything in Block cache | 10K operands | 10 KB each | 1 operand per key]

[FullMergeV2]
$ DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --num=100000 --db="/dev/shm/merge-random-10K-10KB" --cache_size=1000000000 --use_existing_db --disable_auto_compactions
readseq : 14.741 micros/op 67837 ops/sec; 663.5 MB/s
readseq : 1.029 micros/op 971446 ops/sec; 9501.6 MB/s
readseq : 0.974 micros/op 1026229 ops/sec; 10037.4 MB/s
readseq : 0.965 micros/op 1036080 ops/sec; 10133.8 MB/s
readseq : 0.943 micros/op 1060657 ops/sec; 10374.2 MB/s

[master]
readseq : 16.735 micros/op 59755 ops/sec; 584.5 MB/s
readseq : 3.029 micros/op 330151 ops/sec; 3229.2 MB/s
readseq : 3.136 micros/op 318883 ops/sec; 3119.0 MB/s
readseq : 3.065 micros/op 326245 ops/sec; 3191.0 MB/s
readseq : 3.014 micros/op 331813 ops/sec; 3245.4 MB/s
```

```
[Everything in Block cache | 10K operands | 10 KB each | 10 operands per key]
DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --num=100000 --db="/dev/shm/merge-random-10-operands-10K-10KB" --cache_size=1000000000 --use_existing_db --disable_auto_compactions

[FullMergeV2]
readseq : 24.325 micros/op 41109 ops/sec; 402.1 MB/s
readseq : 1.470 micros/op 680272 ops/sec; 6653.7 MB/s
readseq : 1.231 micros/op 812347 ops/sec; 7945.5 MB/s
readseq : 1.091 micros/op 916590 ops/sec; 8965.1 MB/s
readseq : 1.109 micros/op 901713 ops/sec; 8819.6 MB/s

[master]
readseq : 27.257 micros/op 36687 ops/sec; 358.8 MB/s
readseq : 4.443 micros/op 225073 ops/sec; 2201.4 MB/s
readseq : 5.830 micros/op 171526 ops/sec; 1677.7 MB/s
readseq : 4.173 micros/op 239635 ops/sec; 2343.8 MB/s
readseq : 4.150 micros/op 240963 ops/sec; 2356.8 MB/s
```

Test Plan: COMPILE_WITH_ASAN=1 make check -j64

Reviewers: yhchiang, andrewkr, sdong

Reviewed By: sdong

Subscribers: lovro, andrewkr, dhruba

Differential Revision: https://reviews.facebook.net/D57075
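For illustration, here is a minimal sketch of a merge operator written against the FullMergeV2 interface this diff introduces. The class name MaxOperator is hypothetical; MergeOperationInput, MergeOperationOutput, and Slice come from rocksdb/merge_operator.h and rocksdb/slice.h. Returning the result through merge_out->existing_operand lets the output alias a pinned input operand, which is exactly the memcpy this change avoids:

```
#include "rocksdb/merge_operator.h"
#include "rocksdb/slice.h"

// Hypothetical example only: keeps the byte-wise largest of the existing
// value and all operands.
class MaxOperator : public rocksdb::MergeOperator {
 public:
  bool FullMergeV2(const MergeOperationInput& merge_in,
                   MergeOperationOutput* merge_out) const override {
    // existing_operand starts out empty; use it to track the current winner.
    rocksdb::Slice& max = merge_out->existing_operand;
    if (merge_in.existing_value) {
      max = *merge_in.existing_value;
    }
    // Operands are pinned for the duration of the merge, so comparing
    // Slices directly involves no copying.
    for (const rocksdb::Slice& op : merge_in.operand_list) {
      if (max.compare(op) < 0) {
        max = op;
      }
    }
    // Leaving existing_operand set (instead of filling new_value) makes the
    // output an alias of one of the inputs.
    return true;
  }

  const char* Name() const override { return "MaxOperator"; }
};
```

The TtlMergeOperator in the file below shows the other side of this contract: a wrapper that needs to modify the result must first copy existing_operand into new_value before appending its timestamp.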
341 lines
11 KiB
C++
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#ifndef ROCKSDB_LITE
#include <deque>
#include <string>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/compaction_filter.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/utilities/utility_db.h"
#include "rocksdb/utilities/db_ttl.h"
#include "db/db_impl.h"

#ifdef _WIN32
// Windows API macro interference
#undef GetCurrentTime
#endif

namespace rocksdb {

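// StackableDB that implements TTL support: every stored value carries a
// 4-byte (kTSLength) write timestamp as a suffix, and entries whose
// timestamp is older than the configured ttl are treated as stale.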
class DBWithTTLImpl : public DBWithTTL {
 public:
  static void SanitizeOptions(int32_t ttl, ColumnFamilyOptions* options,
                              Env* env);

  explicit DBWithTTLImpl(DB* db);

  virtual ~DBWithTTLImpl();

  Status CreateColumnFamilyWithTtl(const ColumnFamilyOptions& options,
                                   const std::string& column_family_name,
                                   ColumnFamilyHandle** handle,
                                   int ttl) override;

  Status CreateColumnFamily(const ColumnFamilyOptions& options,
                            const std::string& column_family_name,
                            ColumnFamilyHandle** handle) override;

  using StackableDB::Put;
  virtual Status Put(const WriteOptions& options,
                     ColumnFamilyHandle* column_family, const Slice& key,
                     const Slice& val) override;

  using StackableDB::Get;
  virtual Status Get(const ReadOptions& options,
                     ColumnFamilyHandle* column_family, const Slice& key,
                     std::string* value) override;

  using StackableDB::MultiGet;
  virtual std::vector<Status> MultiGet(
      const ReadOptions& options,
      const std::vector<ColumnFamilyHandle*>& column_family,
      const std::vector<Slice>& keys,
      std::vector<std::string>* values) override;

  using StackableDB::KeyMayExist;
  virtual bool KeyMayExist(const ReadOptions& options,
                           ColumnFamilyHandle* column_family, const Slice& key,
                           std::string* value,
                           bool* value_found = nullptr) override;

  using StackableDB::Merge;
  virtual Status Merge(const WriteOptions& options,
                       ColumnFamilyHandle* column_family, const Slice& key,
                       const Slice& value) override;

  virtual Status Write(const WriteOptions& opts, WriteBatch* updates) override;

  using StackableDB::NewIterator;
  virtual Iterator* NewIterator(const ReadOptions& opts,
                                ColumnFamilyHandle* column_family) override;

  virtual DB* GetBaseDB() override { return db_; }

  static bool IsStale(const Slice& value, int32_t ttl, Env* env);

  static Status AppendTS(const Slice& val, std::string* val_with_ts, Env* env);

  static Status SanityCheckTimestamp(const Slice& str);

  static Status StripTS(std::string* str);

  static const uint32_t kTSLength = sizeof(int32_t);  // size of timestamp

  static const int32_t kMinTimestamp = 1368146402;  // 05/09/2013:5:40PM GMT-8

  static const int32_t kMaxTimestamp = 2147483647;  // 01/18/2038:7:14PM GMT-8
};

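// Iterator wrapper that strips the kTSLength timestamp suffix from each
// value before returning it to the caller.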
class TtlIterator : public Iterator {

 public:
  explicit TtlIterator(Iterator* iter) : iter_(iter) { assert(iter_); }

  ~TtlIterator() { delete iter_; }

  bool Valid() const override { return iter_->Valid(); }

  void SeekToFirst() override { iter_->SeekToFirst(); }

  void SeekToLast() override { iter_->SeekToLast(); }

  void Seek(const Slice& target) override { iter_->Seek(target); }

  void Next() override { iter_->Next(); }

  void Prev() override { iter_->Prev(); }

  Slice key() const override { return iter_->key(); }

  int32_t timestamp() const {
    return DecodeFixed32(iter_->value().data() + iter_->value().size() -
                         DBWithTTLImpl::kTSLength);
  }

  Slice value() const override {
    // TODO: handle timestamp corruption like in general iterator semantics
    assert(DBWithTTLImpl::SanityCheckTimestamp(iter_->value()).ok());
    Slice trimmed_value = iter_->value();
    trimmed_value.size_ -= DBWithTTLImpl::kTSLength;
    return trimmed_value;
  }

  Status status() const override { return iter_->status(); }

 private:
  Iterator* iter_;
};

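// Compaction filter that drops entries whose timestamp has expired and
// otherwise delegates to an optional user compaction filter, hiding the
// timestamp suffix from it.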
class TtlCompactionFilter : public CompactionFilter {
 public:
  TtlCompactionFilter(
      int32_t ttl, Env* env, const CompactionFilter* user_comp_filter,
      std::unique_ptr<const CompactionFilter> user_comp_filter_from_factory =
          nullptr)
      : ttl_(ttl),
        env_(env),
        user_comp_filter_(user_comp_filter),
        user_comp_filter_from_factory_(
            std::move(user_comp_filter_from_factory)) {
    // Unlike the merge operator, the compaction filter is necessary for TTL,
    // hence this would be called even if the user doesn't specify any
    // compaction filter.
    if (!user_comp_filter_) {
      user_comp_filter_ = user_comp_filter_from_factory_.get();
    }
  }

  virtual bool Filter(int level, const Slice& key, const Slice& old_val,
                      std::string* new_val, bool* value_changed) const
      override {
    if (DBWithTTLImpl::IsStale(old_val, ttl_, env_)) {
      return true;
    }
    if (user_comp_filter_ == nullptr) {
      return false;
    }
    assert(old_val.size() >= DBWithTTLImpl::kTSLength);
    Slice old_val_without_ts(old_val.data(),
                             old_val.size() - DBWithTTLImpl::kTSLength);
    if (user_comp_filter_->Filter(level, key, old_val_without_ts, new_val,
                                  value_changed)) {
      return true;
    }
    if (*value_changed) {
      new_val->append(
          old_val.data() + old_val.size() - DBWithTTLImpl::kTSLength,
          DBWithTTLImpl::kTSLength);
    }
    return false;
  }

  virtual const char* Name() const override { return "Delete By TTL"; }

 private:
  int32_t ttl_;
  Env* env_;
  const CompactionFilter* user_comp_filter_;
  std::unique_ptr<const CompactionFilter> user_comp_filter_from_factory_;
};

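// Factory that wraps an optional user CompactionFilterFactory so that every
// filter it creates is a TtlCompactionFilter.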
class TtlCompactionFilterFactory : public CompactionFilterFactory {
 public:
  TtlCompactionFilterFactory(
      int32_t ttl, Env* env,
      std::shared_ptr<CompactionFilterFactory> comp_filter_factory)
      : ttl_(ttl), env_(env), user_comp_filter_factory_(comp_filter_factory) {}

  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
      const CompactionFilter::Context& context) override {
    std::unique_ptr<const CompactionFilter> user_comp_filter_from_factory =
        nullptr;
    if (user_comp_filter_factory_) {
      user_comp_filter_from_factory =
          user_comp_filter_factory_->CreateCompactionFilter(context);
    }

    return std::unique_ptr<TtlCompactionFilter>(new TtlCompactionFilter(
        ttl_, env_, nullptr, std::move(user_comp_filter_from_factory)));
  }

  virtual const char* Name() const override {
    return "TtlCompactionFilterFactory";
  }

 private:
  int32_t ttl_;
  Env* env_;
  std::shared_ptr<CompactionFilterFactory> user_comp_filter_factory_;
};

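// Merge operator wrapper: strips the timestamp suffix from the existing
// value and from every operand before delegating to the user's merge
// operator, then appends a fresh timestamp to the merge result.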
class TtlMergeOperator : public MergeOperator {

 public:
  explicit TtlMergeOperator(const std::shared_ptr<MergeOperator>& merge_op,
                            Env* env)
      : user_merge_op_(merge_op), env_(env) {
    assert(merge_op);
    assert(env);
  }

  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
                           MergeOperationOutput* merge_out) const override {
    const uint32_t ts_len = DBWithTTLImpl::kTSLength;
    if (merge_in.existing_value && merge_in.existing_value->size() < ts_len) {
      Log(InfoLogLevel::ERROR_LEVEL, merge_in.logger,
          "Error: Could not remove timestamp from existing value.");
      return false;
    }

    // Strip the timestamp from each operand before passing it to
    // user_merge_op_
    std::vector<Slice> operands_without_ts;
    for (const auto& operand : merge_in.operand_list) {
      if (operand.size() < ts_len) {
        Log(InfoLogLevel::ERROR_LEVEL, merge_in.logger,
            "Error: Could not remove timestamp from operand value.");
        return false;
      }
      operands_without_ts.push_back(operand);
      operands_without_ts.back().remove_suffix(ts_len);
    }

    // Apply the user merge operator (store result in *new_value)
    bool good = true;
    MergeOperationOutput user_merge_out(merge_out->new_value,
                                        merge_out->existing_operand);
    if (merge_in.existing_value) {
      Slice existing_value_without_ts(merge_in.existing_value->data(),
                                      merge_in.existing_value->size() - ts_len);
      good = user_merge_op_->FullMergeV2(
          MergeOperationInput(merge_in.key, &existing_value_without_ts,
                              operands_without_ts, merge_in.logger),
          &user_merge_out);
    } else {
      good = user_merge_op_->FullMergeV2(
          MergeOperationInput(merge_in.key, nullptr, operands_without_ts,
                              merge_in.logger),
          &user_merge_out);
    }

    // Return false if the user merge operator returned false
    if (!good) {
      return false;
    }

    // If the user's merge returned a pointer to one of the (timestamp-less)
    // operands, copy it into new_value so the timestamp can be re-appended
    // below.
    if (merge_out->existing_operand.data()) {
      merge_out->new_value.assign(merge_out->existing_operand.data(),
                                  merge_out->existing_operand.size());
      merge_out->existing_operand = Slice(nullptr, 0);
    }

    // Augment the *new_value with the ttl time-stamp
    int64_t curtime;
    if (!env_->GetCurrentTime(&curtime).ok()) {
      Log(InfoLogLevel::ERROR_LEVEL, merge_in.logger,
          "Error: Could not get current time to be attached internally "
          "to the new value.");
      return false;
    } else {
      char ts_string[ts_len];
      EncodeFixed32(ts_string, (int32_t)curtime);
      merge_out->new_value.append(ts_string, ts_len);
      return true;
    }
  }

  virtual bool PartialMergeMulti(const Slice& key,
                                 const std::deque<Slice>& operand_list,
                                 std::string* new_value, Logger* logger) const
      override {
    const uint32_t ts_len = DBWithTTLImpl::kTSLength;
    std::deque<Slice> operands_without_ts;

    for (const auto& operand : operand_list) {
      if (operand.size() < ts_len) {
        Log(InfoLogLevel::ERROR_LEVEL, logger,
            "Error: Could not remove timestamp from value.");
        return false;
      }

      operands_without_ts.push_back(
          Slice(operand.data(), operand.size() - ts_len));
    }

    // Apply the user partial-merge operator (store result in *new_value)
    assert(new_value);
    if (!user_merge_op_->PartialMergeMulti(key, operands_without_ts, new_value,
                                           logger)) {
      return false;
    }

    // Augment the *new_value with the ttl time-stamp
    int64_t curtime;
    if (!env_->GetCurrentTime(&curtime).ok()) {
      Log(InfoLogLevel::ERROR_LEVEL, logger,
          "Error: Could not get current time to be attached internally "
          "to the new value.");
      return false;
    } else {
      char ts_string[ts_len];
      EncodeFixed32(ts_string, (int32_t)curtime);
      new_value->append(ts_string, ts_len);
      return true;
    }
  }

  virtual const char* Name() const override { return "Merge By TTL"; }

 private:
  std::shared_ptr<MergeOperator> user_merge_op_;
  Env* env_;
};
}  // namespace rocksdb
#endif  // ROCKSDB_LITE