2016-02-09 23:12:00 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-15 23:03:42 +00:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2013-10-16 21:59:46 +00:00
|
|
|
//
|
2023-12-04 19:17:32 +00:00
|
|
|
#include <cassert>
|
2013-03-21 22:59:47 +00:00
|
|
|
#include <iostream>
|
2020-10-27 01:20:43 +00:00
|
|
|
#include <memory>
|
2013-03-21 22:59:47 +00:00
|
|
|
|
2019-05-31 18:52:59 +00:00
|
|
|
#include "db/db_impl/db_impl.h"
|
2019-05-31 22:21:36 +00:00
|
|
|
#include "db/dbformat.h"
|
2019-05-31 00:39:43 +00:00
|
|
|
#include "db/write_batch_internal.h"
|
2015-06-26 18:35:46 +00:00
|
|
|
#include "port/stack_trace.h"
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/cache.h"
|
|
|
|
#include "rocksdb/comparator.h"
|
|
|
|
#include "rocksdb/db.h"
|
|
|
|
#include "rocksdb/env.h"
|
|
|
|
#include "rocksdb/merge_operator.h"
|
2014-07-23 14:21:38 +00:00
|
|
|
#include "rocksdb/utilities/db_ttl.h"
|
Introduce a wide column aware MergeOperator API (#11807)
Summary:
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11807
For now, RocksDB has limited support for using merge with wide columns: when a bunch of merge operands have to be applied to a wide-column base value, RocksDB currently passes only the value of the default column to the application's `MergeOperator`, which means there is no way to update any other columns during a merge. As a first step in making this more general, the patch adds a new API `FullMergeV3` to `MergeOperator`.
`FullMergeV3`'s interface enables applications to receive a plain, wide-column, or non-existent base value as merge input, and to produce a new plain value, a new wide-column value, or an existing operand as merge output. Note that there are no limitations on the column names and values if the merge result is a wide-column entity. Also, the interface is general in the sense that it makes it possible e.g. for a merge that takes a plain base value and some deltas to produce a wide-column entity as a result.
For backward compatibility, the default implementation of `FullMergeV3` falls back to `FullMergeV2` and implements the current logic where merge operands are applied to the default column of the base entity and any other columns are unchanged. (Note that with `FullMergeV3` in the `MergeOperator` interface, this behavior will become customizable.)
This patch just introduces the new API and the default backward compatible implementation. I plan to integrate `FullMergeV3` into the query and compaction logic in subsequent diffs.
Reviewed By: jaykorean
Differential Revision: D49117253
fbshipit-source-id: 109e016f25cd130fc504790818d927bae7fec6bd
2023-09-11 19:13:58 +00:00
|
|
|
#include "rocksdb/wide_columns.h"
|
2019-05-30 18:21:38 +00:00
|
|
|
#include "test_util/testharness.h"
|
2020-10-27 01:20:43 +00:00
|
|
|
#include "util/coding.h"
|
2019-05-31 00:39:43 +00:00
|
|
|
#include "utilities/merge_operators.h"
|
2013-03-21 22:59:47 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2018-07-13 21:07:53 +00:00
|
|
|
|
|
|
|
// Whether tests should open DBs with compression enabled.
// NOTE(review): not referenced in this portion of the file — presumably set by
// main() and read elsewhere; confirm against the rest of the file.
bool use_compression;
|
|
|
|
|
|
|
|
// Empty gtest fixture; the merge tests need no shared per-test state.
class MergeTest : public testing::Test {};
|
2013-03-21 22:59:47 +00:00
|
|
|
|
2014-11-11 21:47:22 +00:00
|
|
|
// Number of times CountMergeOperator::Merge has run since the last reset;
// inspected by the successive-merge tests.
size_t num_merge_operator_calls;
|
|
|
|
// Zeroes the Merge() invocation counter before a measured operation.
void resetNumMergeOperatorCalls() { num_merge_operator_calls = 0; }
|
2014-03-25 00:57:13 +00:00
|
|
|
|
2014-11-11 21:47:22 +00:00
|
|
|
// Number of times CountMergeOperator::PartialMergeMulti has run since the
// last reset; inspected by the partial-merge tests.
size_t num_partial_merge_calls;
|
|
|
|
// Zeroes the PartialMergeMulti() invocation counter before a measured
// operation.
void resetNumPartialMergeCalls() { num_partial_merge_calls = 0; }
|
2014-01-11 01:33:56 +00:00
|
|
|
|
|
|
|
class CountMergeOperator : public AssociativeMergeOperator {
|
|
|
|
public:
|
|
|
|
CountMergeOperator() {
|
|
|
|
mergeOperator_ = MergeOperators::CreateUInt64AddOperator();
|
|
|
|
}
|
|
|
|
|
2019-02-14 21:52:47 +00:00
|
|
|
bool Merge(const Slice& key, const Slice* existing_value, const Slice& value,
|
|
|
|
std::string* new_value, Logger* logger) const override {
|
2015-06-26 18:35:46 +00:00
|
|
|
assert(new_value->empty());
|
2014-11-11 21:47:22 +00:00
|
|
|
++num_merge_operator_calls;
|
2014-03-25 00:57:13 +00:00
|
|
|
if (existing_value == nullptr) {
|
|
|
|
new_value->assign(value.data(), value.size());
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-01-21 21:10:37 +00:00
|
|
|
return mergeOperator_->PartialMerge(key, *existing_value, value, new_value,
|
|
|
|
logger);
|
2014-01-11 01:33:56 +00:00
|
|
|
}
|
|
|
|
|
2019-02-14 21:52:47 +00:00
|
|
|
bool PartialMergeMulti(const Slice& key,
|
|
|
|
const std::deque<Slice>& operand_list,
|
|
|
|
std::string* new_value,
|
|
|
|
Logger* logger) const override {
|
2015-06-26 18:35:46 +00:00
|
|
|
assert(new_value->empty());
|
2014-03-25 00:57:13 +00:00
|
|
|
++num_partial_merge_calls;
|
|
|
|
return mergeOperator_->PartialMergeMulti(key, operand_list, new_value,
|
|
|
|
logger);
|
|
|
|
}
|
|
|
|
|
2019-02-14 21:52:47 +00:00
|
|
|
const char* Name() const override { return "UInt64AddOperator"; }
|
2014-01-11 01:33:56 +00:00
|
|
|
|
|
|
|
private:
|
|
|
|
std::shared_ptr<MergeOperator> mergeOperator_;
|
|
|
|
};
|
|
|
|
|
2021-01-21 21:10:37 +00:00
|
|
|
class EnvMergeTest : public EnvWrapper {
|
|
|
|
public:
|
|
|
|
EnvMergeTest() : EnvWrapper(Env::Default()) {}
|
2022-01-05 00:44:54 +00:00
|
|
|
static const char* kClassName() { return "MergeEnv"; }
|
|
|
|
const char* Name() const override { return kClassName(); }
|
2021-01-21 21:10:37 +00:00
|
|
|
// ~EnvMergeTest() override {}
|
|
|
|
|
|
|
|
uint64_t NowNanos() override {
|
|
|
|
++now_nanos_count_;
|
|
|
|
return target()->NowNanos();
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t now_nanos_count_;
|
|
|
|
|
|
|
|
static std::unique_ptr<EnvMergeTest> singleton_;
|
|
|
|
|
|
|
|
static EnvMergeTest* GetInstance() {
|
2023-12-04 19:17:32 +00:00
|
|
|
if (nullptr == singleton_) {
|
|
|
|
singleton_.reset(new EnvMergeTest);
|
|
|
|
}
|
2021-01-21 21:10:37 +00:00
|
|
|
return singleton_.get();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
uint64_t EnvMergeTest::now_nanos_count_{0};
|
|
|
|
std::unique_ptr<EnvMergeTest> EnvMergeTest::singleton_;
|
|
|
|
|
2015-12-15 23:26:20 +00:00
|
|
|
std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false,
|
2017-02-23 22:53:03 +00:00
|
|
|
const size_t max_successive_merges = 0) {
|
2013-03-21 22:59:47 +00:00
|
|
|
DB* db;
|
|
|
|
Options options;
|
|
|
|
options.create_if_missing = true;
|
2014-01-11 01:33:56 +00:00
|
|
|
options.merge_operator = std::make_shared<CountMergeOperator>();
|
|
|
|
options.max_successive_merges = max_successive_merges;
|
2021-01-21 21:10:37 +00:00
|
|
|
options.env = EnvMergeTest::GetInstance();
|
2020-12-22 23:08:17 +00:00
|
|
|
EXPECT_OK(DestroyDB(dbname, Options()));
|
2013-07-22 23:49:55 +00:00
|
|
|
Status s;
|
|
|
|
if (ttl) {
|
2014-04-29 03:44:33 +00:00
|
|
|
DBWithTTL* db_with_ttl;
|
|
|
|
s = DBWithTTL::Open(options, dbname, &db_with_ttl);
|
|
|
|
db = db_with_ttl;
|
2013-07-22 23:49:55 +00:00
|
|
|
} else {
|
2013-08-06 18:42:21 +00:00
|
|
|
s = DB::Open(options, dbname, &db);
|
2013-07-22 23:49:55 +00:00
|
|
|
}
|
2020-12-22 23:08:17 +00:00
|
|
|
EXPECT_OK(s);
|
|
|
|
assert(s.ok());
|
Built-in support for generating unique IDs, bug fix (#8708)
Summary:
Env::GenerateUniqueId() works fine on Windows and on POSIX
where /proc/sys/kernel/random/uuid exists. Our other implementation is
flawed and easily produces collision in a new multi-threaded test.
As we rely more heavily on DB session ID uniqueness, this becomes a
serious issue.
This change combines several individually suitable entropy sources
for reliable generation of random unique IDs, with goal of uniqueness
and portability, not cryptographic strength nor maximum speed.
Specifically:
* Moves code for getting UUIDs from the OS to port::GenerateRfcUuid
rather than in Env implementation details. Callers are now told whether
the operation fails or succeeds.
* Adds an internal API GenerateRawUniqueId for generating high-quality
128-bit unique identifiers, by combining entropy from three "tracks":
* Lots of info from default Env like time, process id, and hostname.
* std::random_device
* port::GenerateRfcUuid (when working)
* Built-in implementations of Env::GenerateUniqueId() will now always
produce an RFC 4122 UUID string, either from platform-specific API or
by converting the output of GenerateRawUniqueId.
DB session IDs now use GenerateRawUniqueId while DB IDs (not as
critical) try to use port::GenerateRfcUuid but fall back on
GenerateRawUniqueId with conversion to an RFC 4122 UUID.
GenerateRawUniqueId is declared and defined under env/ rather than util/
or even port/ because of the Env dependency.
Likely follow-up: enhance GenerateRawUniqueId to be faster after the
first call and to guarantee uniqueness within the lifetime of a single
process (imparting the same property onto DB session IDs).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8708
Test Plan:
A new mini-stress test in env_test checks the various public
and internal APIs for uniqueness, including each track of
GenerateRawUniqueId individually. We can't hope to verify anywhere close
to 128 bits of entropy, but it can at least detect flaws as bad as the
old code. Serial execution of the new tests takes about 350 ms on
my machine.
Reviewed By: zhichao-cao, mrambacher
Differential Revision: D30563780
Pulled By: pdillinger
fbshipit-source-id: de4c9ff4b2f581cf784fcedb5f39f16e5185c364
2021-08-30 22:19:39 +00:00
|
|
|
// Allowed to call NowNanos during DB creation (in GenerateRawUniqueId() for
|
|
|
|
// session ID)
|
|
|
|
EnvMergeTest::now_nanos_count_ = 0;
|
2013-03-21 22:59:47 +00:00
|
|
|
return std::shared_ptr<DB>(db);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Imagine we are maintaining a set of uint64 counters.
|
|
|
|
// Each counter has a distinct name. And we would like
|
|
|
|
// to support four high level operations:
|
|
|
|
// set, add, get and remove
|
|
|
|
// This is a quick implementation without a Merge operation.
|
|
|
|
class Counters {
|
|
|
|
protected:
|
|
|
|
std::shared_ptr<DB> db_;
|
|
|
|
|
|
|
|
WriteOptions put_option_;
|
|
|
|
ReadOptions get_option_;
|
|
|
|
WriteOptions delete_option_;
|
|
|
|
|
|
|
|
uint64_t default_;
|
|
|
|
|
|
|
|
public:
|
2013-07-29 20:26:38 +00:00
|
|
|
explicit Counters(std::shared_ptr<DB> db, uint64_t defaultCount = 0)
|
2013-03-21 22:59:47 +00:00
|
|
|
: db_(db),
|
|
|
|
put_option_(),
|
|
|
|
get_option_(),
|
|
|
|
delete_option_(),
|
|
|
|
default_(defaultCount) {
|
|
|
|
assert(db_);
|
|
|
|
}
|
|
|
|
|
2023-12-04 19:17:32 +00:00
|
|
|
virtual ~Counters() = default;
|
2013-03-21 22:59:47 +00:00
|
|
|
|
|
|
|
// public interface of Counters.
|
|
|
|
// All four functions return false
|
|
|
|
// if the underlying level db operation failed.
|
|
|
|
|
|
|
|
// mapped to a levedb Put
|
2015-12-15 23:26:20 +00:00
|
|
|
bool set(const std::string& key, uint64_t value) {
|
2013-03-21 22:59:47 +00:00
|
|
|
// just treat the internal rep of int64 as the string
|
2017-04-22 03:41:37 +00:00
|
|
|
char buf[sizeof(value)];
|
|
|
|
EncodeFixed64(buf, value);
|
|
|
|
Slice slice(buf, sizeof(value));
|
2013-03-21 22:59:47 +00:00
|
|
|
auto s = db_->Put(put_option_, key, slice);
|
|
|
|
|
|
|
|
if (s.ok()) {
|
|
|
|
return true;
|
|
|
|
} else {
|
2015-12-15 23:26:20 +00:00
|
|
|
std::cerr << s.ToString() << std::endl;
|
2013-03-21 22:59:47 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-10-05 05:32:05 +00:00
|
|
|
// mapped to a rocksdb Delete
|
2015-12-15 23:26:20 +00:00
|
|
|
bool remove(const std::string& key) {
|
2013-03-21 22:59:47 +00:00
|
|
|
auto s = db_->Delete(delete_option_, key);
|
|
|
|
|
|
|
|
if (s.ok()) {
|
|
|
|
return true;
|
|
|
|
} else {
|
2015-12-15 23:26:20 +00:00
|
|
|
std::cerr << s.ToString() << std::endl;
|
2013-03-21 22:59:47 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-10-05 05:32:05 +00:00
|
|
|
// mapped to a rocksdb Get
|
2015-12-15 23:26:20 +00:00
|
|
|
bool get(const std::string& key, uint64_t* value) {
|
|
|
|
std::string str;
|
2013-03-21 22:59:47 +00:00
|
|
|
auto s = db_->Get(get_option_, key, &str);
|
|
|
|
|
|
|
|
if (s.IsNotFound()) {
|
|
|
|
// return default value if not found;
|
|
|
|
*value = default_;
|
|
|
|
return true;
|
|
|
|
} else if (s.ok()) {
|
|
|
|
// deserialization
|
|
|
|
if (str.size() != sizeof(uint64_t)) {
|
2015-12-15 23:26:20 +00:00
|
|
|
std::cerr << "value corruption\n";
|
2013-03-21 22:59:47 +00:00
|
|
|
return false;
|
|
|
|
}
|
2023-12-04 19:17:32 +00:00
|
|
|
*value = DecodeFixed64(str.data());
|
2013-03-21 22:59:47 +00:00
|
|
|
return true;
|
|
|
|
} else {
|
2015-12-15 23:26:20 +00:00
|
|
|
std::cerr << s.ToString() << std::endl;
|
2013-03-21 22:59:47 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// 'add' is implemented as get -> modify -> set
|
|
|
|
// An alternative is a single merge operation, see MergeBasedCounters
|
2015-12-15 23:26:20 +00:00
|
|
|
virtual bool add(const std::string& key, uint64_t value) {
|
2013-03-21 22:59:47 +00:00
|
|
|
uint64_t base = default_;
|
|
|
|
return get(key, &base) && set(key, base + value);
|
|
|
|
}
|
|
|
|
|
|
|
|
// convenience functions for testing
|
2015-12-15 23:26:20 +00:00
|
|
|
void assert_set(const std::string& key, uint64_t value) {
|
2013-03-21 22:59:47 +00:00
|
|
|
assert(set(key, value));
|
|
|
|
}
|
|
|
|
|
2015-12-15 23:26:20 +00:00
|
|
|
void assert_remove(const std::string& key) { assert(remove(key)); }
|
2013-03-21 22:59:47 +00:00
|
|
|
|
2015-12-15 23:26:20 +00:00
|
|
|
uint64_t assert_get(const std::string& key) {
|
2013-03-21 22:59:47 +00:00
|
|
|
uint64_t value = default_;
|
2014-04-14 19:06:30 +00:00
|
|
|
int result = get(key, &value);
|
|
|
|
assert(result);
|
2023-12-04 19:17:32 +00:00
|
|
|
if (result == 0) {
|
|
|
|
exit(1); // Disable unused variable warning.
|
|
|
|
}
|
2013-03-21 22:59:47 +00:00
|
|
|
return value;
|
|
|
|
}
|
|
|
|
|
2015-12-15 23:26:20 +00:00
|
|
|
void assert_add(const std::string& key, uint64_t value) {
|
2014-04-14 19:06:30 +00:00
|
|
|
int result = add(key, value);
|
|
|
|
assert(result);
|
2023-12-04 19:17:32 +00:00
|
|
|
if (result == 0) {
|
|
|
|
exit(1); // Disable unused variable warning.
|
|
|
|
}
|
2013-03-21 22:59:47 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Implement 'add' directly with the new Merge operation
|
|
|
|
class MergeBasedCounters : public Counters {
|
|
|
|
private:
|
2021-01-21 21:10:37 +00:00
|
|
|
WriteOptions merge_option_; // for merge
|
2013-03-21 22:59:47 +00:00
|
|
|
|
|
|
|
public:
|
2013-07-29 20:26:38 +00:00
|
|
|
explicit MergeBasedCounters(std::shared_ptr<DB> db, uint64_t defaultCount = 0)
|
2021-01-21 21:10:37 +00:00
|
|
|
: Counters(db, defaultCount), merge_option_() {}
|
2013-03-21 22:59:47 +00:00
|
|
|
|
2013-10-05 05:32:05 +00:00
|
|
|
// mapped to a rocksdb Merge operation
|
2019-02-14 21:52:47 +00:00
|
|
|
bool add(const std::string& key, uint64_t value) override {
|
2013-03-21 22:59:47 +00:00
|
|
|
char encoded[sizeof(uint64_t)];
|
|
|
|
EncodeFixed64(encoded, value);
|
|
|
|
Slice slice(encoded, sizeof(uint64_t));
|
|
|
|
auto s = db_->Merge(merge_option_, key, slice);
|
|
|
|
|
|
|
|
if (s.ok()) {
|
|
|
|
return true;
|
|
|
|
} else {
|
2015-12-15 23:26:20 +00:00
|
|
|
std::cerr << s.ToString() << std::endl;
|
2013-03-21 22:59:47 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Scans the whole DB (output lines are commented out) and asserts that the
// iteration itself finished without error.
void dumpDb(DB* db) {
  std::unique_ptr<Iterator> iter(db->NewIterator(ReadOptions()));
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    // uint64_t value = DecodeFixed64(iter->value().data());
    // std::cout << iter->key().ToString() << ": " << value << std::endl;
  }
  assert(iter->status().ok());  // Check for any errors found during the scan
}
|
|
|
|
|
|
|
|
// Exercises the Counters interface end to end (set / get / remove / add),
// optionally interleaving flushes and a full compaction to check that merge
// results survive them.
void testCounters(Counters& counters, DB* db, bool test_compaction) {
  FlushOptions flush_opts;
  flush_opts.wait = true;

  counters.assert_set("a", 1);

  if (test_compaction) {
    ASSERT_OK(db->Flush(flush_opts));
  }

  ASSERT_EQ(counters.assert_get("a"), 1);

  counters.assert_remove("b");

  // default value is 0 if non-existent
  ASSERT_EQ(counters.assert_get("b"), 0);

  counters.assert_add("a", 2);

  if (test_compaction) {
    ASSERT_OK(db->Flush(flush_opts));
  }

  // 1+2 = 3
  ASSERT_EQ(counters.assert_get("a"), 3);

  dumpDb(db);

  // 1+...+49 = ?
  uint64_t expected_total = 0;
  for (int i = 1; i < 50; i++) {
    counters.assert_add("b", i);
    expected_total += i;
  }
  ASSERT_EQ(counters.assert_get("b"), expected_total);

  dumpDb(db);

  if (test_compaction) {
    ASSERT_OK(db->Flush(flush_opts));

    ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, nullptr));

    dumpDb(db);

    // Values must be unchanged by flush + compaction.
    ASSERT_EQ(counters.assert_get("a"), 3);
    ASSERT_EQ(counters.assert_get("b"), expected_total);
  }
}
|
|
|
|
|
2020-10-27 01:20:43 +00:00
|
|
|
// Races a merge-based counter write against a concurrent SetOptions call, a
// manual compaction, and a non-waiting flush, using sync points to force a
// specific interleaving inside VersionSet::LogAndApply, then verifies the
// merged value is still readable.
// NOTE(review): thread ids below are assigned in order of first arrival at a
// sync-point callback, so the 0/1/2 checks assume the set_options, compact,
// and flush threads reach LogAndApply in that order — enforced by the
// LoadDependency edges; confirm if the dependency list is changed.
void testCountersWithFlushAndCompaction(Counters& counters, DB* db) {
  // Seed one key and persist it so the manual compaction has work to do.
  ASSERT_OK(db->Put({}, "1", "1"));
  ASSERT_OK(db->Flush(FlushOptions()));

  // Hands out a stable per-thread id in order of first call.
  std::atomic<int> cnt{0};
  const auto get_thread_id = [&cnt]() {
    thread_local int thread_id{cnt++};
    return thread_id;
  };
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();
  SyncPoint::GetInstance()->SetCallBack(
      "VersionSet::LogAndApply:BeforeWriterWaiting", [&](void* /*arg*/) {
        int thread_id = get_thread_id();
        if (1 == thread_id) {
          TEST_SYNC_POINT(
              "testCountersWithFlushAndCompaction::bg_compact_thread:0");
        } else if (2 == thread_id) {
          TEST_SYNC_POINT(
              "testCountersWithFlushAndCompaction::bg_flush_thread:0");
        }
      });
  SyncPoint::GetInstance()->SetCallBack(
      "VersionSet::LogAndApply:WriteManifest", [&](void* /*arg*/) {
        int thread_id = get_thread_id();
        if (0 == thread_id) {
          TEST_SYNC_POINT(
              "testCountersWithFlushAndCompaction::set_options_thread:0");
          TEST_SYNC_POINT(
              "testCountersWithFlushAndCompaction::set_options_thread:1");
        }
      });
  SyncPoint::GetInstance()->SetCallBack(
      "VersionSet::LogAndApply:WakeUpAndDone", [&](void* arg) {
        // arg is the DB mutex, held at this sync point; drop it while blocked
        // on the flush-thread rendezvous so the foreground Get can proceed.
        auto* mutex = static_cast<InstrumentedMutex*>(arg);
        mutex->AssertHeld();
        int thread_id = get_thread_id();
        ASSERT_EQ(2, thread_id);
        mutex->Unlock();
        TEST_SYNC_POINT(
            "testCountersWithFlushAndCompaction::bg_flush_thread:1");
        TEST_SYNC_POINT(
            "testCountersWithFlushAndCompaction::bg_flush_thread:2");
        mutex->Lock();
      });
  // Ordering edges: each "before" point must fire before its paired "after".
  SyncPoint::GetInstance()->LoadDependency({
      {"testCountersWithFlushAndCompaction::set_options_thread:0",
       "testCountersWithCompactionAndFlush:BeforeCompact"},
      {"testCountersWithFlushAndCompaction::bg_compact_thread:0",
       "testCountersWithFlushAndCompaction:BeforeIncCounters"},
      {"testCountersWithFlushAndCompaction::bg_flush_thread:0",
       "testCountersWithFlushAndCompaction::set_options_thread:1"},
      {"testCountersWithFlushAndCompaction::bg_flush_thread:1",
       "testCountersWithFlushAndCompaction:BeforeVerification"},
      {"testCountersWithFlushAndCompaction:AfterGet",
       "testCountersWithFlushAndCompaction::bg_flush_thread:2"},
  });
  SyncPoint::GetInstance()->EnableProcessing();

  port::Thread set_options_thread([&]() {
    ASSERT_OK(static_cast<DBImpl*>(db)->SetOptions(
        {{"disable_auto_compactions", "false"}}));
  });
  TEST_SYNC_POINT("testCountersWithCompactionAndFlush:BeforeCompact");
  port::Thread compact_thread([&]() {
    ASSERT_OK(static_cast<DBImpl*>(db)->CompactRange(
        CompactRangeOptions(), db->DefaultColumnFamily(), nullptr, nullptr));
  });

  TEST_SYNC_POINT("testCountersWithFlushAndCompaction:BeforeIncCounters");
  counters.add("test-key", 1);

  // Non-waiting flush so the foreground thread keeps running while the
  // background flush is in LogAndApply.
  FlushOptions flush_opts;
  flush_opts.wait = false;
  ASSERT_OK(db->Flush(flush_opts));

  TEST_SYNC_POINT("testCountersWithFlushAndCompaction:BeforeVerification");
  std::string expected;
  PutFixed64(&expected, 1);
  std::string actual;
  Status s = db->Get(ReadOptions(), "test-key", &actual);
  TEST_SYNC_POINT("testCountersWithFlushAndCompaction:AfterGet");
  set_options_thread.join();
  compact_thread.join();
  ASSERT_OK(s);
  ASSERT_EQ(expected, actual);
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();
}
|
|
|
|
|
2014-11-11 21:47:22 +00:00
|
|
|
// Verifies max_successive_merges accounting: the merge operator runs only
// when the number of queued operands crosses max_num_merges, and a read
// replays exactly the operands still pending.
void testSuccessiveMerge(Counters& counters, size_t max_num_merges,
                         size_t num_merges) {
  counters.assert_remove("z");
  uint64_t expected_sum = 0;

  for (size_t step = 1; step <= num_merges; ++step) {
    resetNumMergeOperatorCalls();
    counters.assert_add("z", step);
    expected_sum += step;

    const size_t pending = step % (max_num_merges + 1);
    if (pending == 0) {
      // This write crossed the limit and folded all queued operands.
      ASSERT_EQ(num_merge_operator_calls, max_num_merges + 1);
    } else {
      ASSERT_EQ(num_merge_operator_calls, 0);
    }

    resetNumMergeOperatorCalls();
    ASSERT_EQ(counters.assert_get("z"), expected_sum);
    ASSERT_EQ(num_merge_operator_calls, pending);
  }
}
|
|
|
|
|
2014-11-11 21:47:22 +00:00
|
|
|
// Verifies when PartialMerge is (and is not) invoked during compaction.
// counters/db: the counter wrapper and underlying DB under test.
// max_merge:   the DB's max_successive_merges-style stacking limit.
// min_merge:   minimum number of operands required for a partial merge
//              (hard-coded to 2 by the merge path).
// count:       number of merge operands written per test key.
void testPartialMerge(Counters* counters, DB* db, size_t max_merge,
                      size_t min_merge, size_t count) {
  FlushOptions o;
  o.wait = true;  // block until the flush completes so compaction sees the data

  // Test case 1: partial merge should be called when the number of merge
  // operands exceeds the threshold.
  uint64_t tmp_sum = 0;
  resetNumPartialMergeCalls();
  for (size_t i = 1; i <= count; i++) {
    counters->assert_add("b", i);
    tmp_sum += i;
  }
  ASSERT_OK(db->Flush(o));
  ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  // Regardless of which merge path ran, the final value must be the sum.
  ASSERT_EQ(tmp_sum, counters->assert_get("b"));
  if (count > max_merge) {
    // in this case, FullMerge should be called instead.
    ASSERT_EQ(num_partial_merge_calls, 0U);
  } else {
    // if count >= min_merge, then partial merge should be called once.
    ASSERT_EQ((count >= min_merge), (num_partial_merge_calls == 1));
  }

  // Test case 2: partial merge should not be called when a put is found.
  // A base Put under the merge operands forces a full merge during
  // compaction instead of a partial merge of the operands alone.
  resetNumPartialMergeCalls();
  tmp_sum = 0;
  ASSERT_OK(db->Put(ROCKSDB_NAMESPACE::WriteOptions(), "c", "10"));
  for (size_t i = 1; i <= count; i++) {
    counters->assert_add("c", i);
    tmp_sum += i;
  }
  ASSERT_OK(db->Flush(o));
  ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  ASSERT_EQ(tmp_sum, counters->assert_get("c"));
  ASSERT_EQ(num_partial_merge_calls, 0U);
  // NowNanos was previously called in MergeHelper::FilterMerge(), which
  // harmed performance.
  ASSERT_EQ(EnvMergeTest::now_nanos_count_, 0U);
}
|
|
|
|
|
2014-11-11 21:47:22 +00:00
|
|
|
// Applies num_merges Merge operations to one key inside a single WriteBatch
// and verifies how many operator invocations happen at Write() time (full
// stacks of max_num_merges + 1 operands) versus at Get() time (the leftover
// operands). Requires num_merges > max_num_merges so at least one stack is
// merged during the batch insert.
void testSingleBatchSuccessiveMerge(DB* db, size_t max_num_merges,
                                    size_t num_merges) {
  ASSERT_GT(num_merges, max_num_merges);

  Slice key("BatchSuccessiveMerge");
  uint64_t merge_value = 1;
  char buf[sizeof(merge_value)];
  EncodeFixed64(buf, merge_value);
  Slice merge_value_slice(buf, sizeof(merge_value));

  // Create the batch
  WriteBatch batch;
  for (size_t i = 0; i < num_merges; ++i) {
    ASSERT_OK(batch.Merge(key, merge_value_slice));
  }

  // Apply to memtable and count the number of merges
  resetNumMergeOperatorCalls();
  ASSERT_OK(db->Write(WriteOptions(), &batch));
  ASSERT_EQ(
      num_merge_operator_calls,
      static_cast<size_t>(num_merges - (num_merges % (max_num_merges + 1))));

  // Get the value
  resetNumMergeOperatorCalls();
  std::string get_value_str;
  ASSERT_OK(db->Get(ReadOptions(), key, &get_value_str));
  // Use a gtest assertion rather than assert(): assert() is compiled out
  // under NDEBUG, which would let DecodeFixed64 read past the end of a
  // short buffer without the test failing.
  ASSERT_EQ(get_value_str.size(), sizeof(uint64_t));
  uint64_t get_value = DecodeFixed64(get_value_str.data());
  ASSERT_EQ(get_value, num_merges * merge_value);
  ASSERT_EQ(num_merge_operator_calls,
            static_cast<size_t>((num_merges % (max_num_merges + 1))));
}
|
|
|
|
|
2018-07-13 21:07:53 +00:00
|
|
|
void runTest(const std::string& dbname, const bool use_ttl = false) {
|
2013-03-21 22:59:47 +00:00
|
|
|
{
|
2014-10-31 18:59:54 +00:00
|
|
|
auto db = OpenDb(dbname, use_ttl);
|
|
|
|
|
|
|
|
{
|
|
|
|
Counters counters(db, 0);
|
|
|
|
testCounters(counters, db.get(), true);
|
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
MergeBasedCounters counters(db, 0);
|
2018-07-13 21:07:53 +00:00
|
|
|
testCounters(counters, db.get(), use_compression);
|
2014-10-31 18:59:54 +00:00
|
|
|
}
|
2013-03-21 22:59:47 +00:00
|
|
|
}
|
|
|
|
|
2020-12-22 23:08:17 +00:00
|
|
|
ASSERT_OK(DestroyDB(dbname, Options()));
|
2014-01-11 01:33:56 +00:00
|
|
|
|
|
|
|
{
|
2014-03-25 00:57:13 +00:00
|
|
|
size_t max_merge = 5;
|
|
|
|
auto db = OpenDb(dbname, use_ttl, max_merge);
|
2014-01-11 01:33:56 +00:00
|
|
|
MergeBasedCounters counters(db, 0);
|
2018-07-13 21:07:53 +00:00
|
|
|
testCounters(counters, db.get(), use_compression);
|
2014-03-25 00:57:13 +00:00
|
|
|
testSuccessiveMerge(counters, max_merge, max_merge * 2);
|
2014-01-11 01:33:56 +00:00
|
|
|
testSingleBatchSuccessiveMerge(db.get(), 5, 7);
|
2020-12-22 23:08:17 +00:00
|
|
|
ASSERT_OK(db->Close());
|
|
|
|
ASSERT_OK(DestroyDB(dbname, Options()));
|
2014-01-11 01:33:56 +00:00
|
|
|
}
|
|
|
|
|
2014-03-25 00:57:13 +00:00
|
|
|
{
|
|
|
|
size_t max_merge = 100;
|
2017-02-23 22:53:03 +00:00
|
|
|
// Min merge is hard-coded to 2.
|
|
|
|
uint32_t min_merge = 2;
|
|
|
|
for (uint32_t count = min_merge - 1; count <= min_merge + 1; count++) {
|
|
|
|
auto db = OpenDb(dbname, use_ttl, max_merge);
|
|
|
|
MergeBasedCounters counters(db, 0);
|
|
|
|
testPartialMerge(&counters, db.get(), max_merge, min_merge, count);
|
2020-12-22 23:08:17 +00:00
|
|
|
ASSERT_OK(db->Close());
|
|
|
|
ASSERT_OK(DestroyDB(dbname, Options()));
|
2017-02-23 22:53:03 +00:00
|
|
|
}
|
|
|
|
{
|
|
|
|
auto db = OpenDb(dbname, use_ttl, max_merge);
|
|
|
|
MergeBasedCounters counters(db, 0);
|
|
|
|
testPartialMerge(&counters, db.get(), max_merge, min_merge,
|
|
|
|
min_merge * 10);
|
2020-12-22 23:08:17 +00:00
|
|
|
ASSERT_OK(db->Close());
|
|
|
|
ASSERT_OK(DestroyDB(dbname, Options()));
|
2014-03-25 00:57:13 +00:00
|
|
|
}
|
|
|
|
}
|
2014-07-31 00:24:36 +00:00
|
|
|
|
|
|
|
{
|
|
|
|
{
|
|
|
|
auto db = OpenDb(dbname);
|
|
|
|
MergeBasedCounters counters(db, 0);
|
|
|
|
counters.add("test-key", 1);
|
|
|
|
counters.add("test-key", 1);
|
|
|
|
counters.add("test-key", 1);
|
2020-12-22 23:08:17 +00:00
|
|
|
ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
2014-07-31 00:24:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
DB* reopen_db;
|
|
|
|
ASSERT_OK(DB::Open(Options(), dbname, &reopen_db));
|
|
|
|
std::string value;
|
2020-12-22 23:08:17 +00:00
|
|
|
ASSERT_NOK(reopen_db->Get(ReadOptions(), "test-key", &value));
|
2014-07-31 00:24:36 +00:00
|
|
|
delete reopen_db;
|
2020-12-22 23:08:17 +00:00
|
|
|
ASSERT_OK(DestroyDB(dbname, Options()));
|
2014-07-31 00:24:36 +00:00
|
|
|
}
|
|
|
|
|
2014-07-31 18:20:49 +00:00
|
|
|
/* Temporary remove this test
|
2014-07-31 00:24:36 +00:00
|
|
|
{
|
2015-12-15 23:26:20 +00:00
|
|
|
std::cout << "Test merge-operator not set after reopen (recovery case)\n";
|
2014-07-31 00:24:36 +00:00
|
|
|
{
|
|
|
|
auto db = OpenDb(dbname);
|
|
|
|
MergeBasedCounters counters(db, 0);
|
|
|
|
counters.add("test-key", 1);
|
|
|
|
counters.add("test-key", 1);
|
|
|
|
counters.add("test-key", 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
DB* reopen_db;
|
|
|
|
ASSERT_TRUE(DB::Open(Options(), dbname, &reopen_db).IsInvalidArgument());
|
|
|
|
}
|
2014-07-31 18:20:49 +00:00
|
|
|
*/
|
2013-07-22 23:49:55 +00:00
|
|
|
}
|
|
|
|
|
2018-07-13 21:07:53 +00:00
|
|
|
TEST_F(MergeTest, MergeDbTest) {
  // Run the complete merge test suite against a plain (non-TTL) database.
  const std::string db_path = test::PerThreadDBPath("merge_testdb");
  runTest(db_path);
}
|
|
|
|
|
|
|
|
TEST_F(MergeTest, MergeDbTtlTest) {
  // Run the same merge test suite, layered on a TTL database.
  const std::string db_path = test::PerThreadDBPath("merge_testdbttl");
  runTest(db_path, /*use_ttl=*/true);  // Run test on TTL database
}
|
2020-10-27 01:20:43 +00:00
|
|
|
|
|
|
|
TEST_F(MergeTest, MergeWithCompactionAndFlush) {
  // Exercise merge correctness while flushes and compactions are interleaved
  // with the counter updates.
  const std::string db_path =
      test::PerThreadDBPath("merge_with_compaction_and_flush");
  {
    auto db = OpenDb(db_path);
    {
      MergeBasedCounters counters(db, 0);
      testCountersWithFlushAndCompaction(counters, db.get());
    }
    // counters and db are destroyed here, before the database files are
    // removed below.
  }
  ASSERT_OK(DestroyDB(db_path, Options()));
}
|
2018-07-13 21:07:53 +00:00
|
|
|
|
Introduce a wide column aware MergeOperator API (#11807)
Summary:
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11807
For now, RocksDB has limited support for using merge with wide columns: when a bunch of merge operands have to be applied to a wide-column base value, RocksDB currently passes only the value of the default column to the application's `MergeOperator`, which means there is no way to update any other columns during a merge. As a first step in making this more general, the patch adds a new API `FullMergeV3` to `MergeOperator`.
`FullMergeV3`'s interface enables applications to receive a plain, wide-column, or non-existent base value as merge input, and to produce a new plain value, a new wide-column value, or an existing operand as merge output. Note that there are no limitations on the column names and values if the merge result is a wide-column entity. Also, the interface is general in the sense that it makes it possible e.g. for a merge that takes a plain base value and some deltas to produce a wide-column entity as a result.
For backward compatibility, the default implementation of `FullMergeV3` falls back to `FullMergeV2` and implements the current logic where merge operands are applied to the default column of the base entity and any other columns are unchanged. (Note that with `FullMergeV3` in the `MergeOperator` interface, this behavior will become customizable.)
This patch just introduces the new API and the default backward compatible implementation. I plan to integrate `FullMergeV3` into the query and compaction logic in subsequent diffs.
Reviewed By: jaykorean
Differential Revision: D49117253
fbshipit-source-id: 109e016f25cd130fc504790818d927bae7fec6bd
2023-09-11 19:13:58 +00:00
|
|
|
TEST_F(MergeTest, FullMergeV3FallbackNewValue) {
  // Test that the default FullMergeV3 implementation correctly handles the case
  // when FullMergeV2 results in a new value.
  //
  // The string-append operator always produces a brand new string, so each
  // section checks how that new string is surfaced through the V3 output
  // variant for every flavor of base value (none, plain, wide-column with
  // and without a default column).

  const Slice key("foo");
  const MergeOperator::MergeOperationInputV3::OperandList operands{
      "first", "second", "third"};
  constexpr Logger* logger = nullptr;

  // Empty delimiter: the merge result is plain concatenation.
  auto append_operator =
      MergeOperators::CreateStringAppendOperator(std::string());

  // No existing value
  {
    MergeOperator::MergeOperationInputV3::ExistingValue existing_value;
    const MergeOperator::MergeOperationInputV3 merge_in(
        key, std::move(existing_value), operands, logger);

    MergeOperator::MergeOperationOutputV3 merge_out;

    ASSERT_TRUE(append_operator->FullMergeV3(merge_in, &merge_out));

    // With no base value, the result is just the operands concatenated.
    const auto& result = std::get<std::string>(merge_out.new_value);
    ASSERT_EQ(result, operands[0].ToString() + operands[1].ToString() +
                          operands[2].ToString());
  }

  // Plain existing value
  {
    const Slice plain("plain");
    MergeOperator::MergeOperationInputV3::ExistingValue existing_value(plain);
    const MergeOperator::MergeOperationInputV3 merge_in(
        key, std::move(existing_value), operands, logger);

    MergeOperator::MergeOperationOutputV3 merge_out;

    ASSERT_TRUE(append_operator->FullMergeV3(merge_in, &merge_out));

    // Base value comes first, then the operands in order.
    const auto& result = std::get<std::string>(merge_out.new_value);
    ASSERT_EQ(result, plain.ToString() + operands[0].ToString() +
                          operands[1].ToString() + operands[2].ToString());
  }

  // Wide-column existing value with default column
  {
    const WideColumns entity{
        {kDefaultWideColumnName, "default"}, {"one", "1"}, {"two", "2"}};
    MergeOperator::MergeOperationInputV3::ExistingValue existing_value(entity);
    const MergeOperator::MergeOperationInputV3 merge_in(
        key, std::move(existing_value), operands, logger);

    MergeOperator::MergeOperationOutputV3 merge_out;

    ASSERT_TRUE(append_operator->FullMergeV3(merge_in, &merge_out));

    // Only the default column participates in the merge; the remaining
    // columns are carried over unchanged.
    const auto& result =
        std::get<MergeOperator::MergeOperationOutputV3::NewColumns>(
            merge_out.new_value);
    ASSERT_EQ(result.size(), entity.size());
    ASSERT_EQ(result[0].first, entity[0].name());
    ASSERT_EQ(result[0].second,
              entity[0].value().ToString() + operands[0].ToString() +
                  operands[1].ToString() + operands[2].ToString());
    ASSERT_EQ(result[1].first, entity[1].name());
    ASSERT_EQ(result[1].second, entity[1].value());
    ASSERT_EQ(result[2].first, entity[2].name());
    ASSERT_EQ(result[2].second, entity[2].value());
  }

  // Wide-column existing value without default column
  {
    const WideColumns entity{{"one", "1"}, {"two", "2"}};
    MergeOperator::MergeOperationInputV3::ExistingValue existing_value(entity);
    const MergeOperator::MergeOperationInputV3 merge_in(
        key, std::move(existing_value), operands, logger);

    MergeOperator::MergeOperationOutputV3 merge_out;

    ASSERT_TRUE(append_operator->FullMergeV3(merge_in, &merge_out));

    // With no base default column, the merged operands become a brand new
    // default column, inserted ahead of the existing columns.
    const auto& result =
        std::get<MergeOperator::MergeOperationOutputV3::NewColumns>(
            merge_out.new_value);
    ASSERT_EQ(result.size(), entity.size() + 1);
    ASSERT_EQ(result[0].first, kDefaultWideColumnName);
    ASSERT_EQ(result[0].second, operands[0].ToString() +
                                    operands[1].ToString() +
                                    operands[2].ToString());
    ASSERT_EQ(result[1].first, entity[0].name());
    ASSERT_EQ(result[1].second, entity[0].value());
    ASSERT_EQ(result[2].first, entity[1].name());
    ASSERT_EQ(result[2].second, entity[1].value());
  }
}
|
|
|
|
|
|
|
|
TEST_F(MergeTest, FullMergeV3FallbackExistingOperand) {
  // Test that the default FullMergeV3 implementation correctly handles the case
  // when FullMergeV2 results in an existing operand.
  //
  // The put operator's result is the last operand itself, so the fallback is
  // expected to surface that operand (for plain results, as a Slice aliasing
  // the original operand buffer, without copying).

  const Slice key("foo");
  const MergeOperator::MergeOperationInputV3::OperandList operands{
      "first", "second", "third"};
  constexpr Logger* logger = nullptr;

  auto put_operator = MergeOperators::CreatePutOperator();

  // No existing value
  {
    MergeOperator::MergeOperationInputV3::ExistingValue existing_value;
    const MergeOperator::MergeOperationInputV3 merge_in(
        key, std::move(existing_value), operands, logger);

    MergeOperator::MergeOperationOutputV3 merge_out;

    ASSERT_TRUE(put_operator->FullMergeV3(merge_in, &merge_out));

    // Comparing data() pointers (not just contents) proves the result
    // aliases the last operand rather than holding a copy.
    const auto& result = std::get<Slice>(merge_out.new_value);
    ASSERT_EQ(result.data(), operands.back().data());
    ASSERT_EQ(result.size(), operands.back().size());
  }

  // Plain existing value
  {
    const Slice plain("plain");
    MergeOperator::MergeOperationInputV3::ExistingValue existing_value(plain);
    const MergeOperator::MergeOperationInputV3 merge_in(
        key, std::move(existing_value), operands, logger);

    MergeOperator::MergeOperationOutputV3 merge_out;

    ASSERT_TRUE(put_operator->FullMergeV3(merge_in, &merge_out));

    // The base value is discarded; the result still aliases the last operand.
    const auto& result = std::get<Slice>(merge_out.new_value);
    ASSERT_EQ(result.data(), operands.back().data());
    ASSERT_EQ(result.size(), operands.back().size());
  }

  // Wide-column existing value with default column
  {
    const WideColumns entity{
        {kDefaultWideColumnName, "default"}, {"one", "1"}, {"two", "2"}};
    MergeOperator::MergeOperationInputV3::ExistingValue existing_value(entity);
    const MergeOperator::MergeOperationInputV3 merge_in(
        key, std::move(existing_value), operands, logger);

    MergeOperator::MergeOperationOutputV3 merge_out;

    ASSERT_TRUE(put_operator->FullMergeV3(merge_in, &merge_out));

    // The default column is replaced by the last operand; other columns are
    // carried over unchanged.
    const auto& result =
        std::get<MergeOperator::MergeOperationOutputV3::NewColumns>(
            merge_out.new_value);
    ASSERT_EQ(result.size(), entity.size());
    ASSERT_EQ(result[0].first, entity[0].name());
    ASSERT_EQ(result[0].second, operands.back());
    ASSERT_EQ(result[1].first, entity[1].name());
    ASSERT_EQ(result[1].second, entity[1].value());
    ASSERT_EQ(result[2].first, entity[2].name());
    ASSERT_EQ(result[2].second, entity[2].value());
  }

  // Wide-column existing value without default column
  {
    const WideColumns entity{{"one", "1"}, {"two", "2"}};
    MergeOperator::MergeOperationInputV3::ExistingValue existing_value(entity);
    const MergeOperator::MergeOperationInputV3 merge_in(
        key, std::move(existing_value), operands, logger);

    MergeOperator::MergeOperationOutputV3 merge_out;

    ASSERT_TRUE(put_operator->FullMergeV3(merge_in, &merge_out));

    // The last operand becomes a brand new default column, inserted ahead of
    // the existing columns.
    const auto& result =
        std::get<MergeOperator::MergeOperationOutputV3::NewColumns>(
            merge_out.new_value);
    ASSERT_EQ(result.size(), entity.size() + 1);
    ASSERT_EQ(result[0].first, kDefaultWideColumnName);
    ASSERT_EQ(result[0].second, operands.back());
    ASSERT_EQ(result[1].first, entity[0].name());
    ASSERT_EQ(result[1].second, entity[0].value());
    ASSERT_EQ(result[2].first, entity[1].name());
    ASSERT_EQ(result[2].second, entity[1].value());
  }
}
|
|
|
|
|
|
|
|
TEST_F(MergeTest, FullMergeV3FallbackFailure) {
  // Test that the default FullMergeV3 implementation correctly handles the case
  // when FullMergeV2 fails.
  //
  // For every flavor of base value, the V3 fallback must propagate both the
  // failure (false return) and the op_failure_scope set by FullMergeV2.

  const Slice key("foo");
  const MergeOperator::MergeOperationInputV3::OperandList operands{
      "first", "second", "third"};
  constexpr Logger* logger = nullptr;

  // A merge operator whose FullMergeV2 always fails with kMustMerge.
  class FailMergeOperator : public MergeOperator {
   public:
    bool FullMergeV2(const MergeOperationInput& /* merge_in */,
                     MergeOperationOutput* merge_out) const override {
      assert(merge_out);
      merge_out->op_failure_scope = OpFailureScope::kMustMerge;

      return false;
    }

    const char* Name() const override { return "FailMergeOperator"; }
  };

  FailMergeOperator fail_operator;

  // No existing value
  {
    MergeOperator::MergeOperationInputV3::ExistingValue existing_value;
    const MergeOperator::MergeOperationInputV3 merge_in(
        key, std::move(existing_value), operands, logger);

    MergeOperator::MergeOperationOutputV3 merge_out;

    ASSERT_FALSE(fail_operator.FullMergeV3(merge_in, &merge_out));
    ASSERT_EQ(merge_out.op_failure_scope,
              MergeOperator::OpFailureScope::kMustMerge);
  }

  // Plain existing value
  {
    const Slice plain("plain");
    MergeOperator::MergeOperationInputV3::ExistingValue existing_value(plain);
    const MergeOperator::MergeOperationInputV3 merge_in(
        key, std::move(existing_value), operands, logger);

    MergeOperator::MergeOperationOutputV3 merge_out;

    ASSERT_FALSE(fail_operator.FullMergeV3(merge_in, &merge_out));
    ASSERT_EQ(merge_out.op_failure_scope,
              MergeOperator::OpFailureScope::kMustMerge);
  }

  // Wide-column existing value with default column
  {
    const WideColumns entity{
        {kDefaultWideColumnName, "default"}, {"one", "1"}, {"two", "2"}};
    MergeOperator::MergeOperationInputV3::ExistingValue existing_value(entity);
    const MergeOperator::MergeOperationInputV3 merge_in(
        key, std::move(existing_value), operands, logger);

    MergeOperator::MergeOperationOutputV3 merge_out;

    ASSERT_FALSE(fail_operator.FullMergeV3(merge_in, &merge_out));
    ASSERT_EQ(merge_out.op_failure_scope,
              MergeOperator::OpFailureScope::kMustMerge);
  }

  // Wide-column existing value without default column
  {
    const WideColumns entity{{"one", "1"}, {"two", "2"}};
    MergeOperator::MergeOperationInputV3::ExistingValue existing_value(entity);
    const MergeOperator::MergeOperationInputV3 merge_in(
        key, std::move(existing_value), operands, logger);

    MergeOperator::MergeOperationOutputV3 merge_out;

    ASSERT_FALSE(fail_operator.FullMergeV3(merge_in, &merge_out));
    ASSERT_EQ(merge_out.op_failure_scope,
              MergeOperator::OpFailureScope::kMustMerge);
  }
}
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2018-07-13 21:07:53 +00:00
|
|
|
|
|
|
|
int main(int argc, char** argv) {
|
2020-02-20 20:07:53 +00:00
|
|
|
ROCKSDB_NAMESPACE::use_compression = false;
|
2018-07-13 21:07:53 +00:00
|
|
|
if (argc > 1) {
|
2020-02-20 20:07:53 +00:00
|
|
|
ROCKSDB_NAMESPACE::use_compression = true;
|
2018-07-13 21:07:53 +00:00
|
|
|
}
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
|
2018-07-13 21:07:53 +00:00
|
|
|
::testing::InitGoogleTest(&argc, argv);
|
|
|
|
return RUN_ALL_TESTS();
|
2013-03-21 22:59:47 +00:00
|
|
|
}
|