// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "db/write_callback.h"

#include <atomic>
#include <functional>
#include <string>
#include <utility>
#include <vector>

#include "db/db_impl/db_impl.h"
#include "port/port.h"
#include "rocksdb/db.h"
#include "rocksdb/user_write_callback.h"
#include "rocksdb/write_batch.h"
#include "test_util/sync_point.h"
#include "test_util/testharness.h"
#include "util/random.h"

using std::string;

namespace ROCKSDB_NAMESPACE {

class WriteCallbackTest : public testing::Test {
 public:
  string dbname;

  WriteCallbackTest() {
    dbname = test::PerThreadDBPath("write_callback_testdb");
  }
};

class WriteCallbackTestWriteCallback1 : public WriteCallback {
 public:
  bool was_called = false;

  Status Callback(DB* db) override {
    was_called = true;

    // Make sure db is a DBImpl
    DBImpl* db_impl = dynamic_cast<DBImpl*>(db);
    if (db_impl == nullptr) {
      return Status::InvalidArgument("");
    }

    return Status::OK();
  }

  bool AllowWriteBatching() override { return true; }
};

class WriteCallbackTestWriteCallback2 : public WriteCallback {
 public:
  Status Callback(DB* /*db*/) override { return Status::Busy(); }
  bool AllowWriteBatching() override { return true; }
};
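
// A WriteCallback used by the parameterized test below: it atomically records
// whether it was invoked, and can be configured to fail the write with
// Status::Busy() and to allow or disallow write batching.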
class MockWriteCallback : public WriteCallback {
 public:
  bool should_fail_ = false;
  bool allow_batching_ = false;
  std::atomic<bool> was_called_{false};

  MockWriteCallback() = default;

  MockWriteCallback(const MockWriteCallback& other) {
    should_fail_ = other.should_fail_;
    allow_batching_ = other.allow_batching_;
    was_called_.store(other.was_called_.load());
  }

  Status Callback(DB* /*db*/) override {
    was_called_.store(true);
    if (should_fail_) {
      return Status::Busy();
    } else {
      return Status::OK();
    }
  }

  bool AllowWriteBatching() override { return allow_batching_; }
};
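
// A UserWriteCallback that records, via atomics, the two notification points
// of the interface: when the write has been enqueued and when its WAL write
// has finished.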
class MockUserWriteCallback : public UserWriteCallback {
 public:
  std::atomic<bool> write_enqueued_{false};
  std::atomic<bool> wal_write_done_{false};

  MockUserWriteCallback() = default;

  MockUserWriteCallback(const MockUserWriteCallback& other) {
    write_enqueued_.store(other.write_enqueued_.load());
    wal_write_done_.store(other.wal_write_done_.load());
  }

  void OnWriteEnqueued() override { write_enqueued_.store(true); }

  void OnWalWriteFinish() override { wal_write_done_.store(true); }

  void Reset() {
    write_enqueued_.store(false);
    wal_write_done_.store(false);
  }
};

#if !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
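
// Parameterized over (in this order): unordered_write, seq_per_batch,
// two_write_queues, allow_concurrent_memtable_write (allow_parallel),
// allow_batching, enable_WAL and enable_pipelined_write.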
class WriteCallbackPTest
    : public WriteCallbackTest,
      public ::testing::WithParamInterface<
          std::tuple<bool, bool, bool, bool, bool, bool, bool>> {
 public:
  WriteCallbackPTest() {
    std::tie(unordered_write_, seq_per_batch_, two_queues_, allow_parallel_,
             allow_batching_, enable_WAL_, enable_pipelined_write_) =
        GetParam();
  }

 protected:
  bool unordered_write_;
  bool seq_per_batch_;
  bool two_queues_;
  bool allow_parallel_;
  bool allow_batching_;
  bool enable_WAL_;
  bool enable_pipelined_write_;
};

TEST_P(WriteCallbackPTest, WriteWithCallbackTest) {
  struct WriteOP {
    WriteOP(bool should_fail = false) { callback_.should_fail_ = should_fail; }

    void Put(const string& key, const string& val) {
      kvs_.emplace_back(key, val);
      ASSERT_OK(write_batch_.Put(key, val));
    }

    void Clear() {
      kvs_.clear();
      write_batch_.Clear();
      callback_.was_called_.store(false);
      user_write_cb_.Reset();
    }

    MockWriteCallback callback_;
    MockUserWriteCallback user_write_cb_;
    WriteBatch write_batch_;
    std::vector<std::pair<string, string>> kvs_;
  };

  // In each scenario we'll launch multiple threads to write. The size of each
  // array equals the number of threads, and each boolean denotes whether the
  // callback of the corresponding thread should succeed or fail.
  std::vector<std::vector<WriteOP>> write_scenarios = {
      {true},
      {false},
      {false, false},
      {true, true},
      {true, false},
      {false, true},
      {false, false, false},
      {true, true, true},
      {false, true, false},
      {true, false, true},
      {true, false, false, false, false},
      {false, false, false, false, true},
      {false, false, true, false, true},
  };

  for (auto& write_group : write_scenarios) {
    Options options;
    options.create_if_missing = true;
    options.unordered_write = unordered_write_;
    options.allow_concurrent_memtable_write = allow_parallel_;
    options.enable_pipelined_write = enable_pipelined_write_;
    options.two_write_queues = two_queues_;
    // Skip unsupported combinations
    if (options.enable_pipelined_write && seq_per_batch_) {
      continue;
    }
    if (options.enable_pipelined_write && options.two_write_queues) {
      continue;
    }
    if (options.unordered_write && !options.allow_concurrent_memtable_write) {
      continue;
    }
    if (options.unordered_write && options.enable_pipelined_write) {
      continue;
    }

    ReadOptions read_options;
    DB* db;
    DBImpl* db_impl;

    ASSERT_OK(DestroyDB(dbname, options));

    DBOptions db_options(options);
    ColumnFamilyOptions cf_options(options);
    std::vector<ColumnFamilyDescriptor> column_families;
    column_families.emplace_back(kDefaultColumnFamilyName, cf_options);
    std::vector<ColumnFamilyHandle*> handles;
    auto open_s = DBImpl::Open(db_options, dbname, column_families, &handles,
                               &db, seq_per_batch_, true /* batch_per_txn */,
                               false /* is_retry */, nullptr /* can_retry */);
    ASSERT_OK(open_s);
    assert(handles.size() == 1);
    delete handles[0];

    db_impl = dynamic_cast<DBImpl*>(db);
    ASSERT_TRUE(db_impl);

    // Writers that have called JoinBatchGroup.
    std::atomic<uint64_t> threads_joining(0);
    // Writers that have linked to the queue.
    std::atomic<uint64_t> threads_linked(0);
    // Writers that pass WriteThread::JoinBatchGroup:Wait sync-point.
    std::atomic<uint64_t> threads_verified(0);

    std::atomic<uint64_t> seq(db_impl->GetLatestSequenceNumber());
    ASSERT_EQ(db_impl->GetLatestSequenceNumber(), 0);
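
    // The sync points below make writers link to the write queue one at a
    // time, so the test can identify the group leader deterministically and
    // verify each writer's WriteThread state.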
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
        "WriteThread::JoinBatchGroup:Start", [&](void*) {
          uint64_t cur_threads_joining = threads_joining.fetch_add(1);
          // Wait for the last joined writer to link to the queue.
          // In this way the writers link to the queue one by one.
          // This allows us to confidently detect the first writer
          // who increases threads_linked as the leader.
          while (threads_linked.load() < cur_threads_joining) {
          }
        });

    // Verification once writers call JoinBatchGroup.
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
        "WriteThread::JoinBatchGroup:Wait", [&](void* arg) {
          uint64_t cur_threads_linked = threads_linked.fetch_add(1);
          bool is_leader = false;
          bool is_last = false;

          // who am i
          is_leader = (cur_threads_linked == 0);
          is_last = (cur_threads_linked == write_group.size() - 1);

          // check my state
          auto* writer = static_cast<WriteThread::Writer*>(arg);

          if (is_leader) {
            ASSERT_TRUE(writer->state ==
                        WriteThread::State::STATE_GROUP_LEADER);
          } else {
            ASSERT_TRUE(writer->state == WriteThread::State::STATE_INIT);
          }

          // (meta test) the first WriteOP should indeed be the first
          // and the last should be the last (all others can be out of
          // order)
          if (is_leader) {
            ASSERT_TRUE(writer->callback->Callback(nullptr).ok() ==
                        !write_group.front().callback_.should_fail_);
          } else if (is_last) {
            ASSERT_TRUE(writer->callback->Callback(nullptr).ok() ==
                        !write_group.back().callback_.should_fail_);
          }

          threads_verified.fetch_add(1);
          // Wait here until the verification in this sync-point callback
          // finishes for all writers.
          while (threads_verified.load() < write_group.size()) {
          }
        });

    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
        "WriteThread::JoinBatchGroup:DoneWaiting", [&](void* arg) {
          // check my state
          auto* writer = static_cast<WriteThread::Writer*>(arg);

          if (!allow_batching_) {
            // no batching so everyone should be a leader
            ASSERT_TRUE(writer->state ==
                        WriteThread::State::STATE_GROUP_LEADER);
          } else if (!allow_parallel_) {
            ASSERT_TRUE(writer->state == WriteThread::State::STATE_COMPLETED ||
                        (enable_pipelined_write_ &&
                         writer->state ==
                             WriteThread::State::STATE_MEMTABLE_WRITER_LEADER));
          }
        });

    std::atomic<uint32_t> thread_num(0);
    std::atomic<char> dummy_key(0);

    // Each write thread creates a random write batch and writes it to the DB
    // with a write callback.
    std::function<void()> write_with_callback_func = [&]() {
      uint32_t i = thread_num.fetch_add(1);
      Random rnd(i);

      // leaders gotta lead
      while (i > 0 && threads_verified.load() < 1) {
      }

      // loser has to lose
      while (i == write_group.size() - 1 &&
             threads_verified.load() < write_group.size() - 1) {
      }

      auto& write_op = write_group.at(i);
      write_op.Clear();
      write_op.callback_.allow_batching_ = allow_batching_;
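
      // Track the expected last sequence number: without seq_per_batch_ every
      // successfully written key consumes one sequence number; with
      // seq_per_batch_ each successful batch consumes exactly one.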

      // insert some keys
      for (uint32_t j = 0; j < rnd.Next() % 50; j++) {
        // grab unique key
        char my_key = dummy_key.fetch_add(1);

        string skey(5, my_key);
        string sval(10, my_key);
        write_op.Put(skey, sval);

        if (!write_op.callback_.should_fail_ && !seq_per_batch_) {
          seq.fetch_add(1);
        }
      }
      if (!write_op.callback_.should_fail_ && seq_per_batch_) {
        seq.fetch_add(1);
      }

      WriteOptions woptions;
      woptions.disableWAL = !enable_WAL_;
      woptions.sync = enable_WAL_;
      if (woptions.protection_bytes_per_key > 0) {
        ASSERT_OK(WriteBatchInternal::UpdateProtectionInfo(
            &write_op.write_batch_, woptions.protection_bytes_per_key));
      }
      Status s;
      if (seq_per_batch_) {
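        // Write a single batch through WriteImpl; with two_write_queues_ a
        // PreReleaseCallback is used to publish the batch's last sequence
        // number once the write is released.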
        class PublishSeqCallback : public PreReleaseCallback {
         public:
          PublishSeqCallback(DBImpl* db_impl_in) : db_impl_(db_impl_in) {}
          Status Callback(SequenceNumber last_seq, bool /*not used*/, uint64_t,
                          size_t /*index*/, size_t /*total*/) override {
            db_impl_->SetLastPublishedSequence(last_seq);
            return Status::OK();
          }
          DBImpl* db_impl_;
        } publish_seq_callback(db_impl);
        // seq_per_batch_ requires a natural batch separator or Noop
        ASSERT_OK(WriteBatchInternal::InsertNoop(&write_op.write_batch_));
        const size_t ONE_BATCH = 1;
        s = db_impl->WriteImpl(woptions, &write_op.write_batch_,
                               &write_op.callback_, &write_op.user_write_cb_,
                               nullptr, 0, false, nullptr, ONE_BATCH,
                               two_queues_ ? &publish_seq_callback : nullptr);
      } else {
        s = db_impl->WriteWithCallback(woptions, &write_op.write_batch_,
                                       &write_op.callback_,
                                       &write_op.user_write_cb_);
      }

      ASSERT_TRUE(write_op.user_write_cb_.write_enqueued_.load());
      if (write_op.callback_.should_fail_) {
        ASSERT_TRUE(s.IsBusy());
        ASSERT_FALSE(write_op.user_write_cb_.wal_write_done_.load());
      } else {
        ASSERT_OK(s);
        if (enable_WAL_) {
          ASSERT_TRUE(write_op.user_write_cb_.wal_write_done_.load());
        } else {
          ASSERT_FALSE(write_op.user_write_cb_.wal_write_done_.load());
        }
      }
    };

    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

    // do all the writes
    std::vector<port::Thread> threads;
    for (uint32_t i = 0; i < write_group.size(); i++) {
      threads.emplace_back(write_with_callback_func);
    }
    for (auto& t : threads) {
      t.join();
    }

    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();

    // check for keys
    string value;
    for (auto& w : write_group) {
      ASSERT_TRUE(w.callback_.was_called_.load());
      for (auto& kvp : w.kvs_) {
        if (w.callback_.should_fail_) {
          ASSERT_TRUE(db->Get(read_options, kvp.first, &value).IsNotFound());
        } else {
          ASSERT_OK(db->Get(read_options, kvp.first, &value));
          ASSERT_EQ(value, kvp.second);
        }
      }
    }

    ASSERT_EQ(seq.load(), db_impl->TEST_GetLastVisibleSequence());

    delete db;
    ASSERT_OK(DestroyDB(dbname, options));
  }
}
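
// Run WriteWithCallbackTest over all 128 combinations of the seven boolean
// parameters; unsupported combinations are skipped inside the test body.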
INSTANTIATE_TEST_CASE_P(WriteCallbackPTest, WriteCallbackPTest,
                        ::testing::Combine(::testing::Bool(), ::testing::Bool(),
                                           ::testing::Bool(), ::testing::Bool(),
                                           ::testing::Bool(), ::testing::Bool(),
                                           ::testing::Bool()));
#endif  // !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)

TEST_F(WriteCallbackTest, WriteCallBackTest) {
  Options options;
  WriteOptions write_options;
  ReadOptions read_options;
  string value;
  DB* db;
  DBImpl* db_impl;

  ASSERT_OK(DestroyDB(dbname, options));

  options.create_if_missing = true;
  Status s = DB::Open(options, dbname, &db);
  ASSERT_OK(s);

  db_impl = dynamic_cast<DBImpl*>(db);
  ASSERT_TRUE(db_impl);

  WriteBatch wb;

  ASSERT_OK(wb.Put("a", "value.a"));
  ASSERT_OK(wb.Delete("x"));

  // Test a simple Write
  s = db->Write(write_options, &wb);
  ASSERT_OK(s);

  s = db->Get(read_options, "a", &value);
  ASSERT_OK(s);
  ASSERT_EQ("value.a", value);

  // Test WriteWithCallback
  WriteCallbackTestWriteCallback1 callback1;
  WriteBatch wb2;

  ASSERT_OK(wb2.Put("a", "value.a2"));

  s = db_impl->WriteWithCallback(write_options, &wb2, &callback1);
  ASSERT_OK(s);
  ASSERT_TRUE(callback1.was_called);

  s = db->Get(read_options, "a", &value);
  ASSERT_OK(s);
  ASSERT_EQ("value.a2", value);

  // Test WriteWithCallback for a callback that fails
  WriteCallbackTestWriteCallback2 callback2;
  WriteBatch wb3;

  ASSERT_OK(wb3.Put("a", "value.a3"));

  s = db_impl->WriteWithCallback(write_options, &wb3, &callback2);
  ASSERT_NOK(s);

  s = db->Get(read_options, "a", &value);
  ASSERT_OK(s);
  ASSERT_EQ("value.a2", value);
  MockUserWriteCallback user_write_cb;
  WriteBatch wb4;
  ASSERT_OK(wb4.Put("a", "value.a4"));

  ASSERT_OK(db->WriteWithCallback(write_options, &wb4, &user_write_cb));
  ASSERT_OK(db->Get(read_options, "a", &value));
  ASSERT_EQ(value, "value.a4");
  ASSERT_TRUE(user_write_cb.write_enqueued_.load());
  ASSERT_TRUE(user_write_cb.wal_write_done_.load());

  delete db;
  ASSERT_OK(DestroyDB(dbname, options));
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}