Mirror of https://github.com/facebook/rocksdb.git
synced 2024-11-26 07:30:54 +00:00
07bdcb91fe
Summary: PipelineWriteImpl is an alternative approach to WriteImpl. In WriteImpl, only one thread is allowed to write at a time. That thread does both the WAL and memtable writes for all write threads in the write group, and pending writers wait in a queue until the current writer finishes. In the pipelined write approach, two queues are maintained: a WAL writer queue and a memtable writer queue. All writers (regardless of whether they need to write the WAL) still need to first join the WAL writer queue, and after the housekeeping work and WAL writing, they join the memtable writer queue if needed. The benefits of this approach are: 1. Writers without memtable writes (e.g. the prepare phase of two-phase commit) can exit the write thread once the WAL write is finished; they don't need to wait for memtable writes in case of group commit. 2. Pending writers only need to wait for the previous WAL writer to finish in order to join the write thread, instead of also waiting for the previous memtable writes. Merging #2056 and #2058 into this PR. Closes https://github.com/facebook/rocksdb/pull/2286 Differential Revision: D5054606 Pulled By: yiwu-arbug fbshipit-source-id: ee5b11efd19d3e39d6b7210937b11cefdd4d1c8d
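The two-queue pipeline described above can be hard to picture from prose alone. The sketch below is a minimal illustration of the idea only, not the WriteThread code from this PR: the names (PipelinedWriteSequencer, JoinWalQueue, and so on) are hypothetical, and it serializes both stages with simple tickets and condition variables instead of forming write groups or supporting concurrent memtable writers.

// Illustrative sketch only -- not RocksDB's actual WriteThread/WriteImpl code.
#include <condition_variable>
#include <cstdint>
#include <mutex>

class PipelinedWriteSequencer {
 public:
  // Called when a writer arrives: returns its position in the WAL queue.
  uint64_t JoinWalQueue() {
    std::lock_guard<std::mutex> lock(mu_);
    return wal_tickets_issued_++;
  }

  // Blocks until every earlier WAL writer has finished its WAL write.
  void WaitForWalTurn(uint64_t wal_ticket) {
    std::unique_lock<std::mutex> lock(mu_);
    wal_cv_.wait(lock, [&] { return wal_done_ == wal_ticket; });
  }

  // Must be called while it is still this writer's WAL turn, so memtable
  // tickets are handed out in WAL order.
  uint64_t JoinMemtableQueue() {
    std::lock_guard<std::mutex> lock(mu_);
    return mem_tickets_issued_++;
  }

  // Releasing the WAL turn admits the next WAL writer immediately, even if
  // the current writer still has a memtable insert to do.
  void FinishWal() {
    { std::lock_guard<std::mutex> lock(mu_); ++wal_done_; }
    wal_cv_.notify_all();
  }

  void WaitForMemtableTurn(uint64_t mem_ticket) {
    std::unique_lock<std::mutex> lock(mu_);
    mem_cv_.wait(lock, [&] { return mem_done_ == mem_ticket; });
  }

  void FinishMemtable() {
    { std::lock_guard<std::mutex> lock(mu_); ++mem_done_; }
    mem_cv_.notify_all();
  }

 private:
  std::mutex mu_;
  std::condition_variable wal_cv_;
  std::condition_variable mem_cv_;
  uint64_t wal_tickets_issued_ = 0;
  uint64_t mem_tickets_issued_ = 0;
  uint64_t wal_done_ = 0;  // number of completed WAL writes
  uint64_t mem_done_ = 0;  // number of completed memtable writes
};

// What a single writer thread does under this scheme.
void PipelinedWrite(PipelinedWriteSequencer* seq, bool has_memtable_payload) {
  uint64_t wal_ticket = seq->JoinWalQueue();
  seq->WaitForWalTurn(wal_ticket);
  // ... append the write batch to the WAL here ...
  uint64_t mem_ticket = 0;
  if (has_memtable_payload) {
    mem_ticket = seq->JoinMemtableQueue();
  }
  seq->FinishWal();  // later WAL writers can proceed now
  if (!has_memtable_payload) {
    return;  // e.g. a two-phase-commit prepare exits after the WAL write
  }
  seq->WaitForMemtableTurn(mem_ticket);
  // ... insert the write batch into the memtable here ...
  seq->FinishMemtable();
}

A writer with no memtable payload returns right after FinishWal(), which is the first benefit listed in the summary; the next WAL writer is admitted as soon as FinishWal() runs rather than after the memtable insert, which is the second.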
91 lines
2.5 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
// This source code is also licensed under the GPLv2 license found in the
// COPYING file in the root directory of this source tree.

#include "db/flush_scheduler.h"

#include <cassert>

#include "db/column_family.h"

namespace rocksdb {

// Takes a reference on cfd and pushes it onto the lock-free list of column
// families waiting to be flushed.
void FlushScheduler::ScheduleFlush(ColumnFamilyData* cfd) {
#ifndef NDEBUG
  std::lock_guard<std::mutex> lock(checking_mutex_);
  assert(checking_set_.count(cfd) == 0);
  checking_set_.insert(cfd);
#endif  // NDEBUG
  cfd->Ref();
// Suppress false positive clang analyzer warnings.
#ifndef __clang_analyzer__
  Node* node = new Node{cfd, head_.load(std::memory_order_relaxed)};
  while (!head_.compare_exchange_strong(
      node->next, node, std::memory_order_relaxed, std::memory_order_relaxed)) {
    // failing CAS updates the first param, so we are already set for
    // retry. TakeNextColumnFamily won't happen until after another
    // inter-thread synchronization, so we don't even need release
    // semantics for this CAS
  }
#endif  // __clang_analyzer__
}

// Pops the next scheduled column family, skipping and unref'ing any that have
// been dropped in the meantime. Returns nullptr when none remain; the caller
// owns the reference on the returned column family.
ColumnFamilyData* FlushScheduler::TakeNextColumnFamily() {
#ifndef NDEBUG
  std::lock_guard<std::mutex> lock(checking_mutex_);
#endif  // NDEBUG
  while (true) {
    if (head_.load(std::memory_order_relaxed) == nullptr) {
      return nullptr;
    }

    // dequeue the head
    Node* node = head_.load(std::memory_order_relaxed);
    head_.store(node->next, std::memory_order_relaxed);
    ColumnFamilyData* cfd = node->column_family;
    delete node;

#ifndef NDEBUG
    auto iter = checking_set_.find(cfd);
    assert(iter != checking_set_.end());
    checking_set_.erase(iter);
#endif  // NDEBUG

    if (!cfd->IsDropped()) {
      // success
      return cfd;
    }

    // no longer relevant, retry
    if (cfd->Unref()) {
      delete cfd;
    }
  }
}

// Returns true if no column family is currently scheduled for flush.
bool FlushScheduler::Empty() {
#ifndef NDEBUG
  std::lock_guard<std::mutex> lock(checking_mutex_);
#endif  // NDEBUG
  auto rv = head_.load(std::memory_order_relaxed) == nullptr;
#ifndef NDEBUG
  assert(rv == checking_set_.empty());
#endif  // NDEBUG
  return rv;
}

// Drains the list, releasing the references taken in ScheduleFlush().
void FlushScheduler::Clear() {
  ColumnFamilyData* cfd;
  while ((cfd = TakeNextColumnFamily()) != nullptr) {
    if (cfd->Unref()) {
      delete cfd;
    }
  }
  assert(head_.load(std::memory_order_relaxed) == nullptr);
}

}  // namespace rocksdb
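For orientation, here is a sketch of how callers might drive FlushScheduler. The two helper functions and their parameters are made up for illustration (roughly, RocksDB's write path schedules flushes when a memtable fills and DBImpl later drains them), but the Ref/Unref contract shown matches what ScheduleFlush() and TakeNextColumnFamily() do above.

// Hypothetical caller sketch -- these helpers are not part of RocksDB; they
// only illustrate the Ref/Unref contract of FlushScheduler.
#include "db/column_family.h"
#include "db/flush_scheduler.h"

namespace rocksdb {

// Write path: when a memtable is judged full, hand its column family to the
// scheduler.  ScheduleFlush() takes its own reference on cfd.
void MaybeScheduleFlush(FlushScheduler* scheduler, ColumnFamilyData* cfd,
                        bool memtable_is_full) {
  if (memtable_is_full) {
    scheduler->ScheduleFlush(cfd);
  }
}

// Flush path: drain the scheduler.  TakeNextColumnFamily() already skips
// dropped column families, so every cfd returned here is still live, and the
// caller must release the reference it is handed.
void FlushPendingColumnFamilies(FlushScheduler* scheduler) {
  ColumnFamilyData* cfd;
  while ((cfd = scheduler->TakeNextColumnFamily()) != nullptr) {
    // ... switch the active memtable and flush the immutable one here ...
    if (cfd->Unref()) {
      delete cfd;
    }
  }
}

}  // namespace rocksdb

Clear() above covers the complementary case where pending entries need to be discarded without flushing, for example during shutdown.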