mirror of
https://github.com/facebook/rocksdb.git
synced 2024-11-26 07:30:54 +00:00
Remove global locks from FlushScheduler (#5372)
Summary: FlushScheduler's methods are instrumented with debug-time locks to check the scheduler state against a simple container definition. Since https://github.com/facebook/rocksdb/pull/2286 the scope of such locks has been widened to the entire methods' bodies. The result is that the concurrency exercised during testing (in debug mode) is stricter than the concurrency level manifested at runtime (in release mode). The patch reverts this change to reduce the scope of such locks. Pull Request resolved: https://github.com/facebook/rocksdb/pull/5372 Differential Revision: D15545831 Pulled By: maysamyabandeh fbshipit-source-id: 01d69191afb1dd807d4bdc990fc74813ae7b5426
This commit is contained in:
parent
641cc8d541
commit
b2584577fa
|
@@ -13,9 +13,11 @@ namespace rocksdb {
|
||||||
|
|
||||||
void FlushScheduler::ScheduleFlush(ColumnFamilyData* cfd) {
|
void FlushScheduler::ScheduleFlush(ColumnFamilyData* cfd) {
|
||||||
#ifndef NDEBUG
|
#ifndef NDEBUG
|
||||||
std::lock_guard<std::mutex> lock(checking_mutex_);
|
{
|
||||||
assert(checking_set_.count(cfd) == 0);
|
std::lock_guard<std::mutex> lock(checking_mutex_);
|
||||||
checking_set_.insert(cfd);
|
assert(checking_set_.count(cfd) == 0);
|
||||||
|
checking_set_.insert(cfd);
|
||||||
|
}
|
||||||
#endif // NDEBUG
|
#endif // NDEBUG
|
||||||
cfd->Ref();
|
cfd->Ref();
|
||||||
// Suppress false positive clang analyzer warnings.
|
// Suppress false positive clang analyzer warnings.
|
||||||
|
@@ -32,9 +34,6 @@ void FlushScheduler::ScheduleFlush(ColumnFamilyData* cfd) {
|
||||||
}
|
}
|
||||||
|
|
||||||
ColumnFamilyData* FlushScheduler::TakeNextColumnFamily() {
|
ColumnFamilyData* FlushScheduler::TakeNextColumnFamily() {
|
||||||
#ifndef NDEBUG
|
|
||||||
std::lock_guard<std::mutex> lock(checking_mutex_);
|
|
||||||
#endif // NDEBUG
|
|
||||||
while (true) {
|
while (true) {
|
||||||
if (head_.load(std::memory_order_relaxed) == nullptr) {
|
if (head_.load(std::memory_order_relaxed) == nullptr) {
|
||||||
return nullptr;
|
return nullptr;
|
||||||
|
@@ -47,9 +46,12 @@ ColumnFamilyData* FlushScheduler::TakeNextColumnFamily() {
|
||||||
delete node;
|
delete node;
|
||||||
|
|
||||||
#ifndef NDEBUG
|
#ifndef NDEBUG
|
||||||
auto iter = checking_set_.find(cfd);
|
{
|
||||||
assert(iter != checking_set_.end());
|
std::lock_guard<std::mutex> lock(checking_mutex_);
|
||||||
checking_set_.erase(iter);
|
auto iter = checking_set_.find(cfd);
|
||||||
|
assert(iter != checking_set_.end());
|
||||||
|
checking_set_.erase(iter);
|
||||||
|
}
|
||||||
#endif // NDEBUG
|
#endif // NDEBUG
|
||||||
|
|
||||||
if (!cfd->IsDropped()) {
|
if (!cfd->IsDropped()) {
|
||||||
|
@@ -65,12 +67,12 @@ ColumnFamilyData* FlushScheduler::TakeNextColumnFamily() {
|
||||||
}
|
}
|
||||||
|
|
||||||
bool FlushScheduler::Empty() {
|
bool FlushScheduler::Empty() {
|
||||||
#ifndef NDEBUG
|
|
||||||
std::lock_guard<std::mutex> lock(checking_mutex_);
|
|
||||||
#endif // NDEBUG
|
|
||||||
auto rv = head_.load(std::memory_order_relaxed) == nullptr;
|
auto rv = head_.load(std::memory_order_relaxed) == nullptr;
|
||||||
#ifndef NDEBUG
|
#ifndef NDEBUG
|
||||||
assert(rv == checking_set_.empty());
|
std::lock_guard<std::mutex> lock(checking_mutex_);
|
||||||
|
// Empty is allowed to be called concurrently with ScheduleFlush. It would
|
||||||
|
// only miss the recent schedules.
|
||||||
|
assert((rv == checking_set_.empty()) || rv);
|
||||||
#endif // NDEBUG
|
#endif // NDEBUG
|
||||||
return rv;
|
return rv;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue