// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//

#pragma once

#include <functional>
#include <memory>
#include <queue>
#include <unordered_map>
#include <utility>
#include <vector>

#include "monitoring/instrumented_mutex.h"
#include "rocksdb/env.h"
#include "test_util/sync_point.h"
#include "util/mutexlock.h"

namespace ROCKSDB_NAMESPACE {

// A Timer class to handle repeated work.
//
// A single timer instance can handle multiple functions via a single thread.
// It is better to leave long-running work to a dedicated thread pool.
//
// Timer can be started by calling `Start()`, and ended by calling
// `Shutdown()`. Work (in terms of a `void function`) can be scheduled by
// calling `Add` with a unique function name, and de-scheduled by calling
// `Cancel`. Many functions can be added.
//
// Impl Details:
// A heap is used to keep track of when the next timer goes off.
// A map from a function name to the function keeps track of all the
// functions.
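//
// Example usage (a minimal sketch; `env` stands for any Env*, e.g.
// Env::Default(), and `print_stats` for some std::function<void()> --
// both are placeholders, not names defined in this header):
//
//   Timer timer(env);
//   timer.Start();
//   // Run `print_stats` one second from now, then every ten seconds,
//   // until it is cancelled or the timer is shut down.
//   timer.Add(print_stats, "print_stats", 1000000 /* start_after_us */,
//             10000000 /* repeat_every_us */);
//   ...
//   timer.Cancel("print_stats");
//   timer.Shutdown();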
class Timer {
 public:
  explicit Timer(Env* env)
      : env_(env),
        mutex_(env),
        cond_var_(&mutex_),
        running_(false),
        executing_task_(false) {}

  // Add a new function. If fn_name already exists, the existing entry is
  // overridden, regardless of whether it is pending removal (invalid) or
  // not.
  // repeat_every_us == 0 means do not repeat
  void Add(std::function<void()> fn,
           const std::string& fn_name,
           uint64_t start_after_us,
           uint64_t repeat_every_us) {
    std::unique_ptr<FunctionInfo> fn_info(
        new FunctionInfo(std::move(fn), fn_name,
                         env_->NowMicros() + start_after_us, repeat_every_us));
    {
      InstrumentedMutexLock l(&mutex_);
      auto it = map_.find(fn_name);
      if (it == map_.end()) {
        heap_.push(fn_info.get());
        map_.emplace(std::make_pair(fn_name, std::move(fn_info)));
      } else {
        // The function name already exists; override the entry in place.
        it->second->fn = std::move(fn_info->fn);
        it->second->valid = true;
        it->second->next_run_time_us = env_->NowMicros() + start_after_us;
        it->second->repeat_every_us = repeat_every_us;
      }
    }
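    // Wake the timer thread so it re-examines the heap and picks up the
    // newly scheduled (possibly earliest) task.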
    cond_var_.SignalAll();
  }

  void Cancel(const std::string& fn_name) {
    InstrumentedMutexLock l(&mutex_);

    // Mark the function with fn_name as invalid so that it will not be
    // requeued.
    auto it = map_.find(fn_name);
    if (it != map_.end() && it->second) {
      it->second->Cancel();
    }

    // If the currently running function is fn_name, then we need to wait
    // until it finishes before returning to the caller.
    while (!heap_.empty() && executing_task_) {
      FunctionInfo* func_info = heap_.top();
      assert(func_info);
      if (func_info->name == fn_name) {
        WaitForTaskCompleteIfNecessary();
      } else {
        break;
      }
    }
  }

  void CancelAll() {
    InstrumentedMutexLock l(&mutex_);
    CancelAllWithLock();
  }

  // Start the Timer
  bool Start() {
    InstrumentedMutexLock l(&mutex_);
    if (running_) {
      return false;
    }

    running_ = true;
    thread_.reset(new port::Thread(&Timer::Run, this));
    return true;
  }

  // Shutdown the Timer
  bool Shutdown() {
    {
      InstrumentedMutexLock l(&mutex_);
      if (!running_) {
        return false;
      }
      running_ = false;
      CancelAllWithLock();
      cond_var_.SignalAll();
    }

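    // Join outside the mutex so the timer thread can observe
    // running_ == false and finish any in-flight work.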
    if (thread_) {
      thread_->join();
    }
    return true;
  }

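  // Returns true if at least one scheduled (i.e., not cancelled) function
  // remains in the map.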
  bool HasPendingTask() const {
    InstrumentedMutexLock l(&mutex_);
    for (auto it = map_.begin(); it != map_.end(); it++) {
      if (it->second->IsValid()) {
        return true;
      }
    }
    return false;
  }

#ifndef NDEBUG
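  // Test-only helper: blocks until the timer thread has run the tasks that
  // are currently due, optionally invoking `callback` in between.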
  void TEST_WaitForRun(std::function<void()> callback = nullptr) {
    InstrumentedMutexLock l(&mutex_);
    while (!heap_.empty() &&
           heap_.top()->next_run_time_us <= env_->NowMicros()) {
      cond_var_.TimedWait(env_->NowMicros() + 1000);
    }
    if (callback != nullptr) {
      callback();
    }
    cond_var_.SignalAll();
    do {
      cond_var_.TimedWait(env_->NowMicros() + 1000);
    } while (!heap_.empty() &&
             heap_.top()->next_run_time_us <= env_->NowMicros());
  }

  size_t TEST_GetPendingTaskNum() const {
    InstrumentedMutexLock l(&mutex_);
    size_t ret = 0;
    for (auto it = map_.begin(); it != map_.end(); it++) {
      if (it->second->IsValid()) {
        ret++;
      }
    }
    return ret;
  }
#endif  // NDEBUG

 private:
  void Run() {
    InstrumentedMutexLock l(&mutex_);

    while (running_) {
      if (heap_.empty()) {
        // wait
        TEST_SYNC_POINT("Timer::Run::Waiting");
        cond_var_.Wait();
        continue;
      }

      FunctionInfo* current_fn = heap_.top();
      assert(current_fn);

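      // A cancelled task is removed lazily here: drop it from the heap and
      // erase it from the map, which frees the FunctionInfo.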
      if (!current_fn->IsValid()) {
        heap_.pop();
        map_.erase(current_fn->name);
        continue;
      }

      if (current_fn->next_run_time_us <= env_->NowMicros()) {
        // Make a copy of the function so it won't be changed after mutex_
        // is unlocked.
        std::function<void()> fn = current_fn->fn;
        executing_task_ = true;
        mutex_.Unlock();
        // Execute the work
        fn();
        mutex_.Lock();
        executing_task_ = false;
        cond_var_.SignalAll();

        // Remove the work from the heap once it is done executing.
        // Note that we are just removing the pointer from the heap. Its
        // memory is still managed in the map (as it holds a unique ptr).
        // So current_fn is still a valid ptr.
        heap_.pop();

        // current_fn may be cancelled already.
        if (current_fn->IsValid() && current_fn->repeat_every_us > 0) {
          assert(running_);
          current_fn->next_run_time_us =
              env_->NowMicros() + current_fn->repeat_every_us;

          // Schedule new work into the heap with new time.
          heap_.push(current_fn);
        }
      } else {
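        // The top task is not due yet: sleep until its scheduled time, or
        // until signalled (e.g., by Add() scheduling an earlier task or by
        // Shutdown()).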
        cond_var_.TimedWait(current_fn->next_run_time_us);
      }
    }
  }

  void CancelAllWithLock() {
    mutex_.AssertHeld();
    if (map_.empty() && heap_.empty()) {
      return;
    }

    // With mutex_ held, set all tasks to invalid so that they will not be
    // re-queued.
    for (auto& elem : map_) {
      auto& func_info = elem.second;
      assert(func_info);
      func_info->Cancel();
    }

    // WaitForTaskCompleteIfNecessary() may release mutex_
    WaitForTaskCompleteIfNecessary();

    while (!heap_.empty()) {
      heap_.pop();
    }
    map_.clear();
  }

  // A wrapper around std::function to keep track of when it should run next
  // and at what frequency.
  struct FunctionInfo {
    // the actual work
    std::function<void()> fn;
    // name of the function
    std::string name;
    // when the function should run next
    uint64_t next_run_time_us;
    // repeat interval
    uint64_t repeat_every_us;
    // controls whether this function is valid.
    // A function is valid upon construction and until someone explicitly
    // calls `Cancel()`.
    bool valid;

    FunctionInfo(std::function<void()>&& _fn, const std::string& _name,
                 const uint64_t _next_run_time_us, uint64_t _repeat_every_us)
        : fn(std::move(_fn)),
          name(_name),
          next_run_time_us(_next_run_time_us),
          repeat_every_us(_repeat_every_us),
          valid(true) {}

    void Cancel() {
      valid = false;
    }

    bool IsValid() const { return valid; }
  };

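  // Precondition: mutex_ is held. Blocks until the currently executing task
  // (if any) finishes; cond_var_.Wait() releases mutex_ while blocked.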
  void WaitForTaskCompleteIfNecessary() {
    mutex_.AssertHeld();
    while (executing_task_) {
      TEST_SYNC_POINT("Timer::WaitForTaskCompleteIfNecessary:TaskExecuting");
      cond_var_.Wait();
    }
  }

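  // Orders FunctionInfo pointers by next_run_time_us. The greater-than
  // comparison turns std::priority_queue into a min-heap, so heap_.top()
  // is always the function scheduled to run soonest.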
  struct RunTimeOrder {
    bool operator()(const FunctionInfo* f1,
                    const FunctionInfo* f2) {
      return f1->next_run_time_us > f2->next_run_time_us;
    }
  };

  Env* const env_;
  // This mutex controls both the heap_ and the map_. It must be held when
  // making any changes to them.
  mutable InstrumentedMutex mutex_;
  InstrumentedCondVar cond_var_;
  std::unique_ptr<port::Thread> thread_;
  bool running_;
  bool executing_task_;

  std::priority_queue<FunctionInfo*,
                      std::vector<FunctionInfo*>,
                      RunTimeOrder> heap_;

  // In addition to providing a mapping from a function name to a function,
  // it is also responsible for memory management.
  std::unordered_map<std::string, std::unique_ptr<FunctionInfo>> map_;
};

}  // namespace ROCKSDB_NAMESPACE