// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//

#pragma once

#include <functional>
#include <memory>
#include <queue>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "monitoring/instrumented_mutex.h"
#include "rocksdb/system_clock.h"
#include "test_util/sync_point.h"
#include "util/mutexlock.h"

namespace ROCKSDB_NAMESPACE {

// A Timer class to handle repeated work.
//
// `Start()` and `Shutdown()` are currently not thread-safe. The client must
// serialize calls to these two member functions.
//
// A single timer instance can handle multiple functions via a single thread.
// It is better to leave long-running work to a dedicated thread pool.
//
// Timer can be started by calling `Start()`, and ended by calling `Shutdown()`.
// Work (in terms of a `void function`) can be scheduled by calling `Add` with
// a unique function name and de-scheduled by calling `Cancel`.
// Many functions can be added.
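//
// Example usage (a minimal sketch; `PrintStats` and the chosen intervals are
// illustrative placeholders, not part of this API):
//
//   Timer timer(SystemClock::Default().get());
//   timer.Start();
//   // First run after 10s, then repeat 60s after each run finishes.
//   timer.Add([]() { PrintStats(); }, "print_stats",
//             10 * 1000 * 1000 /* start_after_us */,
//             60 * 1000 * 1000 /* repeat_every_us */);
//   ...
//   timer.Cancel("print_stats");
//   timer.Shutdown();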
//
// Impl Details:
// A heap is used to keep track of when the next timer goes off.
// A map from a function name to the function keeps track of all the functions.
class Timer {
 public:
  explicit Timer(SystemClock* clock)
      : clock_(clock),
        mutex_(clock),
        cond_var_(&mutex_),
        running_(false),
        executing_task_(false) {}

  ~Timer() { Shutdown(); }

  // Add a new function to run.
  // fn_name has to be unique; otherwise, the new function overrides the
  // existing one with the same name, regardless of whether the existing one
  // is pending removal (invalid) or not.
  // start_after_us is the initial delay.
  // repeat_every_us is the interval between the ending time of the last call
  // and the starting time of the next call. For example, if
  // repeat_every_us = 2000 and the function takes 1000us to run: when it
  // starts at time [now]us, it finishes at [now]+1000us, and the 2nd run
  // starts at [now]+3000us.
  // repeat_every_us == 0 means do not repeat.
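  //
  // For instance (illustrative sketch; `PruneObsoleteFiles` is a placeholder,
  // not an existing function), a one-shot task that runs once, 5 seconds from
  // now, and is never re-queued:
  //   timer.Add([]() { PruneObsoleteFiles(); }, "prune_obsolete_files",
  //             5 * 1000 * 1000 /* start_after_us */,
  //             0 /* repeat_every_us: do not repeat */);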
  void Add(std::function<void()> fn, const std::string& fn_name,
           uint64_t start_after_us, uint64_t repeat_every_us) {
    std::unique_ptr<FunctionInfo> fn_info(new FunctionInfo(
        std::move(fn), fn_name, clock_->NowMicros() + start_after_us,
        repeat_every_us));
    {
      InstrumentedMutexLock l(&mutex_);
      auto it = map_.find(fn_name);
      if (it == map_.end()) {
        heap_.push(fn_info.get());
        map_.emplace(std::make_pair(fn_name, std::move(fn_info)));
      } else {
        // If it already exists, override the existing one.
        it->second->fn = std::move(fn_info->fn);
        it->second->valid = true;
        it->second->next_run_time_us = clock_->NowMicros() + start_after_us;
        it->second->repeat_every_us = repeat_every_us;
      }
    }
    cond_var_.SignalAll();
  }
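
  // De-schedule the function registered under `fn_name`. The function is
  // marked invalid so that it will not be re-queued; if it is currently
  // executing, this call waits until that execution finishes before returning.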
  void Cancel(const std::string& fn_name) {
    InstrumentedMutexLock l(&mutex_);

    // Mark the function with fn_name as invalid so that it will not be
    // requeued.
    auto it = map_.find(fn_name);
    if (it != map_.end() && it->second) {
      it->second->Cancel();
    }

    // If the currently running function is fn_name, then we need to wait
    // until it finishes before returning to caller.
    while (!heap_.empty() && executing_task_) {
      FunctionInfo* func_info = heap_.top();
      assert(func_info);
      if (func_info->name == fn_name) {
        WaitForTaskCompleteIfNecessary();
      } else {
        break;
      }
    }
  }
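
  // Cancel all registered functions: mark them invalid so none will be
  // re-queued, wait for any currently executing task to finish, then clear
  // the internal queue and map.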
  void CancelAll() {
    InstrumentedMutexLock l(&mutex_);
    CancelAllWithLock();
  }

  // Start the Timer
  bool Start() {
    InstrumentedMutexLock l(&mutex_);
    if (running_) {
      return false;
    }

    running_ = true;
    thread_.reset(new port::Thread(&Timer::Run, this));
    return true;
  }

  // Shutdown the Timer
  bool Shutdown() {
    {
      InstrumentedMutexLock l(&mutex_);
      if (!running_) {
        return false;
      }
      running_ = false;
      CancelAllWithLock();
      cond_var_.SignalAll();
    }

    if (thread_) {
      thread_->join();
    }
    return true;
  }
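
  // Returns true if at least one registered function has not been cancelled.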
  bool HasPendingTask() const {
    InstrumentedMutexLock l(&mutex_);
    for (auto it = map_.begin(); it != map_.end(); it++) {
      if (it->second->IsValid()) {
        return true;
      }
    }
    return false;
  }

#ifndef NDEBUG
  // Wait until the Timer starts waiting, call the optional callback, then
  // wait until the Timer is waiting again.
  // Tests can provide a custom Clock object to mock time, and use the callback
  // here to bump the current time and trigger the Timer. See timer_test for an
  // example.
  //
  // Note: only one caller of this method is supported.
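  //
  // A rough sketch of how a test might drive the Timer with mocked time.
  // `mock_clock`, `AdvanceMockClock`, and `kUsPerSec` are placeholders here
  // (the actual mock clock lives in the test utilities); see timer_test for
  // the real usage:
  //
  //   Timer timer(mock_clock.get());
  //   int count = 0;
  //   timer.Add([&] { count++; }, "fn", 1 * kUsPerSec, 1 * kUsPerSec);
  //   timer.Start();
  //   // Advance mocked time past the scheduled run time, then wait for the
  //   // Timer to run the function and go back to waiting.
  //   timer.TEST_WaitForRun([&] { AdvanceMockClock(1 * kUsPerSec); });
  //   // count is now expected to be 1.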
  void TEST_WaitForRun(std::function<void()> callback = nullptr) {
    InstrumentedMutexLock l(&mutex_);
    // It acts as a spin lock
    while (executing_task_ ||
           (!heap_.empty() &&
            heap_.top()->next_run_time_us <= clock_->NowMicros())) {
      cond_var_.TimedWait(clock_->NowMicros() + 1000);
    }
    if (callback != nullptr) {
      callback();
    }
    cond_var_.SignalAll();
    do {
      cond_var_.TimedWait(clock_->NowMicros() + 1000);
    } while (executing_task_ ||
             (!heap_.empty() &&
              heap_.top()->next_run_time_us <= clock_->NowMicros()));
  }

  size_t TEST_GetPendingTaskNum() const {
    InstrumentedMutexLock l(&mutex_);
    size_t ret = 0;
    for (auto it = map_.begin(); it != map_.end(); it++) {
      if (it->second->IsValid()) {
        ret++;
      }
    }
    return ret;
  }
#endif  // NDEBUG

 private:
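
  // Main loop of the timer thread. It repeatedly inspects the earliest
  // scheduled function (top of the heap): cancelled entries are dropped; a
  // due function is executed with mutex_ released, then popped and, if it
  // repeats and is still valid, re-queued with its next run time; otherwise
  // the thread waits until the next run time or until it is signaled.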
  void Run() {
    InstrumentedMutexLock l(&mutex_);

    while (running_) {
      if (heap_.empty()) {
        // wait
        TEST_SYNC_POINT("Timer::Run::Waiting");
        cond_var_.Wait();
        continue;
      }

      FunctionInfo* current_fn = heap_.top();
      assert(current_fn);

      if (!current_fn->IsValid()) {
        heap_.pop();
        map_.erase(current_fn->name);
        continue;
      }

      if (current_fn->next_run_time_us <= clock_->NowMicros()) {
        // make a copy of the function so it won't be changed after
        // mutex_.unlock.
        std::function<void()> fn = current_fn->fn;
        executing_task_ = true;
        mutex_.Unlock();
        // Execute the work
        fn();
        mutex_.Lock();
        executing_task_ = false;
        cond_var_.SignalAll();

        // Remove the work from the heap once it is done executing.
        // Note that we are just removing the pointer from the heap. Its
        // memory is still managed in the map (as it holds a unique ptr).
        // So current_fn is still a valid ptr.
        heap_.pop();

        // current_fn may be cancelled already.
        if (current_fn->IsValid() && current_fn->repeat_every_us > 0) {
          assert(running_);
          current_fn->next_run_time_us =
              clock_->NowMicros() + current_fn->repeat_every_us;

          // Schedule new work into the heap with new time.
          heap_.push(current_fn);
        }
      } else {
        cond_var_.TimedWait(current_fn->next_run_time_us);
      }
    }
  }

  void CancelAllWithLock() {
    mutex_.AssertHeld();
    if (map_.empty() && heap_.empty()) {
      return;
    }

    // With mutex_ held, set all tasks to invalid so that they will not be
    // re-queued.
    for (auto& elem : map_) {
      auto& func_info = elem.second;
      assert(func_info);
      func_info->Cancel();
    }

    // WaitForTaskCompleteIfNecessary() may release mutex_
    WaitForTaskCompleteIfNecessary();

    while (!heap_.empty()) {
      heap_.pop();
    }
    map_.clear();
  }

  // A wrapper around std::function to keep track of when it should run next
  // and at what frequency.
  struct FunctionInfo {
    // the actual work
    std::function<void()> fn;
    // name of the function
    std::string name;
    // when the function should run next
    uint64_t next_run_time_us;
    // repeat interval
    uint64_t repeat_every_us;
    // controls whether this function is valid.
    // A function is valid upon construction and until someone explicitly
    // calls `Cancel()`.
    bool valid;

    FunctionInfo(std::function<void()>&& _fn, const std::string& _name,
                 const uint64_t _next_run_time_us, uint64_t _repeat_every_us)
        : fn(std::move(_fn)),
          name(_name),
          next_run_time_us(_next_run_time_us),
          repeat_every_us(_repeat_every_us),
          valid(true) {}

    void Cancel() { valid = false; }

    bool IsValid() const { return valid; }
  };
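
  // Block until the currently executing task (if any) has finished.
  // mutex_ must be held on entry; it is released while waiting on cond_var_
  // and re-acquired before returning.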
  void WaitForTaskCompleteIfNecessary() {
    mutex_.AssertHeld();
    while (executing_task_) {
      TEST_SYNC_POINT("Timer::WaitForTaskCompleteIfNecessary:TaskExecuting");
      cond_var_.Wait();
    }
  }
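
  // Comparator for heap_: std::priority_queue is a max-heap by default, so
  // comparing with `>` on next_run_time_us puts the function with the
  // earliest next run time on top.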
  struct RunTimeOrder {
    bool operator()(const FunctionInfo* f1, const FunctionInfo* f2) {
      return f1->next_run_time_us > f2->next_run_time_us;
    }
  };

  SystemClock* clock_;
  // This mutex controls both the heap_ and the map_. It needs to be held when
  // making any changes to them.
  mutable InstrumentedMutex mutex_;
  InstrumentedCondVar cond_var_;
  std::unique_ptr<port::Thread> thread_;
  bool running_;
  bool executing_task_;
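
  // Min-heap of raw FunctionInfo pointers ordered by next_run_time_us (see
  // RunTimeOrder); the top element is the next function due to run. The
  // pointed-to objects are owned by map_ below.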
  std::priority_queue<FunctionInfo*,
                      std::vector<FunctionInfo*>,
                      RunTimeOrder> heap_;

  // In addition to providing a mapping from a function name to a function,
  // it is also responsible for memory management.
  std::unordered_map<std::string, std::unique_ptr<FunctionInfo>> map_;
};

}  // namespace ROCKSDB_NAMESPACE