Store timestamp in deadlock detection (#4060)
Summary: Add a timestamp to DeadlockInfo to record when the deadlock was detected on the RocksDB side.

Test plan: `make check -j64`

Closes https://github.com/facebook/rocksdb/pull/4060

Differential Revision: D8655380
Pulled By: chouxi
fbshipit-source-id: f58e1aa5e09eb1d1eed0a181d4e2304aaf01efe8
commit 818c84e116
parent e5ae1bb465
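As context for the diff below: applications read these records through TransactionDB::GetDeadlockInfoBuffer(), which returns the recorded DeadlockPath entries. A minimal sketch of consuming the new deadlock_time field; the helper name and output format are illustrative, not part of the patch:

#include <cinttypes>
#include <cstdio>

#include "rocksdb/utilities/transaction_db.h"

// Hypothetical helper: print every recorded deadlock along with the newly
// added detection timestamp. Assumes `db` is an open TransactionDB whose
// transactions run with deadlock detection enabled.
void DumpDeadlocks(rocksdb::TransactionDB* db) {
  for (const auto& dl_path : db->GetDeadlockInfoBuffer()) {
    if (dl_path.limit_exceeded) {
      // The wait cycle exceeded the detection depth; no path was recorded.
      printf("deadlock (cycle too long) at unix time %" PRId64 "\n",
             dl_path.deadlock_time);
      continue;
    }
    printf("deadlock of %zu txns at unix time %" PRId64 "\n",
           dl_path.path.size(), dl_path.deadlock_time);
  }
}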
@@ -169,12 +169,15 @@ struct DeadlockInfo {
 struct DeadlockPath {
   std::vector<DeadlockInfo> path;
   bool limit_exceeded;
+  int64_t deadlock_time;
 
-  explicit DeadlockPath(std::vector<DeadlockInfo> path_entry)
-      : path(path_entry), limit_exceeded(false) {}
+  explicit DeadlockPath(
+      std::vector<DeadlockInfo> path_entry, const int64_t& dl_time)
+      : path(path_entry), limit_exceeded(false), deadlock_time(dl_time) {}
 
   // empty path, limit exceeded constructor and default constructor
-  explicit DeadlockPath(bool limit = false) : path(0), limit_exceeded(limit) {}
+  explicit DeadlockPath(const int64_t& dl_time = 0, bool limit = false)
+      : path(0), limit_exceeded(limit), deadlock_time(dl_time) {}
 
   bool empty() { return path.empty() && !limit_exceeded; }
 };
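One design note on the struct change above: the limit-exceeded constructor keeps all-default arguments, so DeadlockPath remains default-constructible and empty() still identifies an unused entry. A minimal sketch of how the two constructors are selected after this change (the function name and timestamp literal are arbitrary examples, not from the patch):

#include <cassert>
#include <vector>

#include "rocksdb/utilities/transaction_db.h"

void DeadlockPathCtorSketch() {
  std::vector<rocksdb::DeadlockInfo> cycle;  // would hold the detected cycle
  rocksdb::DeadlockPath with_cycle(cycle, /*dl_time=*/1530000000);
  rocksdb::DeadlockPath too_deep(/*dl_time=*/1530000000, /*limit=*/true);
  rocksdb::DeadlockPath unused;  // default: empty path, limit not exceeded
  assert(unused.empty());
  assert(!with_cycle.limit_exceeded && too_deep.limit_exceeded);
}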
@@ -372,7 +372,7 @@ Status TransactionLockMgr::AcquireWithTimeout(
     if (wait_ids.size() != 0) {
       if (txn->IsDeadlockDetect()) {
         if (IncrementWaiters(txn, wait_ids, key, column_family_id,
-                             lock_info.exclusive)) {
+                             lock_info.exclusive, env)) {
           result = Status::Busy(Status::SubCode::kDeadlock);
           stripe->stripe_mutex->UnLock();
           return result;
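On the caller side, the deadlock result shown above still surfaces as Status::Busy with subcode kDeadlock, which is how it can be told apart from a plain lock timeout. A hedged sketch, assuming `txn` is a live rocksdb::Transaction and the Put call is only illustrative:

rocksdb::Status s = txn->Put("key", "value");
if (s.IsBusy() && s.subcode() == rocksdb::Status::SubCode::kDeadlock) {
  // This transaction was chosen as the deadlock victim; roll it back.
  txn->Rollback();
}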
@@ -444,7 +444,7 @@ void TransactionLockMgr::DecrementWaitersImpl(
 bool TransactionLockMgr::IncrementWaiters(
     const PessimisticTransaction* txn,
     const autovector<TransactionID>& wait_ids, const std::string& key,
-    const uint32_t& cf_id, const bool& exclusive) {
+    const uint32_t& cf_id, const bool& exclusive, Env* const env) {
   auto id = txn->GetID();
   std::vector<int> queue_parents(txn->GetDeadlockDetectDepth());
   std::vector<TransactionID> queue_values(txn->GetDeadlockDetectDepth());
@@ -468,6 +468,7 @@ bool TransactionLockMgr::IncrementWaiters(
 
   const auto* next_ids = &wait_ids;
   int parent = -1;
+  int64_t deadlock_time = 0;
   for (int tail = 0, head = 0; head < txn->GetDeadlockDetectDepth(); head++) {
     int i = 0;
     if (next_ids) {
@@ -497,8 +498,10 @@ bool TransactionLockMgr::IncrementWaiters(
                         extracted_info.m_exclusive});
         head = queue_parents[head];
       }
+      env->GetCurrentTime(&deadlock_time);
       std::reverse(path.begin(), path.end());
-      dlock_buffer_.AddNewPath(DeadlockPath(path));
+      dlock_buffer_.AddNewPath(DeadlockPath(path, deadlock_time));
+      deadlock_time = 0;
       DecrementWaitersImpl(txn, wait_ids);
       return true;
     } else if (!wait_txn_map_.Contains(next)) {
@@ -511,7 +514,8 @@ bool TransactionLockMgr::IncrementWaiters(
   }
 
   // Wait cycle too big, just assume deadlock.
-  dlock_buffer_.AddNewPath(DeadlockPath(true));
+  env->GetCurrentTime(&deadlock_time);
+  dlock_buffer_.AddNewPath(DeadlockPath(deadlock_time, true));
   DecrementWaitersImpl(txn, wait_ids);
   return true;
 }
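Both timestamp call sites above go through Env::GetCurrentTime(int64_t*), which reports seconds since the Unix epoch and returns a Status that the patch ignores (the default Env reads the system clock and does not fail here). A standalone sketch under those assumptions:

#include <cinttypes>
#include <cstdio>

#include "rocksdb/env.h"

void TimestampSketch() {
  int64_t deadlock_time = 0;
  // Seconds since the Unix epoch; returned Status ignored, as in the patch.
  rocksdb::Env::Default()->GetCurrentTime(&deadlock_time);
  printf("now: %" PRId64 "\n", deadlock_time);
}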
@@ -143,7 +143,7 @@ class TransactionLockMgr {
   bool IncrementWaiters(const PessimisticTransaction* txn,
                         const autovector<TransactionID>& wait_ids,
                         const std::string& key, const uint32_t& cf_id,
-                        const bool& exclusive);
+                        const bool& exclusive, Env* const env);
   void DecrementWaiters(const PessimisticTransaction* txn,
                         const autovector<TransactionID>& wait_ids);
   void DecrementWaitersImpl(const PessimisticTransaction* txn,
@@ -465,6 +465,14 @@ TEST_P(TransactionTest, DeadlockCycleShared) {
   ASSERT_EQ(dlock_buffer.size(), curr_dlock_buffer_len_);
   auto dlock_entry = dlock_buffer[0].path;
   ASSERT_EQ(dlock_entry.size(), kInitialMaxDeadlocks);
+  int64_t pre_deadlock_time = dlock_buffer[0].deadlock_time;
+  int64_t cur_deadlock_time = 0;
+  for (auto const& dl_path_rec : dlock_buffer) {
+    cur_deadlock_time = dl_path_rec.deadlock_time;
+    ASSERT_NE(cur_deadlock_time, 0);
+    ASSERT_TRUE(cur_deadlock_time <= pre_deadlock_time);
+    pre_deadlock_time = cur_deadlock_time;
+  }
 
   int64_t curr_waiting_key = 0;
 
@@ -670,6 +678,15 @@ TEST_P(TransactionTest, DeadlockCycle) {
   ASSERT_EQ(dlock_entry.size(), check_len);
   ASSERT_EQ(dlock_buffer[0].limit_exceeded, check_limit_flag);
 
+  int64_t pre_deadlock_time = dlock_buffer[0].deadlock_time;
+  int64_t cur_deadlock_time = 0;
+  for (auto const& dl_path_rec : dlock_buffer) {
+    cur_deadlock_time = dl_path_rec.deadlock_time;
+    ASSERT_NE(cur_deadlock_time, 0);
+    ASSERT_TRUE(cur_deadlock_time <= pre_deadlock_time);
+    pre_deadlock_time = cur_deadlock_time;
+  }
+
   // Iterates backwards over path verifying decreasing txn_ids.
   for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); it++) {
     auto dl_node = *it;
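The loop added to both tests encodes an invariant worth spelling out: the deadlock buffer is ordered newest-first, so timestamps must be non-zero and never increase when walking it forward. The same check as a standalone predicate, assuming `buffer` came from GetDeadlockInfoBuffer():

#include <cstdint>
#include <vector>

#include "rocksdb/utilities/transaction_db.h"

// Returns true iff every recorded deadlock has a timestamp and the entries
// are ordered newest-first (timestamps non-increasing walking forward).
bool TimestampsNonIncreasing(const std::vector<rocksdb::DeadlockPath>& buffer) {
  int64_t prev = INT64_MAX;
  for (const auto& rec : buffer) {
    if (rec.deadlock_time == 0 || rec.deadlock_time > prev) return false;
    prev = rec.deadlock_time;
  }
  return true;
}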