mirror of
https://github.com/facebook/rocksdb.git
synced 2024-11-28 05:43:50 +00:00
75a1230ce8
Summary: **Context:** Adding assertion `!PendingPut()&&!PendingDelete()` in `ExpectedValue::Exists()` surfaced a couple improper usages of `ExpectedValue::Exists()` in the crash test - Commit phase of `ExpectedValue::Delete()`/`SyncDelete()`: When we issue delete to expected value during commit phase or `SyncDelete()` (used in crash recovery verification) as below, we don't really care about the result. d458331ee9/db_stress_tool/expected_state.cc (L73)
d458331ee9/db_stress_tool/expected_value.cc (L52)
That means, we don't really need to check for `Exists()`d458331ee9/db_stress_tool/expected_value.cc (L24-L26)
This actually gives an alternative solution to b65e29a4a9
to solve false-positive assertion violation. - TestMultiGetXX() path: `Exists()` is called without holding the lock as requiredf63428bcc7/db_stress_tool/no_batched_ops_stress.cc (L2688)
``` void MaybeAddKeyToTxnForRYW( ThreadState* thread, int column_family, int64_t key, Transaction* txn, std::unordered_map<std::string, ExpectedValue>& ryw_expected_values) { assert(thread); assert(txn); SharedState* const shared = thread->shared; assert(shared); if (!shared->AllowsOverwrite(key) && shared->Exists(column_family, key)) { // Just do read your write checks for keys that allow overwrites. return; } // With a 1 in 10 probability, insert the just added key in the batch // into the transaction. This will create an overlap with the MultiGet // keys and exercise some corner cases in the code if (thread->rand.OneIn(10)) { ```f63428bcc7/db_stress_tool/expected_state.h (L74-L76)
The assertion also failed if db stress compaction filter was invoked before crash recovery verification (`VerifyDB()`->`VerifyOrSyncValue()`) finishes.f63428bcc7/db_stress_tool/db_stress_compaction_filter.h (L53)
It failed because it can encounter a key with pending state when checking for `Exists()` since that key's expected state has not been sync-ed with db state in `VerifyOrSyncValue()`.f63428bcc7/db_stress_tool/no_batched_ops_stress.cc (L2579-L2591)
**Summary:** This PR fixes above issues by - not checking `Exists()` in commit phase/`SyncDelete()` - using the concurrent version of key existence check like in other reads - conditionally temporarily disabling compaction till after crash recovery verification succeeds. And add back the assertion `!PendingPut()&&!PendingDelete()` Pull Request resolved: https://github.com/facebook/rocksdb/pull/12933 Test Plan: Rehearsal CI Reviewed By: cbi42 Differential Revision: D61214889 Pulled By: hx235 fbshipit-source-id: ef25ba896e64330ddf330182314981516880c3e4
257 lines
8.2 KiB
C++
257 lines
8.2 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
// (found in the LICENSE.Apache file in the root directory).
|
|
//
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
//
|
|
|
|
#include "db_stress_tool/db_stress_shared_state.h"
|
|
#ifdef GFLAGS
|
|
#include "db_stress_tool/db_stress_common.h"
|
|
#include "utilities/fault_injection_fs.h"
|
|
|
|
namespace ROCKSDB_NAMESPACE {
|
|
// Entry point for every stress-test worker thread. Each worker walks the
// lifecycle:
//   initialize (+ optional crash-recovery verify) -> wait for driver start ->
//   operate on the DB -> wait for verify phase -> verify -> done
// Every phase transition is coordinated with the driver thread
// (RunStressTestImpl) through SharedState's mutex/condvar and its
// IncXXX()/AllXXX() counters.
void ThreadBody(void* v) {
  ThreadStatusUtil::RegisterThread(db_stress_env, ThreadStatus::USER);
  // `v` is always a ThreadState* supplied by the driver when it spawned us.
  ThreadState* thread = static_cast<ThreadState*>(v);
  SharedState* shared = thread->shared;

  // Crash-recovery verification runs before this thread counts itself as
  // initialized, so the driver only reports the recovery-verification
  // outcome once every worker has finished verifying its key range.
  if (!FLAGS_skip_verifydb && shared->ShouldVerifyAtBeginning()) {
    thread->shared->GetStressTest()->VerifyDb(thread);
  }
  {
    MutexLock l(shared->GetMutex());
    shared->IncInitialized();
    if (shared->AllInitialized()) {
      // Last thread to initialize wakes the driver waiting on
      // AllInitialized() in RunStressTestImpl.
      shared->GetCondVar()->SignalAll();
    }
  }
  if (!FLAGS_verification_only) {
    {
      MutexLock l(shared->GetMutex());
      // Block until the driver flips the start flag (SetStart()).
      while (!shared->Started()) {
        shared->GetCondVar()->Wait();
      }
    }
    // Main read/write/delete workload; runs without holding the shared mutex.
    thread->shared->GetStressTest()->OperateDb(thread);
    {
      MutexLock l(shared->GetMutex());
      shared->IncOperated();
      if (shared->AllOperated()) {
        // Wake the driver waiting on AllOperated().
        shared->GetCondVar()->SignalAll();
      }
      // Hold here until the driver announces the verification phase
      // (SetStartVerify()).
      while (!shared->VerifyStarted()) {
        shared->GetCondVar()->Wait();
      }
    }

    // Post-run verification of DB contents against expected state.
    if (!FLAGS_skip_verifydb) {
      thread->shared->GetStressTest()->VerifyDb(thread);
    }

    {
      MutexLock l(shared->GetMutex());
      shared->IncDone();
      if (shared->AllDone()) {
        // Last finisher wakes the driver waiting on AllDone().
        shared->GetCondVar()->SignalAll();
      }
    }
  }

  ThreadStatusUtil::UnregisterThread();
}
|
|
// Drives one full stress-test run: sets up optional "unverified state"
// directories, initializes the DB, spawns worker and background threads,
// then shepherds the workers through the init -> operate -> verify -> done
// phases via SharedState's mutex/condvar. Returns false iff any
// verification failed.
bool RunStressTestImpl(SharedState* shared) {
  SystemClock* clock = db_stress_env->GetSystemClock().get();
  StressTest* stress = shared->GetStressTest();

  // When preserving unverified changes, snapshot the DB (and expected-state
  // dir) into an "unverified" subdir before crash-recovery verification; it
  // is destroyed below only after verification passes.
  if (shared->ShouldVerifyAtBeginning() && FLAGS_preserve_unverified_changes) {
    Status s = InitUnverifiedSubdir(FLAGS_db);
    if (s.ok() && !FLAGS_expected_values_dir.empty()) {
      s = InitUnverifiedSubdir(FLAGS_expected_values_dir);
    }
    if (!s.ok()) {
      fprintf(stderr, "Failed to setup unverified state dir: %s\n",
              s.ToString().c_str());
      exit(1);
    }
  }

  stress->InitDb(shared);
  stress->FinishInitDb(shared);

  uint32_t n = FLAGS_threads;
  uint64_t now = clock->NowMicros();
  fprintf(stdout, "%s Initializing worker threads\n",
          clock->TimeToString(now / 1000000).c_str());

  shared->SetThreads(n);

  // Register each optional background thread with SharedState so the
  // shutdown path below knows how many to wait for.
  if (FLAGS_compaction_thread_pool_adjust_interval > 0) {
    shared->IncBgThreads();
  }

  if (FLAGS_continuous_verification_interval > 0) {
    shared->IncBgThreads();
  }

  if (FLAGS_compressed_secondary_cache_size > 0 ||
      FLAGS_compressed_secondary_cache_ratio > 0.0) {
    shared->IncBgThreads();
  }

  // Spawn the worker threads; each runs ThreadBody. ThreadState pointers are
  // kept so stats can be merged and the objects deleted after AllDone().
  std::vector<ThreadState*> threads(n);
  for (uint32_t i = 0; i < n; i++) {
    threads[i] = new ThreadState(i, shared);
    db_stress_env->StartThread(ThreadBody, threads[i]);
  }

  // Background thread states are stack-allocated; they must outlive their
  // threads, which is ensured by the BgThreadsFinished() wait below.
  ThreadState bg_thread(0, shared);
  if (FLAGS_compaction_thread_pool_adjust_interval > 0) {
    db_stress_env->StartThread(PoolSizeChangeThread, &bg_thread);
  }

  ThreadState continuous_verification_thread(0, shared);
  if (FLAGS_continuous_verification_interval > 0) {
    db_stress_env->StartThread(DbVerificationThread,
                               &continuous_verification_thread);
  }

  ThreadState compressed_cache_set_capacity_thread(0, shared);
  if (FLAGS_compressed_secondary_cache_size > 0 ||
      FLAGS_compressed_secondary_cache_ratio > 0.0) {
    db_stress_env->StartThread(CompressedCacheSetCapacityThread,
                               &compressed_cache_set_capacity_thread);
  }

  // Each thread goes through the following states:
  // initializing -> wait for others to init -> read/populate/depopulate
  // wait for others to operate -> verify -> done

  // The entire phase handshake below runs under the shared mutex; the
  // condvar waits release it while blocked.
  {
    MutexLock l(shared->GetMutex());
    while (!shared->AllInitialized()) {
      shared->GetCondVar()->Wait();
    }
    if (shared->ShouldVerifyAtBeginning()) {
      if (shared->HasVerificationFailedYet()) {
        fprintf(stderr, "Crash-recovery verification failed :(\n");
      } else {
        fprintf(stdout, "Crash-recovery verification passed :)\n");
        // Verification passed, so the preserved unverified state is no
        // longer needed.
        Status s = DestroyUnverifiedSubdir(FLAGS_db);
        if (s.ok() && !FLAGS_expected_values_dir.empty()) {
          s = DestroyUnverifiedSubdir(FLAGS_expected_values_dir);
        }
        if (!s.ok()) {
          fprintf(stderr, "Failed to cleanup unverified state dir: %s\n",
                  s.ToString().c_str());
          exit(1);
        }
      }
    }

    if (!FLAGS_verification_only) {
      // This is after the verification step to avoid making all those `Get()`s
      // and `MultiGet()`s contend on the DB-wide trace mutex.
      if (!FLAGS_expected_values_dir.empty()) {
        stress->TrackExpectedState(shared);
      }

      // Arm fault injection only after crash-recovery verification so the
      // verified state itself is not corrupted by injected faults.
      if (FLAGS_sync_fault_injection || FLAGS_write_fault_one_in > 0) {
        fault_fs_guard->SetFilesystemDirectWritable(false);
        fault_fs_guard->SetInjectUnsyncedDataLoss(FLAGS_sync_fault_injection);
        if (FLAGS_exclude_wal_from_write_fault_injection) {
          fault_fs_guard->SetFileTypesExcludedFromWriteFaultInjection(
              {FileType::kWalFile});
        }
      }
      // Auto compactions were disabled before crash-recovery verification
      // (to avoid the compaction filter racing with VerifyDb); re-enable
      // them now that verification is complete.
      if (ShouldDisableAutoCompactionsBeforeVerifyDb()) {
        Status s = stress->EnableAutoCompaction();
        assert(s.ok());
      }
      fprintf(stdout, "%s Starting database operations\n",
              clock->TimeToString(now / 1000000).c_str());

      // Release the workers into OperateDb(), then wait for all of them to
      // finish the operation phase.
      shared->SetStart();
      shared->GetCondVar()->SignalAll();
      while (!shared->AllOperated()) {
        shared->GetCondVar()->Wait();
      }

      now = clock->NowMicros();
      if (FLAGS_test_batches_snapshots) {
        fprintf(stdout, "%s Limited verification already done during gets\n",
                clock->TimeToString((uint64_t)now / 1000000).c_str());
      } else if (FLAGS_skip_verifydb) {
        fprintf(stdout, "%s Verification skipped\n",
                clock->TimeToString((uint64_t)now / 1000000).c_str());
      } else {
        fprintf(stdout, "%s Starting verification\n",
                clock->TimeToString((uint64_t)now / 1000000).c_str());
      }

      // Release the workers into the verification phase and wait for all of
      // them to finish.
      shared->SetStartVerify();
      shared->GetCondVar()->SignalAll();
      while (!shared->AllDone()) {
        shared->GetCondVar()->Wait();
      }
    }
  }

  // If we are running verification_only
  // stats will be empty and trying to report them will
  // emit no ops or writes error. To avoid this, merging and reporting stats
  // are not executed when running with verification_only
  // TODO: We need to create verification stats (e.g. how many keys
  // are verified by which method) and report them here instead of operation
  // stats.
  if (!FLAGS_verification_only) {
    // Fold every worker's stats into thread 0's, then report once.
    for (unsigned int i = 1; i < n; i++) {
      threads[0]->stats.Merge(threads[i]->stats);
    }
    threads[0]->stats.Report("Stress Test");
  }

  for (unsigned int i = 0; i < n; i++) {
    delete threads[i];
    threads[i] = nullptr;
  }
  now = clock->NowMicros();
  if (!FLAGS_skip_verifydb && !FLAGS_test_batches_snapshots &&
      !shared->HasVerificationFailedYet()) {
    fprintf(stdout, "%s Verification successful\n",
            clock->TimeToString(now / 1000000).c_str());
  }

  if (!FLAGS_verification_only) {
    stress->PrintStatistics();
  }

  // Ask the background threads to stop and wait until they have all
  // finished; their ThreadState objects (stack-allocated above) must not be
  // destroyed before then.
  if (FLAGS_compaction_thread_pool_adjust_interval > 0 ||
      FLAGS_continuous_verification_interval > 0 ||
      FLAGS_compressed_secondary_cache_size > 0 ||
      FLAGS_compressed_secondary_cache_ratio > 0.0) {
    MutexLock l(shared->GetMutex());
    shared->SetShouldStopBgThread();
    while (!shared->BgThreadsFinished()) {
      shared->GetCondVar()->Wait();
    }
  }

  if (shared->HasVerificationFailedYet()) {
    fprintf(stderr, "Verification failed :(\n");
    return false;
  }
  return true;
}
|
|
// Public entry point for a stress-test run. Wraps RunStressTestImpl() with
// thread-status registration for the driver thread itself, so the driver
// shows up in thread-status reporting alongside the workers.
// Returns true iff the run completed with no verification failure.
bool RunStressTest(SharedState* shared) {
  ThreadStatusUtil::RegisterThread(db_stress_env, ThreadStatus::USER);
  const bool passed = RunStressTestImpl(shared);
  ThreadStatusUtil::UnregisterThread();
  return passed;
}
|
|
} // namespace ROCKSDB_NAMESPACE
|
|
#endif // GFLAGS
|