// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//

#ifdef GFLAGS
#include "db_stress_tool/db_stress_common.h"
#include "utilities/fault_injection_fs.h"

namespace ROCKSDB_NAMESPACE {
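// Body of each worker thread: verify the recovered DB at startup (unless
// skipped), run the randomized operation phase, verify again, and report
// completion, synchronizing with the other threads through SharedState.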
void ThreadBody(void* v) {
  ThreadState* thread = reinterpret_cast<ThreadState*>(v);
  SharedState* shared = thread->shared;

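  // Startup verification: check the DB state recovered from the previous run
  // against the expected state before doing any new work.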
  if (!FLAGS_skip_verifydb && shared->ShouldVerifyAtBeginning()) {
    thread->shared->GetStressTest()->VerifyDb(thread);
  }
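  // Barrier: report this thread as initialized, wake the coordinator if it is
  // the last one to do so, then wait for the signal to start operating.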
  {
    MutexLock l(shared->GetMutex());
    shared->IncInitialized();
    if (shared->AllInitialized()) {
      shared->GetCondVar()->SignalAll();
    }
    while (!shared->Started()) {
      shared->GetCondVar()->Wait();
    }
  }
  thread->shared->GetStressTest()->OperateDb(thread);

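  // Barrier: report the operation phase as finished and wait for the
  // coordinator to signal the start of the verification phase.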
  {
    MutexLock l(shared->GetMutex());
    shared->IncOperated();
    if (shared->AllOperated()) {
      shared->GetCondVar()->SignalAll();
    }
    while (!shared->VerifyStarted()) {
      shared->GetCondVar()->Wait();
    }
  }

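  // Final verification pass against the expected state.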
  if (!FLAGS_skip_verifydb) {
    thread->shared->GetStressTest()->VerifyDb(thread);
  }

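  // Mark this thread as done and wake the coordinator once every thread has
  // finished.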
  {
    MutexLock l(shared->GetMutex());
    shared->IncDone();
    if (shared->AllDone()) {
      shared->GetCondVar()->SignalAll();
    }
  }
}

bool RunStressTestImpl(SharedState* shared) {
  SystemClock* clock = db_stress_env->GetSystemClock().get();
  StressTest* stress = shared->GetStressTest();

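  // With -preserve_unverified_changes, hardlink the DB and expected state
  // files into "unverified/" subdirectories so the files needed to debug a
  // startup verification failure survive the DB open below.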
  if (shared->ShouldVerifyAtBeginning() && FLAGS_preserve_unverified_changes) {
    Status s = InitUnverifiedSubdir(FLAGS_db);
    if (s.ok() && !FLAGS_expected_values_dir.empty()) {
      s = InitUnverifiedSubdir(FLAGS_expected_values_dir);
    }
    if (!s.ok()) {
      fprintf(stderr, "Failed to setup unverified state dir: %s\n",
              s.ToString().c_str());
      exit(1);
    }
  }

  stress->InitDb(shared);
  stress->FinishInitDb(shared);

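  // Arm fault injection: with -sync_fault_injection the fault injection
  // filesystem stops treating unsynced writes as durable, and
  // -write_fault_one_in enables write error injection.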
  if (FLAGS_sync_fault_injection) {
    fault_fs_guard->SetFilesystemDirectWritable(false);
  }
  if (FLAGS_write_fault_one_in) {
    fault_fs_guard->EnableWriteErrorInjection();
  }

  uint32_t n = FLAGS_threads;
  uint64_t now = clock->NowMicros();
  fprintf(stdout, "%s Initializing worker threads\n",
          clock->TimeToString(now / 1000000).c_str());

  shared->SetThreads(n);

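  // Count each optional background helper so that shutdown can wait for it.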
  if (FLAGS_compaction_thread_pool_adjust_interval > 0) {
    shared->IncBgThreads();
  }

  if (FLAGS_continuous_verification_interval > 0) {
    shared->IncBgThreads();
  }

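  // Start the worker threads; each one runs ThreadBody above.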
  std::vector<ThreadState*> threads(n);
  for (uint32_t i = 0; i < n; i++) {
    threads[i] = new ThreadState(i, shared);
    db_stress_env->StartThread(ThreadBody, threads[i]);
  }

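  // Optional background helpers: one thread that periodically adjusts the
  // compaction thread pool size and one that periodically verifies the DB
  // while the workers run.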
  ThreadState bg_thread(0, shared);
  if (FLAGS_compaction_thread_pool_adjust_interval > 0) {
    db_stress_env->StartThread(PoolSizeChangeThread, &bg_thread);
  }

  ThreadState continuous_verification_thread(0, shared);
  if (FLAGS_continuous_verification_interval > 0) {
    db_stress_env->StartThread(DbVerificationThread,
                               &continuous_verification_thread);
  }

  // Each thread goes through the following states:
  // initializing -> wait for others to init -> read/populate/depopulate
  // wait for others to operate -> verify -> done

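  // Coordinator: drive the worker threads through the phases listed above,
  // holding the shared mutex except while waiting on the condition variable.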
  {
    MutexLock l(shared->GetMutex());
    while (!shared->AllInitialized()) {
      shared->GetCondVar()->Wait();
    }
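    // Report the result of crash-recovery (startup) verification; on success,
    // remove the "unverified/" backups created before the DB was opened.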
    if (shared->ShouldVerifyAtBeginning()) {
      if (shared->HasVerificationFailedYet()) {
        fprintf(stderr, "Crash-recovery verification failed :(\n");
      } else {
        fprintf(stdout, "Crash-recovery verification passed :)\n");
        Status s = DestroyUnverifiedSubdir(FLAGS_db);
        if (s.ok() && !FLAGS_expected_values_dir.empty()) {
          s = DestroyUnverifiedSubdir(FLAGS_expected_values_dir);
        }
        if (!s.ok()) {
          fprintf(stderr, "Failed to cleanup unverified state dir: %s\n",
                  s.ToString().c_str());
          exit(1);
        }
      }
    }

    // This is after the verification step to avoid making all those `Get()`s
    // and `MultiGet()`s contend on the DB-wide trace mutex.
    if (!FLAGS_expected_values_dir.empty()) {
      stress->TrackExpectedState(shared);
    }

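    // Tell the workers to start operating and wait until every thread has
    // finished its operation phase.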
    now = clock->NowMicros();
    fprintf(stdout, "%s Starting database operations\n",
            clock->TimeToString(now / 1000000).c_str());

    shared->SetStart();
    shared->GetCondVar()->SignalAll();
    while (!shared->AllOperated()) {
      shared->GetCondVar()->Wait();
    }

    now = clock->NowMicros();
    if (FLAGS_test_batches_snapshots) {
      fprintf(stdout, "%s Limited verification already done during gets\n",
              clock->TimeToString((uint64_t)now / 1000000).c_str());
    } else if (FLAGS_skip_verifydb) {
      fprintf(stdout, "%s Verification skipped\n",
              clock->TimeToString((uint64_t)now / 1000000).c_str());
    } else {
      fprintf(stdout, "%s Starting verification\n",
              clock->TimeToString((uint64_t)now / 1000000).c_str());
    }

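    // Tell the workers to start the verification phase and wait until all of
    // them report completion.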
    shared->SetStartVerify();
    shared->GetCondVar()->SignalAll();
    while (!shared->AllDone()) {
      shared->GetCondVar()->Wait();
    }
  }

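  // Merge per-thread statistics into thread 0, print the combined report, and
  // free the worker thread states.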
  for (unsigned int i = 1; i < n; i++) {
    threads[0]->stats.Merge(threads[i]->stats);
  }
  threads[0]->stats.Report("Stress Test");

  for (unsigned int i = 0; i < n; i++) {
    delete threads[i];
    threads[i] = nullptr;
  }
  now = clock->NowMicros();
  if (!FLAGS_skip_verifydb && !FLAGS_test_batches_snapshots &&
      !shared->HasVerificationFailedYet()) {
    fprintf(stdout, "%s Verification successful\n",
            clock->TimeToString(now / 1000000).c_str());
  }
  stress->PrintStatistics();

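  // Ask any background helper threads to stop and wait for them to finish.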
  if (FLAGS_compaction_thread_pool_adjust_interval > 0 ||
      FLAGS_continuous_verification_interval > 0) {
    MutexLock l(shared->GetMutex());
    shared->SetShouldStopBgThread();
    while (!shared->BgThreadsFinished()) {
      shared->GetCondVar()->Wait();
    }
  }

  if (shared->HasVerificationFailedYet()) {
    fprintf(stderr, "Verification failed :(\n");
    return false;
  }
  return true;
}

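// Entry point: registers the calling thread for thread-status tracking, runs
// the stress test, and unregisters before returning.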
bool RunStressTest(SharedState* shared) {
  ThreadStatusUtil::RegisterThread(db_stress_env, ThreadStatus::USER);
  bool result = RunStressTestImpl(shared);
  ThreadStatusUtil::UnregisterThread();
  return result;
}
} // namespace ROCKSDB_NAMESPACE
#endif // GFLAGS