// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//

#ifdef GFLAGS
#include "db_stress_tool/db_stress_common.h"
#include "utilities/fault_injection_fs.h"

namespace ROCKSDB_NAMESPACE {

// Entry point for each worker thread spawned by the stress-test driver.
// `v` is a `ThreadState*` owned by the driver (not by this thread).
//
// Each worker walks the shared state machine, using the single mutex and
// condition variable in SharedState as a set of barriers:
//   startup-verify -> init barrier -> OperateDb() -> operate barrier
//   -> VerifyDb() -> done barrier
// The driver (RunStressTestImpl) advances the phases via SetStart() /
// SetStartVerify() and waits on AllInitialized()/AllOperated()/AllDone().
void ThreadBody(void* v) {
  // Register with ThreadStatusUtil so this worker is visible in thread
  // status reporting; paired with UnregisterThread() at the end.
  ThreadStatusUtil::RegisterThread(db_stress_env, ThreadStatus::USER);
  ThreadState* thread = reinterpret_cast<ThreadState*>(v);
  SharedState* shared = thread->shared;

  // Optional crash-recovery verification before any new operations run.
  if (!FLAGS_skip_verifydb && shared->ShouldVerifyAtBeginning()) {
    thread->shared->GetStressTest()->VerifyDb(thread);
  }
  {
    // Init barrier: count this thread as initialized and wake the driver
    // once every worker has checked in.
    MutexLock l(shared->GetMutex());
    shared->IncInitialized();
    if (shared->AllInitialized()) {
      shared->GetCondVar()->SignalAll();
    }
  }
  if (!FLAGS_verification_only) {
    {
      // Wait for the driver to release all workers into the operate phase.
      // while-loop guards against spurious wakeups.
      MutexLock l(shared->GetMutex());
      while (!shared->Started()) {
        shared->GetCondVar()->Wait();
      }
    }
    // Run this worker's share of the stress workload.
    thread->shared->GetStressTest()->OperateDb(thread);
    {
      // Operate barrier: report completion, then wait for the driver to
      // start the verification phase.
      MutexLock l(shared->GetMutex());
      shared->IncOperated();
      if (shared->AllOperated()) {
        shared->GetCondVar()->SignalAll();
      }
      while (!shared->VerifyStarted()) {
        shared->GetCondVar()->Wait();
      }
    }

    // Post-workload verification (skipped with --skip_verifydb).
    if (!FLAGS_skip_verifydb) {
      thread->shared->GetStressTest()->VerifyDb(thread);
    }

    {
      // Done barrier: last thread to finish wakes the driver.
      MutexLock l(shared->GetMutex());
      shared->IncDone();
      if (shared->AllDone()) {
        shared->GetCondVar()->SignalAll();
      }
    }
  }

  ThreadStatusUtil::UnregisterThread();
}
bool RunStressTestImpl(SharedState* shared) {
|
2021-03-15 11:32:24 +00:00
|
|
|
SystemClock* clock = db_stress_env->GetSystemClock().get();
|
2023-01-05 03:35:34 +00:00
|
|
|
StressTest* stress = shared->GetStressTest();
|
db_stress option to preserve all files until verification success (#10659)
Summary:
In `db_stress`, DB and expected state files containing changes leading up to a verification failure are often deleted, which makes debugging such failures difficult. On the DB side, flushed WAL files and compacted SST files are marked obsolete and then deleted. Without those files, we cannot pinpoint where a key that failed verification changed unexpectedly. On the expected state side, files for verifying prefix-recoverability in the presence of unsynced data loss are deleted before verification. These include a baseline state file containing the expected state at the time of the last successful verification, and a trace file containing all operations since then. Without those files, we cannot know the sequence of DB operations expected to be recovered.
This PR attempts to address this gap with a new `db_stress` flag: `preserve_unverified_changes`. Setting `preserve_unverified_changes=1` has two effects.
First, prior to startup verification, `db_stress` hardlinks all DB and expected state files in "unverified/" subdirectories of `FLAGS_db` and `FLAGS_expected_values_dir`. The separate directories are needed because the pre-verification opening process deletes files written by the previous `db_stress` run as described above. These "unverified/" subdirectories are cleaned up following startup verification success.
I considered other approaches for preserving DB files through startup verification, like using a read-only DB or preventing deletion of DB files externally, e.g., in the `Env` layer. However, I decided against it since such an approach would not work for expected state files, and I did not want to change the DB management logic. If there were a way to disable DB file deletions before regular DB open, I would have preferred to use that.
Second, `db_stress` attempts to keep all DB and expected state files that were live at some point since the start of the `db_stress` run. This is a bit tricky and involves the following changes.
- Open the DB with `disable_auto_compactions=1` and `avoid_flush_during_recovery=1`
- DisableFileDeletions()
- EnableAutoCompactions()
For this part, too, I would have preferred to use a hypothetical API that disables DB file deletion before regular DB open.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10659
Reviewed By: hx235
Differential Revision: D39407454
Pulled By: ajkr
fbshipit-source-id: 6e981025c7dce147649d2e770728471395a7fa53
2022-09-12 21:49:38 +00:00
|
|
|
|
2023-01-05 03:35:34 +00:00
|
|
|
if (shared->ShouldVerifyAtBeginning() && FLAGS_preserve_unverified_changes) {
|
db_stress option to preserve all files until verification success (#10659)
Summary:
In `db_stress`, DB and expected state files containing changes leading up to a verification failure are often deleted, which makes debugging such failures difficult. On the DB side, flushed WAL files and compacted SST files are marked obsolete and then deleted. Without those files, we cannot pinpoint where a key that failed verification changed unexpectedly. On the expected state side, files for verifying prefix-recoverability in the presence of unsynced data loss are deleted before verification. These include a baseline state file containing the expected state at the time of the last successful verification, and a trace file containing all operations since then. Without those files, we cannot know the sequence of DB operations expected to be recovered.
This PR attempts to address this gap with a new `db_stress` flag: `preserve_unverified_changes`. Setting `preserve_unverified_changes=1` has two effects.
First, prior to startup verification, `db_stress` hardlinks all DB and expected state files in "unverified/" subdirectories of `FLAGS_db` and `FLAGS_expected_values_dir`. The separate directories are needed because the pre-verification opening process deletes files written by the previous `db_stress` run as described above. These "unverified/" subdirectories are cleaned up following startup verification success.
I considered other approaches for preserving DB files through startup verification, like using a read-only DB or preventing deletion of DB files externally, e.g., in the `Env` layer. However, I decided against it since such an approach would not work for expected state files, and I did not want to change the DB management logic. If there were a way to disable DB file deletions before regular DB open, I would have preferred to use that.
Second, `db_stress` attempts to keep all DB and expected state files that were live at some point since the start of the `db_stress` run. This is a bit tricky and involves the following changes.
- Open the DB with `disable_auto_compactions=1` and `avoid_flush_during_recovery=1`
- DisableFileDeletions()
- EnableAutoCompactions()
For this part, too, I would have preferred to use a hypothetical API that disables DB file deletion before regular DB open.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10659
Reviewed By: hx235
Differential Revision: D39407454
Pulled By: ajkr
fbshipit-source-id: 6e981025c7dce147649d2e770728471395a7fa53
2022-09-12 21:49:38 +00:00
|
|
|
Status s = InitUnverifiedSubdir(FLAGS_db);
|
|
|
|
if (s.ok() && !FLAGS_expected_values_dir.empty()) {
|
|
|
|
s = InitUnverifiedSubdir(FLAGS_expected_values_dir);
|
|
|
|
}
|
|
|
|
if (!s.ok()) {
|
|
|
|
fprintf(stderr, "Failed to setup unverified state dir: %s\n",
|
|
|
|
s.ToString().c_str());
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-05 03:35:34 +00:00
|
|
|
stress->InitDb(shared);
|
|
|
|
stress->FinishInitDb(shared);
|
2019-12-09 07:49:32 +00:00
|
|
|
|
2020-04-16 18:10:53 +00:00
|
|
|
if (FLAGS_sync_fault_injection) {
|
|
|
|
fault_fs_guard->SetFilesystemDirectWritable(false);
|
|
|
|
}
|
2022-05-06 18:17:08 +00:00
|
|
|
if (FLAGS_write_fault_one_in) {
|
|
|
|
fault_fs_guard->EnableWriteErrorInjection();
|
|
|
|
}
|
2020-04-16 18:10:53 +00:00
|
|
|
|
2021-12-18 01:30:45 +00:00
|
|
|
uint32_t n = FLAGS_threads;
|
2021-03-15 11:32:24 +00:00
|
|
|
uint64_t now = clock->NowMicros();
|
2019-12-09 07:49:32 +00:00
|
|
|
fprintf(stdout, "%s Initializing worker threads\n",
|
2021-03-15 11:32:24 +00:00
|
|
|
clock->TimeToString(now / 1000000).c_str());
|
2021-12-20 21:04:08 +00:00
|
|
|
|
2023-01-05 03:35:34 +00:00
|
|
|
shared->SetThreads(n);
|
2022-01-29 18:44:22 +00:00
|
|
|
|
|
|
|
if (FLAGS_compaction_thread_pool_adjust_interval > 0) {
|
2023-01-05 03:35:34 +00:00
|
|
|
shared->IncBgThreads();
|
2022-01-29 18:44:22 +00:00
|
|
|
}
|
|
|
|
|
2022-02-01 23:55:00 +00:00
|
|
|
if (FLAGS_continuous_verification_interval > 0) {
|
2023-01-05 03:35:34 +00:00
|
|
|
shared->IncBgThreads();
|
2022-01-29 18:44:22 +00:00
|
|
|
}
|
|
|
|
|
2021-12-20 21:04:08 +00:00
|
|
|
std::vector<ThreadState*> threads(n);
|
2022-01-29 18:44:22 +00:00
|
|
|
for (uint32_t i = 0; i < n; i++) {
|
2023-01-05 03:35:34 +00:00
|
|
|
threads[i] = new ThreadState(i, shared);
|
2022-01-29 18:44:22 +00:00
|
|
|
db_stress_env->StartThread(ThreadBody, threads[i]);
|
|
|
|
}
|
2021-12-20 21:04:08 +00:00
|
|
|
|
2023-01-05 03:35:34 +00:00
|
|
|
ThreadState bg_thread(0, shared);
|
2022-01-29 18:44:22 +00:00
|
|
|
if (FLAGS_compaction_thread_pool_adjust_interval > 0) {
|
|
|
|
db_stress_env->StartThread(PoolSizeChangeThread, &bg_thread);
|
|
|
|
}
|
|
|
|
|
2023-01-05 03:35:34 +00:00
|
|
|
ThreadState continuous_verification_thread(0, shared);
|
2022-01-29 18:44:22 +00:00
|
|
|
if (FLAGS_continuous_verification_interval > 0) {
|
|
|
|
db_stress_env->StartThread(DbVerificationThread,
|
|
|
|
&continuous_verification_thread);
|
2019-12-20 16:46:52 +00:00
|
|
|
}
|
2019-12-09 07:49:32 +00:00
|
|
|
|
|
|
|
// Each thread goes through the following states:
|
|
|
|
// initializing -> wait for others to init -> read/populate/depopulate
|
|
|
|
// wait for others to operate -> verify -> done
|
|
|
|
|
|
|
|
{
|
2023-01-05 03:35:34 +00:00
|
|
|
MutexLock l(shared->GetMutex());
|
|
|
|
while (!shared->AllInitialized()) {
|
|
|
|
shared->GetCondVar()->Wait();
|
2019-12-09 07:49:32 +00:00
|
|
|
}
|
2023-01-05 03:35:34 +00:00
|
|
|
if (shared->ShouldVerifyAtBeginning()) {
|
|
|
|
if (shared->HasVerificationFailedYet()) {
|
2019-12-20 16:46:52 +00:00
|
|
|
fprintf(stderr, "Crash-recovery verification failed :(\n");
|
2019-12-09 07:49:32 +00:00
|
|
|
} else {
|
2019-12-20 16:46:52 +00:00
|
|
|
fprintf(stdout, "Crash-recovery verification passed :)\n");
|
db_stress option to preserve all files until verification success (#10659)
Summary:
In `db_stress`, DB and expected state files containing changes leading up to a verification failure are often deleted, which makes debugging such failures difficult. On the DB side, flushed WAL files and compacted SST files are marked obsolete and then deleted. Without those files, we cannot pinpoint where a key that failed verification changed unexpectedly. On the expected state side, files for verifying prefix-recoverability in the presence of unsynced data loss are deleted before verification. These include a baseline state file containing the expected state at the time of the last successful verification, and a trace file containing all operations since then. Without those files, we cannot know the sequence of DB operations expected to be recovered.
This PR attempts to address this gap with a new `db_stress` flag: `preserve_unverified_changes`. Setting `preserve_unverified_changes=1` has two effects.
First, prior to startup verification, `db_stress` hardlinks all DB and expected state files in "unverified/" subdirectories of `FLAGS_db` and `FLAGS_expected_values_dir`. The separate directories are needed because the pre-verification opening process deletes files written by the previous `db_stress` run as described above. These "unverified/" subdirectories are cleaned up following startup verification success.
I considered other approaches for preserving DB files through startup verification, like using a read-only DB or preventing deletion of DB files externally, e.g., in the `Env` layer. However, I decided against it since such an approach would not work for expected state files, and I did not want to change the DB management logic. If there were a way to disable DB file deletions before regular DB open, I would have preferred to use that.
Second, `db_stress` attempts to keep all DB and expected state files that were live at some point since the start of the `db_stress` run. This is a bit tricky and involves the following changes.
- Open the DB with `disable_auto_compactions=1` and `avoid_flush_during_recovery=1`
- DisableFileDeletions()
- EnableAutoCompactions()
For this part, too, I would have preferred to use a hypothetical API that disables DB file deletion before regular DB open.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10659
Reviewed By: hx235
Differential Revision: D39407454
Pulled By: ajkr
fbshipit-source-id: 6e981025c7dce147649d2e770728471395a7fa53
2022-09-12 21:49:38 +00:00
|
|
|
Status s = DestroyUnverifiedSubdir(FLAGS_db);
|
|
|
|
if (s.ok() && !FLAGS_expected_values_dir.empty()) {
|
|
|
|
s = DestroyUnverifiedSubdir(FLAGS_expected_values_dir);
|
|
|
|
}
|
|
|
|
if (!s.ok()) {
|
|
|
|
fprintf(stderr, "Failed to cleanup unverified state dir: %s\n",
|
|
|
|
s.ToString().c_str());
|
|
|
|
exit(1);
|
|
|
|
}
|
2019-12-09 07:49:32 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-08-23 22:24:23 +00:00
|
|
|
if (!FLAGS_verification_only) {
|
|
|
|
// This is after the verification step to avoid making all those `Get()`s
|
|
|
|
// and `MultiGet()`s contend on the DB-wide trace mutex.
|
|
|
|
if (!FLAGS_expected_values_dir.empty()) {
|
|
|
|
stress->TrackExpectedState(shared);
|
|
|
|
}
|
|
|
|
now = clock->NowMicros();
|
|
|
|
fprintf(stdout, "%s Starting database operations\n",
|
|
|
|
clock->TimeToString(now / 1000000).c_str());
|
2019-12-09 07:49:32 +00:00
|
|
|
|
2023-08-23 22:24:23 +00:00
|
|
|
shared->SetStart();
|
|
|
|
shared->GetCondVar()->SignalAll();
|
|
|
|
while (!shared->AllOperated()) {
|
|
|
|
shared->GetCondVar()->Wait();
|
|
|
|
}
|
2019-12-09 07:49:32 +00:00
|
|
|
|
2023-08-23 22:24:23 +00:00
|
|
|
now = clock->NowMicros();
|
|
|
|
if (FLAGS_test_batches_snapshots) {
|
|
|
|
fprintf(stdout, "%s Limited verification already done during gets\n",
|
|
|
|
clock->TimeToString((uint64_t)now / 1000000).c_str());
|
|
|
|
} else if (FLAGS_skip_verifydb) {
|
|
|
|
fprintf(stdout, "%s Verification skipped\n",
|
|
|
|
clock->TimeToString((uint64_t)now / 1000000).c_str());
|
|
|
|
} else {
|
|
|
|
fprintf(stdout, "%s Starting verification\n",
|
|
|
|
clock->TimeToString((uint64_t)now / 1000000).c_str());
|
|
|
|
}
|
2019-12-09 07:49:32 +00:00
|
|
|
|
2023-08-23 22:24:23 +00:00
|
|
|
shared->SetStartVerify();
|
|
|
|
shared->GetCondVar()->SignalAll();
|
|
|
|
while (!shared->AllDone()) {
|
|
|
|
shared->GetCondVar()->Wait();
|
|
|
|
}
|
2019-12-09 07:49:32 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-08-23 22:24:23 +00:00
|
|
|
// If we are running verification_only
|
|
|
|
// stats will be empty and trying to report them will
|
|
|
|
// emit no ops or writes error. To avoid this, merging and reporting stats
|
|
|
|
// are not executed when running with verification_only
|
|
|
|
// TODO: We need to create verification stats (e.g. how many keys
|
|
|
|
// are verified by which method) and report them here instead of operation
|
|
|
|
// stats.
|
|
|
|
if (!FLAGS_verification_only) {
|
|
|
|
for (unsigned int i = 1; i < n; i++) {
|
|
|
|
threads[0]->stats.Merge(threads[i]->stats);
|
|
|
|
}
|
|
|
|
threads[0]->stats.Report("Stress Test");
|
2019-12-09 07:49:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for (unsigned int i = 0; i < n; i++) {
|
|
|
|
delete threads[i];
|
|
|
|
threads[i] = nullptr;
|
|
|
|
}
|
2021-03-15 11:32:24 +00:00
|
|
|
now = clock->NowMicros();
|
2020-06-13 02:24:11 +00:00
|
|
|
if (!FLAGS_skip_verifydb && !FLAGS_test_batches_snapshots &&
|
2023-01-05 03:35:34 +00:00
|
|
|
!shared->HasVerificationFailedYet()) {
|
2019-12-09 07:49:32 +00:00
|
|
|
fprintf(stdout, "%s Verification successful\n",
|
2021-03-15 11:32:24 +00:00
|
|
|
clock->TimeToString(now / 1000000).c_str());
|
2019-12-09 07:49:32 +00:00
|
|
|
}
|
2023-08-23 22:24:23 +00:00
|
|
|
|
|
|
|
if (!FLAGS_verification_only) {
|
|
|
|
stress->PrintStatistics();
|
|
|
|
}
|
2019-12-09 07:49:32 +00:00
|
|
|
|
2019-12-20 16:46:52 +00:00
|
|
|
if (FLAGS_compaction_thread_pool_adjust_interval > 0 ||
|
2022-07-19 18:25:43 +00:00
|
|
|
FLAGS_continuous_verification_interval > 0) {
|
2023-01-05 03:35:34 +00:00
|
|
|
MutexLock l(shared->GetMutex());
|
|
|
|
shared->SetShouldStopBgThread();
|
|
|
|
while (!shared->BgThreadsFinished()) {
|
|
|
|
shared->GetCondVar()->Wait();
|
2019-12-09 07:49:32 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-05 03:35:34 +00:00
|
|
|
if (shared->HasVerificationFailedYet()) {
|
2019-12-20 16:46:52 +00:00
|
|
|
fprintf(stderr, "Verification failed :(\n");
|
2019-12-09 07:49:32 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
// Public entry point: registers the calling (driver) thread for thread-status
// reporting, runs the stress test via RunStressTestImpl(), and unregisters.
// Returns the result of RunStressTestImpl() (true iff verification passed).
bool RunStressTest(SharedState* shared) {
  ThreadStatusUtil::RegisterThread(db_stress_env, ThreadStatus::USER);
  bool result = RunStressTestImpl(shared);
  ThreadStatusUtil::UnregisterThread();
  return result;
}

}  // namespace ROCKSDB_NAMESPACE

#endif  // GFLAGS