rocksdb/db_stress_tool/db_stress_driver.cc
Hui Xiao 9a034801ce Group rocksdb.sst.read.micros stat by different user read IOActivity + misc (#11444)
Summary:
**Context/Summary:**
- Similar to https://github.com/facebook/rocksdb/pull/11288 but for user reads such as `Get()`, `MultiGet()`, `DBIterator::XXX()`, and `Verify(File)Checksum()`.
   - To support this, I refactored some user-facing `MultiGet` calls in `TransactionBase` and various `DB` types so that they call `GetImpl()` instead of the user-facing `Get()`, which lets them pass the `ReadOptions::io_activity` check (see the PR conversation).
   - The new user-read stats breakdown is guarded by `kExceptDetailedTimers`, since measurement shows a 4-5% regression relative to upstream/main.

- Misc
   - More refactoring: with https://github.com/facebook/rocksdb/pull/11288, we completed passing `ReadOptions`/`IOOptions` down to the FS level, so we can now replace the `rate_limiter_priority` parameter previously [added](https://github.com/facebook/rocksdb/pull/9424) to `RandomAccessFileReader`'s `Read()/MultiRead()/Prefetch()` with `IOOptions::rate_limiter_priority` (see the sketch after this list)
   - Also, `ReadAsync()` call time is now measured in `SST_READ_MICROS`
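
For illustration, here is a minimal, self-contained sketch of the `rate_limiter_priority` refactor described above. `OldReader`, `NewReader`, and the enum are hypothetical stand-ins for `RandomAccessFileReader` and `Env::IOPriority`, not the actual RocksDB types or signatures:

```
// Sketch only: hypothetical types illustrating the refactor, not RocksDB's
// actual RandomAccessFileReader or Env::IOPriority.
#include <cstddef>
#include <cstdint>
#include <cstdio>

enum class IOPriority { kLow, kHigh, kUser, kTotal };

// Before (the PR 9424 approach): priority is an extra parameter threaded
// through every Read/MultiRead/Prefetch call site.
struct OldReader {
  void Read(uint64_t offset, size_t n, IOPriority rate_limiter_priority) {
    std::printf("read %zu bytes @ %llu, prio %d\n", n,
                static_cast<unsigned long long>(offset),
                static_cast<int>(rate_limiter_priority));
  }
};

// After (this PR's approach): the options struct that is already plumbed
// down to the FS layer carries the priority, so the parameter disappears.
struct IOOptions {
  IOPriority rate_limiter_priority = IOPriority::kTotal;
};

struct NewReader {
  void Read(const IOOptions& opts, uint64_t offset, size_t n) {
    std::printf("read %zu bytes @ %llu, prio %d\n", n,
                static_cast<unsigned long long>(offset),
                static_cast<int>(opts.rate_limiter_priority));
  }
};

int main() {
  OldReader{}.Read(/*offset=*/0, /*n=*/4096, IOPriority::kUser);

  IOOptions opts;
  opts.rate_limiter_priority = IOPriority::kUser;  // e.g. a user-initiated Get()
  NewReader{}.Read(opts, /*offset=*/0, /*n=*/4096);
}
```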

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11444

Test Plan:
- CI fake db crash/stress test
- Microbenchmarking

**Build** `make clean && ROCKSDB_NO_FBCODE=1 DEBUG_LEVEL=0 make -jN db_basic_bench`
- google benchmark version: 604f6fd3f4
- db_basic_bench_base: upstream
- db_basic_bench_pr: db_basic_bench_base + this PR
- asyncread_db_basic_bench_base: upstream + [db basic bench patch for IteratorNext](https://github.com/facebook/rocksdb/compare/main...hx235:rocksdb:micro_bench_async_read)
- asyncread_db_basic_bench_pr: asyncread_db_basic_bench_base + this PR

**Test**

Get
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{null_stat|base|pr} --benchmark_filter=DBGet/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/negative_query:0/enable_filter:0/mmap:1/threads:1 --benchmark_repetitions=1000
```

Result
```
Coming soon
```

AsyncRead
```
TEST_TMPDIR=/dev/shm ./asyncread_db_basic_bench_{base|pr} --benchmark_filter=IteratorNext/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/async_io:1/include_detailed_timers:0 --benchmark_repetitions=1000 > asyncread_db_basic_bench_{base|pr}.out
```

Result
```
Base:
1956,1956,1968,1977,1979,1986,1988,1988,1988,1990,1991,1991,1993,1993,1993,1993,1994,1996,1997,1997,1997,1998,1999,2001,2001,2002,2004,2007,2007,2008,

PR (2.3% regression, due to measuring `SST_READ_MICROS`, which wasn't measured before):
1993,2014,2016,2022,2024,2027,2027,2028,2028,2030,2031,2031,2032,2032,2038,2039,2042,2044,2044,2047,2047,2047,2048,2049,2050,2052,2052,2052,2053,2053,
```

Reviewed By: ajkr

Differential Revision: D45918925

Pulled By: hx235

fbshipit-source-id: 58a54560d9ebeb3a59b6d807639692614dad058a
2023-08-08 17:26:50 -07:00


// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
#ifdef GFLAGS
#include "db_stress_tool/db_stress_common.h"
#include "utilities/fault_injection_fs.h"
namespace ROCKSDB_NAMESPACE {
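
// Entry point for each stress-test worker thread. The thread optionally
// verifies the DB, then walks through the initialize -> operate -> verify ->
// done phases, synchronizing with its peers through SharedState's mutex and
// condition variable.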
void ThreadBody(void* v) {
  ThreadStatusUtil::RegisterThread(db_stress_env, ThreadStatus::USER);
  ThreadState* thread = reinterpret_cast<ThreadState*>(v);
  SharedState* shared = thread->shared;

  if (!FLAGS_skip_verifydb && shared->ShouldVerifyAtBeginning()) {
    thread->shared->GetStressTest()->VerifyDb(thread);
  }
  {
    // Mark this thread initialized and wait for the start signal.
    MutexLock l(shared->GetMutex());
    shared->IncInitialized();
    if (shared->AllInitialized()) {
      shared->GetCondVar()->SignalAll();
    }
    while (!shared->Started()) {
      shared->GetCondVar()->Wait();
    }
  }
  thread->shared->GetStressTest()->OperateDb(thread);
  {
    // Mark this thread done operating and wait for the verification phase.
    MutexLock l(shared->GetMutex());
    shared->IncOperated();
    if (shared->AllOperated()) {
      shared->GetCondVar()->SignalAll();
    }
    while (!shared->VerifyStarted()) {
      shared->GetCondVar()->Wait();
    }
  }
  if (!FLAGS_skip_verifydb) {
    thread->shared->GetStressTest()->VerifyDb(thread);
  }
  {
    // Mark this thread done and wake the driver if it was the last one.
    MutexLock l(shared->GetMutex());
    shared->IncDone();
    if (shared->AllDone()) {
      shared->GetCondVar()->SignalAll();
    }
  }
  ThreadStatusUtil::UnregisterThread();
}
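
// Drives one stress-test run: optionally preserves an unverified copy of the
// DB state, initializes the DB, spawns worker and background threads,
// coordinates the initialize/operate/verify phases, then merges and reports
// per-thread stats. Returns false if verification failed.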
bool RunStressTestImpl(SharedState* shared) {
  SystemClock* clock = db_stress_env->GetSystemClock().get();
  StressTest* stress = shared->GetStressTest();

  if (shared->ShouldVerifyAtBeginning() && FLAGS_preserve_unverified_changes) {
    Status s = InitUnverifiedSubdir(FLAGS_db);
    if (s.ok() && !FLAGS_expected_values_dir.empty()) {
      s = InitUnverifiedSubdir(FLAGS_expected_values_dir);
    }
    if (!s.ok()) {
      fprintf(stderr, "Failed to setup unverified state dir: %s\n",
              s.ToString().c_str());
      exit(1);
    }
  }

  stress->InitDb(shared);
  stress->FinishInitDb(shared);

  if (FLAGS_sync_fault_injection) {
    fault_fs_guard->SetFilesystemDirectWritable(false);
  }
  if (FLAGS_write_fault_one_in) {
    fault_fs_guard->EnableWriteErrorInjection();
  }

  uint32_t n = FLAGS_threads;
  uint64_t now = clock->NowMicros();
  fprintf(stdout, "%s Initializing worker threads\n",
          clock->TimeToString(now / 1000000).c_str());

  shared->SetThreads(n);

  if (FLAGS_compaction_thread_pool_adjust_interval > 0) {
    shared->IncBgThreads();
  }
  if (FLAGS_continuous_verification_interval > 0) {
    shared->IncBgThreads();
  }

  std::vector<ThreadState*> threads(n);
  for (uint32_t i = 0; i < n; i++) {
    threads[i] = new ThreadState(i, shared);
    db_stress_env->StartThread(ThreadBody, threads[i]);
  }

  ThreadState bg_thread(0, shared);
  if (FLAGS_compaction_thread_pool_adjust_interval > 0) {
    db_stress_env->StartThread(PoolSizeChangeThread, &bg_thread);
  }

  ThreadState continuous_verification_thread(0, shared);
  if (FLAGS_continuous_verification_interval > 0) {
    db_stress_env->StartThread(DbVerificationThread,
                               &continuous_verification_thread);
  }

  // Each thread goes through the following states:
  // initializing -> wait for others to init -> read/populate/depopulate
  // wait for others to operate -> verify -> done
  {
    MutexLock l(shared->GetMutex());
    while (!shared->AllInitialized()) {
      shared->GetCondVar()->Wait();
    }
    if (shared->ShouldVerifyAtBeginning()) {
      if (shared->HasVerificationFailedYet()) {
        fprintf(stderr, "Crash-recovery verification failed :(\n");
      } else {
        fprintf(stdout, "Crash-recovery verification passed :)\n");
        Status s = DestroyUnverifiedSubdir(FLAGS_db);
        if (s.ok() && !FLAGS_expected_values_dir.empty()) {
          s = DestroyUnverifiedSubdir(FLAGS_expected_values_dir);
        }
        if (!s.ok()) {
          fprintf(stderr, "Failed to cleanup unverified state dir: %s\n",
                  s.ToString().c_str());
          exit(1);
        }
      }
    }

    // This is after the verification step to avoid making all those `Get()`s
    // and `MultiGet()`s contend on the DB-wide trace mutex.
    if (!FLAGS_expected_values_dir.empty()) {
      stress->TrackExpectedState(shared);
    }

    now = clock->NowMicros();
    fprintf(stdout, "%s Starting database operations\n",
            clock->TimeToString(now / 1000000).c_str());
    shared->SetStart();
    shared->GetCondVar()->SignalAll();
    while (!shared->AllOperated()) {
      shared->GetCondVar()->Wait();
    }

    now = clock->NowMicros();
    if (FLAGS_test_batches_snapshots) {
      fprintf(stdout, "%s Limited verification already done during gets\n",
              clock->TimeToString((uint64_t)now / 1000000).c_str());
    } else if (FLAGS_skip_verifydb) {
      fprintf(stdout, "%s Verification skipped\n",
              clock->TimeToString((uint64_t)now / 1000000).c_str());
    } else {
      fprintf(stdout, "%s Starting verification\n",
              clock->TimeToString((uint64_t)now / 1000000).c_str());
    }

    shared->SetStartVerify();
    shared->GetCondVar()->SignalAll();
    while (!shared->AllDone()) {
      shared->GetCondVar()->Wait();
    }
  }

  for (unsigned int i = 1; i < n; i++) {
    threads[0]->stats.Merge(threads[i]->stats);
  }
  threads[0]->stats.Report("Stress Test");

  for (unsigned int i = 0; i < n; i++) {
    delete threads[i];
    threads[i] = nullptr;
  }

  now = clock->NowMicros();
  if (!FLAGS_skip_verifydb && !FLAGS_test_batches_snapshots &&
      !shared->HasVerificationFailedYet()) {
    fprintf(stdout, "%s Verification successful\n",
            clock->TimeToString(now / 1000000).c_str());
  }
  stress->PrintStatistics();

  if (FLAGS_compaction_thread_pool_adjust_interval > 0 ||
      FLAGS_continuous_verification_interval > 0) {
    MutexLock l(shared->GetMutex());
    shared->SetShouldStopBgThread();
    while (!shared->BgThreadsFinished()) {
      shared->GetCondVar()->Wait();
    }
  }

  if (shared->HasVerificationFailedYet()) {
    fprintf(stderr, "Verification failed :(\n");
    return false;
  }

  return true;
}
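
// Wrapper that registers the driver thread with ThreadStatusUtil for the
// duration of the run and unregisters it afterwards.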
bool RunStressTest(SharedState* shared) {
  ThreadStatusUtil::RegisterThread(db_stress_env, ThreadStatus::USER);
  bool result = RunStressTestImpl(shared);
  ThreadStatusUtil::UnregisterThread();
  return result;
}
} // namespace ROCKSDB_NAMESPACE
#endif // GFLAGS