// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifdef GFLAGS
#pragma once

#include <atomic>
#include <memory>
#include <queue>
#include <string>
#include <unordered_set>
#include <vector>

#include "db_stress_tool/db_stress_stat.h"
#include "db_stress_tool/expected_state.h"
// SyncPoint is not supported in Released Windows Mode.
#if !(defined NDEBUG) || !defined(OS_WIN)
#include "test_util/sync_point.h"
#endif  // !(defined NDEBUG) || !defined(OS_WIN)
#include "util/gflags_compat.h"

DECLARE_uint64(seed);
DECLARE_int64(max_key);
DECLARE_uint64(log2_keys_per_lock);
DECLARE_int32(threads);
DECLARE_int32(column_families);
DECLARE_int32(nooverwritepercent);
DECLARE_string(expected_values_dir);
DECLARE_int32(clear_column_family_one_in);
DECLARE_bool(test_batches_snapshots);
DECLARE_int32(compaction_thread_pool_adjust_interval);
DECLARE_int32(continuous_verification_interval);
DECLARE_int32(read_fault_one_in);
DECLARE_int32(write_fault_one_in);
DECLARE_int32(open_metadata_write_fault_one_in);
DECLARE_int32(open_write_fault_one_in);
DECLARE_int32(open_read_fault_one_in);
DECLARE_int32(injest_error_severity);
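
// Usage sketch (illustrative, not part of the original header): each
// DECLARE_* above has a matching DEFINE_* gflag elsewhere in db_stress, so
// the values come from the command line, e.g. (flag values are made up):
//
//   ./db_stress --seed=42 --max_key=100000 --column_families=3 \
//       --nooverwritepercent=3 --expected_values_dir=/tmp/expected_state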

namespace ROCKSDB_NAMESPACE {
class StressTest;

// State shared by all concurrent executions of the same benchmark.
class SharedState {
 public:
  // Indicates that a key may have any value (or may not be present at all)
  // because an operation on it is incomplete.
  static const uint32_t UNKNOWN_SENTINEL;
  // Indicates that a key should definitely be deleted.
  static const uint32_t DELETION_SENTINEL;

  // Errors when reading filter blocks are ignored, so we use a thread-local
  // variable, updated via sync points, to track errors injected while reading
  // filter blocks. This lets us ignore the Get/MultiGet results for those
  // calls.
#if defined(ROCKSDB_SUPPORT_THREAD_LOCAL)
#if defined(OS_SOLARIS)
  static __thread bool ignore_read_error;
#else
  static thread_local bool ignore_read_error;
#endif  // OS_SOLARIS
#else
  static bool ignore_read_error;
#endif  // ROCKSDB_SUPPORT_THREAD_LOCAL

  SharedState(Env* /*env*/, StressTest* stress_test)
      : cv_(&mu_),
        seed_(static_cast<uint32_t>(FLAGS_seed)),
        max_key_(FLAGS_max_key),
        log2_keys_per_lock_(static_cast<uint32_t>(FLAGS_log2_keys_per_lock)),
        num_threads_(0),
        num_initialized_(0),
        num_populated_(0),
        vote_reopen_(0),
        num_done_(0),
        start_(false),
        start_verify_(false),
        num_bg_threads_(0),
        should_stop_bg_thread_(false),
        bg_thread_finished_(0),
        stress_test_(stress_test),
        verification_failure_(false),
        should_stop_test_(false),
        no_overwrite_ids_(FLAGS_column_families),
        expected_state_manager_(nullptr),
        printing_verification_results_(false) {
    // Pick random keys in each column family that will not experience
    // overwrite.
fprintf(stdout, "Choosing random keys with no overwrite\n");
|
2019-12-09 07:49:32 +00:00
|
|
|
Random64 rnd(seed_);
|
|
|
|
// Start with the identity permutation. Subsequent iterations of
|
|
|
|
// for loop below will start with perm of previous for loop
|
|
|
|
int64_t* permutation = new int64_t[max_key_];
|
|
|
|
for (int64_t i = 0; i < max_key_; i++) {
|
|
|
|
permutation[i] = i;
|
|
|
|
}
|
|
|
|
// Now do the Knuth shuffle
|
|
|
|
int64_t num_no_overwrite_keys = (max_key_ * FLAGS_nooverwritepercent) / 100;
|
|
|
|
// Only need to figure out first num_no_overwrite_keys of permutation
|
|
|
|
no_overwrite_ids_.reserve(num_no_overwrite_keys);
|
|
|
|
for (int64_t i = 0; i < num_no_overwrite_keys; i++) {
|
|
|
|
int64_t rand_index = i + rnd.Next() % (max_key_ - i);
|
|
|
|
// Swap i and rand_index;
|
|
|
|
int64_t temp = permutation[i];
|
|
|
|
permutation[i] = permutation[rand_index];
|
|
|
|
permutation[rand_index] = temp;
|
|
|
|
// Fill no_overwrite_ids_ with the first num_no_overwrite_keys of
|
|
|
|
// permutation
|
|
|
|
no_overwrite_ids_.insert(permutation[i]);
|
|
|
|
}
|
|
|
|
delete[] permutation;
|
|
|
|
|
|
|
|
    Status status;
    // TODO: We should introduce a way to explicitly disable verification
    // during shutdown. When that is disabled and FLAGS_expected_values_dir
    // is empty (disabling verification at startup), we can skip tracking
    // expected state. Only then should we permit bypassing the below feature
    // compatibility checks.
    if (!FLAGS_expected_values_dir.empty()) {
      if (!std::atomic<uint32_t>{}.is_lock_free()) {
        status = Status::InvalidArgument(
            "Cannot use --expected_values_dir on platforms without lock-free "
            "std::atomic<uint32_t>");
      }
      if (status.ok() && FLAGS_clear_column_family_one_in > 0) {
        status = Status::InvalidArgument(
            "Cannot use --expected_values_dir when "
            "--clear_column_family_one_in is greater than zero.");
      }
    }
    if (status.ok()) {
      if (FLAGS_expected_values_dir.empty()) {
        expected_state_manager_.reset(
            new AnonExpectedStateManager(FLAGS_max_key, FLAGS_column_families));
      } else {
        expected_state_manager_.reset(new FileExpectedStateManager(
            FLAGS_max_key, FLAGS_column_families, FLAGS_expected_values_dir));
      }
      status = expected_state_manager_->Open();
    }
    if (!status.ok()) {
      fprintf(stderr, "Failed setting up expected state with error: %s\n",
              status.ToString().c_str());
      exit(1);
    }

    if (FLAGS_test_batches_snapshots) {
      fprintf(stdout, "No lock creation because test_batches_snapshots set\n");
      return;
    }

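    // The per-key lock count below is ceil(max_key_ / 2^log2_keys_per_lock_).
    // Worked example (illustrative numbers): with max_key_ = 10 and
    // log2_keys_per_lock_ = 2, keys are grouped four per lock, so three locks
    // cover key ranges [0, 3], [4, 7], and [8, 9].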
    long num_locks = static_cast<long>(max_key_ >> log2_keys_per_lock_);
    if (max_key_ & ((1 << log2_keys_per_lock_) - 1)) {
      num_locks++;
    }
    fprintf(stdout, "Creating %ld locks\n", num_locks * FLAGS_column_families);
    key_locks_.resize(FLAGS_column_families);

    for (int i = 0; i < FLAGS_column_families; ++i) {
      key_locks_[i].resize(num_locks);
      for (auto& ptr : key_locks_[i]) {
        ptr.reset(new port::Mutex);
      }
    }
#ifndef NDEBUG
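    // When the "FaultInjectionIgnoreError" sync point fires, the callback
    // registered below sets the thread-local ignore_read_error flag (see the
    // comment on ignore_read_error above) so that the Get/MultiGet result
    // affected by the injected read error can be disregarded.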
    if (FLAGS_read_fault_one_in) {
      SyncPoint::GetInstance()->SetCallBack("FaultInjectionIgnoreError",
                                            IgnoreReadErrorCallback);
      SyncPoint::GetInstance()->EnableProcessing();
    }
#endif  // NDEBUG
  }

  ~SharedState() {
#ifndef NDEBUG
    if (FLAGS_read_fault_one_in) {
      SyncPoint::GetInstance()->ClearAllCallBacks();
      SyncPoint::GetInstance()->DisableProcessing();
    }
#endif
  }

  port::Mutex* GetMutex() { return &mu_; }

  port::CondVar* GetCondVar() { return &cv_; }

  StressTest* GetStressTest() const { return stress_test_; }

  int64_t GetMaxKey() const { return max_key_; }

  uint32_t GetNumThreads() const { return num_threads_; }

  void SetThreads(int num_threads) { num_threads_ = num_threads; }

  void IncInitialized() { num_initialized_++; }

  void IncOperated() { num_populated_++; }

  void IncDone() { num_done_++; }

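  // Reopen voting works modulo the thread count: vote_reopen_ wraps back to
  // zero once every one of the num_threads_ worker threads has called
  // IncVotedReopen() in a cycle, which is when AllVotedReopen() below reports
  // true.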
  void IncVotedReopen() { vote_reopen_ = (vote_reopen_ + 1) % num_threads_; }

  bool AllInitialized() const { return num_initialized_ >= num_threads_; }

  bool AllOperated() const { return num_populated_ >= num_threads_; }

  bool AllDone() const { return num_done_ >= num_threads_; }

  bool AllVotedReopen() { return (vote_reopen_ == 0); }

  void SetStart() { start_ = true; }

  void SetStartVerify() { start_verify_ = true; }

  bool Started() const { return start_; }

  bool VerifyStarted() const { return start_verify_; }

  void SetVerificationFailure() { verification_failure_.store(true); }

  bool HasVerificationFailedYet() const { return verification_failure_.load(); }

  void SetShouldStopTest() { should_stop_test_.store(true); }

  bool ShouldStopTest() const { return should_stop_test_.load(); }

  // Returns a lock covering `key` in `cf`.
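  // Keys are sharded onto locks by their high-order bits:
  // `key >> log2_keys_per_lock_` selects the lock, so (illustrative example)
  // with log2_keys_per_lock_ = 2, keys 4..7 all share lock index 1 in `cf`.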
  port::Mutex* GetMutexForKey(int cf, int64_t key) {
    return key_locks_[cf][key >> log2_keys_per_lock_].get();
  }

  // Acquires locks for all keys in `cf`.
  void LockColumnFamily(int cf) {
    for (auto& mutex : key_locks_[cf]) {
      mutex->Lock();
    }
  }

  // Releases locks for all keys in `cf`.
  void UnlockColumnFamily(int cf) {
    for (auto& mutex : key_locks_[cf]) {
      mutex->Unlock();
    }
  }

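  // The three members below forward to ExpectedStateManager. Roughly (see
  // expected_state.h for the authoritative contract): SaveAtAndAfter()
  // persists the expected values as of `db`'s current state and starts
  // tracking later changes; after a crash, HasHistory() reports whether such
  // a saved state exists, in which case Restore() rebuilds the expected
  // values matching what `db` recovered.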
  Status SaveAtAndAfter(DB* db) {
    return expected_state_manager_->SaveAtAndAfter(db);
  }

  bool HasHistory() { return expected_state_manager_->HasHistory(); }

  Status Restore(DB* db) { return expected_state_manager_->Restore(db); }

  // Requires external locking covering all keys in `cf`.
  void ClearColumnFamily(int cf) {
    return expected_state_manager_->ClearColumnFamily(cf);
  }

  // @param pending True if the update may have started but is not yet
  // guaranteed finished. This is useful for crash-recovery testing when the
  // process may crash before updating the expected values array.
  //
  // Requires external locking covering `key` in `cf`.
  void Put(int cf, int64_t key, uint32_t value_base, bool pending) {
    return expected_state_manager_->Put(cf, key, value_base, pending);
  }
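
  // Usage sketch (illustrative, not part of the original header): a writer in
  // the non-batched stress test typically holds the per-key lock and brackets
  // the DB write with a pending/non-pending pair of expected-state updates:
  //
  //   MutexLock l(shared->GetMutexForKey(cf, key));
  //   shared->Put(cf, key, value_base, true /* pending */);
  //   // ... issue the corresponding db->Put(...) ...
  //   shared->Put(cf, key, value_base, false /* pending */);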

  // Requires external locking covering `key` in `cf`.
  uint32_t Get(int cf, int64_t key) const {
    return expected_state_manager_->Get(cf, key);
  }

  // @param pending See comment above Put()
  // Returns true if the key was not yet deleted.
  //
  // Requires external locking covering `key` in `cf`.
  bool Delete(int cf, int64_t key, bool pending) {
    return expected_state_manager_->Delete(cf, key, pending);
  }

  // @param pending See comment above Put()
  // Returns true if the key was not yet deleted.
  //
  // Requires external locking covering `key` in `cf`.
  bool SingleDelete(int cf, int64_t key, bool pending) {
    return expected_state_manager_->Delete(cf, key, pending);
  }

  // @param pending See comment above Put()
  // Returns number of keys deleted by the call.
  //
  // Requires external locking covering keys in `[begin_key, end_key)` in `cf`.
  int DeleteRange(int cf, int64_t begin_key, int64_t end_key, bool pending) {
    return expected_state_manager_->DeleteRange(cf, begin_key, end_key,
                                                pending);
  }

  bool AllowsOverwrite(int64_t key) {
    return no_overwrite_ids_.find(key) == no_overwrite_ids_.end();
  }

  // Requires external locking covering `key` in `cf`.
  bool Exists(int cf, int64_t key) {
    return expected_state_manager_->Exists(cf, key);
  }

  uint32_t GetSeed() const { return seed_; }

  void SetShouldStopBgThread() { should_stop_bg_thread_ = true; }

  bool ShouldStopBgThread() { return should_stop_bg_thread_; }

  void IncBgThreads() { ++num_bg_threads_; }

  void IncBgThreadsFinished() { ++bg_thread_finished_; }

  bool BgThreadsFinished() const {
    return bg_thread_finished_ == num_bg_threads_;
  }

  bool ShouldVerifyAtBeginning() const {
    return !FLAGS_expected_values_dir.empty();
  }

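  // PrintingVerificationResults() atomically claims the "currently printing"
  // flag: it returns true if some other thread already holds it, and false if
  // this call just acquired it, in which case the caller should print and
  // then release the flag via FinishPrintingVerificationResults().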
  bool PrintingVerificationResults() {
    bool tmp = false;
    return !printing_verification_results_.compare_exchange_strong(
        tmp, true, std::memory_order_relaxed);
  }

  void FinishPrintingVerificationResults() {
    printing_verification_results_.store(false, std::memory_order_relaxed);
  }

 private:
  static void IgnoreReadErrorCallback(void*) {
    ignore_read_error = true;
  }

  port::Mutex mu_;
  port::CondVar cv_;
  const uint32_t seed_;
  const int64_t max_key_;
  const uint32_t log2_keys_per_lock_;
  int num_threads_;
  long num_initialized_;
  long num_populated_;
  long vote_reopen_;
  long num_done_;
  bool start_;
  bool start_verify_;
  int num_bg_threads_;
  bool should_stop_bg_thread_;
  int bg_thread_finished_;
  StressTest* stress_test_;
  std::atomic<bool> verification_failure_;
  std::atomic<bool> should_stop_test_;

  // Keys that should not be overwritten
  std::unordered_set<size_t> no_overwrite_ids_;

  std::unique_ptr<ExpectedStateManager> expected_state_manager_;
  // Each mutex is held by unique_ptr because port::Mutex is not copyable, and
  // storing it directly in the container could require copying depending on
  // the container implementation.
  std::vector<std::vector<std::unique_ptr<port::Mutex>>> key_locks_;
  std::atomic<bool> printing_verification_results_;
};

// Per-thread state for concurrent executions of the same benchmark.
struct ThreadState {
  uint32_t tid;  // 0..n-1
  Random rand;   // Has different seeds for different threads
  SharedState* shared;
  Stats stats;
  struct SnapshotState {
    const Snapshot* snapshot;
    // The cf from which we did a Get at this snapshot
    int cf_at;
    // The name of the cf at the time that we did a read
    std::string cf_at_name;
    // The key with which we did a Get at this snapshot
    std::string key;
    // The status of the Get
    Status status;
    // The value of the Get
    std::string value;
    // optional state of all keys in the db
    std::vector<bool>* key_vec;

    std::string timestamp;
  };
  std::queue<std::pair<uint64_t, SnapshotState>> snapshot_queue;

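  // Each thread's Random is seeded with 1000 + tid + the shared seed, so a
  // fixed --seed reproduces the same per-thread operation streams while the
  // threads still diverge from one another.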
  ThreadState(uint32_t index, SharedState* _shared)
      : tid(index), rand(1000 + index + _shared->GetSeed()), shared(_shared) {}
};

}  // namespace ROCKSDB_NAMESPACE
#endif  // GFLAGS