// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//

#ifdef GFLAGS
#include "db_stress_tool/db_stress_common.h"

#include <cmath>

#include "rocksdb/secondary_cache.h"
#include "util/file_checksum_helper.h"
#include "util/xxhash.h"

ROCKSDB_NAMESPACE::Env* db_stress_listener_env = nullptr;
ROCKSDB_NAMESPACE::Env* db_stress_env = nullptr;
// If non-null, injects read/write errors at a rate specified by the
// read_fault_one_in or write_fault_one_in flags.
std::shared_ptr<ROCKSDB_NAMESPACE::FaultInjectionTestFS> fault_fs_guard;
std::shared_ptr<ROCKSDB_NAMESPACE::SecondaryCache> compressed_secondary_cache;
std::shared_ptr<ROCKSDB_NAMESPACE::Cache> block_cache;
enum ROCKSDB_NAMESPACE::CompressionType compression_type_e =
    ROCKSDB_NAMESPACE::kSnappyCompression;
enum ROCKSDB_NAMESPACE::CompressionType bottommost_compression_type_e =
    ROCKSDB_NAMESPACE::kSnappyCompression;
enum ROCKSDB_NAMESPACE::ChecksumType checksum_type_e =
    ROCKSDB_NAMESPACE::kCRC32c;
enum RepFactory FLAGS_rep_factory = kSkipList;
std::vector<double> sum_probs(100001);
constexpr int64_t zipf_sum_size = 100000;

namespace ROCKSDB_NAMESPACE {

// The Zipfian distribution is generated from a pre-calculated array.
// InitializeHotKeyGenerator() should be called before the stress test starts.
// First, the probability distribution function (PDF) of this Zipfian follows
// a power law: P(x) = 1 / (x^alpha).
// So we calculate the PDF for x from 1 to zipf_sum_size in the first for loop
// and add the PDF values together as c, which gives the total probability.
// Next, we calculate the inverse CDF of the Zipfian and store each value in
// an array (sum_probs). The rank is from 0 to zipf_sum_size. For example, for
// integer k, its Zipfian CDF value is sum_probs[k].
// Third, when we need an integer whose probability follows the Zipfian
// distribution, we take a rand_seed in [0,1] drawn from a uniform
// distribution and look it up in sum_probs via binary search. When we find
// the sum_probs[i] closest to rand_seed, i is an integer in
// [0, zipf_sum_size] that follows the Zipfian distribution with parameter
// alpha.
// Finally, we can scale i to the [0, max_key] range.
// To avoid hot keys being close to each other and skewed towards 0,
// we use Random64 to shuffle the result.
void InitializeHotKeyGenerator(double alpha) {
  double c = 0;
  for (int64_t i = 1; i <= zipf_sum_size; i++) {
    c = c + (1.0 / std::pow(static_cast<double>(i), alpha));
  }
  c = 1.0 / c;

  sum_probs[0] = 0;
  for (int64_t i = 1; i <= zipf_sum_size; i++) {
    sum_probs[i] =
        sum_probs[i - 1] + c / std::pow(static_cast<double>(i), alpha);
  }
}

// Generate one key that follows the Zipfian distribution. The skewness
// is decided by the parameter alpha. Inputs are the rand_seed in [0,1] and
// the max of the key to be generated. If we directly returned tmp_zipf_seed,
// keys closer to 0 would have a higher probability. To randomly distribute
// the hot keys in [0, max_key], we use Random64 to shuffle it.
int64_t GetOneHotKeyID(double rand_seed, int64_t max_key) {
  int64_t low = 1, mid, high = zipf_sum_size, zipf = 0;
  while (low <= high) {
    mid = (low + high) / 2;
    if (sum_probs[mid] >= rand_seed && sum_probs[mid - 1] < rand_seed) {
      zipf = mid;
      break;
    } else if (sum_probs[mid] >= rand_seed) {
      high = mid - 1;
    } else {
      low = mid + 1;
    }
  }
  int64_t tmp_zipf_seed = zipf * max_key / zipf_sum_size;
  Random64 rand_local(tmp_zipf_seed);
  return rand_local.Next() % max_key;
}
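
// Illustrative usage sketch (it mirrors GenerateOneKey() further below; `rng`
// is a stand-in for the per-thread Random64 generator, not a real member of
// this file):
//
//   InitializeHotKeyGenerator(FLAGS_hot_key_alpha);      // once, at startup
//   double seed = static_cast<double>(rng.Next() % FLAGS_max_key) /
//                 FLAGS_max_key;                         // uniform in [0,1)
//   int64_t key = GetOneHotKeyID(seed, FLAGS_max_key);   // Zipfian-skewed key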

void PoolSizeChangeThread(void* v) {
  assert(FLAGS_compaction_thread_pool_adjust_interval > 0);
  ThreadState* thread = static_cast<ThreadState*>(v);
  SharedState* shared = thread->shared;

  while (true) {
    {
      MutexLock l(shared->GetMutex());
      if (shared->ShouldStopBgThread()) {
        shared->IncBgThreadsFinished();
        if (shared->BgThreadsFinished()) {
          shared->GetCondVar()->SignalAll();
        }
        return;
      }
    }

    auto thread_pool_size_base = FLAGS_max_background_compactions;
    auto thread_pool_size_var = FLAGS_compaction_thread_pool_variations;
    int new_thread_pool_size =
        thread_pool_size_base - thread_pool_size_var +
        thread->rand.Next() % (thread_pool_size_var * 2 + 1);
    if (new_thread_pool_size < 1) {
      new_thread_pool_size = 1;
    }
    db_stress_env->SetBackgroundThreads(new_thread_pool_size,
                                        ROCKSDB_NAMESPACE::Env::Priority::LOW);
    // Sleep up to FLAGS_compaction_thread_pool_adjust_interval milliseconds
    db_stress_env->SleepForMicroseconds(
        thread->rand.Next() % FLAGS_compaction_thread_pool_adjust_interval *
            1000 +
        1);
  }
}
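
// For example (illustrative arithmetic only, not additional test logic): with
// max_background_compactions == 4 and compaction_thread_pool_variations == 2,
// new_thread_pool_size above is drawn uniformly from [4 - 2, 4 + 2] = [2, 6]
// and then clamped to be at least 1.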

void DbVerificationThread(void* v) {
  assert(FLAGS_continuous_verification_interval > 0);
  auto* thread = static_cast<ThreadState*>(v);
  SharedState* shared = thread->shared;
  StressTest* stress_test = shared->GetStressTest();
  assert(stress_test != nullptr);
  while (true) {
    {
      MutexLock l(shared->GetMutex());
      if (shared->ShouldStopBgThread()) {
        shared->IncBgThreadsFinished();
        if (shared->BgThreadsFinished()) {
          shared->GetCondVar()->SignalAll();
        }
        return;
      }
    }
    if (!shared->HasVerificationFailedYet()) {
      stress_test->ContinuouslyVerifyDb(thread);
    }
    db_stress_env->SleepForMicroseconds(
        thread->rand.Next() % FLAGS_continuous_verification_interval * 1000 +
        1);
  }
}

void CompressedCacheSetCapacityThread(void* v) {
  assert(FLAGS_compressed_secondary_cache_size > 0 ||
         FLAGS_compressed_secondary_cache_ratio > 0.0);
  auto* thread = static_cast<ThreadState*>(v);
  SharedState* shared = thread->shared;
  while (true) {
    {
      MutexLock l(shared->GetMutex());
      if (shared->ShouldStopBgThread()) {
        shared->IncBgThreadsFinished();
        if (shared->BgThreadsFinished()) {
          shared->GetCondVar()->SignalAll();
        }
        return;
      }
    }
    db_stress_env->SleepForMicroseconds(FLAGS_secondary_cache_update_interval);
    if (FLAGS_compressed_secondary_cache_size > 0) {
      Status s = compressed_secondary_cache->SetCapacity(0);
      size_t capacity;
      if (s.ok()) {
        s = compressed_secondary_cache->GetCapacity(capacity);
        assert(capacity == 0);
      }
      db_stress_env->SleepForMicroseconds(10 * 1000 * 1000);
      if (s.ok()) {
        s = compressed_secondary_cache->SetCapacity(
            FLAGS_compressed_secondary_cache_size);
      }
      if (s.ok()) {
        s = compressed_secondary_cache->GetCapacity(capacity);
        assert(capacity == FLAGS_compressed_secondary_cache_size);
      }
      if (!s.ok()) {
        fprintf(stderr, "Compressed cache Set/GetCapacity returned error: %s\n",
                s.ToString().c_str());
      }
    } else if (FLAGS_compressed_secondary_cache_ratio > 0.0) {
      if (thread->rand.OneIn(2)) {
        size_t capacity = block_cache->GetCapacity();
        size_t adjustment;
        if (FLAGS_use_write_buffer_manager && FLAGS_db_write_buffer_size > 0) {
          adjustment = (capacity - FLAGS_db_write_buffer_size);
        } else {
          adjustment = capacity;
        }
        // Lower by up to 50% of usable block cache capacity
        adjustment = (adjustment * thread->rand.Uniform(50)) / 100;
        block_cache->SetCapacity(capacity - adjustment);
        fprintf(stdout, "New cache capacity = %" ROCKSDB_PRIszt "\n",
                block_cache->GetCapacity());
        db_stress_env->SleepForMicroseconds(10 * 1000 * 1000);
        block_cache->SetCapacity(capacity);
      } else {
        Status s;
        double new_comp_cache_ratio =
            (double)thread->rand.Uniform(
                FLAGS_compressed_secondary_cache_ratio * 100) /
            100;
        fprintf(stdout, "New comp cache ratio = %f\n", new_comp_cache_ratio);

        s = UpdateTieredCache(block_cache, /*capacity*/ -1,
                              new_comp_cache_ratio);
        if (s.ok()) {
          db_stress_env->SleepForMicroseconds(10 * 1000 * 1000);
        }
        if (s.ok()) {
          s = UpdateTieredCache(block_cache, /*capacity*/ -1,
                                FLAGS_compressed_secondary_cache_ratio);
        }
        if (!s.ok()) {
          fprintf(stderr, "UpdateTieredCache returned error: %s\n",
                  s.ToString().c_str());
        }
      }
    }
  }
}
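
// For example (illustrative arithmetic only): with a 1 GB block cache and a
// 256 MB db_write_buffer_size charged to the cache, the "usable" portion is
// 768 MB, so the capacity above is temporarily lowered by up to ~384 MB
// (50% of 768 MB) and then restored after the sleep.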

void PrintKeyValue(int cf, uint64_t key, const char* value, size_t sz) {
  if (!FLAGS_verbose) {
    return;
  }
  std::string tmp;
  tmp.reserve(sz * 2 + 16);
  char buf[4];
  for (size_t i = 0; i < sz; i++) {
    snprintf(buf, 4, "%X", value[i]);
    tmp.append(buf);
  }
  auto key_str = Key(key);
  Slice key_slice = key_str;
  fprintf(stdout, "[CF %d] %s (%" PRIi64 ") == > (%" ROCKSDB_PRIszt ") %s\n",
          cf, key_slice.ToString(true).c_str(), key, sz, tmp.c_str());
}

// Note that if hot_key_alpha != 0, the key is generated based on a Zipfian
// distribution: keys are randomly scattered across [0, FLAGS_max_key]. In
// that case neither the order of the generated keys nor the active range
// related to FLAGS_active_width is guaranteed.
int64_t GenerateOneKey(ThreadState* thread, uint64_t iteration) {
  const double completed_ratio =
      static_cast<double>(iteration) / FLAGS_ops_per_thread;
  const int64_t base_key = static_cast<int64_t>(
      completed_ratio * (FLAGS_max_key - FLAGS_active_width));
  int64_t rand_seed = base_key + thread->rand.Next() % FLAGS_active_width;
  int64_t cur_key = rand_seed;
  if (FLAGS_hot_key_alpha != 0) {
    // If the Zipfian distribution alpha is set to non-zero, use Zipfian
    double float_rand =
        (static_cast<double>(thread->rand.Next() % FLAGS_max_key)) /
        FLAGS_max_key;
    cur_key = GetOneHotKeyID(float_rand, FLAGS_max_key);
  }
  return cur_key;
}

// Note that if hot_key_alpha != 0, keys are generated based on a Zipfian
// distribution and come back in random order.
// To generate keys based on a uniform distribution instead, set
// hot_key_alpha == 0. In that case the random keys are produced in
// non-decreasing order in the key array (i.e. key[i + 1] >= key[i]) and are
// constrained to a range related to FLAGS_active_width.
std::vector<int64_t> GenerateNKeys(ThreadState* thread, int num_keys,
                                   uint64_t iteration) {
  const double completed_ratio =
      static_cast<double>(iteration) / FLAGS_ops_per_thread;
  const int64_t base_key = static_cast<int64_t>(
      completed_ratio * (FLAGS_max_key - FLAGS_active_width));
  std::vector<int64_t> keys;
  keys.reserve(num_keys);
  int64_t next_key = base_key + thread->rand.Next() % FLAGS_active_width;
  keys.push_back(next_key);
  for (int i = 1; i < num_keys; ++i) {
    // Generate the key following the Zipfian distribution
    if (FLAGS_hot_key_alpha != 0) {
      double float_rand =
          (static_cast<double>(thread->rand.Next() % FLAGS_max_key)) /
          FLAGS_max_key;
      next_key = GetOneHotKeyID(float_rand, FLAGS_max_key);
    } else {
      // This may result in some duplicate keys
      next_key = next_key + thread->rand.Next() %
                                (FLAGS_active_width - (next_key - base_key));
    }
    keys.push_back(next_key);
  }
  return keys;
}
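
// Illustrative call (a sketch, not extra test logic): generating a batch of
// keys for a multi-key operation at iteration `it` might look like
//
//   std::vector<int64_t> ids = GenerateNKeys(thread, /*num_keys=*/4, it);
//   // With hot_key_alpha == 0, ids is non-decreasing within the active range.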

size_t GenerateValue(uint32_t rand, char* v, size_t max_sz) {
  size_t value_sz =
      ((rand % kRandomValueMaxFactor) + 1) * FLAGS_value_size_mult;
  assert(value_sz <= max_sz && value_sz >= sizeof(uint32_t));
  (void)max_sz;
  PutUnaligned(reinterpret_cast<uint32_t*>(v), rand);
  for (size_t i = sizeof(uint32_t); i < value_sz; i++) {
    v[i] = (char)(rand ^ i);
  }
  v[value_sz] = '\0';
  return value_sz;  // the size of the value set.
}

uint32_t GetValueBase(Slice s) {
  assert(s.size() >= sizeof(uint32_t));
  uint32_t res;
  GetUnaligned(reinterpret_cast<const uint32_t*>(s.data()), &res);
  return res;
}
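
// Illustrative round trip (a sketch; `buffer` and `base` are hypothetical
// local names): the first four bytes of every generated value encode the
// "value base", so GetValueBase() recovers the seed passed to GenerateValue().
//
//   char buffer[1024];
//   uint32_t base = 42;
//   size_t sz = GenerateValue(base, buffer, sizeof(buffer));
//   assert(GetValueBase(Slice(buffer, sz)) == base);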

AttributeGroups GenerateAttributeGroups(
    const std::vector<ColumnFamilyHandle*>& cfhs, uint32_t value_base,
    const Slice& slice) {
  WideColumns columns = GenerateWideColumns(value_base, slice);
  AttributeGroups attribute_groups;
  for (auto* cfh : cfhs) {
    attribute_groups.emplace_back(cfh, columns);
  }
  return attribute_groups;
}

WideColumns GenerateWideColumns(uint32_t value_base, const Slice& slice) {
  WideColumns columns;

  constexpr size_t max_columns = 4;
  const size_t num_columns = (value_base % max_columns) + 1;

  columns.reserve(num_columns);

  assert(slice.size() >= num_columns);

  columns.emplace_back(kDefaultWideColumnName, slice);

  for (size_t i = 1; i < num_columns; ++i) {
    const Slice name(slice.data(), i);
    const Slice value(slice.data() + i, slice.size() - i);

    columns.emplace_back(name, value);
  }

  return columns;
}
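
// Worked example (illustrative): with value_base % 4 == 2 and a generated
// value "abcdef", the entity gets the default column holding "abcdef" plus
// two extra columns, {"a" -> "bcdef"} and {"ab" -> "cdef"}; column i uses the
// first i characters of the value as its name and the rest as its value.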

WideColumns GenerateExpectedWideColumns(uint32_t value_base,
                                        const Slice& slice) {
  if (FLAGS_use_put_entity_one_in == 0 ||
      (value_base % FLAGS_use_put_entity_one_in) != 0) {
    return WideColumns{{kDefaultWideColumnName, slice}};
  }

  WideColumns columns = GenerateWideColumns(value_base, slice);

  WideColumnsHelper::SortColumns(columns);

  return columns;
}

bool VerifyWideColumns(const Slice& value, const WideColumns& columns) {
  if (value.size() < sizeof(uint32_t)) {
    return false;
  }

  const uint32_t value_base = GetValueBase(value);

  const WideColumns expected_columns =
      GenerateExpectedWideColumns(value_base, value);

  if (columns != expected_columns) {
    return false;
  }

  return true;
}

bool VerifyWideColumns(const WideColumns& columns) {
  if (!WideColumnsHelper::HasDefaultColumn(columns)) {
    return false;
  }

  const Slice& value_of_default = WideColumnsHelper::GetDefaultColumn(columns);

  return VerifyWideColumns(value_of_default, columns);
}

std::string GetNowNanos() {
  uint64_t t = db_stress_env->NowNanos();
  std::string ret;
  PutFixed64(&ret, t);
  return ret;
}

uint64_t GetWriteUnixTime(ThreadState* thread) {
  static uint64_t kPreserveSeconds =
      std::max(FLAGS_preserve_internal_time_seconds,
               FLAGS_preclude_last_level_data_seconds);
  static uint64_t kFallbackTime = std::numeric_limits<uint64_t>::max();
  int64_t write_time = 0;
  Status s = db_stress_env->GetCurrentTime(&write_time);
  uint32_t write_time_mode = thread->rand.Uniform(3);
  if (write_time_mode == 0 || !s.ok()) {
    return kFallbackTime;
  } else if (write_time_mode == 1) {
    uint64_t delta = kPreserveSeconds > 0
                         ? static_cast<uint64_t>(thread->rand.Uniform(
                               static_cast<int>(kPreserveSeconds)))
                         : 0;
    return static_cast<uint64_t>(write_time) - delta;
  } else {
    return static_cast<uint64_t>(write_time) - kPreserveSeconds;
  }
}

namespace {

class MyXXH64Checksum : public FileChecksumGenerator {
 public:
  explicit MyXXH64Checksum(bool big) : big_(big) {
    state_ = XXH64_createState();
    XXH64_reset(state_, 0);
  }

  ~MyXXH64Checksum() override { XXH64_freeState(state_); }

  void Update(const char* data, size_t n) override {
    XXH64_update(state_, data, n);
  }

  void Finalize() override {
    assert(str_.empty());
    uint64_t digest = XXH64_digest(state_);
    // Store as little endian raw bytes
    PutFixed64(&str_, digest);
    if (big_) {
      // Throw in some more data for stress testing (448 bits total)
      PutFixed64(&str_, GetSliceHash64(str_));
      PutFixed64(&str_, GetSliceHash64(str_));
      PutFixed64(&str_, GetSliceHash64(str_));
      PutFixed64(&str_, GetSliceHash64(str_));
      PutFixed64(&str_, GetSliceHash64(str_));
      PutFixed64(&str_, GetSliceHash64(str_));
    }
  }

  std::string GetChecksum() const override {
    assert(!str_.empty());
    return str_;
  }

  const char* Name() const override {
    return big_ ? "MyBigChecksum" : "MyXXH64Checksum";
  }

 private:
  bool big_;
  XXH64_state_t* state_;
  std::string str_;
};

class DbStressChecksumGenFactory : public FileChecksumGenFactory {
  std::string default_func_name_;

  std::unique_ptr<FileChecksumGenerator> CreateFromFuncName(
      const std::string& func_name) {
    std::unique_ptr<FileChecksumGenerator> rv;
    if (func_name == "FileChecksumCrc32c") {
      rv.reset(new FileChecksumGenCrc32c(FileChecksumGenContext()));
    } else if (func_name == "MyXXH64Checksum") {
      rv.reset(new MyXXH64Checksum(false /* big */));
    } else if (func_name == "MyBigChecksum") {
      rv.reset(new MyXXH64Checksum(true /* big */));
    } else {
      // Should be a recognized function when we get here
      assert(false);
    }
    return rv;
  }

 public:
  explicit DbStressChecksumGenFactory(const std::string& default_func_name)
      : default_func_name_(default_func_name) {}

  std::unique_ptr<FileChecksumGenerator> CreateFileChecksumGenerator(
      const FileChecksumGenContext& context) override {
    if (context.requested_checksum_func_name.empty()) {
      return CreateFromFuncName(default_func_name_);
    } else {
      return CreateFromFuncName(context.requested_checksum_func_name);
    }
  }

  const char* Name() const override { return "FileChecksumGenCrc32cFactory"; }
};

}  // namespace

std::shared_ptr<FileChecksumGenFactory> GetFileChecksumImpl(
    const std::string& name) {
  // Translate from friendly names to internal names
  std::string internal_name;
  if (name == "crc32c") {
    internal_name = "FileChecksumCrc32c";
  } else if (name == "xxh64") {
    internal_name = "MyXXH64Checksum";
  } else if (name == "big") {
    internal_name = "MyBigChecksum";
  } else {
    assert(name.empty() || name == "none");
    return nullptr;
  }
  return std::make_shared<DbStressChecksumGenFactory>(internal_name);
}
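
// Illustrative usage sketch (the actual option wiring lives elsewhere in this
// tool; `options` is a hypothetical rocksdb::Options instance): a friendly
// checksum name, e.g. from a --file_checksum_impl style flag, is mapped to a
// factory here and installed on the DB options.
//
//   options.file_checksum_gen_factory = GetFileChecksumImpl("xxh64");
//   // A null return (name "none" or "") leaves file checksums disabled.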

Status DeleteFilesInDirectory(const std::string& dirname) {
  std::vector<std::string> filenames;
  Status s = Env::Default()->GetChildren(dirname, &filenames);
  for (size_t i = 0; s.ok() && i < filenames.size(); ++i) {
    s = Env::Default()->DeleteFile(dirname + "/" + filenames[i]);
  }
  return s;
}

Status SaveFilesInDirectory(const std::string& src_dirname,
                            const std::string& dst_dirname) {
  std::vector<std::string> filenames;
  Status s = Env::Default()->GetChildren(src_dirname, &filenames);
  for (size_t i = 0; s.ok() && i < filenames.size(); ++i) {
    bool is_dir = false;
    s = Env::Default()->IsDirectory(src_dirname + "/" + filenames[i], &is_dir);
    if (s.ok()) {
      if (is_dir) {
        continue;
      }
      s = Env::Default()->LinkFile(src_dirname + "/" + filenames[i],
                                   dst_dirname + "/" + filenames[i]);
    }
  }
  return s;
}

Status InitUnverifiedSubdir(const std::string& dirname) {
  Status s = Env::Default()->FileExists(dirname);
  if (s.IsNotFound()) {
    return Status::OK();
  }

  const std::string kUnverifiedDirname = dirname + "/unverified";
  if (s.ok()) {
    s = Env::Default()->CreateDirIfMissing(kUnverifiedDirname);
  }
  if (s.ok()) {
    // It might already exist with some stale contents. Delete any such
    // contents.
    s = DeleteFilesInDirectory(kUnverifiedDirname);
  }
  if (s.ok()) {
    s = SaveFilesInDirectory(dirname, kUnverifiedDirname);
  }
  return s;
}

Status DestroyUnverifiedSubdir(const std::string& dirname) {
  Status s = Env::Default()->FileExists(dirname);
  if (s.IsNotFound()) {
    return Status::OK();
  }

  const std::string kUnverifiedDirname = dirname + "/unverified";
  if (s.ok()) {
    s = Env::Default()->FileExists(kUnverifiedDirname);
  }
  if (s.IsNotFound()) {
    return Status::OK();
  }

  if (s.ok()) {
    s = DeleteFilesInDirectory(kUnverifiedDirname);
  }
  if (s.ok()) {
    s = Env::Default()->DeleteDir(kUnverifiedDirname);
  }
  return s;
}
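
// Illustrative call sequence (a sketch of how these helpers are meant to be
// used when --preserve_unverified_changes is set; the exact call sites live
// in the driver code of this tool):
//
//   InitUnverifiedSubdir(FLAGS_db);                   // hardlink DB files
//   InitUnverifiedSubdir(FLAGS_expected_values_dir);  // and expected state
//   // ... run startup verification ...
//   DestroyUnverifiedSubdir(FLAGS_db);                // clean up on success
//   DestroyUnverifiedSubdir(FLAGS_expected_values_dir);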

}  // namespace ROCKSDB_NAMESPACE
#endif  // GFLAGS