From 24045549a61d755418d5e9e6b208b2bfd77093b8 Mon Sep 17 00:00:00 2001 From: Yu Zhang Date: Fri, 1 Nov 2024 17:07:34 -0700 Subject: [PATCH] Add a flag for testing standalone range deletion file (#13101) Summary: As titled. This flag controls how frequently the standalone range deletion file is tested in the file ingestion flow, for better debuggability. Pull Request resolved: https://github.com/facebook/rocksdb/pull/13101 Test Plan: Manually tested in stress test Reviewed By: hx235 Differential Revision: D65361004 Pulled By: jowlyzhang fbshipit-source-id: 21882e7cc5918aff45449acaeb33b696ab1e37f0 --- db_stress_tool/db_stress_common.h | 1 + db_stress_tool/db_stress_gflags.cc | 4 ++++ db_stress_tool/no_batched_ops_stress.cc | 5 ++--- tools/db_crashtest.py | 3 +++ 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/db_stress_tool/db_stress_common.h b/db_stress_tool/db_stress_common.h index 8ae432cc73..a56bd98cd3 100644 --- a/db_stress_tool/db_stress_common.h +++ b/db_stress_tool/db_stress_common.h @@ -417,6 +417,7 @@ DECLARE_bool(check_multiget_consistency); DECLARE_bool(check_multiget_entity_consistency); DECLARE_bool(inplace_update_support); DECLARE_uint32(uncache_aggressiveness); +DECLARE_int32(test_ingest_standalone_range_deletion_one_in); constexpr long KB = 1024; constexpr int kRandomValueMaxFactor = 3; diff --git a/db_stress_tool/db_stress_gflags.cc b/db_stress_tool/db_stress_gflags.cc index a2632dfa3e..f454148ad9 100644 --- a/db_stress_tool/db_stress_gflags.cc +++ b/db_stress_tool/db_stress_gflags.cc @@ -835,6 +835,10 @@ DEFINE_bool(use_get_entity, false, "If set, use the GetEntity API for reads"); DEFINE_bool(use_multi_get_entity, false, "If set, use the MultiGetEntity API for reads"); +DEFINE_int32(test_ingest_standalone_range_deletion_one_in, 0, "If non-zero, file ingestion flow will test standalone range " "deletion file once every N file ingestion operations."); + static bool ValidateInt32Percent(const char* flagname, int32_t value) { if (value < 
0 || value > 100) { fprintf(stderr, "Invalid value for --%s: %d, 0<= pct <=100 \n", flagname, diff --git a/db_stress_tool/no_batched_ops_stress.cc b/db_stress_tool/no_batched_ops_stress.cc index a5a61d28f1..5f6dbd6006 100644 --- a/db_stress_tool/no_batched_ops_stress.cc +++ b/db_stress_tool/no_batched_ops_stress.cc @@ -1994,9 +1994,8 @@ class NonBatchedOpsStressTest : public StressTest { // a continuous range of keys, the second one with a standalone range // deletion for all the keys. This is to exercise the standalone range // deletion file's compaction input optimization. - // TODO(yuzhangyu): make this an option. - bool test_standalone_range_deletion = - thread->rand.OneInOpt(10) && FLAGS_delrangepercent > 0; + bool test_standalone_range_deletion = thread->rand.OneInOpt( + FLAGS_test_ingest_standalone_range_deletion_one_in); std::vector external_files; const std::string sst_filename = FLAGS_db + "/." + std::to_string(thread->tid) + ".sst"; diff --git a/tools/db_crashtest.py b/tools/db_crashtest.py index ccfa878d29..38d56c49c1 100644 --- a/tools/db_crashtest.py +++ b/tools/db_crashtest.py @@ -104,6 +104,7 @@ default_params = { # Temporarily disable hash index "index_type": lambda: random.choice([0, 0, 0, 2, 2, 3]), "ingest_external_file_one_in": lambda: random.choice([1000, 1000000]), + "test_ingest_standalone_range_deletion_one_in": lambda: random.choice([0, 5, 10]), "iterpercent": 10, "lock_wal_one_in": lambda: random.choice([10000, 1000000]), "mark_for_compaction_one_file_in": lambda: 10 * random.randint(0, 1), @@ -971,6 +972,8 @@ def finalize_and_sanitize(src_params): # can cause checkpoint verification to fail. So make the two mutually exclusive. if dest_params.get("checkpoint_one_in") != 0: dest_params["lock_wal_one_in"] = 0 + if dest_params.get("ingest_external_file_one_in") == 0 or dest_params.get("delrangepercent") == 0: + dest_params["test_ingest_standalone_range_deletion_one_in"] = 0 return dest_params