Add new experimental ReadOption auto_readahead_size to db_bench and db_stress (#11729)

Summary:
Same as title

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11729

Test Plan: make crash_test -j32

Reviewed By: anand1976

Differential Revision: D48534820

Pulled By: akankshamahajan15

fbshipit-source-id: 3a2a28af98dfad164b82ddaaf9fddb94c53a652e
This commit is contained in:
Akanksha Mahajan 2023-08-24 14:58:27 -07:00 committed by Facebook GitHub Bot
parent 451316597f
commit 6353c6e2fb
5 changed files with 14 additions and 0 deletions

View File

@@ -347,6 +347,7 @@ DECLARE_uint64(initial_auto_readahead_size);
DECLARE_uint64(max_auto_readahead_size);
DECLARE_uint64(num_file_reads_for_auto_readahead);
DECLARE_bool(use_io_uring);
// Experimental: forward declaration of the auto_readahead_size gflag
// (the DEFINE_bool lives in the corresponding .cc added by this change);
// it is plumbed into ReadOptions::auto_readahead_size for scans.
DECLARE_bool(auto_readahead_size);
constexpr long KB = 1024;
constexpr int kRandomValueMaxFactor = 3;

View File

@@ -1123,4 +1123,7 @@ DEFINE_uint32(bottommost_file_compaction_delay, 0,
"Delay kBottommostFiles compaction by this amount of seconds."
"See more in option comment.");
// Experimental: when enabled, RocksDB auto-tunes the readahead size during
// iterator scans instead of using a fixed FLAGS_readahead_size. Copied into
// ReadOptions::auto_readahead_size in StressTest::OperateDb. Default off.
DEFINE_bool(auto_readahead_size, false,
"Does auto tuning of readahead_size when enabled during scans.");
#endif // GFLAGS

View File

@@ -747,6 +747,7 @@ void StressTest::OperateDb(ThreadState* thread) {
read_opts.async_io = FLAGS_async_io;
read_opts.adaptive_readahead = FLAGS_adaptive_readahead;
read_opts.readahead_size = FLAGS_readahead_size;
read_opts.auto_readahead_size = FLAGS_auto_readahead_size;
WriteOptions write_opts;
if (FLAGS_rate_limit_auto_wal_flush) {
write_opts.rate_limiter_priority = Env::IO_USER;

View File

@@ -1243,6 +1243,10 @@ DEFINE_uint64(
"num_file_reads_for_auto_readahead indicates after how many sequential "
"reads into that file internal auto prefetching should be start.");
// Experimental: mirrors ReadOptions::auto_readahead_size for db_bench.
// When true, the flag is copied into read_options_ / iterator ReadOptions
// so RocksDB auto-tunes the readahead size during scans. Default off.
DEFINE_bool(
auto_readahead_size, false,
"When set true, RocksDB does auto tuning of readahead size during Scans");
static enum ROCKSDB_NAMESPACE::CompressionType StringToCompressionType(
const char* ctype) {
assert(ctype);
@@ -3368,6 +3372,7 @@ class Benchmark {
read_options_.adaptive_readahead = FLAGS_adaptive_readahead;
read_options_.async_io = FLAGS_async_io;
read_options_.optimize_multiget_for_io = FLAGS_optimize_multiget_for_io;
read_options_.auto_readahead_size = FLAGS_auto_readahead_size;
void (Benchmark::*method)(ThreadState*) = nullptr;
void (Benchmark::*post_process_method)() = nullptr;
@@ -5754,6 +5759,7 @@ class Benchmark {
options.adaptive_readahead = FLAGS_adaptive_readahead;
options.async_io = FLAGS_async_io;
options.auto_readahead_size = FLAGS_auto_readahead_size;
Iterator* iter = db->NewIterator(options);
int64_t i = 0;
@@ -7749,6 +7755,7 @@ class Benchmark {
ro.rate_limiter_priority =
FLAGS_rate_limit_user_ops ? Env::IO_USER : Env::IO_TOTAL;
ro.readahead_size = FLAGS_readahead_size;
ro.auto_readahead_size = FLAGS_auto_readahead_size;
Status s = db->VerifyChecksum(ro);
if (!s.ok()) {
fprintf(stderr, "VerifyChecksum() failed: %s\n", s.ToString().c_str());
@@ -7764,6 +7771,7 @@ class Benchmark {
ro.rate_limiter_priority =
FLAGS_rate_limit_user_ops ? Env::IO_USER : Env::IO_TOTAL;
ro.readahead_size = FLAGS_readahead_size;
ro.auto_readahead_size = FLAGS_auto_readahead_size;
Status s = db->VerifyFileChecksums(ro);
if (!s.ok()) {
fprintf(stderr, "VerifyFileChecksums() failed: %s\n",

View File

@@ -217,6 +217,7 @@ default_params = {
"memtable_max_range_deletions": lambda: random.choice([0] * 6 + [100, 1000]),
# 0 (disable) is the default and more commonly used value.
"bottommost_file_compaction_delay": lambda: random.choice([0, 0, 0, 600, 3600, 86400]),
"auto_readahead_size" : lambda: random.choice([0, 1]),
}
_TEST_DIR_ENV_VAR = "TEST_TMPDIR"