// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <limits>
#include <string>
#include <unordered_map>

#include "db/column_family.h"
#include "db/db_impl/db_impl.h"
#include "db/db_test_util.h"
#include "options/options_helper.h"
#include "port/stack_trace.h"
#include "rocksdb/cache.h"
#include "rocksdb/convenience.h"
#include "rocksdb/rate_limiter.h"
#include "rocksdb/stats_history.h"
#include "rocksdb/utilities/options_util.h"
#include "test_util/mock_time_env.h"
#include "test_util/sync_point.h"
#include "test_util/testutil.h"
#include "util/random.h"
#include "utilities/fault_injection_fs.h"

namespace ROCKSDB_NAMESPACE {

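// DBOptionsTest collects tests for dynamically changing DB and column family
// options via SetDBOptions()/SetOptions(). The helper methods below serialize
// the mutable subset of the options into string maps so tests can compare
// what was set against what GetDBOptions()/GetOptions() later report.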
class DBOptionsTest : public DBTestBase {
 public:
  DBOptionsTest() : DBTestBase("db_options_test", /*env_do_fsync=*/true) {}

  std::unordered_map<std::string, std::string> GetMutableDBOptionsMap(
      const DBOptions& options) {
    std::string options_str;
    std::unordered_map<std::string, std::string> mutable_map;
    ConfigOptions config_options(options);
    config_options.delimiter = "; ";

    EXPECT_OK(GetStringFromMutableDBOptions(
        config_options, MutableDBOptions(options), &options_str));
    EXPECT_OK(StringToMap(options_str, &mutable_map));

    return mutable_map;
  }

  std::unordered_map<std::string, std::string> GetMutableCFOptionsMap(
      const ColumnFamilyOptions& options) {
    std::string options_str;
    ConfigOptions config_options;
    config_options.delimiter = "; ";

    std::unordered_map<std::string, std::string> mutable_map;
    EXPECT_OK(GetStringFromMutableCFOptions(
        config_options, MutableCFOptions(options), &options_str));
    EXPECT_OK(StringToMap(options_str, &mutable_map));
    return mutable_map;
  }

  std::unordered_map<std::string, std::string> GetRandomizedMutableCFOptionsMap(
      Random* rnd) {
    Options options = CurrentOptions();
    options.env = env_;
    ImmutableDBOptions db_options(options);
    test::RandomInitCFOptions(&options, options, rnd);
    auto sanitized_options = SanitizeOptions(db_options, options);
    auto opt_map = GetMutableCFOptionsMap(sanitized_options);
    delete options.compaction_filter;
    return opt_map;
  }

  std::unordered_map<std::string, std::string> GetRandomizedMutableDBOptionsMap(
      Random* rnd) {
    DBOptions db_options;
    test::RandomInitDBOptions(&db_options, rnd);
    auto sanitized_options = SanitizeOptions(dbname_, db_options);
    return GetMutableDBOptionsMap(sanitized_options);
  }
};

TEST_F(DBOptionsTest, ImmutableTrackAndVerifyWalsInManifest) {
  Options options;
  options.env = env_;
  options.track_and_verify_wals_in_manifest = true;

  ImmutableDBOptions db_options(options);
  ASSERT_TRUE(db_options.track_and_verify_wals_in_manifest);

  Reopen(options);
  ASSERT_TRUE(dbfull()->GetDBOptions().track_and_verify_wals_in_manifest);

  Status s =
      dbfull()->SetDBOptions({{"track_and_verify_wals_in_manifest", "false"}});
  ASSERT_FALSE(s.ok());
}

TEST_F(DBOptionsTest, ImmutableVerifySstUniqueIdInManifest) {
  Options options;
  options.env = env_;
  options.verify_sst_unique_id_in_manifest = true;

  ImmutableDBOptions db_options(options);
  ASSERT_TRUE(db_options.verify_sst_unique_id_in_manifest);

  Reopen(options);
  ASSERT_TRUE(dbfull()->GetDBOptions().verify_sst_unique_id_in_manifest);

  Status s =
      dbfull()->SetDBOptions({{"verify_sst_unique_id_in_manifest", "false"}});
  ASSERT_FALSE(s.ok());
}

// RocksDB lite doesn't support dynamic options.

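// Verifies that the OPTIONS file is rewritten only when a SetDBOptions() call
// actually changes an option value; no-op updates must not trigger another
// persist of the options file.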
TEST_F(DBOptionsTest, AvoidUpdatingOptions) {
  Options options;
  options.env = env_;
  options.max_background_jobs = 4;
  options.delayed_write_rate = 1024;

  Reopen(options);

  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();
  bool is_changed_stats = false;
  SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::WriteOptionsFile:PersistOptions", [&](void* /*arg*/) {
        ASSERT_FALSE(is_changed_stats);  // should only save options file once
        is_changed_stats = true;
      });
  SyncPoint::GetInstance()->EnableProcessing();

  // helper function to check the status and reset after each check
  auto is_changed = [&] {
    bool ret = is_changed_stats;
    is_changed_stats = false;
    return ret;
  };

  // without changing the value, but it's sanitized to a different value
  ASSERT_OK(dbfull()->SetDBOptions({{"bytes_per_sync", "0"}}));
  ASSERT_TRUE(is_changed());

  // without changing the value
  ASSERT_OK(dbfull()->SetDBOptions({{"max_background_jobs", "4"}}));
  ASSERT_FALSE(is_changed());

  // changing the value
  ASSERT_OK(dbfull()->SetDBOptions({{"bytes_per_sync", "123"}}));
  ASSERT_TRUE(is_changed());

  // update again
  ASSERT_OK(dbfull()->SetDBOptions({{"bytes_per_sync", "123"}}));
  ASSERT_FALSE(is_changed());

  // without changing a default value
  ASSERT_OK(dbfull()->SetDBOptions({{"strict_bytes_per_sync", "false"}}));
  ASSERT_FALSE(is_changed());

  // now change
  ASSERT_OK(dbfull()->SetDBOptions({{"strict_bytes_per_sync", "true"}}));
  ASSERT_TRUE(is_changed());

  // multiple values without change
  ASSERT_OK(dbfull()->SetDBOptions(
      {{"max_total_wal_size", "0"}, {"stats_dump_period_sec", "600"}}));
  ASSERT_FALSE(is_changed());

  // multiple values with change
  ASSERT_OK(dbfull()->SetDBOptions(
      {{"max_open_files", "100"}, {"stats_dump_period_sec", "600"}}));
  ASSERT_TRUE(is_changed());
}

TEST_F(DBOptionsTest, GetLatestDBOptions) {
  // GetOptions should be able to get latest option changed by SetOptions.
  Options options;
  options.create_if_missing = true;
  options.env = env_;
  Random rnd(228);
  Reopen(options);
  auto new_options = GetRandomizedMutableDBOptionsMap(&rnd);
  ASSERT_OK(dbfull()->SetDBOptions(new_options));
  ASSERT_EQ(new_options, GetMutableDBOptionsMap(dbfull()->GetDBOptions()));
}

TEST_F(DBOptionsTest, GetLatestCFOptions) {
  // GetOptions should be able to get latest option changed by SetOptions.
  Options options;
  options.create_if_missing = true;
  options.env = env_;
  Random rnd(228);
  Reopen(options);
  CreateColumnFamilies({"foo"}, options);
  ReopenWithColumnFamilies({"default", "foo"}, options);
  auto options_default = GetRandomizedMutableCFOptionsMap(&rnd);
  auto options_foo = GetRandomizedMutableCFOptionsMap(&rnd);
  ASSERT_OK(dbfull()->SetOptions(handles_[0], options_default));
  ASSERT_OK(dbfull()->SetOptions(handles_[1], options_foo));
  ASSERT_EQ(options_default,
            GetMutableCFOptionsMap(dbfull()->GetOptions(handles_[0])));
  ASSERT_EQ(options_foo,
            GetMutableCFOptionsMap(dbfull()->GetOptions(handles_[1])));
}

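// Exercises SetOptions() on mutable BlockBasedTableOptions (block_size,
// block_restart_interval) through the "table_factory." prefix, and checks
// that any batch containing an immutable or unknown option is rejected
// without changing the already-applied values.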
TEST_F(DBOptionsTest, SetMutableTableOptions) {
  Options options;
  options.create_if_missing = true;
  options.env = env_;
  options.blob_file_size = 16384;
  BlockBasedTableOptions bbto;
  bbto.no_block_cache = true;
  bbto.block_size = 8192;
  bbto.block_restart_interval = 7;

  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
  Reopen(options);

  ColumnFamilyHandle* cfh = dbfull()->DefaultColumnFamily();
  Options c_opts = dbfull()->GetOptions(cfh);

  const auto* c_bbto =
      c_opts.table_factory->GetOptions<BlockBasedTableOptions>();
  ASSERT_NE(c_bbto, nullptr);
  ASSERT_EQ(c_opts.blob_file_size, 16384);
  ASSERT_EQ(c_bbto->no_block_cache, true);
  ASSERT_EQ(c_bbto->block_size, 8192);
  ASSERT_EQ(c_bbto->block_restart_interval, 7);
  ASSERT_OK(dbfull()->SetOptions(
      cfh, {{"table_factory.block_size", "16384"},
            {"table_factory.block_restart_interval", "11"}}));
  ASSERT_EQ(c_bbto->block_size, 16384);
  ASSERT_EQ(c_bbto->block_restart_interval, 11);

  // Now set an option that is not mutable - options should not change
  ASSERT_NOK(
      dbfull()->SetOptions(cfh, {{"table_factory.no_block_cache", "false"}}));
  ASSERT_EQ(c_bbto->no_block_cache, true);
  ASSERT_EQ(c_bbto->block_size, 16384);
  ASSERT_EQ(c_bbto->block_restart_interval, 11);

  // Set some that are mutable and some that are not - options should not
  // change
  ASSERT_NOK(dbfull()->SetOptions(
      cfh, {{"table_factory.no_block_cache", "false"},
            {"table_factory.block_size", "8192"},
            {"table_factory.block_restart_interval", "7"}}));
  ASSERT_EQ(c_bbto->no_block_cache, true);
  ASSERT_EQ(c_bbto->block_size, 16384);
  ASSERT_EQ(c_bbto->block_restart_interval, 11);

  // Set some that are mutable and some that do not exist - options should not
  // change
  ASSERT_NOK(dbfull()->SetOptions(
      cfh, {{"table_factory.block_size", "8192"},
            {"table_factory.does_not_exist", "true"},
            {"table_factory.block_restart_interval", "7"}}));
  ASSERT_EQ(c_bbto->no_block_cache, true);
  ASSERT_EQ(c_bbto->block_size, 16384);
  ASSERT_EQ(c_bbto->block_restart_interval, 11);

  // Trying to change the table factory fails
  ASSERT_NOK(dbfull()->SetOptions(
      cfh, {{"table_factory", TableFactory::kPlainTableName()}}));

  // Set some on the table and some on the Column Family
  ASSERT_OK(dbfull()->SetOptions(
      cfh, {{"table_factory.block_size", "16384"},
            {"blob_file_size", "32768"},
            {"table_factory.block_restart_interval", "13"}}));
  c_opts = dbfull()->GetOptions(cfh);
  ASSERT_EQ(c_opts.blob_file_size, 32768);
  ASSERT_EQ(c_bbto->block_size, 16384);
  ASSERT_EQ(c_bbto->block_restart_interval, 13);
  // Set some on the table and a bad one on the ColumnFamily - options should
  // not change
  ASSERT_NOK(dbfull()->SetOptions(
      cfh, {{"table_factory.block_size", "1024"},
            {"no_such_option", "32768"},
            {"table_factory.block_restart_interval", "7"}}));
  ASSERT_EQ(c_bbto->block_size, 16384);
  ASSERT_EQ(c_bbto->block_restart_interval, 13);
}

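// Defines a custom SkipListFactory subclass whose name cannot be resolved by
// MemTableRepFactory::CreateFromString(), then checks that a DB using it can
// still change options and that newly created column families keep reporting
// the custom factory name through GetDescriptor().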
TEST_F(DBOptionsTest, SetWithCustomMemTableFactory) {
  class DummySkipListFactory : public SkipListFactory {
   public:
    static const char* kClassName() { return "DummySkipListFactory"; }
    const char* Name() const override { return kClassName(); }
    explicit DummySkipListFactory() : SkipListFactory(2) {}
  };
  {
    // Verify the DummySkipList cannot be created
    ConfigOptions config_options;
    config_options.ignore_unsupported_options = false;
    std::unique_ptr<MemTableRepFactory> factory;
    ASSERT_NOK(MemTableRepFactory::CreateFromString(
        config_options, DummySkipListFactory::kClassName(), &factory));
  }
  Options options;
  options.create_if_missing = true;
  // Try with fail_if_options_file_error=false/true to update the options
  for (bool on_error : {false, true}) {
    options.fail_if_options_file_error = on_error;
    options.env = env_;
    options.disable_auto_compactions = false;

    options.memtable_factory.reset(new DummySkipListFactory());
    Reopen(options);

    ColumnFamilyHandle* cfh = dbfull()->DefaultColumnFamily();
    ASSERT_OK(
        dbfull()->SetOptions(cfh, {{"disable_auto_compactions", "true"}}));
    ColumnFamilyDescriptor cfd;
    ASSERT_OK(cfh->GetDescriptor(&cfd));
    ASSERT_STREQ(cfd.options.memtable_factory->Name(),
                 DummySkipListFactory::kClassName());
    ColumnFamilyHandle* test = nullptr;
    ASSERT_OK(dbfull()->CreateColumnFamily(options, "test", &test));
    ASSERT_OK(test->GetDescriptor(&cfd));
    ASSERT_STREQ(cfd.options.memtable_factory->Name(),
                 DummySkipListFactory::kClassName());

    ASSERT_OK(dbfull()->DropColumnFamily(test));
    delete test;
  }
}

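// Counts hits on the "WritableFileWriter::RangeSync:0" sync point while
// compacting roughly 40MB of data: with bytes_per_sync = 1MB the range-sync
// count should be close to 40, and after raising it to 8MB the count should
// drop accordingly.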
TEST_F(DBOptionsTest, SetBytesPerSync) {
  const size_t kValueSize = 1024 * 1024;  // 1MB
  Options options;
  options.create_if_missing = true;
  options.bytes_per_sync = 1024 * 1024;
  options.use_direct_reads = false;
  options.write_buffer_size = 400 * kValueSize;
  options.disable_auto_compactions = true;
  options.compression = kNoCompression;
  options.env = env_;
  Reopen(options);
  int counter = 0;
  int low_bytes_per_sync = 0;
  int i = 0;
  const std::string kValue(kValueSize, 'v');
  ASSERT_EQ(options.bytes_per_sync, dbfull()->GetDBOptions().bytes_per_sync);
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "WritableFileWriter::RangeSync:0", [&](void* /*arg*/) { counter++; });

  WriteOptions write_opts;
  // should sync approximately 40MB/1MB ~= 40 times.
  for (i = 0; i < 40; i++) {
    ASSERT_OK(Put(Key(i), kValue, write_opts));
  }
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  low_bytes_per_sync = counter;
  ASSERT_GT(low_bytes_per_sync, 35);
  ASSERT_LT(low_bytes_per_sync, 45);

  counter = 0;
  // 8388608 = 8 * 1024 * 1024
  ASSERT_OK(dbfull()->SetDBOptions({{"bytes_per_sync", "8388608"}}));
  ASSERT_EQ(8388608, dbfull()->GetDBOptions().bytes_per_sync);
  // should sync approximately 40MB*2/8MB ~= 10 times.
  // data will be 40*2MB because of previous Puts too.
  for (i = 0; i < 40; i++) {
    ASSERT_OK(Put(Key(i), kValue, write_opts));
  }
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  ASSERT_GT(counter, 5);
  ASSERT_LT(counter, 15);

  // Redundant assert. But leaving it here just to get the point across that
  // low_bytes_per_sync > counter.
  ASSERT_GT(low_bytes_per_sync, counter);
}

TEST_F(DBOptionsTest, SetWalBytesPerSync) {
  const size_t kValueSize = 1024 * 1024 * 3;
  Options options;
  options.create_if_missing = true;
  options.wal_bytes_per_sync = 512;
  options.write_buffer_size = 100 * kValueSize;
  options.disable_auto_compactions = true;
  options.compression = kNoCompression;
  options.env = env_;
  Reopen(options);
  ASSERT_EQ(512, dbfull()->GetDBOptions().wal_bytes_per_sync);
  std::atomic_int counter{0};
  int low_bytes_per_sync = 0;
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "WritableFileWriter::RangeSync:0",
      [&](void* /*arg*/) { counter.fetch_add(1); });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  const std::string kValue(kValueSize, 'v');
  int i = 0;
  for (; i < 10; i++) {
    ASSERT_OK(Put(Key(i), kValue));
  }
  // Do not flush. If we flush here, SwitchWAL will reuse the old WAL file
  // since it's empty and will not get the new wal_bytes_per_sync value.
  low_bytes_per_sync = counter;
  // 5242880 = 1024 * 1024 * 5
  ASSERT_OK(dbfull()->SetDBOptions({{"wal_bytes_per_sync", "5242880"}}));
  ASSERT_EQ(5242880, dbfull()->GetDBOptions().wal_bytes_per_sync);
  counter = 0;
  i = 0;
  for (; i < 10; i++) {
    ASSERT_OK(Put(Key(i), kValue));
  }
  ASSERT_GT(counter, 0);
  ASSERT_GT(low_bytes_per_sync, 0);
  ASSERT_GT(low_bytes_per_sync, counter);
}

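// Hooks "WritableFileWriter::WritableFileWriter:0" to observe the buffer size
// each new writable file is created with, and verifies that every file picks
// up the current writable_file_max_buffer_size both before and after it is
// lowered to 512KB via SetDBOptions().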
TEST_F(DBOptionsTest, WritableFileMaxBufferSize) {
  Options options;
  options.create_if_missing = true;
  options.writable_file_max_buffer_size = 1024 * 1024;
  options.level0_file_num_compaction_trigger = 3;
  options.max_manifest_file_size = 1;
  options.env = env_;
  int buffer_size = 1024 * 1024;
  Reopen(options);
  ASSERT_EQ(buffer_size,
            dbfull()->GetDBOptions().writable_file_max_buffer_size);

  std::atomic<int> match_cnt(0);
  std::atomic<int> unmatch_cnt(0);
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "WritableFileWriter::WritableFileWriter:0", [&](void* arg) {
        int value = static_cast<int>(reinterpret_cast<uintptr_t>(arg));
        if (value == buffer_size) {
          match_cnt++;
        } else {
          unmatch_cnt++;
        }
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  int i = 0;
  for (; i < 3; i++) {
    ASSERT_OK(Put("foo", std::to_string(i)));
    ASSERT_OK(Put("bar", std::to_string(i)));
    ASSERT_OK(Flush());
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(unmatch_cnt, 0);
  ASSERT_GE(match_cnt, 11);

  ASSERT_OK(
      dbfull()->SetDBOptions({{"writable_file_max_buffer_size", "524288"}}));
  buffer_size = 512 * 1024;
  match_cnt = 0;
  unmatch_cnt = 0;  // SetDBOptions() will create a WritableFileWriter

  ASSERT_EQ(buffer_size,
            dbfull()->GetDBOptions().writable_file_max_buffer_size);
  i = 0;
  for (; i < 3; i++) {
    ASSERT_OK(Put("foo", std::to_string(i)));
    ASSERT_OK(Put("bar", std::to_string(i)));
    ASSERT_OK(Flush());
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(unmatch_cnt, 0);
  ASSERT_GE(match_cnt, 11);
}

TEST_F(DBOptionsTest, SetOptionsAndReopen) {
  Random rnd(1044);
  auto rand_opts = GetRandomizedMutableCFOptionsMap(&rnd);
  ASSERT_OK(dbfull()->SetOptions(rand_opts));
  // Verify that the DB can be reopened after setting options.
  Options options;
  options.env = env_;
  ASSERT_OK(TryReopen(options));
}

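// Covers a 2x4 matrix: auto compaction is re-enabled either through
// SetOptions({{"disable_auto_compactions", "false"}}) or through
// EnableAutoCompaction() (method_type), while one of four write-stall
// triggers is armed (option_type): level0 stop, level0 slowdown, hard pending
// compaction bytes, or soft pending compaction bytes. Sync points hold the
// background compaction so the resulting stop/delay state can be asserted
// before the compaction clears it.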
TEST_F(DBOptionsTest, EnableAutoCompactionAndTriggerStall) {
  const std::string kValue(1024, 'v');
  for (int method_type = 0; method_type < 2; method_type++) {
    for (int option_type = 0; option_type < 4; option_type++) {
      Options options;
      options.create_if_missing = true;
      options.disable_auto_compactions = true;
      options.write_buffer_size = 1024 * 1024 * 10;
      options.compression = CompressionType::kNoCompression;
      options.level0_file_num_compaction_trigger = 1;
      options.level0_stop_writes_trigger = std::numeric_limits<int>::max();
      options.level0_slowdown_writes_trigger = std::numeric_limits<int>::max();
      options.hard_pending_compaction_bytes_limit =
          std::numeric_limits<uint64_t>::max();
      options.soft_pending_compaction_bytes_limit =
          std::numeric_limits<uint64_t>::max();
      options.env = env_;

      DestroyAndReopen(options);
      int i = 0;
      for (; i < 1024; i++) {
        ASSERT_OK(Put(Key(i), kValue));
      }
      ASSERT_OK(Flush());
      for (; i < 1024 * 2; i++) {
        ASSERT_OK(Put(Key(i), kValue));
      }
      ASSERT_OK(Flush());
      ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
      ASSERT_EQ(2, NumTableFilesAtLevel(0));
      uint64_t l0_size = SizeAtLevel(0);

      switch (option_type) {
        case 0:
          // test with level0_stop_writes_trigger
          options.level0_stop_writes_trigger = 2;
          options.level0_slowdown_writes_trigger = 2;
          break;
        case 1:
          options.level0_slowdown_writes_trigger = 2;
          break;
        case 2:
          options.hard_pending_compaction_bytes_limit = l0_size;
          options.soft_pending_compaction_bytes_limit = l0_size;
          break;
        case 3:
          options.soft_pending_compaction_bytes_limit = l0_size;
          break;
      }
      Reopen(options);
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
      ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
      ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay());

      SyncPoint::GetInstance()->LoadDependency(
          {{"DBOptionsTest::EnableAutoCompactionAndTriggerStall:1",
            "BackgroundCallCompaction:0"},
           {"DBImpl::BackgroundCompaction():BeforePickCompaction",
            "DBOptionsTest::EnableAutoCompactionAndTriggerStall:2"},
           {"DBOptionsTest::EnableAutoCompactionAndTriggerStall:3",
            "DBImpl::BackgroundCompaction():AfterPickCompaction"}});
      // Block background compaction.
      SyncPoint::GetInstance()->EnableProcessing();

      switch (method_type) {
        case 0:
          ASSERT_OK(
              dbfull()->SetOptions({{"disable_auto_compactions", "false"}}));
          break;
        case 1:
          ASSERT_OK(dbfull()->EnableAutoCompaction(
              {dbfull()->DefaultColumnFamily()}));
          break;
      }
      TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:1");
      // Wait for the stall condition to be recalculated.
      TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:2");

      switch (option_type) {
        case 0:
          ASSERT_TRUE(dbfull()->TEST_write_controler().IsStopped());
          break;
        case 1:
          ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
          ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
          break;
        case 2:
          ASSERT_TRUE(dbfull()->TEST_write_controler().IsStopped());
          break;
        case 3:
          ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
          ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
          break;
      }
      TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:3");

      // Background compaction executed.
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
      ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
      ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay());
    }
  }
}

TEST_F(DBOptionsTest, SetOptionsMayTriggerCompaction) {
  Options options;
  options.level_compaction_dynamic_level_bytes = false;
  options.create_if_missing = true;
  options.level0_file_num_compaction_trigger = 1000;
  options.env = env_;
  Reopen(options);
  for (int i = 0; i < 3; i++) {
    // Need to insert two keys to avoid trivial move.
    ASSERT_OK(Put("foo", std::to_string(i)));
    ASSERT_OK(Put("bar", std::to_string(i)));
    ASSERT_OK(Flush());
  }
  ASSERT_EQ("3", FilesPerLevel());
  ASSERT_OK(
      dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "3"}}));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ("0,1", FilesPerLevel());
}

TEST_F(DBOptionsTest, SetBackgroundCompactionThreads) {
  Options options;
  options.create_if_missing = true;
  options.max_background_compactions = 1;  // default value
  options.env = env_;
  Reopen(options);
  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
  ASSERT_OK(dbfull()->SetDBOptions({{"max_background_compactions", "3"}}));
  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
  auto stop_token = dbfull()->TEST_write_controler().GetStopToken();
  ASSERT_EQ(3, dbfull()->TEST_BGCompactionsAllowed());
}

TEST_F(DBOptionsTest, SetBackgroundFlushThreads) {
  Options options;
  options.create_if_missing = true;
  options.max_background_flushes = 1;
  options.env = env_;
  Reopen(options);
  ASSERT_EQ(1, dbfull()->TEST_BGFlushesAllowed());
  ASSERT_EQ(1, env_->GetBackgroundThreads(Env::Priority::HIGH));
  ASSERT_OK(dbfull()->SetDBOptions({{"max_background_flushes", "3"}}));
  ASSERT_EQ(3, env_->GetBackgroundThreads(Env::Priority::HIGH));
  ASSERT_EQ(3, dbfull()->TEST_BGFlushesAllowed());
}

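// Checks the split of max_background_jobs into flush and compaction slots as
// asserted below: flushes get max_background_jobs / 4 slots and, once the
// write controller is stopped, compactions get three times that.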
TEST_F(DBOptionsTest, SetBackgroundJobs) {
  Options options;
  options.create_if_missing = true;
  options.max_background_jobs = 8;
  options.env = env_;
  Reopen(options);

  for (int i = 0; i < 2; ++i) {
    if (i > 0) {
      options.max_background_jobs = 12;
      ASSERT_OK(dbfull()->SetDBOptions(
          {{"max_background_jobs",
            std::to_string(options.max_background_jobs)}}));
    }

    const int expected_max_flushes = options.max_background_jobs / 4;

    ASSERT_EQ(expected_max_flushes, dbfull()->TEST_BGFlushesAllowed());
    ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());

    auto stop_token = dbfull()->TEST_write_controler().GetStopToken();

    const int expected_max_compactions = 3 * expected_max_flushes;

    ASSERT_EQ(expected_max_flushes, dbfull()->TEST_BGFlushesAllowed());
    ASSERT_EQ(expected_max_compactions, dbfull()->TEST_BGCompactionsAllowed());

    ASSERT_EQ(expected_max_flushes,
              env_->GetBackgroundThreads(Env::Priority::HIGH));
    ASSERT_EQ(expected_max_compactions,
              env_->GetBackgroundThreads(Env::Priority::LOW));
  }
}

TEST_F(DBOptionsTest, AvoidFlushDuringShutdown) {
  Options options;
  options.create_if_missing = true;
  options.disable_auto_compactions = true;
  options.env = env_;
  WriteOptions write_without_wal;
  write_without_wal.disableWAL = true;

  ASSERT_FALSE(options.avoid_flush_during_shutdown);
  DestroyAndReopen(options);
  ASSERT_OK(Put("foo", "v1", write_without_wal));
  Reopen(options);
  ASSERT_EQ("v1", Get("foo"));
  ASSERT_EQ("1", FilesPerLevel());

  DestroyAndReopen(options);
  ASSERT_OK(Put("foo", "v2", write_without_wal));
  ASSERT_OK(dbfull()->SetDBOptions({{"avoid_flush_during_shutdown", "true"}}));
  Reopen(options);
  ASSERT_EQ("NOT_FOUND", Get("foo"));
  ASSERT_EQ("", FilesPerLevel());
}

TEST_F(DBOptionsTest, SetDelayedWriteRateOption) {
  Options options;
  options.create_if_missing = true;
  options.delayed_write_rate = 2 * 1024U * 1024U;
  options.env = env_;
  Reopen(options);
  ASSERT_EQ(2 * 1024U * 1024U,
            dbfull()->TEST_write_controler().max_delayed_write_rate());

  ASSERT_OK(dbfull()->SetDBOptions({{"delayed_write_rate", "20000"}}));
  ASSERT_EQ(20000, dbfull()->TEST_write_controler().max_delayed_write_rate());
}

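// Writes to four column families, then shrinks max_total_wal_size to a tiny
// value via SetDBOptions() and expects every column family's memtable to be
// flushed, leaving one L0 file per column family.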
TEST_F(DBOptionsTest, MaxTotalWalSizeChange) {
  Random rnd(1044);
  const auto value_size = size_t(1024);
  std::string value = rnd.RandomString(value_size);

  Options options;
  options.create_if_missing = true;
  options.env = env_;
  CreateColumnFamilies({"1", "2", "3"}, options);
  ReopenWithColumnFamilies({"default", "1", "2", "3"}, options);

  WriteOptions write_options;

  const int key_count = 100;
  for (int i = 0; i < key_count; ++i) {
    for (size_t cf = 0; cf < handles_.size(); ++cf) {
      ASSERT_OK(Put(static_cast<int>(cf), Key(i), value));
    }
  }
  ASSERT_OK(dbfull()->SetDBOptions({{"max_total_wal_size", "10"}}));

  for (size_t cf = 0; cf < handles_.size(); ++cf) {
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[cf]));
    ASSERT_EQ("1", FilesPerLevel(static_cast<int>(cf)));
  }
}

TEST_F(DBOptionsTest, SetStatsDumpPeriodSec) {
  Options options;
  options.create_if_missing = true;
  options.stats_dump_period_sec = 5;
  options.env = env_;
  Reopen(options);
  ASSERT_EQ(5u, dbfull()->GetDBOptions().stats_dump_period_sec);

  for (int i = 0; i < 20; i++) {
    unsigned int num = rand() % 5000 + 1;
    ASSERT_OK(dbfull()->SetDBOptions(
        {{"stats_dump_period_sec", std::to_string(num)}}));
    ASSERT_EQ(num, dbfull()->GetDBOptions().stats_dump_period_sec);
  }
  Close();
}

TEST_F(DBOptionsTest, SetStatsDumpPeriodSecRace) {
  // This is a mini-stress test looking for inconsistency between the reported
  // state of the option and the behavior in effect for the DB, after the last
  // modification to that option (indefinite inconsistency).
  std::vector<std::thread> threads;
  for (int i = 0; i < 12; i++) {
    threads.emplace_back([this, i]() {
      ASSERT_OK(dbfull()->SetDBOptions(
          {{"stats_dump_period_sec", i % 2 ? "100" : "0"}}));
    });
  }

  for (auto& t : threads) {
    t.join();
  }

  bool stats_dump_set = dbfull()->GetDBOptions().stats_dump_period_sec > 0;
  bool task_enabled = dbfull()->TEST_GetPeriodicTaskScheduler().TEST_HasTask(
      PeriodicTaskType::kDumpStats);

  ASSERT_EQ(stats_dump_set, task_enabled);
}

TEST_F(DBOptionsTest, SetOptionsAndFileRace) {
  // This is a mini-stress test looking for inconsistency between the reported
  // state of the option and what is persisted in the options file, after the
  // last modification to that option (indefinite inconsistency).
  std::vector<std::thread> threads;
  for (int i = 0; i < 12; i++) {
    threads.emplace_back([this, i]() {
      ASSERT_OK(dbfull()->SetOptions({{"ttl", std::to_string(i * 100)}}));
    });
  }

  for (auto& t : threads) {
    t.join();
  }

  auto setting_in_mem = dbfull()->GetOptions().ttl;

  std::vector<ColumnFamilyDescriptor> cf_descs;
  DBOptions db_options;
  ConfigOptions cfg;
  cfg.env = env_;
  ASSERT_OK(LoadLatestOptions(cfg, dbname_, &db_options, &cf_descs, nullptr));
  ASSERT_EQ(cf_descs.size(), 1);
  ASSERT_EQ(setting_in_mem, cf_descs[0].options.ttl);
}

TEST_F(DBOptionsTest, SetOptionsStatsPersistPeriodSec) {
  Options options;
  options.create_if_missing = true;
  options.stats_persist_period_sec = 5;
  options.env = env_;
  Reopen(options);
  ASSERT_EQ(5u, dbfull()->GetDBOptions().stats_persist_period_sec);

  ASSERT_OK(dbfull()->SetDBOptions({{"stats_persist_period_sec", "12345"}}));
  ASSERT_EQ(12345u, dbfull()->GetDBOptions().stats_persist_period_sec);
  ASSERT_NOK(dbfull()->SetDBOptions({{"stats_persist_period_sec", "abcde"}}));
  ASSERT_EQ(12345u, dbfull()->GetDBOptions().stats_persist_period_sec);
}

static void assert_candidate_files_empty(DBImpl* dbfull, const bool empty) {
  dbfull->TEST_LockMutex();
  JobContext job_context(0);
  dbfull->FindObsoleteFiles(&job_context, false);
  ASSERT_EQ(empty, job_context.full_scan_candidate_files.empty());
  dbfull->TEST_UnlockMutex();
  if (job_context.HaveSomethingToDelete()) {
    // fulfill the contract of FindObsoleteFiles by calling PurgeObsoleteFiles
    // afterwards; otherwise the test may hang on shutdown
    dbfull->PurgeObsoleteFiles(job_context);
  }
  job_context.Clean();
}

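// Uses mock time (SetTimeElapseOnlySleepOnReopen + MockSleepForMicroseconds)
// to verify that FindObsoleteFiles only performs a full scan for candidate
// files once delete_obsolete_files_period_micros has elapsed since the last
// full scan, and immediately when the period is set to 0.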
TEST_F(DBOptionsTest, DeleteObsoleteFilesPeriodChange) {
  Options options;
  options.env = env_;
  SetTimeElapseOnlySleepOnReopen(&options);
  options.create_if_missing = true;
  ASSERT_OK(TryReopen(options));

  // Verify that candidate files set is empty when no full scan requested.
  assert_candidate_files_empty(dbfull(), true);

  ASSERT_OK(
      dbfull()->SetDBOptions({{"delete_obsolete_files_period_micros", "0"}}));

  // After delete_obsolete_files_period_micros is updated to 0, the next call
  // to FindObsoleteFiles should make a full scan.
  assert_candidate_files_empty(dbfull(), false);

  ASSERT_OK(
      dbfull()->SetDBOptions({{"delete_obsolete_files_period_micros", "20"}}));

  assert_candidate_files_empty(dbfull(), true);

  env_->MockSleepForMicroseconds(20);
  assert_candidate_files_empty(dbfull(), true);

  env_->MockSleepForMicroseconds(1);
  assert_candidate_files_empty(dbfull(), false);

  Close();
}

TEST_F(DBOptionsTest, MaxOpenFilesChange) {
  SpecialEnv env(env_);
  Options options;
  options.env = CurrentOptions().env;
  options.max_open_files = -1;

  Reopen(options);

  Cache* tc = dbfull()->TEST_table_cache();

  ASSERT_EQ(-1, dbfull()->GetDBOptions().max_open_files);
  ASSERT_LT(2000, tc->GetCapacity());
  ASSERT_OK(dbfull()->SetDBOptions({{"max_open_files", "1024"}}));
  ASSERT_EQ(1024, dbfull()->GetDBOptions().max_open_files);
  // examine the table cache (actual size should be 1014)
  ASSERT_GT(1500, tc->GetCapacity());
  Close();
}

TEST_F(DBOptionsTest, SanitizeDelayedWriteRate) {
  Options options;
  options.env = CurrentOptions().env;
  options.delayed_write_rate = 0;
  Reopen(options);
  ASSERT_EQ(16 * 1024 * 1024, dbfull()->GetDBOptions().delayed_write_rate);

  options.rate_limiter.reset(NewGenericRateLimiter(31 * 1024 * 1024));
  Reopen(options);
  ASSERT_EQ(31 * 1024 * 1024, dbfull()->GetDBOptions().delayed_write_rate);
}

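// Documents the sanitization of ttl vs. periodic_compaction_seconds under
// universal compaction, as asserted below: when ttl is non-zero,
// periodic_compaction_seconds is raised to ttl if it was 0 and capped to ttl
// if it was larger; when ttl is 0, both options keep their configured values.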
TEST_F(DBOptionsTest, SanitizeUniversalTTLCompaction) {
|
|
|
|
Options options;
|
2020-10-27 17:31:34 +00:00
|
|
|
options.env = CurrentOptions().env;
|
2019-11-23 06:12:09 +00:00
|
|
|
options.compaction_style = kCompactionStyleUniversal;
|
|
|
|
|
|
|
|
options.ttl = 0;
|
|
|
|
options.periodic_compaction_seconds = 0;
|
|
|
|
Reopen(options);
|
|
|
|
ASSERT_EQ(0, dbfull()->GetOptions().ttl);
|
|
|
|
ASSERT_EQ(0, dbfull()->GetOptions().periodic_compaction_seconds);
|
|
|
|
|
|
|
|
options.ttl = 0;
|
|
|
|
options.periodic_compaction_seconds = 100;
|
|
|
|
Reopen(options);
|
|
|
|
ASSERT_EQ(0, dbfull()->GetOptions().ttl);
|
|
|
|
ASSERT_EQ(100, dbfull()->GetOptions().periodic_compaction_seconds);
|
|
|
|
|
|
|
|
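// With ttl set and periodic_compaction_seconds unset, the latter is
// sanitized up to the ttl value.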
options.ttl = 100;
|
|
|
|
options.periodic_compaction_seconds = 0;
|
|
|
|
Reopen(options);
|
|
|
|
ASSERT_EQ(100, dbfull()->GetOptions().ttl);
|
|
|
|
ASSERT_EQ(100, dbfull()->GetOptions().periodic_compaction_seconds);
|
|
|
|
|
|
|
|
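// A periodic_compaction_seconds larger than ttl is capped down to ttl.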
options.ttl = 100;
|
|
|
|
options.periodic_compaction_seconds = 500;
|
|
|
|
Reopen(options);
|
|
|
|
ASSERT_EQ(100, dbfull()->GetOptions().ttl);
|
|
|
|
ASSERT_EQ(100, dbfull()->GetOptions().periodic_compaction_seconds);
|
|
|
|
}
|
|
|
|
|
2019-11-26 01:11:26 +00:00
|
|
|
TEST_F(DBOptionsTest, SanitizeTtlDefault) {
|
|
|
|
Options options;
|
2020-10-27 17:31:34 +00:00
|
|
|
options.env = CurrentOptions().env;
|
2019-11-26 01:11:26 +00:00
|
|
|
Reopen(options);
|
|
|
|
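// With ttl left at its default, it is sanitized to 30 days.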
ASSERT_EQ(30 * 24 * 60 * 60, dbfull()->GetOptions().ttl);
|
|
|
|
|
|
|
|
options.compaction_style = kCompactionStyleLevel;
|
|
|
|
options.ttl = 0;
|
|
|
|
Reopen(options);
|
|
|
|
ASSERT_EQ(0, dbfull()->GetOptions().ttl);
|
|
|
|
|
|
|
|
options.ttl = 100;
|
|
|
|
Reopen(options);
|
|
|
|
ASSERT_EQ(100, dbfull()->GetOptions().ttl);
|
|
|
|
}
|
|
|
|
|
2019-10-31 17:59:13 +00:00
|
|
|
TEST_F(DBOptionsTest, SanitizeFIFOPeriodicCompaction) {
|
|
|
|
Options options;
|
|
|
|
options.compaction_style = kCompactionStyleFIFO;
|
2020-10-27 17:31:34 +00:00
|
|
|
options.env = CurrentOptions().env;
|
2023-07-05 21:40:45 +00:00
|
|
|
// Default value allows RocksDB to set ttl to 30 days.
|
|
|
|
ASSERT_EQ(30 * 24 * 60 * 60, dbfull()->GetOptions().ttl);
|
|
|
|
|
|
|
|
// Disable
|
2019-10-31 17:59:13 +00:00
|
|
|
options.ttl = 0;
|
|
|
|
Reopen(options);
|
2023-07-05 21:40:45 +00:00
|
|
|
ASSERT_EQ(0, dbfull()->GetOptions().ttl);
|
2019-10-31 17:59:13 +00:00
|
|
|
|
|
|
|
options.ttl = 100;
|
|
|
|
Reopen(options);
|
|
|
|
ASSERT_EQ(100, dbfull()->GetOptions().ttl);
|
|
|
|
|
|
|
|
options.ttl = 100 * 24 * 60 * 60;
|
|
|
|
Reopen(options);
|
|
|
|
ASSERT_EQ(100 * 24 * 60 * 60, dbfull()->GetOptions().ttl);
|
|
|
|
|
2023-07-05 21:40:45 +00:00
|
|
|
// periodic_compaction_seconds should have no effect
|
|
|
|
// on FIFO compaction.
|
2019-10-31 17:59:13 +00:00
|
|
|
options.ttl = 500;
|
|
|
|
options.periodic_compaction_seconds = 300;
|
|
|
|
Reopen(options);
|
2023-07-05 21:40:45 +00:00
|
|
|
ASSERT_EQ(500, dbfull()->GetOptions().ttl);
|
2019-10-31 17:59:13 +00:00
|
|
|
}
|
|
|
|
|
2017-10-19 22:19:20 +00:00
|
|
|
TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
|
|
|
|
Options options;
|
2020-10-27 17:31:34 +00:00
|
|
|
options.env = CurrentOptions().env;
|
2017-10-19 22:19:20 +00:00
|
|
|
options.compaction_style = kCompactionStyleFIFO;
|
2023-10-19 01:00:07 +00:00
|
|
|
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
|
2017-10-19 22:19:20 +00:00
|
|
|
options.write_buffer_size = 10 << 10; // 10KB
|
|
|
|
options.arena_block_size = 4096;
|
|
|
|
options.compression = kNoCompression;
|
|
|
|
options.create_if_missing = true;
|
|
|
|
options.compaction_options_fifo.allow_compaction = false;
|
2023-05-11 23:40:59 +00:00
|
|
|
options.num_levels = 1;
|
Fix+clean up handling of mock sleeps (#7101)
Summary:
We have a number of tests hanging on MacOS and Windows due to
mishandling of code for mock sleeps. In addition, the code was in
terrible shape because the same variable (addon_time_) would sometimes
refer to microseconds and sometimes to seconds. One test even assumed it
was nanoseconds but was written to pass anyway.
This has been cleaned up so that DB tests generally use a SpecialEnv
function to mock sleep, for either some number of microseconds or seconds
depending on the function called. But to call one of these, the test must first
call SetMockSleep (precondition enforced with assertion), which also turns
sleeps in RocksDB into mock sleeps. To also remove accounting for actual
clock time, call SetTimeElapseOnlySleepOnReopen, which implies
SetMockSleep (on DB re-open). This latter setting only works by applying
on DB re-open, otherwise havoc can ensue if Env goes back in time with
DB open.
More specifics:
Removed some unused test classes, and updated comments on the general
problem.
Fixed DBSSTTest.GetTotalSstFilesSize using a sync point callback instead
of mock time. For this we have the only modification to production code,
inserting a sync point callback in flush_job.cc, which is not a change to
production behavior.
Removed unnecessary resetting of mock times to 0 in many tests. RocksDB
deals in relative time. Any behaviors relying on absolute date/time are likely
a bug. (The above test DBSSTTest.GetTotalSstFilesSize was the only one
clearly injecting a specific absolute time for actual testing convenience.) Just
in case I misunderstood some test, I put this note in each replacement:
// NOTE: Presumed unnecessary and removed: resetting mock time in env
Strengthened some tests like MergeTestTime, MergeCompactionTimeTest, and
FilterCompactionTimeTest in db_test.cc
stats_history_test and blob_db_test are each their own beast, rather deeply
dependent on MockTimeEnv. Each gets its own variant of a work-around for
TimedWait in a mock time environment. (Reduces redundancy and
inconsistency in stats_history_test.)
Intended follow-up:
Remove TimedWait from the public API of InstrumentedCondVar, and only
make that accessible through Env by passing in an InstrumentedCondVar and
a deadline. Then the Env implementations mocking time can fix this problem
without using sync points. (Test infrastructure using sync points interferes
with individual tests' control over sync points.)
With that change, we can simplify/consolidate the scattered work-arounds.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7101
Test Plan: make check on Linux and MacOS
Reviewed By: zhichao-cao
Differential Revision: D23032815
Pulled By: pdillinger
fbshipit-source-id: 7f33967ada8b83011fb54e8279365c008bd6610b
2020-08-11 19:39:49 +00:00
|
|
|
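// SetMockSleep() turns env-based sleeps in RocksDB into mock-clock advances,
// so the MockSleepForSeconds() calls below age files without real waiting.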
env_->SetMockSleep();
|
2017-10-19 22:19:20 +00:00
|
|
|
options.env = env_;
|
|
|
|
|
2020-08-11 19:39:49 +00:00
|
|
|
// NOTE: Presumed unnecessary and removed: resetting mock time in env
|
|
|
|
|
2019-02-15 17:48:44 +00:00
|
|
|
// Test dynamically changing ttl.
|
|
|
|
options.ttl = 1 * 60 * 60; // 1 hour
|
2017-10-19 22:19:20 +00:00
|
|
|
ASSERT_OK(TryReopen(options));
|
|
|
|
|
|
|
|
Random rnd(301);
|
|
|
|
for (int i = 0; i < 10; i++) {
|
|
|
|
// Generate and flush a file about 10KB.
|
|
|
|
for (int j = 0; j < 10; j++) {
|
2022-05-06 20:03:58 +00:00
|
|
|
ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
|
2017-10-19 22:19:20 +00:00
|
|
|
}
|
2020-12-24 00:54:05 +00:00
|
|
|
ASSERT_OK(Flush());
|
2017-10-19 22:19:20 +00:00
|
|
|
}
|
|
|
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 10);
|
|
|
|
|
2020-08-11 19:39:49 +00:00
|
|
|
env_->MockSleepForSeconds(61);
|
2017-10-19 22:19:20 +00:00
|
|
|
|
|
|
|
// No files should be compacted as ttl is set to 1 hour.
|
2019-02-15 17:48:44 +00:00
|
|
|
ASSERT_EQ(dbfull()->GetOptions().ttl, 3600);
|
2020-09-16 22:45:30 +00:00
|
|
|
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
2017-10-19 22:19:20 +00:00
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 10);
|
|
|
|
|
2023-10-19 01:00:07 +00:00
|
|
|
ASSERT_EQ(options.statistics->getTickerCount(FIFO_TTL_COMPACTIONS), 0);
|
|
|
|
ASSERT_EQ(options.statistics->getTickerCount(FIFO_MAX_SIZE_COMPACTIONS), 0);
|
|
|
|
|
2017-10-19 22:19:20 +00:00
|
|
|
// Set ttl to 1 minute. So all files should get deleted.
|
2019-02-15 17:48:44 +00:00
|
|
|
ASSERT_OK(dbfull()->SetOptions({{"ttl", "60"}}));
|
|
|
|
ASSERT_EQ(dbfull()->GetOptions().ttl, 60);
|
2020-09-16 22:45:30 +00:00
|
|
|
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
2017-10-19 22:19:20 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
|
|
|
|
|
2023-10-19 01:00:07 +00:00
|
|
|
ASSERT_GT(options.statistics->getTickerCount(FIFO_TTL_COMPACTIONS), 0);
|
|
|
|
ASSERT_EQ(options.statistics->getTickerCount(FIFO_MAX_SIZE_COMPACTIONS), 0);
|
|
|
|
ASSERT_OK(options.statistics->Reset());
|
|
|
|
|
2020-08-11 19:39:49 +00:00
|
|
|
// NOTE: Presumed unnecessary and removed: resetting mock time in env
|
|
|
|
|
2017-10-19 22:19:20 +00:00
|
|
|
// Test dynamically changing compaction_options_fifo.max_table_files_size
|
|
|
|
options.compaction_options_fifo.max_table_files_size = 500 << 10;  // 500KB
|
2019-02-15 17:48:44 +00:00
|
|
|
options.ttl = 0;
|
2017-10-19 22:19:20 +00:00
|
|
|
DestroyAndReopen(options);
|
|
|
|
|
|
|
|
for (int i = 0; i < 10; i++) {
|
|
|
|
// Generate and flush a file about 10KB.
|
|
|
|
for (int j = 0; j < 10; j++) {
|
2022-05-06 20:03:58 +00:00
|
|
|
ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
|
2017-10-19 22:19:20 +00:00
|
|
|
}
|
2020-09-16 22:45:30 +00:00
|
|
|
ASSERT_OK(Flush());
|
2017-10-19 22:19:20 +00:00
|
|
|
}
|
|
|
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 10);
|
|
|
|
|
|
|
|
// No files should be compacted as max_table_files_size is set to 500 KB.
|
|
|
|
ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
|
|
|
|
500 << 10);
|
2020-09-16 22:45:30 +00:00
|
|
|
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
2017-10-19 22:19:20 +00:00
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 10);
|
|
|
|
|
2023-10-19 01:00:07 +00:00
|
|
|
ASSERT_EQ(options.statistics->getTickerCount(FIFO_MAX_SIZE_COMPACTIONS), 0);
|
|
|
|
ASSERT_EQ(options.statistics->getTickerCount(FIFO_TTL_COMPACTIONS), 0);
|
|
|
|
|
2017-10-19 22:19:20 +00:00
|
|
|
// Set max_table_files_size to 12 KB. So only 1 file should remain now.
|
|
|
|
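// compaction_options_fifo is mutable, so it can be updated at runtime via
// SetOptions() with a nested option string.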
ASSERT_OK(dbfull()->SetOptions(
|
|
|
|
{{"compaction_options_fifo", "{max_table_files_size=12288;}"}}));
|
|
|
|
ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
|
|
|
|
12 << 10);
|
2020-09-16 22:45:30 +00:00
|
|
|
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
2017-10-19 22:19:20 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 1);
|
|
|
|
|
2023-10-19 01:00:07 +00:00
|
|
|
ASSERT_GT(options.statistics->getTickerCount(FIFO_MAX_SIZE_COMPACTIONS), 0);
|
|
|
|
ASSERT_EQ(options.statistics->getTickerCount(FIFO_TTL_COMPACTIONS), 0);
|
|
|
|
ASSERT_OK(options.statistics->Reset());
|
|
|
|
|
2017-10-19 22:19:20 +00:00
|
|
|
// Test dynamically changing compaction_options_fifo.allow_compaction
|
|
|
|
options.compaction_options_fifo.max_table_files_size = 500 << 10; // 500KB
|
2019-02-15 17:48:44 +00:00
|
|
|
options.ttl = 0;
|
2017-10-19 22:19:20 +00:00
|
|
|
options.compaction_options_fifo.allow_compaction = false;
|
|
|
|
options.level0_file_num_compaction_trigger = 6;
|
|
|
|
DestroyAndReopen(options);
|
|
|
|
|
|
|
|
for (int i = 0; i < 10; i++) {
|
|
|
|
// Generate and flush a file about 10KB.
|
|
|
|
for (int j = 0; j < 10; j++) {
|
2022-05-06 20:03:58 +00:00
|
|
|
ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
|
2017-10-19 22:19:20 +00:00
|
|
|
}
|
2020-09-16 22:45:30 +00:00
|
|
|
ASSERT_OK(Flush());
|
2017-10-19 22:19:20 +00:00
|
|
|
}
|
|
|
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 10);
|
|
|
|
|
|
|
|
// No files should be compacted as max_table_files_size is set to 500 KB and
|
|
|
|
// allow_compaction is false
|
|
|
|
ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
|
|
|
|
false);
|
2020-09-16 22:45:30 +00:00
|
|
|
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
2017-10-19 22:19:20 +00:00
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 10);
|
|
|
|
|
|
|
|
// Set allow_compaction to true. So the number of files should be between 1 and 5.
|
|
|
|
ASSERT_OK(dbfull()->SetOptions(
|
|
|
|
{{"compaction_options_fifo", "{allow_compaction=true;}"}}));
|
|
|
|
ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
|
|
|
|
true);
|
2020-09-16 22:45:30 +00:00
|
|
|
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
2017-10-19 22:19:20 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
|
|
|
ASSERT_GE(NumTableFilesAtLevel(0), 1);
|
|
|
|
ASSERT_LE(NumTableFilesAtLevel(0), 5);
|
2023-05-11 23:40:59 +00:00
|
|
|
|
|
|
|
// Test dynamically setting `file_temperature_age_thresholds`
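// Each threshold pairs an age (in seconds) with a target temperature for
// files at least that old.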
|
|
|
|
ASSERT_TRUE(
|
|
|
|
dbfull()
|
|
|
|
->GetOptions()
|
|
|
|
.compaction_options_fifo.file_temperature_age_thresholds.empty());
|
|
|
|
ASSERT_OK(dbfull()->SetOptions({{"compaction_options_fifo",
|
|
|
|
"{file_temperature_age_thresholds={{age=10;"
|
|
|
|
"temperature=kWarm}:{age=30000;"
|
|
|
|
"temperature=kCold}}}"}}));
|
|
|
|
auto opts = dbfull()->GetOptions();
|
|
|
|
const auto& fifo_temp_opt =
|
|
|
|
opts.compaction_options_fifo.file_temperature_age_thresholds;
|
|
|
|
ASSERT_EQ(fifo_temp_opt.size(), 2);
|
|
|
|
ASSERT_EQ(fifo_temp_opt[0].temperature, Temperature::kWarm);
|
|
|
|
ASSERT_EQ(fifo_temp_opt[0].age, 10);
|
|
|
|
ASSERT_EQ(fifo_temp_opt[1].temperature, Temperature::kCold);
|
|
|
|
ASSERT_EQ(fifo_temp_opt[1].age, 30000);
|
2017-10-19 22:19:20 +00:00
|
|
|
}
|
|
|
|
|
2023-10-27 22:56:48 +00:00
|
|
|
TEST_F(DBOptionsTest, OffpeakTimes) {
|
2023-09-29 20:03:39 +00:00
|
|
|
Options options;
|
|
|
|
options.create_if_missing = true;
|
2023-10-02 23:52:39 +00:00
|
|
|
Random rnd(test::RandomSeed());
|
2023-09-29 20:03:39 +00:00
|
|
|
|
|
|
|
auto verify_invalid = [&]() {
|
|
|
|
Status s = DBImpl::TEST_ValidateOptions(options);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
};
|
|
|
|
|
|
|
|
auto verify_valid = [&]() {
|
|
|
|
Status s = DBImpl::TEST_ValidateOptions(options);
|
|
|
|
ASSERT_OK(s);
|
|
|
|
ASSERT_FALSE(s.IsInvalidArgument());
|
|
|
|
};
|
|
|
|
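// daily_offpeak_time_utc expects "HH:mm-HH:mm" (start-end). The cases below
// violate that format or use out-of-range values.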
std::vector<std::string> invalid_cases = {
|
|
|
|
"06:30-",
|
|
|
|
"-23:30", // Both need to be set
|
2023-10-02 23:52:39 +00:00
|
|
|
"00:00-00:00",
|
|
|
|
"06:30-06:30" // Start time cannot be the same as end time
|
2023-09-29 20:03:39 +00:00
|
|
|
"12:30 PM-23:30",
|
|
|
|
"12:01AM-11:00PM", // Invalid format
|
|
|
|
"01:99-22:00", // Invalid value for minutes
|
|
|
|
"00:00-24:00", // 24:00 is an invalid value
|
|
|
|
"6-7",
|
|
|
|
"6:-7",
|
|
|
|
"06:31.42-7:00",
|
|
|
|
"6.31:42-7:00",
|
|
|
|
"6:0-7:",
|
|
|
|
"15:0.2-3:.7",
|
|
|
|
":00-00:02",
|
|
|
|
"02:00-:00",
|
|
|
|
"random-value",
|
|
|
|
"No:No-Hi:Hi",
|
|
|
|
};
|
|
|
|
|
|
|
|
std::vector<std::string> valid_cases = {
|
2023-10-02 23:52:39 +00:00
|
|
|
"", // Not enabled. Valid case
|
|
|
|
"06:30-11:30",
|
|
|
|
"06:30-23:30",
|
|
|
|
"13:30-14:30",
|
|
|
|
"00:00-23:59", // Entire Day
|
2023-09-29 20:03:39 +00:00
|
|
|
"23:30-01:15", // From 11:30PM to 1:15AM next day. Valid case.
|
|
|
|
"1:0000000000000-2:000000000042", // Weird, but we can parse the int.
|
|
|
|
};
|
|
|
|
|
2024-01-05 19:53:57 +00:00
|
|
|
for (const std::string& invalid_case : invalid_cases) {
|
2023-09-29 20:03:39 +00:00
|
|
|
options.daily_offpeak_time_utc = invalid_case;
|
|
|
|
verify_invalid();
|
|
|
|
}
|
2024-01-05 19:53:57 +00:00
|
|
|
for (const std::string& valid_case : valid_cases) {
|
2023-09-29 20:03:39 +00:00
|
|
|
options.daily_offpeak_time_utc = valid_case;
|
|
|
|
verify_valid();
|
|
|
|
}
|
|
|
|
|
2023-11-06 19:43:59 +00:00
|
|
|
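// Helper: sets a mock clock to the given UTC time of day (plus a random
// number of whole days), then checks GetOffpeakTimeInfo() for the expected
// offpeak state and seconds until the next offpeak start.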
auto verify_offpeak_info = [&](bool expected_is_now_off_peak,
|
|
|
|
int expected_seconds_till_next_offpeak_start,
|
|
|
|
int now_utc_hour, int now_utc_minute,
|
|
|
|
int now_utc_second = 0) {
|
2023-09-29 20:03:39 +00:00
|
|
|
auto mock_clock = std::make_shared<MockSystemClock>(env_->GetSystemClock());
|
|
|
|
// Add some extra random days to current time
|
|
|
|
int days = rnd.Uniform(100);
|
2023-11-06 19:43:59 +00:00
|
|
|
mock_clock->SetCurrentTime(
|
|
|
|
days * OffpeakTimeOption::kSecondsPerDay +
|
|
|
|
now_utc_hour * OffpeakTimeOption::kSecondsPerHour +
|
|
|
|
now_utc_minute * OffpeakTimeOption::kSecondsPerMinute + now_utc_second);
|
2023-09-29 20:03:39 +00:00
|
|
|
Status s = DBImpl::TEST_ValidateOptions(options);
|
|
|
|
ASSERT_OK(s);
|
2023-11-06 19:43:59 +00:00
|
|
|
auto offpeak_option = OffpeakTimeOption(options.daily_offpeak_time_utc);
|
|
|
|
int64_t now;
|
|
|
|
ASSERT_OK(mock_clock.get()->GetCurrentTime(&now));
|
|
|
|
auto offpeak_info = offpeak_option.GetOffpeakTimeInfo(now);
|
|
|
|
ASSERT_EQ(expected_is_now_off_peak, offpeak_info.is_now_offpeak);
|
|
|
|
ASSERT_EQ(expected_seconds_till_next_offpeak_start,
|
|
|
|
offpeak_info.seconds_till_next_offpeak_start);
|
2023-09-29 20:03:39 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
options.daily_offpeak_time_utc = "";
|
2023-11-06 19:43:59 +00:00
|
|
|
verify_offpeak_info(false, 0, 12, 30);
|
2023-09-29 20:03:39 +00:00
|
|
|
|
|
|
|
options.daily_offpeak_time_utc = "06:30-11:30";
|
2023-11-06 19:43:59 +00:00
|
|
|
verify_offpeak_info(false, 1 * OffpeakTimeOption::kSecondsPerHour, 5, 30);
|
|
|
|
verify_offpeak_info(true, 24 * OffpeakTimeOption::kSecondsPerHour, 6, 30);
|
|
|
|
verify_offpeak_info(true, 20 * OffpeakTimeOption::kSecondsPerHour, 10, 30);
|
|
|
|
verify_offpeak_info(true, 19 * OffpeakTimeOption::kSecondsPerHour, 11, 30);
|
|
|
|
verify_offpeak_info(false, 17 * OffpeakTimeOption::kSecondsPerHour, 13, 30);
|
2023-09-29 20:03:39 +00:00
|
|
|
|
|
|
|
options.daily_offpeak_time_utc = "23:30-04:30";
|
2023-11-06 19:43:59 +00:00
|
|
|
verify_offpeak_info(false, 17 * OffpeakTimeOption::kSecondsPerHour, 6, 30);
|
|
|
|
verify_offpeak_info(true, 24 * OffpeakTimeOption::kSecondsPerHour, 23, 30);
|
|
|
|
verify_offpeak_info(true,
|
|
|
|
23 * OffpeakTimeOption::kSecondsPerHour +
|
|
|
|
30 * OffpeakTimeOption::kSecondsPerMinute,
|
|
|
|
0, 0);
|
|
|
|
verify_offpeak_info(true,
|
|
|
|
22 * OffpeakTimeOption::kSecondsPerHour +
|
|
|
|
30 * OffpeakTimeOption::kSecondsPerMinute,
|
|
|
|
1, 0);
|
|
|
|
verify_offpeak_info(true, 19 * OffpeakTimeOption::kSecondsPerHour, 4, 30);
|
|
|
|
verify_offpeak_info(false,
|
|
|
|
18 * OffpeakTimeOption::kSecondsPerHour +
|
|
|
|
59 * OffpeakTimeOption::kSecondsPerMinute,
|
|
|
|
4, 31);
|
2023-09-29 20:03:39 +00:00
|
|
|
|
2023-10-02 23:52:39 +00:00
|
|
|
// Entire day offpeak
|
2023-09-29 20:03:39 +00:00
|
|
|
options.daily_offpeak_time_utc = "00:00-23:59";
|
2023-11-06 19:43:59 +00:00
|
|
|
verify_offpeak_info(true, 24 * OffpeakTimeOption::kSecondsPerHour, 0, 0);
|
|
|
|
verify_offpeak_info(true, 12 * OffpeakTimeOption::kSecondsPerHour, 12, 00);
|
|
|
|
verify_offpeak_info(true, 1 * OffpeakTimeOption::kSecondsPerMinute, 23, 59);
|
|
|
|
verify_offpeak_info(true, 59, 23, 59, 1);
|
|
|
|
verify_offpeak_info(true, 1, 23, 59, 59);
|
|
|
|
|
|
|
|
// Start with a valid option
|
|
|
|
options.daily_offpeak_time_utc = "01:30-04:15";
|
2023-09-29 20:03:39 +00:00
|
|
|
DestroyAndReopen(options);
|
2023-11-06 19:43:59 +00:00
|
|
|
ASSERT_EQ("01:30-04:15", dbfull()->GetDBOptions().daily_offpeak_time_utc);
|
2023-10-27 22:56:48 +00:00
|
|
|
|
|
|
|
int may_schedule_compaction_called = 0;
|
|
|
|
SyncPoint::GetInstance()->SetCallBack(
|
|
|
|
"DBImpl::MaybeScheduleFlushOrCompaction:Start",
|
|
|
|
[&](void*) { may_schedule_compaction_called++; });
|
|
|
|
SyncPoint::GetInstance()->EnableProcessing();
|
|
|
|
|
2023-11-06 19:43:59 +00:00
|
|
|
// Make sure calling SetDBOptions with invalid option does not change the
|
|
|
|
// value nor call MaybeScheduleFlushOrCompaction()
|
2023-09-29 20:03:39 +00:00
|
|
|
for (std::string invalid_case : invalid_cases) {
|
|
|
|
ASSERT_NOK(
|
|
|
|
dbfull()->SetDBOptions({{"daily_offpeak_time_utc", invalid_case}}));
|
2023-11-06 19:43:59 +00:00
|
|
|
ASSERT_EQ("01:30-04:15", dbfull()
|
|
|
|
->GetVersionSet()
|
|
|
|
->offpeak_time_option()
|
|
|
|
.daily_offpeak_time_utc);
|
|
|
|
ASSERT_EQ(1 * kSecondInHour + 30 * kSecondInMinute,
|
|
|
|
dbfull()
|
|
|
|
->GetVersionSet()
|
|
|
|
->offpeak_time_option()
|
|
|
|
.daily_offpeak_start_time_utc);
|
|
|
|
ASSERT_EQ(4 * kSecondInHour + 15 * kSecondInMinute,
|
|
|
|
dbfull()
|
|
|
|
->GetVersionSet()
|
|
|
|
->offpeak_time_option()
|
|
|
|
.daily_offpeak_end_time_utc);
|
2023-09-29 20:03:39 +00:00
|
|
|
  }
  ASSERT_EQ(0, may_schedule_compaction_called);

  // Changing to new valid values should call MaybeScheduleFlushOrCompaction()
  // and set the offpeak_time_option in VersionSet.
  int expected_count = 0;
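  // MaybeScheduleFlushOrCompaction() should fire only when the value actually
  // changes, so count only the cases that differ from the current setting.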
  for (std::string valid_case : valid_cases) {
    if (dbfull()
            ->GetVersionSet()
            ->offpeak_time_option()
            .daily_offpeak_time_utc != valid_case) {
      expected_count++;
    }
    ASSERT_OK(
        dbfull()->SetDBOptions({{"daily_offpeak_time_utc", valid_case}}));
    ASSERT_EQ(valid_case, dbfull()->GetDBOptions().daily_offpeak_time_utc);
    ASSERT_EQ(valid_case, dbfull()
                              ->GetVersionSet()
                              ->offpeak_time_option()
                              .daily_offpeak_time_utc);
  }
  ASSERT_EQ(expected_count, may_schedule_compaction_called);

  // Changing to the same value should not call
  // MaybeScheduleFlushOrCompaction()
  ASSERT_OK(
      dbfull()->SetDBOptions({{"daily_offpeak_time_utc", "06:30-11:30"}}));
  may_schedule_compaction_called = 0;
  ASSERT_OK(
      dbfull()->SetDBOptions({{"daily_offpeak_time_utc", "06:30-11:30"}}));
  ASSERT_EQ(0, may_schedule_compaction_called);

  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
  Close();
}

TEST_F(DBOptionsTest, CompactionReadaheadSizeChange) {
  for (bool use_direct_reads : {true, false}) {
    SpecialEnv env(env_);
    Options options;
    options.env = &env;

    options.use_direct_reads = use_direct_reads;
    options.level0_file_num_compaction_trigger = 2;
    const std::string kValue(1024, 'v');
    Status s = TryReopen(options);
    if (use_direct_reads && (s.IsNotSupported() || s.IsInvalidArgument())) {
      continue;
    } else {
      ASSERT_OK(s);
    }

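    // The default compaction_readahead_size is 2 MB; lower it dynamically and
    // verify that compaction I/O picks up the new value.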
    ASSERT_EQ(1024 * 1024 * 2,
              dbfull()->GetDBOptions().compaction_readahead_size);
    ASSERT_OK(dbfull()->SetDBOptions({{"compaction_readahead_size", "256"}}));
    ASSERT_EQ(256, dbfull()->GetDBOptions().compaction_readahead_size);
    for (int i = 0; i < 1024; i++) {
      ASSERT_OK(Put(Key(i), kValue));
    }
    ASSERT_OK(Flush());
    for (int i = 0; i < 1024 * 2; i++) {
      ASSERT_OK(Put(Key(i), kValue));
    }
    ASSERT_OK(Flush());
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    ASSERT_EQ(256, env_->compaction_readahead_size_);
    Close();
  }
}

TEST_F(DBOptionsTest, FIFOTtlBackwardCompatible) {
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.write_buffer_size = 10 << 10;  // 10KB
  options.create_if_missing = true;
  options.env = CurrentOptions().env;
  options.num_levels = 1;

  ASSERT_OK(TryReopen(options));

  Random rnd(301);
  for (int i = 0; i < 10; i++) {
    // Generate and flush a file about 10KB.
    for (int j = 0; j < 10; j++) {
      ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980)));
    }
    ASSERT_OK(Flush());
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(NumTableFilesAtLevel(0), 10);

  // In release 6.0, ttl was promoted from a secondary level option under
  // compaction_options_fifo to a top level option under ColumnFamilyOptions.
  // We still need to handle old SetOptions calls but should ignore
  // ttl under compaction_options_fifo.
  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_fifo",
        "{allow_compaction=true;max_table_files_size=1024;ttl=731;file_"
        "temperature_age_thresholds={temperature=kCold;age=12345}}"},
       {"ttl", "60"}}));
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            true);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            1024);
  auto opts = dbfull()->GetOptions();
  const auto& file_temp_age =
      opts.compaction_options_fifo.file_temperature_age_thresholds;
  ASSERT_EQ(file_temp_age.size(), 1);
  ASSERT_EQ(file_temp_age[0].temperature, Temperature::kCold);
  ASSERT_EQ(file_temp_age[0].age, 12345);
  ASSERT_EQ(dbfull()->GetOptions().ttl, 60);

  // Put ttl as the first option inside compaction_options_fifo. That works as
  // it doesn't overwrite any other option.
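  // The embedded ttl (985) should again be ignored in favor of the top-level
  // ttl, and the temperature/age threshold set above should be preserved.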
  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_fifo",
        "{ttl=985;allow_compaction=true;max_table_files_size=1024;}"},
       {"ttl", "191"}}));
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            true);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            1024);
  ASSERT_EQ(file_temp_age.size(), 1);
  ASSERT_EQ(file_temp_age[0].temperature, Temperature::kCold);
  ASSERT_EQ(file_temp_age[0].age, 12345);
  ASSERT_EQ(dbfull()->GetOptions().ttl, 191);
}

TEST_F(DBOptionsTest, ChangeCompression) {
  if (!Snappy_Supported() || !LZ4_Supported()) {
    return;
  }
  Options options;
  options.write_buffer_size = 10 << 10;  // 10KB
  options.level0_file_num_compaction_trigger = 2;
  options.create_if_missing = true;
  options.compression = CompressionType::kLZ4Compression;
  options.bottommost_compression = CompressionType::kNoCompression;
  options.bottommost_compression_opts.level = 2;
  options.bottommost_compression_opts.parallel_threads = 1;
  options.env = CurrentOptions().env;

  ASSERT_OK(TryReopen(options));

  CompressionType compression_used = CompressionType::kLZ4Compression;
  CompressionOptions compression_opt_used;
  bool compacted = false;
  SyncPoint::GetInstance()->SetCallBack(
      "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) {
        Compaction* c = static_cast<Compaction*>(arg);
        compression_used = c->output_compression();
        compression_opt_used = c->output_compression_opts();
        compacted = true;
      });
  SyncPoint::GetInstance()->EnableProcessing();

  ASSERT_OK(Put("foo", "foofoofoo"));
  ASSERT_OK(Put("bar", "foofoofoo"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("foo", "foofoofoo"));
  ASSERT_OK(Put("bar", "foofoofoo"));
  ASSERT_OK(Flush());
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_TRUE(compacted);
  ASSERT_EQ(CompressionType::kNoCompression, compression_used);
  ASSERT_EQ(options.compression_opts.level, compression_opt_used.level);
  ASSERT_EQ(options.compression_opts.parallel_threads,
            compression_opt_used.parallel_threads);

  compression_used = CompressionType::kLZ4Compression;
  compacted = false;
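  // Switch the bottommost compression to Snappy via the legacy colon-separated
  // CompressionOptions string; the second field (6) is the compression level,
  // as asserted below.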
  ASSERT_OK(dbfull()->SetOptions(
      {{"bottommost_compression", "kSnappyCompression"},
       {"bottommost_compression_opts", "0:6:0:0:4:true"}}));
  ASSERT_OK(Put("foo", "foofoofoo"));
  ASSERT_OK(Put("bar", "foofoofoo"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("foo", "foofoofoo"));
  ASSERT_OK(Put("bar", "foofoofoo"));
  ASSERT_OK(Flush());
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_TRUE(compacted);
  ASSERT_EQ(CompressionType::kSnappyCompression, compression_used);
  ASSERT_EQ(6, compression_opt_used.level);
  // Right now parallel_threads is not yet allowed to be changed.

  SyncPoint::GetInstance()->DisableProcessing();
}

TEST_F(DBOptionsTest, BottommostCompressionOptsWithFallbackType) {
  // Verify the bottommost compression options still take effect even when the
  // bottommost compression type is left at its default value. Verify for both
  // automatic and manual compaction.
  if (!Snappy_Supported() || !LZ4_Supported()) {
    return;
  }

  constexpr int kUpperCompressionLevel = 1;
  constexpr int kBottommostCompressionLevel = 2;
  constexpr int kNumL0Files = 2;

  Options options = CurrentOptions();
  options.level0_file_num_compaction_trigger = kNumL0Files;
  options.compression = CompressionType::kLZ4Compression;
  options.compression_opts.level = kUpperCompressionLevel;
  options.bottommost_compression_opts.level = kBottommostCompressionLevel;
  options.bottommost_compression_opts.enabled = true;
  Reopen(options);

  CompressionType compression_used = CompressionType::kDisableCompressionOption;
  CompressionOptions compression_opt_used;
  bool compacted = false;
  SyncPoint::GetInstance()->SetCallBack(
      "CompactionPicker::RegisterCompaction:Registered", [&](void* arg) {
        Compaction* c = static_cast<Compaction*>(arg);
        compression_used = c->output_compression();
        compression_opt_used = c->output_compression_opts();
        compacted = true;
      });
  SyncPoint::GetInstance()->EnableProcessing();

  // First, verify for automatic compaction.
  for (int i = 0; i < kNumL0Files; ++i) {
    ASSERT_OK(Put("foo", "foofoofoo"));
    ASSERT_OK(Put("bar", "foofoofoo"));
    ASSERT_OK(Flush());
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());

  ASSERT_TRUE(compacted);
  ASSERT_EQ(CompressionType::kLZ4Compression, compression_used);
  ASSERT_EQ(kBottommostCompressionLevel, compression_opt_used.level);

  // Second, verify for manual compaction.
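  // kForceOptimized rewrites the bottommost level even though no new data has
  // been written, so the registered compaction callback fires again.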
  compacted = false;
  compression_used = CompressionType::kDisableCompressionOption;
  compression_opt_used = CompressionOptions();
  CompactRangeOptions cro;
  cro.bottommost_level_compaction = BottommostLevelCompaction::kForceOptimized;
  ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr));

  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();

  ASSERT_TRUE(compacted);
  ASSERT_EQ(CompressionType::kLZ4Compression, compression_used);
  ASSERT_EQ(kBottommostCompressionLevel, compression_opt_used.level);
}

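// FIFO's file_temperature_age_thresholds is validated both at DB open and via
// dynamic SetOptions(): entries must be sorted by increasing age, and the
// option is only supported when num_levels == 1.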
TEST_F(DBOptionsTest, FIFOTemperatureAgeThresholdValidation) {
  Options options = CurrentOptions();
  Destroy(options);

  options.num_levels = 1;
  options.compaction_style = kCompactionStyleFIFO;
  options.max_open_files = -1;
  // elements are not sorted
  // During DB open
  options.compaction_options_fifo.file_temperature_age_thresholds.push_back(
      {Temperature::kCold, 1000});
  options.compaction_options_fifo.file_temperature_age_thresholds.push_back(
      {Temperature::kWarm, 500});
  Status s = TryReopen(options);
  ASSERT_TRUE(s.IsNotSupported());
  ASSERT_TRUE(std::strstr(
      s.getState(),
      "Option file_temperature_age_thresholds requires elements to be sorted "
      "in increasing order with respect to `age` field."));
  // Dynamically set option
  options.compaction_options_fifo.file_temperature_age_thresholds.pop_back();
  ASSERT_OK(TryReopen(options));
  s = db_->SetOptions({{"compaction_options_fifo",
                        "{file_temperature_age_thresholds={{temperature=kCold;"
                        "age=1000000}:{temperature=kWarm;age=1}}}"}});
  ASSERT_TRUE(s.IsNotSupported());
  ASSERT_TRUE(std::strstr(
      s.getState(),
      "Option file_temperature_age_thresholds requires elements to be sorted "
      "in increasing order with respect to `age` field."));

  // not single level
  // During DB open
  options.num_levels = 2;
  s = TryReopen(options);
  ASSERT_TRUE(s.IsNotSupported());
  ASSERT_TRUE(std::strstr(s.getState(),
                          "Option file_temperature_age_thresholds is only "
                          "supported when num_levels = 1."));
  // Dynamically set option
  options.compaction_options_fifo.file_temperature_age_thresholds.clear();
  DestroyAndReopen(options);
  s = db_->SetOptions(
      {{"compaction_options_fifo",
        "{file_temperature_age_thresholds={temperature=kCold;age=1000}}"}});
  ASSERT_TRUE(s.IsNotSupported());
  ASSERT_TRUE(std::strstr(s.getState(),
                          "Option file_temperature_age_thresholds is only "
                          "supported when num_levels = 1."));
}

TEST_F(DBOptionsTest, TempOptionsFailTest) {
  std::shared_ptr<FaultInjectionTestFS> fs;
  std::unique_ptr<Env> env;

  fs.reset(new FaultInjectionTestFS(env_->GetFileSystem()));
  env = NewCompositeEnv(fs);
  Options options = CurrentOptions();
  options.env = env.get();

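  // Deactivate the fault-injection filesystem while the temp OPTIONS file is
  // being created and reactivate it once it has been written, so persisting
  // the options fails and the DB open returns an error.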
  SyncPoint::GetInstance()->SetCallBack(
      "PersistRocksDBOptions:create",
      [&](void* /*arg*/) { fs->SetFilesystemActive(false); });
  SyncPoint::GetInstance()->SetCallBack(
      "PersistRocksDBOptions:written",
      [&](void* /*arg*/) { fs->SetFilesystemActive(true); });

  SyncPoint::GetInstance()->EnableProcessing();
  ASSERT_NOK(TryReopen(options));
  SyncPoint::GetInstance()->DisableProcessing();

  // The failed open should not leave a temporary file behind.
  std::vector<std::string> filenames;
  ASSERT_OK(env_->GetChildren(dbname_, &filenames));
  uint64_t number;
  FileType type;
  bool found_temp_file = false;
  for (size_t i = 0; i < filenames.size(); i++) {
    if (ParseFileName(filenames[i], &number, &type) && type == kTempFile) {
      found_temp_file = true;
    }
  }
  ASSERT_FALSE(found_temp_file);
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}