// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <limits>
#include <string>
#include <unordered_map>

#include "db/column_family.h"
#include "db/db_impl/db_impl.h"
#include "db/db_test_util.h"
#include "options/options_helper.h"
#include "port/stack_trace.h"
#include "rocksdb/cache.h"
#include "rocksdb/convenience.h"
#include "rocksdb/rate_limiter.h"
#include "rocksdb/stats_history.h"
#include "test_util/sync_point.h"
#include "test_util/testutil.h"
#include "util/random.h"

namespace ROCKSDB_NAMESPACE {

class DBOptionsTest : public DBTestBase {
 public:
  DBOptionsTest() : DBTestBase("/db_options_test") {}

#ifndef ROCKSDB_LITE
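
  // The helpers below round-trip an Options object through its string form
  // (GetStringFromDBOptions / GetStringFromColumnFamilyOptions, then
  // StringToMap) and keep only the entries whose type info is marked mutable
  // and serializable, so the returned maps can be passed directly to
  // SetDBOptions() / SetOptions().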
  std::unordered_map<std::string, std::string> GetMutableDBOptionsMap(
      const DBOptions& options) {
    std::string options_str;
    ConfigOptions config_options;
    config_options.delimiter = "; ";
    GetStringFromDBOptions(config_options, options, &options_str);
    std::unordered_map<std::string, std::string> options_map;
    StringToMap(options_str, &options_map);
    std::unordered_map<std::string, std::string> mutable_map;
    for (const auto& opt : db_options_type_info) {
      if (opt.second.IsMutable() && opt.second.ShouldSerialize()) {
        mutable_map[opt.first] = options_map[opt.first];
      }
    }
    return mutable_map;
  }

  std::unordered_map<std::string, std::string> GetMutableCFOptionsMap(
      const ColumnFamilyOptions& options) {
    std::string options_str;
    ConfigOptions config_options;
    config_options.delimiter = "; ";
    GetStringFromColumnFamilyOptions(config_options, options, &options_str);
    std::unordered_map<std::string, std::string> options_map;
    StringToMap(options_str, &options_map);
    std::unordered_map<std::string, std::string> mutable_map;
    for (const auto& opt : cf_options_type_info) {
      if (opt.second.IsMutable() && opt.second.ShouldSerialize()) {
        mutable_map[opt.first] = options_map[opt.first];
      }
    }
    return mutable_map;
  }

  std::unordered_map<std::string, std::string> GetRandomizedMutableCFOptionsMap(
      Random* rnd) {
    Options options = CurrentOptions();
    options.env = env_;
    ImmutableDBOptions db_options(options);
    test::RandomInitCFOptions(&options, options, rnd);
    auto sanitized_options = SanitizeOptions(db_options, options);
    auto opt_map = GetMutableCFOptionsMap(sanitized_options);
    delete options.compaction_filter;
    return opt_map;
  }

  std::unordered_map<std::string, std::string> GetRandomizedMutableDBOptionsMap(
      Random* rnd) {
    DBOptions db_options;
    test::RandomInitDBOptions(&db_options, rnd);
    auto sanitized_options = SanitizeOptions(dbname_, db_options);
    return GetMutableDBOptionsMap(sanitized_options);
  }
#endif  // ROCKSDB_LITE
};

// RocksDB lite doesn't support dynamic options.
#ifndef ROCKSDB_LITE

TEST_F(DBOptionsTest, GetLatestDBOptions) {
  // GetOptions should be able to get the latest options changed by SetOptions.
  Options options;
  options.create_if_missing = true;
  options.env = env_;
  Random rnd(228);
  Reopen(options);
  auto new_options = GetRandomizedMutableDBOptionsMap(&rnd);
  ASSERT_OK(dbfull()->SetDBOptions(new_options));
  ASSERT_EQ(new_options, GetMutableDBOptionsMap(dbfull()->GetDBOptions()));
}

TEST_F(DBOptionsTest, GetLatestCFOptions) {
  // GetOptions should be able to get the latest options changed by SetOptions.
  Options options;
  options.create_if_missing = true;
  options.env = env_;
  Random rnd(228);
  Reopen(options);
  CreateColumnFamilies({"foo"}, options);
  ReopenWithColumnFamilies({"default", "foo"}, options);
  auto options_default = GetRandomizedMutableCFOptionsMap(&rnd);
  auto options_foo = GetRandomizedMutableCFOptionsMap(&rnd);
  ASSERT_OK(dbfull()->SetOptions(handles_[0], options_default));
  ASSERT_OK(dbfull()->SetOptions(handles_[1], options_foo));
  ASSERT_EQ(options_default,
            GetMutableCFOptionsMap(dbfull()->GetOptions(handles_[0])));
  ASSERT_EQ(options_foo,
            GetMutableCFOptionsMap(dbfull()->GetOptions(handles_[1])));
}
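
// SetBytesPerSync and SetWalBytesPerSync below count RangeSync calls through a
// sync point: writing roughly N bytes with bytes_per_sync = B should trigger
// about N / B syncs, so raising B at runtime must lower the observed count.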
TEST_F(DBOptionsTest, SetBytesPerSync) {
  const size_t kValueSize = 1024 * 1024;  // 1MB
  Options options;
  options.create_if_missing = true;
  options.bytes_per_sync = 1024 * 1024;
  options.use_direct_reads = false;
  options.write_buffer_size = 400 * kValueSize;
  options.disable_auto_compactions = true;
  options.compression = kNoCompression;
  options.env = env_;
  Reopen(options);
  int counter = 0;
  int low_bytes_per_sync = 0;
  int i = 0;
  const std::string kValue(kValueSize, 'v');
  ASSERT_EQ(options.bytes_per_sync, dbfull()->GetDBOptions().bytes_per_sync);
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "WritableFileWriter::RangeSync:0", [&](void* /*arg*/) { counter++; });

  WriteOptions write_opts;
  // Should sync approximately 40MB / 1MB ~= 40 times.
  for (i = 0; i < 40; i++) {
    Put(Key(i), kValue, write_opts);
  }
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  low_bytes_per_sync = counter;
  ASSERT_GT(low_bytes_per_sync, 35);
  ASSERT_LT(low_bytes_per_sync, 45);

  counter = 0;
  // 8388608 = 8 * 1024 * 1024
  ASSERT_OK(dbfull()->SetDBOptions({{"bytes_per_sync", "8388608"}}));
  ASSERT_EQ(8388608, dbfull()->GetDBOptions().bytes_per_sync);
  // Should sync approximately 40MB * 2 / 8MB ~= 10 times.
  // Data will be 40 * 2MB because of the previous Puts too.
  for (i = 0; i < 40; i++) {
    Put(Key(i), kValue, write_opts);
  }
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  ASSERT_GT(counter, 5);
  ASSERT_LT(counter, 15);

  // Redundant assert. But leaving it here just to get the point across that
  // low_bytes_per_sync > counter.
  ASSERT_GT(low_bytes_per_sync, counter);
}

TEST_F(DBOptionsTest, SetWalBytesPerSync) {
  const size_t kValueSize = 1024 * 1024 * 3;
  Options options;
  options.create_if_missing = true;
  options.wal_bytes_per_sync = 512;
  options.write_buffer_size = 100 * kValueSize;
  options.disable_auto_compactions = true;
  options.compression = kNoCompression;
  options.env = env_;
  Reopen(options);
  ASSERT_EQ(512, dbfull()->GetDBOptions().wal_bytes_per_sync);
  int counter = 0;
  int low_bytes_per_sync = 0;
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "WritableFileWriter::RangeSync:0", [&](void* /*arg*/) { counter++; });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  const std::string kValue(kValueSize, 'v');
  int i = 0;
  for (; i < 10; i++) {
    Put(Key(i), kValue);
  }
  // Do not flush. If we flush here, SwitchWAL will reuse the old WAL file
  // since it's empty and will not pick up the new wal_bytes_per_sync value.
  low_bytes_per_sync = counter;
  // 5242880 = 1024 * 1024 * 5
  ASSERT_OK(dbfull()->SetDBOptions({{"wal_bytes_per_sync", "5242880"}}));
  ASSERT_EQ(5242880, dbfull()->GetDBOptions().wal_bytes_per_sync);
  counter = 0;
  i = 0;
  for (; i < 10; i++) {
    Put(Key(i), kValue);
  }
  ASSERT_GT(counter, 0);
  ASSERT_GT(low_bytes_per_sync, 0);
  ASSERT_GT(low_bytes_per_sync, counter);
}
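
// WritableFileMaxBufferSize checks the setting through a sync point that
// reports the buffer size of every WritableFileWriter the DB creates: writers
// built after the SetDBOptions() call must all use the new size.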
TEST_F(DBOptionsTest, WritableFileMaxBufferSize) {
  Options options;
  options.create_if_missing = true;
  options.writable_file_max_buffer_size = 1024 * 1024;
  options.level0_file_num_compaction_trigger = 3;
  options.max_manifest_file_size = 1;
  options.env = env_;
  int buffer_size = 1024 * 1024;
  Reopen(options);
  ASSERT_EQ(buffer_size,
            dbfull()->GetDBOptions().writable_file_max_buffer_size);

  std::atomic<int> match_cnt(0);
  std::atomic<int> unmatch_cnt(0);
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "WritableFileWriter::WritableFileWriter:0", [&](void* arg) {
        int value = static_cast<int>(reinterpret_cast<uintptr_t>(arg));
        if (value == buffer_size) {
          match_cnt++;
        } else {
          unmatch_cnt++;
        }
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  int i = 0;
  for (; i < 3; i++) {
    ASSERT_OK(Put("foo", ToString(i)));
    ASSERT_OK(Put("bar", ToString(i)));
    Flush();
  }
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ(unmatch_cnt, 0);
  ASSERT_GE(match_cnt, 11);

  ASSERT_OK(
      dbfull()->SetDBOptions({{"writable_file_max_buffer_size", "524288"}}));
  buffer_size = 512 * 1024;
  match_cnt = 0;
  unmatch_cnt = 0;  // SetDBOptions() will create a WritableFileWriter

  ASSERT_EQ(buffer_size,
            dbfull()->GetDBOptions().writable_file_max_buffer_size);
  i = 0;
  for (; i < 3; i++) {
    ASSERT_OK(Put("foo", ToString(i)));
    ASSERT_OK(Put("bar", ToString(i)));
    Flush();
  }
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ(unmatch_cnt, 0);
  ASSERT_GE(match_cnt, 11);
}

TEST_F(DBOptionsTest, SetOptionsAndReopen) {
  Random rnd(1044);
  auto rand_opts = GetRandomizedMutableCFOptionsMap(&rnd);
  ASSERT_OK(dbfull()->SetOptions(rand_opts));
  // Verify that the DB can be reopened after setting options.
  Options options;
  options.env = env_;
  ASSERT_OK(TryReopen(options));
}
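
// EnableAutoCompactionAndTriggerStall uses sync-point dependencies to hold the
// background compaction between pick and run: while it is held, the write
// controller should report the stall (stop or delay) implied by whichever
// trigger was configured, and the stall should clear once the compaction is
// allowed to proceed.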
TEST_F(DBOptionsTest, EnableAutoCompactionAndTriggerStall) {
  const std::string kValue(1024, 'v');
  for (int method_type = 0; method_type < 2; method_type++) {
    for (int option_type = 0; option_type < 4; option_type++) {
      Options options;
      options.create_if_missing = true;
      options.disable_auto_compactions = true;
      options.write_buffer_size = 1024 * 1024 * 10;
      options.compression = CompressionType::kNoCompression;
      options.level0_file_num_compaction_trigger = 1;
      options.level0_stop_writes_trigger = std::numeric_limits<int>::max();
      options.level0_slowdown_writes_trigger = std::numeric_limits<int>::max();
      options.hard_pending_compaction_bytes_limit =
          std::numeric_limits<uint64_t>::max();
      options.soft_pending_compaction_bytes_limit =
          std::numeric_limits<uint64_t>::max();
      options.env = env_;

      DestroyAndReopen(options);
      int i = 0;
      for (; i < 1024; i++) {
        Put(Key(i), kValue);
      }
      Flush();
      for (; i < 1024 * 2; i++) {
        Put(Key(i), kValue);
      }
      Flush();
      dbfull()->TEST_WaitForFlushMemTable();
      ASSERT_EQ(2, NumTableFilesAtLevel(0));
      uint64_t l0_size = SizeAtLevel(0);

      switch (option_type) {
        case 0:
          // Test with level0_stop_writes_trigger.
          options.level0_stop_writes_trigger = 2;
          options.level0_slowdown_writes_trigger = 2;
          break;
        case 1:
          options.level0_slowdown_writes_trigger = 2;
          break;
        case 2:
          options.hard_pending_compaction_bytes_limit = l0_size;
          options.soft_pending_compaction_bytes_limit = l0_size;
          break;
        case 3:
          options.soft_pending_compaction_bytes_limit = l0_size;
          break;
      }
      Reopen(options);
      dbfull()->TEST_WaitForCompact();
      ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
      ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay());

      SyncPoint::GetInstance()->LoadDependency(
          {{"DBOptionsTest::EnableAutoCompactionAndTriggerStall:1",
            "BackgroundCallCompaction:0"},
           {"DBImpl::BackgroundCompaction():BeforePickCompaction",
            "DBOptionsTest::EnableAutoCompactionAndTriggerStall:2"},
           {"DBOptionsTest::EnableAutoCompactionAndTriggerStall:3",
            "DBImpl::BackgroundCompaction():AfterPickCompaction"}});
      // Block background compaction.
      SyncPoint::GetInstance()->EnableProcessing();

      switch (method_type) {
        case 0:
          ASSERT_OK(
              dbfull()->SetOptions({{"disable_auto_compactions", "false"}}));
          break;
        case 1:
          ASSERT_OK(dbfull()->EnableAutoCompaction(
              {dbfull()->DefaultColumnFamily()}));
          break;
      }
      TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:1");
      // Wait for the stall condition to be recalculated.
      TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:2");

      switch (option_type) {
        case 0:
          ASSERT_TRUE(dbfull()->TEST_write_controler().IsStopped());
          break;
        case 1:
          ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
          ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
          break;
        case 2:
          ASSERT_TRUE(dbfull()->TEST_write_controler().IsStopped());
          break;
        case 3:
          ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
          ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
          break;
      }
      TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:3");

      // Background compaction executed.
      dbfull()->TEST_WaitForCompact();
      ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
      ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay());
    }
  }
}

TEST_F(DBOptionsTest, SetOptionsMayTriggerCompaction) {
  Options options;
  options.create_if_missing = true;
  options.level0_file_num_compaction_trigger = 1000;
  options.env = env_;
  Reopen(options);
  for (int i = 0; i < 3; i++) {
    // Need to insert two keys to avoid trivial move.
    ASSERT_OK(Put("foo", ToString(i)));
    ASSERT_OK(Put("bar", ToString(i)));
    Flush();
  }
  ASSERT_EQ("3", FilesPerLevel());
  ASSERT_OK(
      dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "3"}}));
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ("0,1", FilesPerLevel());
}
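
// Raising max_background_compactions does not immediately raise the allowed
// parallelism: holding a write-controller stop token simulates write pressure,
// which is what bumps TEST_BGCompactionsAllowed() to the new maximum.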
TEST_F(DBOptionsTest, SetBackgroundCompactionThreads) {
  Options options;
  options.create_if_missing = true;
  options.max_background_compactions = 1;  // default value
  options.env = env_;
  Reopen(options);
  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
  ASSERT_OK(dbfull()->SetDBOptions({{"max_background_compactions", "3"}}));
  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
  auto stop_token = dbfull()->TEST_write_controler().GetStopToken();
  ASSERT_EQ(3, dbfull()->TEST_BGCompactionsAllowed());
}

TEST_F(DBOptionsTest, SetBackgroundFlushThreads) {
  Options options;
  options.create_if_missing = true;
  options.max_background_flushes = 1;
  options.env = env_;
  Reopen(options);
  ASSERT_EQ(1, dbfull()->TEST_BGFlushesAllowed());
  ASSERT_EQ(1, env_->GetBackgroundThreads(Env::Priority::HIGH));
  ASSERT_OK(dbfull()->SetDBOptions({{"max_background_flushes", "3"}}));
  ASSERT_EQ(3, env_->GetBackgroundThreads(Env::Priority::HIGH));
  ASSERT_EQ(3, dbfull()->TEST_BGFlushesAllowed());
}
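
// max_background_jobs is split roughly 1:3 between flushes and compactions
// (jobs / 4 flush slots, three times that for compactions), and the larger
// compaction share again only shows up once the write controller reports
// pressure via the stop token.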
TEST_F(DBOptionsTest, SetBackgroundJobs) {
  Options options;
  options.create_if_missing = true;
  options.max_background_jobs = 8;
  options.env = env_;
  Reopen(options);

  for (int i = 0; i < 2; ++i) {
    if (i > 0) {
      options.max_background_jobs = 12;
      ASSERT_OK(dbfull()->SetDBOptions(
          {{"max_background_jobs",
            std::to_string(options.max_background_jobs)}}));
    }

    const int expected_max_flushes = options.max_background_jobs / 4;

    ASSERT_EQ(expected_max_flushes, dbfull()->TEST_BGFlushesAllowed());
    ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());

    auto stop_token = dbfull()->TEST_write_controler().GetStopToken();

    const int expected_max_compactions = 3 * expected_max_flushes;

    ASSERT_EQ(expected_max_flushes, dbfull()->TEST_BGFlushesAllowed());
    ASSERT_EQ(expected_max_compactions, dbfull()->TEST_BGCompactionsAllowed());

    ASSERT_EQ(expected_max_flushes,
              env_->GetBackgroundThreads(Env::Priority::HIGH));
    ASSERT_EQ(expected_max_compactions,
              env_->GetBackgroundThreads(Env::Priority::LOW));
  }
}
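
// With the WAL disabled, unflushed data survives a reopen only if the DB
// flushes its memtables on shutdown; setting avoid_flush_during_shutdown=true
// therefore makes the second "foo" write disappear after the reopen below.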
TEST_F(DBOptionsTest, AvoidFlushDuringShutdown) {
  Options options;
  options.create_if_missing = true;
  options.disable_auto_compactions = true;
  options.env = env_;
  WriteOptions write_without_wal;
  write_without_wal.disableWAL = true;

  ASSERT_FALSE(options.avoid_flush_during_shutdown);
  DestroyAndReopen(options);
  ASSERT_OK(Put("foo", "v1", write_without_wal));
  Reopen(options);
  ASSERT_EQ("v1", Get("foo"));
  ASSERT_EQ("1", FilesPerLevel());

  DestroyAndReopen(options);
  ASSERT_OK(Put("foo", "v2", write_without_wal));
  ASSERT_OK(dbfull()->SetDBOptions({{"avoid_flush_during_shutdown", "true"}}));
  Reopen(options);
  ASSERT_EQ("NOT_FOUND", Get("foo"));
  ASSERT_EQ("", FilesPerLevel());
}

TEST_F(DBOptionsTest, SetDelayedWriteRateOption) {
  Options options;
  options.create_if_missing = true;
  options.delayed_write_rate = 2 * 1024U * 1024U;
  options.env = env_;
  Reopen(options);
  ASSERT_EQ(2 * 1024U * 1024U,
            dbfull()->TEST_write_controler().max_delayed_write_rate());

  ASSERT_OK(dbfull()->SetDBOptions({{"delayed_write_rate", "20000"}}));
  ASSERT_EQ(20000, dbfull()->TEST_write_controler().max_delayed_write_rate());
}
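
// Shrinking max_total_wal_size far below the data already in the live WAL
// should force a WAL switch and flush every column family that still has
// entries in the old log, hence the expectation of one L0 file per CF below.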
TEST_F(DBOptionsTest, MaxTotalWalSizeChange) {
  Random rnd(1044);
  const auto value_size = size_t(1024);
  std::string value;
  test::RandomString(&rnd, value_size, &value);

  Options options;
  options.create_if_missing = true;
  options.env = env_;
  CreateColumnFamilies({"1", "2", "3"}, options);
  ReopenWithColumnFamilies({"default", "1", "2", "3"}, options);

  WriteOptions write_options;

  const int key_count = 100;
  for (int i = 0; i < key_count; ++i) {
    for (size_t cf = 0; cf < handles_.size(); ++cf) {
      ASSERT_OK(Put(static_cast<int>(cf), Key(i), value));
    }
  }
  ASSERT_OK(dbfull()->SetDBOptions({{"max_total_wal_size", "10"}}));

  for (size_t cf = 0; cf < handles_.size(); ++cf) {
    dbfull()->TEST_WaitForFlushMemTable(handles_[cf]);
    ASSERT_EQ("1", FilesPerLevel(static_cast<int>(cf)));
  }
}

TEST_F(DBOptionsTest, SetStatsDumpPeriodSec) {
  Options options;
  options.create_if_missing = true;
  options.stats_dump_period_sec = 5;
  options.env = env_;
  Reopen(options);
  ASSERT_EQ(5u, dbfull()->GetDBOptions().stats_dump_period_sec);

  for (int i = 0; i < 20; i++) {
    unsigned int num = rand() % 5000 + 1;
    ASSERT_OK(
        dbfull()->SetDBOptions({{"stats_dump_period_sec", ToString(num)}}));
    ASSERT_EQ(num, dbfull()->GetDBOptions().stats_dump_period_sec);
  }
  Close();
}

TEST_F(DBOptionsTest, SetOptionsStatsPersistPeriodSec) {
  Options options;
  options.create_if_missing = true;
  options.stats_persist_period_sec = 5;
  options.env = env_;
  Reopen(options);
  ASSERT_EQ(5u, dbfull()->GetDBOptions().stats_persist_period_sec);

  ASSERT_OK(dbfull()->SetDBOptions({{"stats_persist_period_sec", "12345"}}));
  ASSERT_EQ(12345u, dbfull()->GetDBOptions().stats_persist_period_sec);
  ASSERT_NOK(dbfull()->SetDBOptions({{"stats_persist_period_sec", "abcde"}}));
  ASSERT_EQ(12345u, dbfull()->GetDBOptions().stats_persist_period_sec);
}

static void assert_candidate_files_empty(DBImpl* dbfull, const bool empty) {
  dbfull->TEST_LockMutex();
  JobContext job_context(0);
  dbfull->FindObsoleteFiles(&job_context, false);
  ASSERT_EQ(empty, job_context.full_scan_candidate_files.empty());
  dbfull->TEST_UnlockMutex();
  if (job_context.HaveSomethingToDelete()) {
    // Fulfill the contract of FindObsoleteFiles by calling PurgeObsoleteFiles
    // afterwards; otherwise the test may hang on shutdown.
    dbfull->PurgeObsoleteFiles(job_context);
  }
  job_context.Clean();
}
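
// DeleteObsoleteFilesPeriodChange uses SpecialEnv's addon_time_ as a mock
// clock: with a 20-microsecond period, the next full scan becomes due only
// once the simulated time has advanced past the period (21), not at exactly
// 20.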
TEST_F(DBOptionsTest, DeleteObsoleteFilesPeriodChange) {
  SpecialEnv env(env_);
  env.time_elapse_only_sleep_ = true;
  Options options;
  options.env = &env;
  options.create_if_missing = true;
  ASSERT_OK(TryReopen(options));

  // Verify that the candidate files set is empty when no full scan is
  // requested.
  assert_candidate_files_empty(dbfull(), true);

  ASSERT_OK(
      dbfull()->SetDBOptions({{"delete_obsolete_files_period_micros", "0"}}));

  // After delete_obsolete_files_period_micros is updated to 0, the next call
  // to FindObsoleteFiles should make a full scan.
  assert_candidate_files_empty(dbfull(), false);

  ASSERT_OK(
      dbfull()->SetDBOptions({{"delete_obsolete_files_period_micros", "20"}}));

  assert_candidate_files_empty(dbfull(), true);

  env.addon_time_.store(20);
  assert_candidate_files_empty(dbfull(), true);

  env.addon_time_.store(21);
  assert_candidate_files_empty(dbfull(), false);

  Close();
}

TEST_F(DBOptionsTest, MaxOpenFilesChange) {
  SpecialEnv env(env_);
  Options options;
  options.env = CurrentOptions().env;
  options.max_open_files = -1;

  Reopen(options);

  Cache* tc = dbfull()->TEST_table_cache();

  ASSERT_EQ(-1, dbfull()->GetDBOptions().max_open_files);
  ASSERT_LT(2000, tc->GetCapacity());
  ASSERT_OK(dbfull()->SetDBOptions({{"max_open_files", "1024"}}));
  ASSERT_EQ(1024, dbfull()->GetDBOptions().max_open_files);
  // Examine the table cache: with max_open_files = 1024 its capacity should
  // drop to 1014, since a handful of file slots are reserved for other uses.
  ASSERT_GT(1500, tc->GetCapacity());
  Close();
}
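
// A delayed_write_rate of 0 is sanitized at open time: it falls back to the
// rate limiter's configured rate when one is set, and to 16MB/s otherwise.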
TEST_F(DBOptionsTest, SanitizeDelayedWriteRate) {
  Options options;
  options.delayed_write_rate = 0;
  Reopen(options);
  ASSERT_EQ(16 * 1024 * 1024, dbfull()->GetDBOptions().delayed_write_rate);

  options.rate_limiter.reset(NewGenericRateLimiter(31 * 1024 * 1024));
  Reopen(options);
  ASSERT_EQ(31 * 1024 * 1024, dbfull()->GetDBOptions().delayed_write_rate);
}
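
// Under universal compaction, a nonzero ttl determines the sanitized
// periodic_compaction_seconds, whether the configured periodic value was 0 or
// larger than ttl.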
TEST_F(DBOptionsTest, SanitizeUniversalTTLCompaction) {
  Options options;
  options.compaction_style = kCompactionStyleUniversal;

  options.ttl = 0;
  options.periodic_compaction_seconds = 0;
  Reopen(options);
  ASSERT_EQ(0, dbfull()->GetOptions().ttl);
  ASSERT_EQ(0, dbfull()->GetOptions().periodic_compaction_seconds);

  options.ttl = 0;
  options.periodic_compaction_seconds = 100;
  Reopen(options);
  ASSERT_EQ(0, dbfull()->GetOptions().ttl);
  ASSERT_EQ(100, dbfull()->GetOptions().periodic_compaction_seconds);

  options.ttl = 100;
  options.periodic_compaction_seconds = 0;
  Reopen(options);
  ASSERT_EQ(100, dbfull()->GetOptions().ttl);
  ASSERT_EQ(100, dbfull()->GetOptions().periodic_compaction_seconds);

  options.ttl = 100;
  options.periodic_compaction_seconds = 500;
  Reopen(options);
  ASSERT_EQ(100, dbfull()->GetOptions().ttl);
  ASSERT_EQ(100, dbfull()->GetOptions().periodic_compaction_seconds);
}

TEST_F(DBOptionsTest, SanitizeTtlDefault) {
  Options options;
  Reopen(options);
  ASSERT_EQ(30 * 24 * 60 * 60, dbfull()->GetOptions().ttl);

  options.compaction_style = kCompactionStyleLevel;
  options.ttl = 0;
  Reopen(options);
  ASSERT_EQ(0, dbfull()->GetOptions().ttl);

  options.ttl = 100;
  Reopen(options);
  ASSERT_EQ(100, dbfull()->GetOptions().ttl);
}
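
// For FIFO compaction, ttl=0 is sanitized to the 30-day default, and when
// periodic_compaction_seconds is also set, the effective ttl becomes the
// smaller of the two values.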
TEST_F(DBOptionsTest, SanitizeFIFOPeriodicCompaction) {
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.ttl = 0;
  Reopen(options);
  ASSERT_EQ(30 * 24 * 60 * 60, dbfull()->GetOptions().ttl);

  options.ttl = 100;
  Reopen(options);
  ASSERT_EQ(100, dbfull()->GetOptions().ttl);

  options.ttl = 100 * 24 * 60 * 60;
  Reopen(options);
  ASSERT_EQ(100 * 24 * 60 * 60, dbfull()->GetOptions().ttl);

  options.ttl = 200;
  options.periodic_compaction_seconds = 300;
  Reopen(options);
  ASSERT_EQ(200, dbfull()->GetOptions().ttl);

  options.ttl = 500;
  options.periodic_compaction_seconds = 300;
  Reopen(options);
  ASSERT_EQ(300, dbfull()->GetOptions().ttl);
}

TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.write_buffer_size = 10 << 10;  // 10KB
  options.arena_block_size = 4096;
  options.compression = kNoCompression;
  options.create_if_missing = true;
  options.compaction_options_fifo.allow_compaction = false;
  env_->time_elapse_only_sleep_ = false;
  options.env = env_;

  // Test dynamically changing ttl.
  env_->addon_time_.store(0);
  options.ttl = 1 * 60 * 60;  // 1 hour
  ASSERT_OK(TryReopen(options));

  Random rnd(301);
  for (int i = 0; i < 10; i++) {
    // Generate and flush a file about 10KB.
    for (int j = 0; j < 10; j++) {
      ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
    }
    Flush();
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(NumTableFilesAtLevel(0), 10);

  // Add 61 seconds to the time.
  env_->addon_time_.fetch_add(61);

  // No files should be compacted as ttl is set to 1 hour.
  ASSERT_EQ(dbfull()->GetOptions().ttl, 3600);
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 10);

  // Set ttl to 1 minute. So all files should get deleted.
  ASSERT_OK(dbfull()->SetOptions({{"ttl", "60"}}));
  ASSERT_EQ(dbfull()->GetOptions().ttl, 60);
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);

  // Test dynamically changing compaction_options_fifo.max_table_files_size.
  env_->addon_time_.store(0);
  options.compaction_options_fifo.max_table_files_size = 500 << 10;  // 500KB
  options.ttl = 0;
  DestroyAndReopen(options);

  for (int i = 0; i < 10; i++) {
    // Generate and flush a file about 10KB.
    for (int j = 0; j < 10; j++) {
      ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
    }
    Flush();
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(NumTableFilesAtLevel(0), 10);

  // No files should be compacted as max_table_files_size is set to 500 KB.
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            500 << 10);
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 10);

  // Set max_table_files_size to 12 KB. So only 1 file should remain now.
  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_fifo", "{max_table_files_size=12288;}"}}));
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            12 << 10);
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);

  // Test dynamically changing compaction_options_fifo.allow_compaction.
  options.compaction_options_fifo.max_table_files_size = 500 << 10;  // 500KB
  options.ttl = 0;
  options.compaction_options_fifo.allow_compaction = false;
  options.level0_file_num_compaction_trigger = 6;
  DestroyAndReopen(options);

  for (int i = 0; i < 10; i++) {
    // Generate and flush a file about 10KB.
    for (int j = 0; j < 10; j++) {
      ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
    }
    Flush();
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(NumTableFilesAtLevel(0), 10);

  // No files should be compacted as max_table_files_size is set to 500 KB and
  // allow_compaction is false.
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            false);
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 10);

  // Set allow_compaction to true. So the number of files should be between 1
  // and 5.
  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_fifo", "{allow_compaction=true;}"}}));
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            true);
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_GE(NumTableFilesAtLevel(0), 1);
  ASSERT_LE(NumTableFilesAtLevel(0), 5);
}

TEST_F(DBOptionsTest, CompactionReadaheadSizeChange) {
  SpecialEnv env(env_);
  Options options;
  options.env = &env;

  options.compaction_readahead_size = 0;
  options.new_table_reader_for_compaction_inputs = true;
  options.level0_file_num_compaction_trigger = 2;
  const std::string kValue(1024, 'v');
  Reopen(options);

  ASSERT_EQ(0, dbfull()->GetDBOptions().compaction_readahead_size);
  ASSERT_OK(dbfull()->SetDBOptions({{"compaction_readahead_size", "256"}}));
  ASSERT_EQ(256, dbfull()->GetDBOptions().compaction_readahead_size);
  for (int i = 0; i < 1024; i++) {
    Put(Key(i), kValue);
  }
  Flush();
  for (int i = 0; i < 1024 * 2; i++) {
    Put(Key(i), kValue);
  }
  Flush();
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ(256, env_->compaction_readahead_size_);
  Close();
}

TEST_F(DBOptionsTest, FIFOTtlBackwardCompatible) {
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.write_buffer_size = 10 << 10;  // 10KB
  options.create_if_missing = true;

  ASSERT_OK(TryReopen(options));

  Random rnd(301);
  for (int i = 0; i < 10; i++) {
    // Generate and flush a file about 10KB.
    for (int j = 0; j < 10; j++) {
      ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
    }
    Flush();
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(NumTableFilesAtLevel(0), 10);

  // In release 6.0, ttl was promoted from a secondary level option under
  // compaction_options_fifo to a top level option under ColumnFamilyOptions.
  // We still need to handle old SetOptions calls but should ignore
  // ttl under compaction_options_fifo.
  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_fifo",
        "{allow_compaction=true;max_table_files_size=1024;ttl=731;}"},
       {"ttl", "60"}}));
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            true);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            1024);
  ASSERT_EQ(dbfull()->GetOptions().ttl, 60);

  // Put ttl as the first option inside compaction_options_fifo. That works as
  // it doesn't overwrite any other option.
  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_fifo",
        "{ttl=985;allow_compaction=true;max_table_files_size=1024;}"},
       {"ttl", "191"}}));
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            true);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            1024);
  ASSERT_EQ(dbfull()->GetOptions().ttl, 191);
}
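
// ChangeCompression records the output compression settings of each picked
// compaction through a sync point; the colon-separated
// bottommost_compression_opts string set below carries the new compression
// level (6), which is what the post-compaction assertions verify.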
TEST_F(DBOptionsTest, ChangeCompression) {
  if (!Snappy_Supported() || !LZ4_Supported()) {
    return;
  }
  Options options;
  options.write_buffer_size = 10 << 10;  // 10KB
  options.level0_file_num_compaction_trigger = 2;
  options.create_if_missing = true;
  options.compression = CompressionType::kLZ4Compression;
  options.bottommost_compression = CompressionType::kNoCompression;
  options.bottommost_compression_opts.level = 2;
  options.bottommost_compression_opts.parallel_threads = 1;

  ASSERT_OK(TryReopen(options));

  CompressionType compression_used = CompressionType::kLZ4Compression;
  CompressionOptions compression_opt_used;
  bool compacted = false;
  SyncPoint::GetInstance()->SetCallBack(
      "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) {
        Compaction* c = reinterpret_cast<Compaction*>(arg);
        compression_used = c->output_compression();
        compression_opt_used = c->output_compression_opts();
        compacted = true;
      });
  SyncPoint::GetInstance()->EnableProcessing();

  ASSERT_OK(Put("foo", "foofoofoo"));
  ASSERT_OK(Put("bar", "foofoofoo"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("foo", "foofoofoo"));
  ASSERT_OK(Put("bar", "foofoofoo"));
  ASSERT_OK(Flush());
  dbfull()->TEST_WaitForCompact();
  ASSERT_TRUE(compacted);
  ASSERT_EQ(CompressionType::kNoCompression, compression_used);
  ASSERT_EQ(options.compression_opts.level, compression_opt_used.level);
  ASSERT_EQ(options.compression_opts.parallel_threads,
            compression_opt_used.parallel_threads);

  compression_used = CompressionType::kLZ4Compression;
  compacted = false;
  ASSERT_OK(dbfull()->SetOptions(
      {{"bottommost_compression", "kSnappyCompression"},
       {"bottommost_compression_opts", "0:6:0:0:4:true"}}));
  ASSERT_OK(Put("foo", "foofoofoo"));
  ASSERT_OK(Put("bar", "foofoofoo"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("foo", "foofoofoo"));
  ASSERT_OK(Put("bar", "foofoofoo"));
  ASSERT_OK(Flush());
  dbfull()->TEST_WaitForCompact();
  ASSERT_TRUE(compacted);
  ASSERT_EQ(CompressionType::kSnappyCompression, compression_used);
  ASSERT_EQ(6, compression_opt_used.level);
  // Right now parallel_threads is not yet allowed to be changed.

  SyncPoint::GetInstance()->DisableProcessing();
}

#endif  // ROCKSDB_LITE

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}