Remove deprecated option `level_compaction_dynamic_file_size` (#12325)

Summary:
The option was introduced in https://github.com/facebook/rocksdb/issues/10655 to allow reverting to the old behavior. It is enabled by default and there has not been a need to disable it, so remove it for the 9.0 release. Also fixed and improved a few unit tests that depended on setting this option to false.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/12325

Test Plan: existing tests.

Reviewed By: hx235

Differential Revision: D53369430

Pulled By: cbi42

fbshipit-source-id: 0ec2440ca8d88db7f7211c581542c7581bd4d3de
commit ace1721b28 (parent 1d6dbfb8b7)
Changyu Bi, 2024-02-02 15:37:40 -08:00 (committed by Facebook GitHub Bot)
11 changed files with 72 additions and 129 deletions


@@ -350,11 +350,9 @@ Compaction::Compaction(
   // for the non-bottommost levels, it tries to build files match the target
   // file size, but not guaranteed. It could be 2x the size of the target size.
-  max_output_file_size_ =
-      bottommost_level_ || grandparents_.empty() ||
-              !_immutable_options.level_compaction_dynamic_file_size
-          ? target_output_file_size_
-          : 2 * target_output_file_size_;
+  max_output_file_size_ = bottommost_level_ || grandparents_.empty()
+                              ? target_output_file_size_
+                              : 2 * target_output_file_size_;
 #ifndef NDEBUG
   for (size_t i = 1; i < inputs_.size(); ++i) {
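With the option gone, the sizing rule above is unconditional: bottommost outputs (and compactions with no grandparent overlap) stay at the target size, while other levels may grow to 2x of it so files can be cut at grandparent boundaries. A standalone restatement of the rule, with a hypothetical helper name (not the RocksDB API):

#include <cstdint>

// Hypothetical restatement of the ternary above. Non-bottommost outputs that
// overlap the grandparent level may reach 2x the target size so compaction
// can cut them at grandparent file boundaries.
uint64_t MaxOutputFileSize(uint64_t target_output_file_size,
                           bool bottommost_level, bool has_grandparents) {
  return (bottommost_level || !has_grandparents)
             ? target_output_file_size
             : 2 * target_output_file_size;
}

// With target_file_size_base = 64 << 20, a non-bottommost compaction that
// overlaps the grandparent level may produce files up to 128 MB.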


@@ -1752,23 +1752,9 @@ TEST_F(CompactionJobTest, ResultSerialization) {
   }
 }
 
-class CompactionJobDynamicFileSizeTest
-    : public CompactionJobTestBase,
-      public ::testing::WithParamInterface<bool> {
- public:
-  CompactionJobDynamicFileSizeTest()
-      : CompactionJobTestBase(
-            test::PerThreadDBPath("compaction_job_dynamic_file_size_test"),
-            BytewiseComparator(), [](uint64_t /*ts*/) { return ""; },
-            /*test_io_priority=*/false, TableTypeForTest::kMockTable) {}
-};
-
-TEST_P(CompactionJobDynamicFileSizeTest, CutForMaxCompactionBytes) {
+TEST_F(CompactionJobTest, CutForMaxCompactionBytes) {
   // dynamic_file_size option should have no impact on cutting for max
   // compaction bytes.
-  bool enable_dyanmic_file_size = GetParam();
-  cf_options_.level_compaction_dynamic_file_size = enable_dyanmic_file_size;
   NewDB();
   mutable_cf_options_.target_file_size_base = 80;
   mutable_cf_options_.max_compaction_bytes = 21;

@@ -1842,10 +1828,7 @@ TEST_P(CompactionJobDynamicFileSizeTest, CutForMaxCompactionBytes) {
                 {expected_file1, expected_file2});
 }
 
-TEST_P(CompactionJobDynamicFileSizeTest, CutToSkipGrandparentFile) {
-  bool enable_dyanmic_file_size = GetParam();
-  cf_options_.level_compaction_dynamic_file_size = enable_dyanmic_file_size;
+TEST_F(CompactionJobTest, CutToSkipGrandparentFile) {
   NewDB();
   // Make sure the grandparent level file size (10) qualifies skipping.
   // Currently, it has to be > 1/8 of target file size.

@@ -1880,28 +1863,15 @@ TEST_P(CompactionJobDynamicFileSizeTest, CutToSkipGrandparentFile) {
       mock::MakeMockFile({{KeyStr("x", 4U, kTypeValue), "val"},
                           {KeyStr("z", 6U, kTypeValue), "val3"}});
 
-  auto expected_file_disable_dynamic_file_size =
-      mock::MakeMockFile({{KeyStr("a", 5U, kTypeValue), "val2"},
-                          {KeyStr("c", 3U, kTypeValue), "val"},
-                          {KeyStr("x", 4U, kTypeValue), "val"},
-                          {KeyStr("z", 6U, kTypeValue), "val3"}});
-
   SetLastSequence(6U);
   const std::vector<int> input_levels = {0, 1};
   auto lvl0_files = cfd_->current()->storage_info()->LevelFiles(0);
   auto lvl1_files = cfd_->current()->storage_info()->LevelFiles(1);
-  if (enable_dyanmic_file_size) {
-    RunCompaction({lvl0_files, lvl1_files}, input_levels,
-                  {expected_file1, expected_file2});
-  } else {
-    RunCompaction({lvl0_files, lvl1_files}, input_levels,
-                  {expected_file_disable_dynamic_file_size});
-  }
+  RunCompaction({lvl0_files, lvl1_files}, input_levels,
+                {expected_file1, expected_file2});
 }
 
-TEST_P(CompactionJobDynamicFileSizeTest, CutToAlignGrandparentBoundary) {
-  bool enable_dyanmic_file_size = GetParam();
-  cf_options_.level_compaction_dynamic_file_size = enable_dyanmic_file_size;
+TEST_F(CompactionJobTest, CutToAlignGrandparentBoundary) {
   NewDB();
 
   // MockTable has 1 byte per entry by default and each file is 10 bytes.

@@ -1968,40 +1938,15 @@ TEST_P(CompactionJobDynamicFileSizeTest, CutToAlignGrandparentBoundary) {
   }
   expected_file2.emplace_back(KeyStr("s", 4U, kTypeValue), "val");
 
-  mock::KVVector expected_file_disable_dynamic_file_size1;
-  for (char i = 0; i < 10; i++) {
-    expected_file_disable_dynamic_file_size1.emplace_back(
-        KeyStr(std::string(1, ch + i), i + 10, kTypeValue),
-        "val" + std::to_string(i));
-  }
-
-  mock::KVVector expected_file_disable_dynamic_file_size2;
-  for (char i = 10; i < 12; i++) {
-    expected_file_disable_dynamic_file_size2.emplace_back(
-        KeyStr(std::string(1, ch + i), i + 10, kTypeValue),
-        "val" + std::to_string(i));
-  }
-  expected_file_disable_dynamic_file_size2.emplace_back(
-      KeyStr("s", 4U, kTypeValue), "val");
-
   SetLastSequence(22U);
   const std::vector<int> input_levels = {0, 1};
   auto lvl0_files = cfd_->current()->storage_info()->LevelFiles(0);
   auto lvl1_files = cfd_->current()->storage_info()->LevelFiles(1);
-  if (enable_dyanmic_file_size) {
-    RunCompaction({lvl0_files, lvl1_files}, input_levels,
-                  {expected_file1, expected_file2});
-  } else {
-    RunCompaction({lvl0_files, lvl1_files}, input_levels,
-                  {expected_file_disable_dynamic_file_size1,
-                   expected_file_disable_dynamic_file_size2});
-  }
+  RunCompaction({lvl0_files, lvl1_files}, input_levels,
+                {expected_file1, expected_file2});
 }
 
-TEST_P(CompactionJobDynamicFileSizeTest, CutToAlignGrandparentBoundarySameKey) {
-  bool enable_dyanmic_file_size = GetParam();
-  cf_options_.level_compaction_dynamic_file_size = enable_dyanmic_file_size;
+TEST_F(CompactionJobTest, CutToAlignGrandparentBoundarySameKey) {
   NewDB();
 
   // MockTable has 1 byte per entry by default and each file is 10 bytes.

@@ -2038,13 +1983,9 @@ TEST_P(CompactionJobDynamicFileSizeTest, CutToAlignGrandparentBoundarySameKey) {
   AddMockFile(file5, 2);
 
   mock::KVVector expected_file1;
-  mock::KVVector expected_file_disable_dynamic_file_size;
-
   for (int i = 0; i < 8; i++) {
     expected_file1.emplace_back(KeyStr("a", 100 - i, kTypeValue),
                                 "val" + std::to_string(100 - i));
-    expected_file_disable_dynamic_file_size.emplace_back(
-        KeyStr("a", 100 - i, kTypeValue), "val" + std::to_string(100 - i));
   }
 
   // make sure `b` is cut in a separated file (so internally it's not using

@@ -2053,9 +1994,6 @@ TEST_P(CompactionJobDynamicFileSizeTest, CutToAlignGrandparentBoundarySameKey) {
   auto expected_file2 =
       mock::MakeMockFile({{KeyStr("b", 90U, kTypeValue), "valb"}});
-  expected_file_disable_dynamic_file_size.emplace_back(
-      KeyStr("b", 90U, kTypeValue), "valb");
-
   SetLastSequence(122U);
   const std::vector<int> input_levels = {0, 1};
   auto lvl0_files = cfd_->current()->storage_info()->LevelFiles(0);

@@ -2066,20 +2004,13 @@ TEST_P(CompactionJobDynamicFileSizeTest, CutToAlignGrandparentBoundarySameKey) {
   for (int i = 80; i <= 100; i++) {
     snapshots.emplace_back(i);
   }
-  if (enable_dyanmic_file_size) {
-    RunCompaction({lvl0_files, lvl1_files}, input_levels,
-                  {expected_file1, expected_file2}, snapshots);
-  } else {
-    RunCompaction({lvl0_files, lvl1_files}, input_levels,
-                  {expected_file_disable_dynamic_file_size}, snapshots);
-  }
+  RunCompaction({lvl0_files, lvl1_files}, input_levels,
+                {expected_file1, expected_file2}, snapshots);
 }
 
-TEST_P(CompactionJobDynamicFileSizeTest, CutForMaxCompactionBytesSameKey) {
+TEST_F(CompactionJobTest, CutForMaxCompactionBytesSameKey) {
   // dynamic_file_size option should have no impact on cutting for max
   // compaction bytes.
-  bool enable_dyanmic_file_size = GetParam();
-  cf_options_.level_compaction_dynamic_file_size = enable_dyanmic_file_size;
   NewDB();
   mutable_cf_options_.target_file_size_base = 80;

@@ -2136,9 +2067,6 @@ TEST_P(CompactionJobDynamicFileSizeTest, CutForMaxCompactionBytesSameKey) {
                 {expected_file1, expected_file2, expected_file3}, snapshots);
 }
 
-INSTANTIATE_TEST_CASE_P(CompactionJobDynamicFileSizeTest,
-                        CompactionJobDynamicFileSizeTest, testing::Bool());
-
 class CompactionJobTimestampTest : public CompactionJobTestBase {
  public:
   CompactionJobTimestampTest()
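With the option removed, the bool parameter has nothing left to toggle, so each TEST_P above collapses into a plain TEST_F on the base fixture and the INSTANTIATE_TEST_CASE_P registration goes away. A minimal sketch of the gtest pattern being deleted, using a hypothetical fixture name:

#include <gtest/gtest.h>

// Hypothetical bool-parameterized suite mirroring the removed
// CompactionJobDynamicFileSizeTest: each TEST_P body runs once per value
// supplied by the INSTANTIATE_* line, read back via GetParam().
class MyOptionTest : public ::testing::TestWithParam<bool> {};

TEST_P(MyOptionTest, Behaves) {
  bool option_enabled = GetParam();
  SUCCEED() << "ran with option_enabled=" << option_enabled;
}

INSTANTIATE_TEST_CASE_P(MyOptionTest, MyOptionTest, ::testing::Bool());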


@@ -320,7 +320,6 @@ bool CompactionOutputs::ShouldStopBefore(const CompactionIterator& c_iter) {
           being_grandparent_gap_ ? 2 : 3;
   if (compaction_->immutable_options()->compaction_style ==
           kCompactionStyleLevel &&
-      compaction_->immutable_options()->level_compaction_dynamic_file_size &&
       num_grandparent_boundaries_crossed >=
           num_skippable_boundaries_crossed &&
       grandparent_overlapped_bytes_ - previous_overlapped_bytes >

@@ -342,7 +341,6 @@ bool CompactionOutputs::ShouldStopBefore(const CompactionIterator& c_iter) {
   // improvement.
   if (compaction_->immutable_options()->compaction_style ==
           kCompactionStyleLevel &&
-      compaction_->immutable_options()->level_compaction_dynamic_file_size &&
       current_output_file_size_ >=
           ((compaction_->target_output_file_size() + 99) / 100) *
               (50 + std::min(grandparent_boundary_switched_num_ * 5,
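The second condition above is the now always-on dynamic cutting rule: a level-compaction output may be cut at a grandparent boundary once it reaches 50% of the target size, and every grandparent boundary switch seen so far raises that bar by 5 percentage points, capped at 90%. A worked restatement of the threshold with the same integer arithmetic (hypothetical helper, not the RocksDB function):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Hypothetical restatement of the expression above: roughly
// (50 + min(5 * boundary_switches, 40)) percent of the target size.
uint64_t EarlyCutThreshold(uint64_t target_output_file_size,
                           uint64_t grandparent_boundary_switched_num) {
  return ((target_output_file_size + 99) / 100) *
         (50 + std::min<uint64_t>(grandparent_boundary_switched_num * 5, 40));
}

int main() {
  // An 8 MB target with 2 boundary switches allows a cut at ~60% of the
  // target (~5 MB), which is why output sizes now land anywhere between
  // 50% and 200% of the target rather than exactly on it.
  std::printf("%llu\n",
              static_cast<unsigned long long>(EarlyCutThreshold(8 << 20, 2)));
  return 0;
}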


@@ -4720,7 +4720,6 @@ TEST_F(DBCompactionTest, LevelTtlCompactionOutputCuttingIteractingWithOther) {
   options.env = env_;
   options.target_file_size_base = 4 << 10;
   options.disable_auto_compactions = true;
-  options.level_compaction_dynamic_file_size = false;
   DestroyAndReopen(options);
 
   Random rnd(301);


@@ -1036,9 +1036,6 @@ TEST_F(DBRangeDelTest, CompactionTreatsSplitInputLevelDeletionAtomically) {
       test::NewSpecialSkipListFactory(2 /* num_entries_flush */));
   // max file size could be 2x of target file size, so set it to half of that
   options.target_file_size_base = kValueBytes / 2;
-  // disable dynamic_file_size, as it will cut L1 files into more files (than
-  // kNumFilesPerLevel).
-  options.level_compaction_dynamic_file_size = false;
   options.max_compaction_bytes = 1500;
   // i == 0: CompactFiles
   // i == 1: CompactRange

@@ -1107,14 +1104,9 @@ TEST_F(DBRangeDelTest, RangeTombstoneEndKeyAsSstableUpperBound) {
   options.level0_file_num_compaction_trigger = kNumFilesPerLevel;
   options.memtable_factory.reset(
       test::NewSpecialSkipListFactory(2 /* num_entries_flush */));
-  options.target_file_size_base = kValueBytes;
+  // Compaction can generate files of size at most 2 * target_file_size_base.
+  options.target_file_size_base = kValueBytes / 2;
   options.disable_auto_compactions = true;
-  // disable it for now, otherwise the L1 files are going be cut before data 1:
-  // L1: [0] [1,4]
-  // L2: [0,0]
-  // because the grandparent file is between [0]->[1] and it's size is more than
-  // 1/8 of target size (4k).
-  options.level_compaction_dynamic_file_size = false;
 
   DestroyAndReopen(options);

@@ -1154,6 +1146,13 @@ TEST_F(DBRangeDelTest, RangeTombstoneEndKeyAsSstableUpperBound) {
   // [key000000#1,1, key000000#1,1]
   MoveFilesToLevel(1);
   ASSERT_EQ(2, NumTableFilesAtLevel(1));
+  std::vector<std::vector<FileMetaData>> files;
+  dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files);
+  ASSERT_EQ(
+      files[1][0].largest.Encode(),
+      InternalKey(Key(2), kMaxSequenceNumber, kTypeRangeDeletion).Encode());
+  ASSERT_EQ(files[1][1].smallest.Encode(),
+            InternalKey(Key(2), 6, kTypeValue).Encode());
 
   {
     // Compact the second sstable in L1:

@@ -1172,6 +1171,12 @@ TEST_F(DBRangeDelTest, RangeTombstoneEndKeyAsSstableUpperBound) {
     ASSERT_EQ(1, NumTableFilesAtLevel(1));
     ASSERT_EQ(2, NumTableFilesAtLevel(2));
     ASSERT_EQ(value, Get(Key(2)));
+    dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files);
+    ASSERT_EQ(files[2][1].smallest.Encode(),
+              InternalKey(Key(2), 6, kTypeValue).Encode());
+    ASSERT_EQ(
+        files[2][1].largest.Encode(),
+        InternalKey(Key(4), kMaxSequenceNumber, kTypeRangeDeletion).Encode());
   }
 
   {

@@ -1190,6 +1195,20 @@ TEST_F(DBRangeDelTest, RangeTombstoneEndKeyAsSstableUpperBound) {
     ASSERT_OK(dbfull()->TEST_CompactRange(1, &begin, &begin));
     ASSERT_EQ(0, NumTableFilesAtLevel(1));
     ASSERT_EQ(3, NumTableFilesAtLevel(2));
+    dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files);
+    ASSERT_EQ(
+        files[2][0].largest.Encode(),
+        InternalKey(Key(1), kMaxSequenceNumber, kTypeRangeDeletion).Encode());
+    ASSERT_EQ(files[2][1].smallest.Encode(),
+              InternalKey(Key(1), 5, kTypeValue).Encode());
+    ASSERT_EQ(
+        files[2][1].largest.Encode(),
+        InternalKey(Key(2), kMaxSequenceNumber, kTypeRangeDeletion).Encode());
+    ASSERT_EQ(files[2][2].smallest.Encode(),
+              InternalKey(Key(2), 6, kTypeValue).Encode());
+    ASSERT_EQ(
+        files[2][2].largest.Encode(),
+        InternalKey(Key(4), kMaxSequenceNumber, kTypeRangeDeletion).Encode());
   }
 
   db_->ReleaseSnapshot(snapshot);

@@ -2309,13 +2328,13 @@ TEST_F(DBRangeDelTest, NonOverlappingTombstonAtBoundary) {
   // Test set up:
   // L1_0: 1, 3, [4, 7) L1_1: 6, 8, [4, 7)
   // L2: 5
+  // L1_0's largest key: Key(6)@kMaxSequenceNumber with type kTypeRangeDeletion
   // Note that [4, 7) is at end of L1_0 and not overlapping with any point key
-  // in L1_0. [4, 7) from L1_0 should cover 5 is sentinel works
+  // in L1_0. [4, 7) from L1_0 should cover 5 if sentinel in LevelIterator works
   Options options = CurrentOptions();
   options.compression = kNoCompression;
   options.disable_auto_compactions = true;
-  options.target_file_size_base = 2 * 1024;
-  options.level_compaction_dynamic_file_size = false;
+  options.target_file_size_base = 4 * 1024;
   DestroyAndReopen(options);
 
   Random rnd(301);

@@ -2335,6 +2354,7 @@ TEST_F(DBRangeDelTest, NonOverlappingTombstonAtBoundary) {
   ASSERT_OK(db_->Put(WriteOptions(), Key(1), rnd.RandomString(4 << 10)));
   ASSERT_OK(db_->Put(WriteOptions(), Key(3), rnd.RandomString(4 << 10)));
   // Prevent keys being compacted away
+  const Snapshot* snapshot = db_->GetSnapshot();
   ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), Key(4),
                              Key(7)));
   ASSERT_OK(db_->Flush(FlushOptions()));

@@ -2342,6 +2362,11 @@ TEST_F(DBRangeDelTest, NonOverlappingTombstonAtBoundary) {
   MoveFilesToLevel(1);
   ASSERT_EQ(2, NumTableFilesAtLevel(1));
 
+  std::vector<std::vector<FileMetaData>> files;
+  dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files);
+  InternalKey ik = InternalKey(Key(6), kMaxSequenceNumber, kTypeRangeDeletion);
+  ASSERT_EQ(files[1][0].largest.Encode(), ik.Encode());
+
   auto iter = db_->NewIterator(ReadOptions());
   iter->Seek(Key(3));
   ASSERT_TRUE(iter->Valid());

@@ -2361,6 +2386,7 @@ TEST_F(DBRangeDelTest, NonOverlappingTombstonAtBoundary) {
     ASSERT_EQ(get_perf_context()->internal_range_del_reseek_count, 1);
   }
   delete iter;
+  db_->ReleaseSnapshot(snapshot);
 }
 
 TEST_F(DBRangeDelTest, OlderLevelHasNewerData) {

@@ -3126,14 +3152,18 @@ TEST_F(DBRangeDelTest, RangetombesoneCompensateFilesizePersistDuringReopen) {
 TEST_F(DBRangeDelTest, SingleKeyFile) {
   // Test for a bug fix where a range tombstone could be added
   // to an SST file while is not within the file's key range.
-  // Create 3 files in L0 and then L1 where all keys have the same user key
-  // `Key(2)`. The middle file will contain Key(2)@6 and Key(2)@5. Before fix,
-  // the range tombstone [Key(2), Key(5))@2 would be added to this file during
-  // compaction, but it is not in this file's key range.
+  // Create 3 files in L0 and then compact them to L1 where all keys have the
+  // same user key `Key(2)`.
+  // L0_0: Key(2)@5
+  // L0_1: Key(2)@4
+  // L0_2: Key(2)@3, range tombstone [Key(2), Key(5))@2
+  //
+  // After compaction, the first output file contains Key(2)@5 and Key(2)@4.
+  // Before fix, the range tombstone [Key(2), Key(5))@2 would be added to this
+  // file during compaction, but it is not in this file's key range.
   Options opts = CurrentOptions();
   opts.disable_auto_compactions = true;
   opts.target_file_size_base = 1 << 10;
-  opts.level_compaction_dynamic_file_size = false;
   DestroyAndReopen(opts);
 
   // prevent range tombstone drop

@@ -3178,6 +3208,12 @@ TEST_F(DBRangeDelTest, SingleKeyFile) {
       std::numeric_limits<uint64_t>::max() /*max_file_num_to_ignore*/,
       "" /*trim_ts*/));
   ASSERT_EQ(2, NumTableFilesAtLevel(1));
+
+  std::vector<std::vector<FileMetaData>> files;
+  dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files);
+  ASSERT_EQ(files[1][0].largest.Encode(),
+            InternalKey(Key(2), 4, kTypeValue).Encode());
+
   for (const auto s : snapshots) {
     db_->ReleaseSnapshot(s);
   }

@@ -3248,13 +3284,12 @@ TEST_F(DBRangeDelTest, AddRangeDelsSameLowerAndUpperBound) {
   Options opts = CurrentOptions();
   opts.disable_auto_compactions = true;
   opts.target_file_size_base = 1 << 10;
-  opts.level_compaction_dynamic_file_size = false;
   DestroyAndReopen(opts);
 
   Random rnd(301);
   // Create file at bottommost level so the manual compaction below is
-  // non-bottommost level and goes through code path like compensate range
-  // tombstone size.
+  // non-bottommost level and goes through code path in
+  // versions->ApproximateSize() to calculate compensated range tombstone size
   ASSERT_OK(Put(Key(1), "v1"));
   ASSERT_OK(Put(Key(4), "v2"));
   ASSERT_OK(Flush());

@@ -3277,6 +3312,12 @@ TEST_F(DBRangeDelTest, AddRangeDelsSameLowerAndUpperBound) {
   // File 1: Key(1)@1, Key(3)@6, DeleteRange ends at Key(3)@6
   // File 2: Key(3)@4, Key(4)@7, DeleteRange start from Key(3)@4
   ASSERT_EQ(NumTableFilesAtLevel(1), 2);
+  std::vector<std::vector<FileMetaData>> files;
+  dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files);
+  ASSERT_EQ(files[1][0].largest.Encode(),
+            InternalKey(Key(3), 6, kTypeValue).Encode());
+  ASSERT_EQ(files[1][1].smallest.Encode(),
+            InternalKey(Key(3), 4, kTypeValue).Encode());
 
   // Manually update compaction output file cutting decisions
   // to cut before range tombstone sentinel Key(3)@4

@@ -3325,7 +3366,6 @@ TEST_F(DBRangeDelTest, AddRangeDelsSingleUserKeyTombstoneOnlyFile) {
   Options opts = CurrentOptions();
   opts.disable_auto_compactions = true;
   opts.target_file_size_base = 1 << 10;
-  opts.level_compaction_dynamic_file_size = false;
   DestroyAndReopen(opts);
 
   Random rnd(301);

@@ -3486,7 +3526,6 @@ TEST_F(DBRangeDelTest, NonBottommostCompactionDropRangetombstone) {
 TEST_F(DBRangeDelTest, MemtableMaxRangeDeletions) {
   // Tests option `memtable_max_range_deletions`.
   Options options = CurrentOptions();
-  options.level_compaction_dynamic_file_size = false;
   options.memtable_max_range_deletions = 50;
   options.level0_file_num_compaction_trigger = 5;
   DestroyAndReopen(options);
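The assertions added throughout this file pin down exact output file boundaries now that dynamic file cutting is always on. When a range tombstone extends past a file's last point key, the file's largest key is a sentinel: the tombstone end key at kMaxSequenceNumber with type kTypeRangeDeletion. A minimal sketch of that inspection pattern as it would appear inside a DBTestBase-style test body (level and key values are hypothetical):

// Sketch of the boundary-inspection pattern used by the new assertions.
std::vector<std::vector<FileMetaData>> files;
dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files);
const FileMetaData& f = files[/*level=*/1][/*file=*/0];
// A point-key boundary encodes (user key, sequence number, value type).
ASSERT_EQ(f.smallest.Encode(), InternalKey(Key(0), 1, kTypeValue).Encode());
// A range tombstone reaching past the last point key leaves a sentinel
// largest key at kMaxSequenceNumber with type kTypeRangeDeletion.
ASSERT_EQ(f.largest.Encode(),
          InternalKey(Key(2), kMaxSequenceNumber, kTypeRangeDeletion).Encode());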


@@ -547,17 +547,6 @@ struct AdvancedColumnFamilyOptions {
   // Default: true
   bool level_compaction_dynamic_level_bytes = true;
 
-  // DEPRECATED: This option might be removed in a future release.
-  //
-  // Allows RocksDB to generate files that are not exactly the target_file_size
-  // only for the non-bottommost files. Which can reduce the write-amplification
-  // from compaction. The file size could be from 0 to 2x target_file_size.
-  // Once enabled, non-bottommost compaction will try to cut the files align
-  // with the next level file boundaries (grandparent level).
-  //
-  // Default: true
-  bool level_compaction_dynamic_file_size = true;
-
   // Default: 10.
   //
   // Dynamically changeable through SetOptions() API


@@ -594,9 +594,7 @@ static std::unordered_map<std::string, OptionTypeInfo>
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone}},
         {"level_compaction_dynamic_file_size",
-         {offsetof(struct ImmutableCFOptions,
-                   level_compaction_dynamic_file_size),
-          OptionType::kBoolean, OptionVerificationType::kNormal,
+         {0, OptionType::kBoolean, OptionVerificationType::kDeprecated,
           OptionTypeFlags::kNone}},
         {"optimize_filters_for_hits",
          {offsetof(struct ImmutableCFOptions, optimize_filters_for_hits),

@@ -944,8 +942,6 @@ ImmutableCFOptions::ImmutableCFOptions(const ColumnFamilyOptions& cf_options)
       bloom_locality(cf_options.bloom_locality),
       level_compaction_dynamic_level_bytes(
           cf_options.level_compaction_dynamic_level_bytes),
-      level_compaction_dynamic_file_size(
-          cf_options.level_compaction_dynamic_file_size),
       num_levels(cf_options.num_levels),
       optimize_filters_for_hits(cf_options.optimize_filters_for_hits),
       force_consistency_checks(cf_options.force_consistency_checks),
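Note that the options-map entry is kept rather than deleted: its offset becomes 0 and its verification type becomes kDeprecated, so an options string or OPTIONS file that still names the option parses cleanly and the assigned value is dropped. A sketch of the expected user-visible behavior (assuming default ConfigOptions; not part of the patch):

#include <cassert>

#include "rocksdb/convenience.h"
#include "rocksdb/options.h"

// A kDeprecated entry keeps the option name parseable, so pre-9.0
// configuration strings should not start failing after the removal.
int main() {
  ROCKSDB_NAMESPACE::ConfigOptions config_options;
  ROCKSDB_NAMESPACE::ColumnFamilyOptions base, out;
  ROCKSDB_NAMESPACE::Status s =
      ROCKSDB_NAMESPACE::GetColumnFamilyOptionsFromString(
          config_options, base, "level_compaction_dynamic_file_size=false",
          &out);
  assert(s.ok());  // accepted as a no-op, not InvalidArgument
  return 0;
}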


@@ -64,8 +64,6 @@ struct ImmutableCFOptions {
   bool level_compaction_dynamic_level_bytes;
 
-  bool level_compaction_dynamic_file_size;
-
   int num_levels;
 
   bool optimize_filters_for_hits;


@@ -302,8 +302,6 @@ void UpdateColumnFamilyOptions(const ImmutableCFOptions& ioptions,
   cf_opts->bloom_locality = ioptions.bloom_locality;
   cf_opts->level_compaction_dynamic_level_bytes =
       ioptions.level_compaction_dynamic_level_bytes;
-  cf_opts->level_compaction_dynamic_file_size =
-      ioptions.level_compaction_dynamic_file_size;
   cf_opts->num_levels = ioptions.num_levels;
   cf_opts->optimize_filters_for_hits = ioptions.optimize_filters_for_hits;
   cf_opts->force_consistency_checks = ioptions.force_consistency_checks;


@@ -2451,7 +2451,6 @@ TEST_F(OptionsOldApiTest, GetOptionsFromMapTest) {
   ASSERT_EQ(new_cf_opt.target_file_size_multiplier, 13);
   ASSERT_EQ(new_cf_opt.max_bytes_for_level_base, 14U);
   ASSERT_EQ(new_cf_opt.level_compaction_dynamic_level_bytes, true);
-  ASSERT_EQ(new_cf_opt.level_compaction_dynamic_file_size, true);
   ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier, 15.0);
   ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional.size(), 3U);
   ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[0], 16);


@@ -0,0 +1 @@
+Removed deprecated option `ColumnFamilyOptions::level_compaction_dynamic_file_size`