Deprecate purge_redundant_kvs_while_flush

Summary: This option guards a feature implemented two and a half years ago in D8991. The feature was enabled by default back then and has been running without issues since. There is no reason any client would turn this feature off, and I found no reference to the option in fbcode.

Test Plan: none

Reviewers: sdong, yhchiang, anthony, dhruba

Reviewed By: dhruba

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D42063
This commit is contained in:
Igor Canadi 2015-07-14 13:07:02 +02:00
parent 5aea98ddd8
commit a9c5109515
11 changed files with 21 additions and 55 deletions

View file

@ -4,6 +4,7 @@
### Public API changes
* Deprecated WriteOptions::timeout_hint_us. We no longer support write timeout. If you really need this option, talk to us and we might consider returning it.
* Deprecated purge_redundant_kvs_while_flush option.
## 3.12.0 (7/2/2015)
### New Features

View file

@ -64,7 +64,7 @@ Status BuildTable(
// If the sequence number of the smallest entry in the memtable is
// smaller than the most recent snapshot, then we do not trigger
// removal of duplicate/deleted keys as part of this builder.
bool purge = ioptions.purge_redundant_kvs_while_flush;
bool purge = true;
if (earliest_seqno_in_memtable <= newest_snapshot) {
purge = false;
}

View file

@ -1674,10 +1674,9 @@ void rocksdb_options_set_manifest_preallocation_size(
opt->rep.manifest_preallocation_size = v;
}
// C-API setter: forwards `v` to the underlying
// ColumnFamilyOptions::purge_redundant_kvs_while_flush field.
// (This is the pre-deprecation implementation; the option is being
// deprecated in this commit and the setter becomes a no-op.)
void rocksdb_options_set_purge_redundant_kvs_while_flush(
rocksdb_options_t* opt, unsigned char v) {
opt->rep.purge_redundant_kvs_while_flush = v;
}
// Deprecated: purge_redundant_kvs_while_flush no longer has any effect.
// The setter is intentionally a no-op, kept only so existing callers of
// the C API continue to compile and link.
void rocksdb_options_set_purge_redundant_kvs_while_flush(rocksdb_options_t* opt,
unsigned char v) {}
void rocksdb_options_set_allow_os_buffer(rocksdb_options_t* opt,
unsigned char v) {

View file

@ -5806,11 +5806,7 @@ TEST_F(DBTest, DeletionMarkers1) {
Put(1, "foo", "v2");
ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, DEL, v1 ]");
ASSERT_OK(Flush(1)); // Moves to level last-2
if (CurrentOptions().purge_redundant_kvs_while_flush) {
ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]");
} else {
ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, DEL, v1 ]");
}
ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]");
Slice z("z");
dbfull()->TEST_CompactRange(last - 2, nullptr, &z, handles_[1]);
// DEL eliminated, but v1 remains because we aren't compacting that level
@ -6879,7 +6875,6 @@ TEST_F(DBTest, CompactOnFlush) {
options_override.skip_policy = kSkipNoSnapshot;
do {
Options options = CurrentOptions(options_override);
options.purge_redundant_kvs_while_flush = true;
options.disable_auto_compactions = true;
CreateAndReopenWithCF({"pikachu"}, options);

View file

@ -540,8 +540,8 @@ struct ColumnFamilyOptions {
// Dynamically changeable through SetOptions() API
bool disable_auto_compactions;
// Purge duplicate/deleted keys when a memtable is flushed to storage.
// Default: true
// DEPRECATED
// Does not have any effect.
bool purge_redundant_kvs_while_flush;
// The compaction style. Default: kCompactionStyleLevel

View file

@ -359,13 +359,6 @@ DEFINE_uint64(log2_keys_per_lock, 2, "Log2 of number of keys per lock");
static const bool FLAGS_log2_keys_per_lock_dummy __attribute__((unused)) =
RegisterFlagValidator(&FLAGS_log2_keys_per_lock, &ValidateUint32Range);
DEFINE_int32(purge_redundant_percent, 50,
"Percentage of times we want to purge redundant keys in memory "
"before flushing");
static const bool FLAGS_purge_redundant_percent_dummy __attribute__((unused)) =
RegisterFlagValidator(&FLAGS_purge_redundant_percent,
&ValidateInt32Percent);
DEFINE_bool(filter_deletes, false, "On true, deletes use KeyMayExist to drop"
" the delete if key not present");
@ -1812,8 +1805,6 @@ class StressTest {
fprintf(stdout, "Num times DB reopens: %d\n", FLAGS_reopen);
fprintf(stdout, "Batches/snapshots : %d\n",
FLAGS_test_batches_snapshots);
fprintf(stdout, "Purge redundant %% : %d\n",
FLAGS_purge_redundant_percent);
fprintf(stdout, "Deletes use filter : %d\n",
FLAGS_filter_deletes);
fprintf(stdout, "Do update in place : %d\n",
@ -1931,12 +1922,6 @@ class StressTest {
#endif // ROCKSDB_LITE
}
static Random purge_percent(1000); // no benefit from non-determinism here
if (static_cast<int32_t>(purge_percent.Uniform(100)) <
FLAGS_purge_redundant_percent - 1) {
options_.purge_redundant_kvs_while_flush = false;
}
if (FLAGS_use_merge) {
options_.merge_operator = MergeOperators::CreatePutOperator();
}

View file

@ -236,11 +236,7 @@ Options DBTestBase::CurrentOptions(
break;
case kManifestFileSize:
options.max_manifest_file_size = 50; // 50 bytes
case kCompactOnFlush:
options.purge_redundant_kvs_while_flush =
!options.purge_redundant_kvs_while_flush;
break;
case kPerfOptions:
case kPerfOptions:
options.soft_rate_limit = 2.0;
options.delayed_write_rate = 8 * 1024 * 1024;
// TODO(3.13) -- test more options

View file

@ -399,19 +399,18 @@ class DBTestBase : public testing::Test {
kDBLogDir = 14,
kWalDirAndMmapReads = 15,
kManifestFileSize = 16,
kCompactOnFlush = 17,
kPerfOptions = 18,
kDeletesFilterFirst = 19,
kHashSkipList = 20,
kUniversalCompaction = 21,
kUniversalCompactionMultiLevel = 22,
kCompressedBlockCache = 23,
kInfiniteMaxOpenFiles = 24,
kxxHashChecksum = 25,
kFIFOCompaction = 26,
kOptimizeFiltersForHits = 27,
kRowCache = 28,
kEnd = 29
kPerfOptions = 17,
kDeletesFilterFirst = 18,
kHashSkipList = 19,
kUniversalCompaction = 20,
kUniversalCompactionMultiLevel = 21,
kCompressedBlockCache = 22,
kInfiniteMaxOpenFiles = 23,
kxxHashChecksum = 24,
kFIFOCompaction = 25,
kOptimizeFiltersForHits = 26,
kRowCache = 27,
kEnd = 28
};
int option_config_;

View file

@ -407,8 +407,6 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
Warn(log, " Options.num_levels: %d", num_levels);
Warn(log, " Options.min_write_buffer_number_to_merge: %d",
min_write_buffer_number_to_merge);
Warn(log, " Options.purge_redundant_kvs_while_flush: %d",
purge_redundant_kvs_while_flush);
Warn(log, " Options.max_write_buffer_number_to_maintain: %d",
max_write_buffer_number_to_maintain);
Warn(log, " Options.compression_opts.window_bits: %d",
@ -461,8 +459,6 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
rate_limit_delay_max_milliseconds);
Warn(log, " Options.disable_auto_compactions: %d",
disable_auto_compactions);
Warn(log, " Options.purge_redundant_kvs_while_flush: %d",
purge_redundant_kvs_while_flush);
Warn(log, " Options.filter_deletes: %d",
filter_deletes);
Warn(log, " Options.verify_checksums_in_compaction: %d",

View file

@ -437,9 +437,6 @@ bool ParseColumnFamilyOption(const std::string& name, const std::string& value,
} else if (name == "level_compaction_dynamic_level_bytes") {
new_options->level_compaction_dynamic_level_bytes =
ParseBoolean(name, value);
} else if (name == "purge_redundant_kvs_while_flush") {
new_options->purge_redundant_kvs_while_flush =
ParseBoolean(name, value);
} else if (name == "compaction_style") {
new_options->compaction_style = ParseCompactionStyle(value);
} else if (name == "compaction_options_universal") {

View file

@ -124,7 +124,6 @@ TEST_F(OptionsTest, GetOptionsFromMapTest) {
{"hard_rate_limit", "2.1"},
{"arena_block_size", "22"},
{"disable_auto_compactions", "true"},
{"purge_redundant_kvs_while_flush", "1"},
{"compaction_style", "kCompactionStyleLevel"},
{"verify_checksums_in_compaction", "false"},
{"compaction_options_fifo", "23"},
@ -216,7 +215,6 @@ TEST_F(OptionsTest, GetOptionsFromMapTest) {
ASSERT_EQ(new_cf_opt.hard_rate_limit, 2.1);
ASSERT_EQ(new_cf_opt.arena_block_size, 22U);
ASSERT_EQ(new_cf_opt.disable_auto_compactions, true);
ASSERT_EQ(new_cf_opt.purge_redundant_kvs_while_flush, true);
ASSERT_EQ(new_cf_opt.compaction_style, kCompactionStyleLevel);
ASSERT_EQ(new_cf_opt.verify_checksums_in_compaction, false);
ASSERT_EQ(new_cf_opt.compaction_options_fifo.max_table_files_size,