From eff5e076f53e5bbe9a98a93c5a53e550d0580bb3 Mon Sep 17 00:00:00 2001
From: Maysam Yabandeh
Date: Fri, 10 Jan 2020 16:51:34 -0800
Subject: [PATCH] unordered_write incompatible with max_successive_merges (#6284)

Summary:
unordered_write is incompatible with non-zero max_successive_merges. Although
we check this at runtime, we currently don't prevent the user from setting
this combination in options. This has led stress tests to fail when this
combination is tried in ::SetOptions. The patch fixes that and also reverts
the changes performed by https://github.com/facebook/rocksdb/pull/6254, in
which max_successive_merges was mistakenly declared incompatible with
unordered_write.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/6284

Differential Revision: D19356115

Pulled By: maysamyabandeh

fbshipit-source-id: f06dadec777622bd75f267361c022735cf8cecb6
---
 db/column_family.cc                        | 10 +++++-----
 db/db_merge_operator_test.cc               |  2 --
 db/db_test2.cc                             |  2 --
 db/merge_test.cc                           |  3 ---
 test_util/testutil.cc                      |  3 +--
 utilities/transactions/transaction_test.cc |  2 +-
 6 files changed, 7 insertions(+), 15 deletions(-)

diff --git a/db/column_family.cc b/db/column_family.cc
index 05e0966bf9..c65ab90f74 100644
--- a/db/column_family.cc
+++ b/db/column_family.cc
@@ -160,11 +160,6 @@ Status CheckConcurrentWritesSupported(const ColumnFamilyOptions& cf_options) {
     return Status::InvalidArgument(
         "Memtable doesn't concurrent writes (allow_concurrent_memtable_write)");
   }
-  if (cf_options.max_successive_merges != 0) {
-    return Status::InvalidArgument(
-        "max_successive_merges > 0 is incompatible "
-        "with concurrent writes (allow_concurrent_memtable_write)");
-  }
   return Status::OK();
 }
 
@@ -1252,6 +1247,11 @@ Status ColumnFamilyData::ValidateOptions(
   if (s.ok() && db_options.allow_concurrent_memtable_write) {
     s = CheckConcurrentWritesSupported(cf_options);
   }
+  if (s.ok() && db_options.unordered_write &&
+      cf_options.max_successive_merges != 0) {
+    s = Status::InvalidArgument(
+        "max_successive_merges > 0 is incompatible with unordered_write");
+  }
   if (s.ok()) {
     s = CheckCFPathsSupported(db_options, cf_options);
   }
diff --git a/db/db_merge_operator_test.cc b/db/db_merge_operator_test.cc
index 114c7e8116..8358ddb56c 100644
--- a/db/db_merge_operator_test.cc
+++ b/db/db_merge_operator_test.cc
@@ -150,8 +150,6 @@ TEST_F(DBMergeOperatorTest, MergeErrorOnWrite) {
   options.create_if_missing = true;
   options.merge_operator.reset(new TestPutOperator());
   options.max_successive_merges = 3;
-  // allow_concurrent_memtable_write is incompatible with max_successive_merges
-  options.allow_concurrent_memtable_write = false;
   options.env = env_;
   Reopen(options);
   ASSERT_OK(Merge("k1", "v1"));
diff --git a/db/db_test2.cc b/db/db_test2.cc
index 8095330d2c..07ed8608b3 100644
--- a/db/db_test2.cc
+++ b/db/db_test2.cc
@@ -153,8 +153,6 @@ TEST_F(DBTest2, MaxSuccessiveMergesChangeWithDBRecovery) {
   options.create_if_missing = true;
   options.statistics = rocksdb::CreateDBStatistics();
   options.max_successive_merges = 3;
-  // allow_concurrent_memtable_write is incompatible with max_successive_merges
-  options.allow_concurrent_memtable_write = false;
   options.merge_operator = MergeOperators::CreatePutOperator();
   options.disable_auto_compactions = true;
   DestroyAndReopen(options);
diff --git a/db/merge_test.cc b/db/merge_test.cc
index 228cb5e302..2965045d9d 100644
--- a/db/merge_test.cc
+++ b/db/merge_test.cc
@@ -78,9 +78,6 @@ std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false,
   options.create_if_missing = true;
   options.merge_operator = std::make_shared<CountMergeOperator>();
   options.max_successive_merges = max_successive_merges;
-  if (max_successive_merges > 0) {
-    options.allow_concurrent_memtable_write = false;
-  }
   Status s;
   DestroyDB(dbname, Options());
   // DBWithTTL is not supported in ROCKSDB_LITE
diff --git a/test_util/testutil.cc b/test_util/testutil.cc
index a4be935f95..e309df088c 100644
--- a/test_util/testutil.cc
+++ b/test_util/testutil.cc
@@ -351,8 +351,7 @@ void RandomInitCFOptions(ColumnFamilyOptions* cf_opt, DBOptions& db_options,
   // size_t options
   cf_opt->arena_block_size = rnd->Uniform(10000);
   cf_opt->inplace_update_num_locks = rnd->Uniform(10000);
-  cf_opt->max_successive_merges =
-      db_options.allow_concurrent_memtable_write ? 0 : rnd->Uniform(10000);
+  cf_opt->max_successive_merges = rnd->Uniform(10000);
   cf_opt->memtable_huge_page_size = rnd->Uniform(10000);
   cf_opt->write_buffer_size = rnd->Uniform(10000);
 
diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc
index 72fd0b5461..da1edd1857 100644
--- a/utilities/transactions/transaction_test.cc
+++ b/utilities/transactions/transaction_test.cc
@@ -5820,7 +5820,7 @@ TEST_P(TransactionTest, DuplicateKeys) {
     }  // do_rollback
   }  // do_prepare
 
-  if (!options.unordered_write && !options.allow_concurrent_memtable_write) {
+  if (!options.unordered_write) {
     // Also test with max_successive_merges > 0. max_successive_merges will not
     // affect our algorithm for duplicate key insertion but we add the test to
     // verify that.
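
Note (not part of the patch): a minimal sketch of how the new ValidateOptions
check surfaces to callers, assuming options are validated during DB::Open the
same way they are during ::SetOptions. The database path and the choice to
demonstrate via Open() rather than SetOptions() are illustrative assumptions,
not taken from the patch.

#include <cassert>
#include <iostream>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // The combination this patch rejects at option-validation time:
  // unordered_write together with a non-zero max_successive_merges.
  options.unordered_write = true;
  options.max_successive_merges = 3;

  rocksdb::DB* db = nullptr;
  // With the new check in ColumnFamilyData::ValidateOptions, Open() is
  // expected to fail up front with InvalidArgument instead of the
  // incompatibility only being caught later on the write path.
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/unordered_write_demo", &db);
  assert(s.IsInvalidArgument());
  std::cout << s.ToString() << std::endl;
  return 0;
}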