Mirror of https://github.com/facebook/rocksdb.git (synced 2024-11-26 07:30:54 +00:00)
OptimizeForSmallDb(): revert some options whose defaults were just changed
Summary: We changed the default values of max_open_files and max_file_opening_threads but didn't revert them in OptimizeForSmallDb().

Test Plan: Add a unit test

Reviewers: igor, yhchiang, IslamAbdelRahman

Reviewed By: IslamAbdelRahman

Subscribers: leveldb, andrewkr, dhruba

Differential Revision: https://reviews.facebook.net/D57675
commit e3c6ba37dd
parent 967476eaee
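For orientation, a minimal sketch of how an application might use the combined Options::OptimizeForSmallDb() helper that this change completes. The database path, the Put() call, and the error handling are illustrative assumptions, not part of this commit:

#include <cassert>
#include <rocksdb/db.h>
#include <rocksdb/options.h>

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Tune both the DBOptions and ColumnFamilyOptions parts for a small
  // (roughly sub-1GB) database, as documented in the header diff below.
  options.OptimizeForSmallDb();

  rocksdb::DB* db = nullptr;
  // "/tmp/small_db" is a placeholder path for this sketch.
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/small_db", &db);
  assert(s.ok());

  s = db->Put(rocksdb::WriteOptions(), "key", "value");
  assert(s.ok());
  delete db;
  return 0;
}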
@@ -824,6 +824,10 @@ struct DBOptions {
   // Some functions that make it easier to optimize RocksDB

+  // Use this if your DB is very small (like under 1GB) and you don't want to
+  // spend lots of memory for memtables.
+  DBOptions* OptimizeForSmallDb();
+
 #ifndef ROCKSDB_LITE
   // By default, RocksDB uses only one background thread for flush and
   // compaction. Calling this function will set it up such that total of
@@ -898,7 +902,7 @@ struct DBOptions {
   // If max_open_files is -1, DB will open all files on DB::Open(). You can
   // use this option to increase the number of threads used to open the files.
-  // Default: 1
+  // Default: 16
   int max_file_opening_threads;

   // Once write-ahead logs exceed this size, we will start forcing the flush of
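The comment in this hunk explains how max_file_opening_threads interacts with max_open_files = -1. A small configuration sketch; the explicit value of 16 simply mirrors the restored default and is not a recommendation from this commit:

#include <rocksdb/options.h>

rocksdb::Options MakeOpenAllFilesOptions() {
  rocksdb::Options options;
  // Keep every table file open for the lifetime of the DB...
  options.max_open_files = -1;
  // ...and let DB::Open() use several threads to open those files up front.
  options.max_file_opening_threads = 16;
  return options;
}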
@@ -1343,6 +1347,8 @@ struct Options : public DBOptions, public ColumnFamilyOptions {
   void DumpCFOptions(Logger* log) const;

+  // Some functions that make it easier to optimize RocksDB
+
   // Set appropriate parameters for bulk loading.
   // The reason that this is a function that returns "this" instead of a
   // constructor is to enable chaining of multiple similar calls in the future.
@@ -1352,6 +1358,10 @@ struct Options : public DBOptions, public ColumnFamilyOptions {
   // It's recommended to manually call CompactRange(NULL, NULL) before reading
   // from the database, because otherwise the read can be very slow.
   Options* PrepareForBulkLoad();

+  // Use this if your DB is very small (like under 1GB) and you don't want to
+  // spend lots of memory for memtables.
+  Options* OptimizeForSmallDb();
+
 };

 //
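The header comments above note that these helpers return "this" rather than acting as constructors so that calls can be chained. A sketch of that pattern; IncreaseParallelism() is an existing DBOptions helper (likely the one whose comment is truncated in the first hunk), and the thread count is an arbitrary illustration rather than anything this commit sets:

#include <rocksdb/options.h>

rocksdb::Options MakeTunedOptions() {
  rocksdb::Options options;
  // OptimizeForSmallDb() returns `this`, so further option helpers can be
  // chained onto the same statement.
  options.OptimizeForSmallDb()->IncreaseParallelism(2);
  return options;
}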
@@ -668,6 +668,12 @@ Options::PrepareForBulkLoad()
   return this;
 }

+Options* Options::OptimizeForSmallDb() {
+  ColumnFamilyOptions::OptimizeForSmallDb();
+  DBOptions::OptimizeForSmallDb();
+  return this;
+}
+
 Options* Options::OldDefaults(int rocksdb_major_version,
                               int rocksdb_minor_version) {
   ColumnFamilyOptions::OldDefaults(rocksdb_major_version,
@@ -705,6 +711,12 @@ ColumnFamilyOptions* ColumnFamilyOptions::OldDefaults(
 }

 // Optimization functions
+DBOptions* DBOptions::OptimizeForSmallDb() {
+  max_file_opening_threads = 1;
+  max_open_files = 5000;
+  return this;
+}
+
 ColumnFamilyOptions* ColumnFamilyOptions::OptimizeForSmallDb() {
   write_buffer_size = 2 << 20;
   target_file_size_base = 2 * 1048576;
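The implementations above show that Options::OptimizeForSmallDb() simply delegates to the DBOptions and ColumnFamilyOptions variants. Code that manages column families explicitly can call the two halves separately; a sketch assuming a single default column family and a placeholder path, neither of which comes from this commit:

#include <cassert>
#include <vector>
#include <rocksdb/db.h>
#include <rocksdb/options.h>

int main() {
  rocksdb::DBOptions db_opts;
  db_opts.create_if_missing = true;
  db_opts.OptimizeForSmallDb();  // per this diff: max_open_files = 5000, one file-opening thread

  rocksdb::ColumnFamilyOptions cf_opts;
  cf_opts.OptimizeForSmallDb();  // per this diff: 2MB write buffers, small target file size

  std::vector<rocksdb::ColumnFamilyDescriptor> cf_descs = {
      {rocksdb::kDefaultColumnFamilyName, cf_opts}};
  std::vector<rocksdb::ColumnFamilyHandle*> handles;
  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(db_opts, "/tmp/small_db", cf_descs, &handles, &db);
  assert(s.ok());

  for (auto* handle : handles) {
    delete handle;
  }
  delete db;
  return 0;
}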
@@ -1315,9 +1315,10 @@ TEST_F(OptionsParserTest, DifferentDefault) {
               old_default_cf_opts.compaction_pri);
   }

-  ColumnFamilyOptions cf_small_opts;
-  cf_small_opts.OptimizeForSmallDb();
-  ASSERT_EQ(2 << 20, cf_small_opts.write_buffer_size);
+  Options small_opts;
+  small_opts.OptimizeForSmallDb();
+  ASSERT_EQ(2 << 20, small_opts.write_buffer_size);
+  ASSERT_EQ(5000, small_opts.max_open_files);
 }

 class OptionsSanityCheckTest : public OptionsParserTest {