// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <cstring>

#include "options/options_helper.h"
#include "rocksdb/convenience.h"
#include "test_util/testharness.h"

#ifndef GFLAGS
bool FLAGS_enable_print = false;
#else
#include "util/gflags_compat.h"
using GFLAGS_NAMESPACE::ParseCommandLineFlags;
DEFINE_bool(enable_print, false, "Print options generated to console.");
#endif  // GFLAGS

namespace rocksdb {

// Verify options are settable from option strings.
// The approach depends on the compiler behavior that a copy constructor
// does not touch implicit padding bytes, which makes the test fragile.
// As a result, we only run the tests that verify new option fields are
// settable through strings on a limited set of platforms, since the outcome
// depends on compiler behavior.
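//
// Illustrative (hypothetical) example of where implicit padding comes from;
// the struct below is not used by the tests:
//
//   struct Example {
//     bool flag;    // 1 byte
//                   // typically 7 bytes of implicit padding here
//     uint64_t id;  // 8 bytes
//   };
//
// The C++ standard does not require copy construction or assignment to
// preserve those padding bytes, which is why these tests are gated to
// specific compilers and platforms.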
#ifndef ROCKSDB_LITE
#if defined OS_LINUX || defined OS_WIN
#ifndef __clang__

class OptionsSettableTest : public testing::Test {
 public:
  OptionsSettableTest() {}
};

const char kSpecialChar = 'z';
typedef std::vector<std::pair<size_t, size_t>> OffsetGap;

void FillWithSpecialChar(char* start_ptr, size_t total_size,
                         const OffsetGap& blacklist) {
  size_t offset = 0;
  for (auto& pair : blacklist) {
    std::memset(start_ptr + offset, kSpecialChar, pair.first - offset);
    offset = pair.first + pair.second;
  }
  std::memset(start_ptr + offset, kSpecialChar, total_size - offset);
}
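
// Illustrative example with hypothetical values: given total_size = 16 and
// blacklist {{4, 8}}, FillWithSpecialChar() sets bytes [0, 4) and [12, 16)
// to kSpecialChar and leaves the blacklisted range [4, 12) untouched, since
// that range is assumed to hold a field that must not be byte-filled.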

int NumUnsetBytes(char* start_ptr, size_t total_size,
                  const OffsetGap& blacklist) {
  int total_unset_bytes_base = 0;
  size_t offset = 0;
  for (auto& pair : blacklist) {
    for (char* ptr = start_ptr + offset; ptr < start_ptr + pair.first; ptr++) {
      if (*ptr == kSpecialChar) {
        total_unset_bytes_base++;
      }
    }
    offset = pair.first + pair.second;
  }
  for (char* ptr = start_ptr + offset; ptr < start_ptr + total_size; ptr++) {
    if (*ptr == kSpecialChar) {
      total_unset_bytes_base++;
    }
  }
  return total_unset_bytes_base;
}
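
// After FillWithSpecialChar(), placement-new construction, and copy
// assignment from a default-constructed struct, the bytes still equal to
// kSpecialChar are exactly the implicit padding bytes (assuming the compiler
// does not touch padding during the copy), which is what the tests below
// count and compare.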

// If this test fails, a new option was likely added to BlockBasedTableOptions
// that cannot be set through GetBlockBasedTableOptionsFromString(), or the
// test was not updated accordingly.
// After adding an option, make sure it is settable by
// GetBlockBasedTableOptionsFromString() and add it to the input string
// passed to GetBlockBasedTableOptionsFromString() in this test.
// If it is a complicated type, also add the field to kBbtoBlacklist, and
// possibly add customized verification for it.
TEST_F(OptionsSettableTest, BlockBasedTableOptionsAllFieldsSettable) {
  // Items are in the form of <offset, size>. They need to be in ascending
  // order and non-overlapping. This needs to be updated whenever a new
  // pointer-typed option is added.
  const OffsetGap kBbtoBlacklist = {
      {offsetof(struct BlockBasedTableOptions, flush_block_policy_factory),
       sizeof(std::shared_ptr<FlushBlockPolicyFactory>)},
      {offsetof(struct BlockBasedTableOptions, block_cache),
       sizeof(std::shared_ptr<Cache>)},
      {offsetof(struct BlockBasedTableOptions, persistent_cache),
       sizeof(std::shared_ptr<PersistentCache>)},
      {offsetof(struct BlockBasedTableOptions, block_cache_compressed),
       sizeof(std::shared_ptr<Cache>)},
      {offsetof(struct BlockBasedTableOptions, filter_policy),
       sizeof(std::shared_ptr<const FilterPolicy>)},
  };
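
  // The blacklisted fields above hold smart pointers and other non-trivial
  // objects that cannot be meaningfully byte-filled or byte-compared, so
  // FillWithSpecialChar() and NumUnsetBytes() skip those byte ranges.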

  // In this test, we catch a new option of BlockBasedTableOptions that is not
  // settable through GetBlockBasedTableOptionsFromString().
  // We count the padding bytes of the option struct, and assert that it is
  // the same as the number of unset bytes of an option struct initialized by
  // GetBlockBasedTableOptionsFromString().

  char* bbto_ptr = new char[sizeof(BlockBasedTableOptions)];

  // Count padding bytes by setting all bytes in the memory to a special char,
  // copying a well-constructed struct into this memory, and seeing how many
  // special bytes are left.
  BlockBasedTableOptions* bbto = new (bbto_ptr) BlockBasedTableOptions();
  FillWithSpecialChar(bbto_ptr, sizeof(BlockBasedTableOptions), kBbtoBlacklist);
  // This relies on the compiler behavior that padding bytes are not changed
  // when copying the struct. It is prone to failure if compiler behavior
  // changes. We verify that there are unset bytes to detect that case.
  *bbto = BlockBasedTableOptions();
  int unset_bytes_base =
      NumUnsetBytes(bbto_ptr, sizeof(BlockBasedTableOptions), kBbtoBlacklist);
  ASSERT_GT(unset_bytes_base, 0);
  bbto->~BlockBasedTableOptions();

  // Construct the base option passed into
  // GetBlockBasedTableOptionsFromString().
  bbto = new (bbto_ptr) BlockBasedTableOptions();
  FillWithSpecialChar(bbto_ptr, sizeof(BlockBasedTableOptions), kBbtoBlacklist);
  // This option is not settable:
  bbto->use_delta_encoding = true;

  char* new_bbto_ptr = new char[sizeof(BlockBasedTableOptions)];
  BlockBasedTableOptions* new_bbto =
      new (new_bbto_ptr) BlockBasedTableOptions();
  FillWithSpecialChar(new_bbto_ptr, sizeof(BlockBasedTableOptions),
                      kBbtoBlacklist);

  // Need to update the option string if a new option is added.
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      *bbto,
      "cache_index_and_filter_blocks=1;"
      "cache_index_and_filter_blocks_with_high_priority=true;"
      "pin_l0_filter_and_index_blocks_in_cache=1;"
      "pin_top_level_index_and_filter=1;"
      "index_type=kHashSearch;"
      "data_block_index_type=kDataBlockBinaryAndHash;"
      "index_shortening=kNoShortening;"
      "data_block_hash_table_util_ratio=0.75;"
      "checksum=kxxHash;no_block_cache=1;"
      "block_cache=1M;block_cache_compressed=1k;block_size=1024;"
      "block_size_deviation=8;block_restart_interval=4; "
      "metadata_block_size=1024;"
      "partition_filters=false;"
      "index_block_restart_interval=4;"
      "filter_policy=bloomfilter:4:true;whole_key_filtering=1;"
      "format_version=1;"
      "hash_index_allow_collision=false;"
      "verify_compression=true;read_amp_bytes_per_bit=0;"
      "enable_index_compression=false;"
      "block_align=true",
      new_bbto));

  ASSERT_EQ(unset_bytes_base,
            NumUnsetBytes(new_bbto_ptr, sizeof(BlockBasedTableOptions),
                          kBbtoBlacklist));

  ASSERT_TRUE(new_bbto->block_cache.get() != nullptr);
  ASSERT_TRUE(new_bbto->block_cache_compressed.get() != nullptr);
  ASSERT_TRUE(new_bbto->filter_policy.get() != nullptr);

  bbto->~BlockBasedTableOptions();
  new_bbto->~BlockBasedTableOptions();

  delete[] bbto_ptr;
  delete[] new_bbto_ptr;
}
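
// Illustrative sketch only (FooFactory and foo_factory are hypothetical
// names, not real RocksDB types): a new pointer-typed option added to
// BlockBasedTableOptions would need a kBbtoBlacklist entry such as
//   {offsetof(struct BlockBasedTableOptions, foo_factory),
//    sizeof(std::shared_ptr<FooFactory>)},
// plus a corresponding "foo_factory=..." clause in the option string above.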

// If this test fails, a new option was likely added to DBOptions
// that cannot be set through GetDBOptionsFromString(), or the test was not
// updated accordingly.
// After adding an option, make sure it is settable by
// GetDBOptionsFromString() and add it to the input string passed to
// GetDBOptionsFromString() in this test.
// If it is a complicated type, also add the field to kDBOptionsBlacklist,
// and possibly add customized verification for it.
TEST_F(OptionsSettableTest, DBOptionsAllFieldsSettable) {
  const OffsetGap kDBOptionsBlacklist = {
      {offsetof(struct DBOptions, env), sizeof(Env*)},
      {offsetof(struct DBOptions, rate_limiter),
       sizeof(std::shared_ptr<RateLimiter>)},
      {offsetof(struct DBOptions, sst_file_manager),
       sizeof(std::shared_ptr<SstFileManager>)},
      {offsetof(struct DBOptions, info_log), sizeof(std::shared_ptr<Logger>)},
      {offsetof(struct DBOptions, statistics),
       sizeof(std::shared_ptr<Statistics>)},
      {offsetof(struct DBOptions, db_paths), sizeof(std::vector<DbPath>)},
      {offsetof(struct DBOptions, db_log_dir), sizeof(std::string)},
      {offsetof(struct DBOptions, wal_dir), sizeof(std::string)},
      {offsetof(struct DBOptions, write_buffer_manager),
       sizeof(std::shared_ptr<WriteBufferManager>)},
      {offsetof(struct DBOptions, listeners),
       sizeof(std::vector<std::shared_ptr<EventListener>>)},
      {offsetof(struct DBOptions, row_cache), sizeof(std::shared_ptr<Cache>)},
      {offsetof(struct DBOptions, wal_filter), sizeof(const WalFilter*)},
  };

  char* options_ptr = new char[sizeof(DBOptions)];

  // Count padding bytes by setting all bytes in the memory to a special char,
  // copying a well-constructed struct into this memory, and seeing how many
  // special bytes are left.
  DBOptions* options = new (options_ptr) DBOptions();
  FillWithSpecialChar(options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);
  // This relies on the compiler behavior that padding bytes are not changed
  // when copying the struct. It is prone to failure if compiler behavior
  // changes. We verify that there are unset bytes to detect that case.
  *options = DBOptions();
  int unset_bytes_base =
      NumUnsetBytes(options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);
  ASSERT_GT(unset_bytes_base, 0);
  options->~DBOptions();

  options = new (options_ptr) DBOptions();
  FillWithSpecialChar(options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);

  char* new_options_ptr = new char[sizeof(DBOptions)];
  DBOptions* new_options = new (new_options_ptr) DBOptions();
  FillWithSpecialChar(new_options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);

  // Need to update the option string if a new option is added.
  ASSERT_OK(
      GetDBOptionsFromString(*options,
                             "wal_bytes_per_sync=4295048118;"
                             "delete_obsolete_files_period_micros=4294967758;"
                             "WAL_ttl_seconds=4295008036;"
                             "WAL_size_limit_MB=4295036161;"
                             "wal_dir=path/to/wal_dir;"
                             "db_write_buffer_size=2587;"
                             "max_subcompactions=64330;"
                             "table_cache_numshardbits=28;"
                             "max_open_files=72;"
                             "max_file_opening_threads=35;"
                             "max_background_jobs=8;"
                             "base_background_compactions=3;"
                             "max_background_compactions=33;"
                             "use_fsync=true;"
                             "use_adaptive_mutex=false;"
                             "max_total_wal_size=4295005604;"
                             "compaction_readahead_size=0;"
                             "new_table_reader_for_compaction_inputs=false;"
                             "keep_log_file_num=4890;"
                             "skip_stats_update_on_db_open=false;"
                             "max_manifest_file_size=4295009941;"
                             "db_log_dir=path/to/db_log_dir;"
                             "skip_log_error_on_recovery=true;"
                             "writable_file_max_buffer_size=1048576;"
                             "paranoid_checks=true;"
                             "is_fd_close_on_exec=false;"
                             "bytes_per_sync=4295013613;"
                             "strict_bytes_per_sync=true;"
                             "enable_thread_tracking=false;"
                             "recycle_log_file_num=0;"
                             "create_missing_column_families=true;"
                             "log_file_time_to_roll=3097;"
                             "max_background_flushes=35;"
                             "create_if_missing=false;"
                             "error_if_exists=true;"
                             "delayed_write_rate=4294976214;"
                             "manifest_preallocation_size=1222;"
                             "allow_mmap_writes=false;"
                             "stats_dump_period_sec=70127;"
                             "stats_persist_period_sec=54321;"
                             "persist_stats_to_disk=true;"
                             "stats_history_buffer_size=14159;"
                             "allow_fallocate=true;"
                             "allow_mmap_reads=false;"
                             "use_direct_reads=false;"
                             "use_direct_io_for_flush_and_compaction=false;"
                             "max_log_file_size=4607;"
                             "random_access_max_buffer_size=1048576;"
                             "advise_random_on_open=true;"
                             "fail_if_options_file_error=false;"
                             "enable_pipelined_write=false;"
                             "unordered_write=false;"
                             "allow_concurrent_memtable_write=true;"
                             "wal_recovery_mode=kPointInTimeRecovery;"
                             "enable_write_thread_adaptive_yield=true;"
                             "write_thread_slow_yield_usec=5;"
                             "write_thread_max_yield_usec=1000;"
                             "access_hint_on_compaction_start=NONE;"
                             "info_log_level=DEBUG_LEVEL;"
                             "dump_malloc_stats=false;"
                             "allow_2pc=false;"
                             "avoid_flush_during_recovery=false;"
                             "avoid_flush_during_shutdown=false;"
                             "allow_ingest_behind=false;"
                             "preserve_deletes=false;"
                             "concurrent_prepare=false;"
                             "two_write_queues=false;"
                             "manual_wal_flush=false;"
                             "seq_per_batch=false;"
                             "atomic_flush=false;"
                             "avoid_unnecessary_blocking_io=false;"
                             "log_readahead_size=0",
                             new_options));

  ASSERT_EQ(unset_bytes_base, NumUnsetBytes(new_options_ptr, sizeof(DBOptions),
                                            kDBOptionsBlacklist));

  options->~DBOptions();
  new_options->~DBOptions();

  delete[] options_ptr;
  delete[] new_options_ptr;
}

template <typename T1, typename T2>
inline int offset_of(T1 T2::*member) {
  static T2 obj;
  return int(size_t(&(obj.*member)) - size_t(&obj));
}
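
// Note: offsetof() is only guaranteed for standard-layout types, and
// ColumnFamilyOptions is not standard-layout (it derives from
// AdvancedColumnFamilyOptions), which is presumably why the test below
// approximates member offsets with offset_of(), e.g.
// offset_of(&ColumnFamilyOptions::comparator).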

// If this test fails, a new option was likely added to ColumnFamilyOptions
// that cannot be set through GetColumnFamilyOptionsFromString(), or the
// test was not updated accordingly.
// After adding an option, make sure it is settable by
// GetColumnFamilyOptionsFromString() and add it to the input
// string passed to GetColumnFamilyOptionsFromString() in this test.
// If it is a complicated type, also add the field to
// kColumnFamilyOptionsBlacklist, and possibly add customized verification
// for it.
TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
  // Options in the blacklist need to appear in the same order as in
  // ColumnFamilyOptions.
  const OffsetGap kColumnFamilyOptionsBlacklist = {
      {offset_of(&ColumnFamilyOptions::inplace_callback),
       sizeof(UpdateStatus(*)(char*, uint32_t*, Slice, std::string*))},
      {offset_of(
           &ColumnFamilyOptions::memtable_insert_with_hint_prefix_extractor),
       sizeof(std::shared_ptr<const SliceTransform>)},
      {offset_of(&ColumnFamilyOptions::compression_per_level),
       sizeof(std::vector<CompressionType>)},
      {offset_of(
           &ColumnFamilyOptions::max_bytes_for_level_multiplier_additional),
       sizeof(std::vector<int>)},
      {offset_of(&ColumnFamilyOptions::memtable_factory),
       sizeof(std::shared_ptr<MemTableRepFactory>)},
      {offset_of(&ColumnFamilyOptions::table_properties_collector_factories),
       sizeof(ColumnFamilyOptions::TablePropertiesCollectorFactories)},
      {offset_of(&ColumnFamilyOptions::comparator), sizeof(Comparator*)},
      {offset_of(&ColumnFamilyOptions::merge_operator),
       sizeof(std::shared_ptr<MergeOperator>)},
      {offset_of(&ColumnFamilyOptions::compaction_filter),
       sizeof(const CompactionFilter*)},
      {offset_of(&ColumnFamilyOptions::compaction_filter_factory),
       sizeof(std::shared_ptr<CompactionFilterFactory>)},
      {offset_of(&ColumnFamilyOptions::prefix_extractor),
       sizeof(std::shared_ptr<const SliceTransform>)},
      {offset_of(&ColumnFamilyOptions::table_factory),
       sizeof(std::shared_ptr<TableFactory>)},
      {offset_of(&ColumnFamilyOptions::cf_paths),
       sizeof(std::vector<DbPath>)},
      {offset_of(&ColumnFamilyOptions::compaction_thread_limiter),
       sizeof(std::shared_ptr<ConcurrentTaskLimiter>)},
  };

  char* options_ptr = new char[sizeof(ColumnFamilyOptions)];

  // Count padding bytes by setting all bytes in the memory to a special char,
  // copying a well-constructed struct into this memory, and seeing how many
  // special bytes are left.
  ColumnFamilyOptions* options = new (options_ptr) ColumnFamilyOptions();
  FillWithSpecialChar(options_ptr, sizeof(ColumnFamilyOptions),
                      kColumnFamilyOptionsBlacklist);
  // This relies on the compiler behavior that padding bytes are not changed
  // when copying the struct. It is prone to failure if compiler behavior
  // changes. We verify that there are unset bytes to detect that case.
  *options = ColumnFamilyOptions();

  // Deprecated option which is not initialized. Need to set it to avoid a
  // Valgrind error.
  options->max_mem_compaction_level = 0;

  int unset_bytes_base = NumUnsetBytes(options_ptr, sizeof(ColumnFamilyOptions),
                                       kColumnFamilyOptionsBlacklist);
  ASSERT_GT(unset_bytes_base, 0);
  options->~ColumnFamilyOptions();

  options = new (options_ptr) ColumnFamilyOptions();
  FillWithSpecialChar(options_ptr, sizeof(ColumnFamilyOptions),
                      kColumnFamilyOptionsBlacklist);

  // The following options are not settable through
  // GetColumnFamilyOptionsFromString():
  options->rate_limit_delay_max_milliseconds = 33;
  options->compaction_options_universal = CompactionOptionsUniversal();
  options->compression_opts = CompressionOptions();
  options->bottommost_compression_opts = CompressionOptions();
  options->hard_rate_limit = 0;
  options->soft_rate_limit = 0;
  options->purge_redundant_kvs_while_flush = false;
  options->max_mem_compaction_level = 0;
  options->compaction_filter = nullptr;

  char* new_options_ptr = new char[sizeof(ColumnFamilyOptions)];
  ColumnFamilyOptions* new_options =
      new (new_options_ptr) ColumnFamilyOptions();
  FillWithSpecialChar(new_options_ptr, sizeof(ColumnFamilyOptions),
                      kColumnFamilyOptionsBlacklist);

  // Need to update the option string if a new option is added.
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      *options,
      "compaction_filter_factory=mpudlojcujCompactionFilterFactory;"
      "table_factory=PlainTable;"
      "prefix_extractor=rocksdb.CappedPrefix.13;"
      "comparator=leveldb.BytewiseComparator;"
      "compression_per_level=kBZip2Compression:kBZip2Compression:"
      "kBZip2Compression:kNoCompression:kZlibCompression:kBZip2Compression:"
      "kSnappyCompression;"
      "max_bytes_for_level_base=986;"
      "snap_refresh_nanos=1000000000;"
      "bloom_locality=8016;"
      "target_file_size_base=4294976376;"
      "memtable_huge_page_size=2557;"
      "max_successive_merges=5497;"
      "max_sequential_skip_in_iterations=4294971408;"
      "arena_block_size=1893;"
      "target_file_size_multiplier=35;"
      "min_write_buffer_number_to_merge=9;"
      "max_write_buffer_number=84;"
      "write_buffer_size=1653;"
      "max_compaction_bytes=64;"
      "max_bytes_for_level_multiplier=60;"
      "memtable_factory=SkipListFactory;"
      "compression=kNoCompression;"
      "bottommost_compression=kDisableCompressionOption;"
      "level0_stop_writes_trigger=33;"
      "num_levels=99;"
      "level0_slowdown_writes_trigger=22;"
      "level0_file_num_compaction_trigger=14;"
      "compaction_filter=urxcqstuwnCompactionFilter;"
      "soft_rate_limit=530.615385;"
      "soft_pending_compaction_bytes_limit=0;"
      "max_write_buffer_number_to_maintain=84;"
      "max_write_buffer_size_to_maintain=2147483648;"
      "merge_operator=aabcxehazrMergeOperator;"
      "memtable_prefix_bloom_size_ratio=0.4642;"
      "memtable_whole_key_filtering=true;"
      "memtable_insert_with_hint_prefix_extractor=rocksdb.CappedPrefix.13;"
      "paranoid_file_checks=true;"
      "force_consistency_checks=true;"
      "inplace_update_num_locks=7429;"
      "optimize_filters_for_hits=false;"
      "level_compaction_dynamic_level_bytes=false;"
      "inplace_update_support=false;"
      "compaction_style=kCompactionStyleFIFO;"
      "compaction_pri=kMinOverlappingRatio;"
      "hard_pending_compaction_bytes_limit=0;"
      "disable_auto_compactions=false;"
      "report_bg_io_stats=true;"
      "ttl=60;"
      "periodic_compaction_seconds=3600;"
      "sample_for_compression=0;"
      "compaction_options_fifo={max_table_files_size=3;allow_"
      "compaction=false;};",
      new_options));

  ASSERT_EQ(unset_bytes_base,
            NumUnsetBytes(new_options_ptr, sizeof(ColumnFamilyOptions),
                          kColumnFamilyOptionsBlacklist));

  options->~ColumnFamilyOptions();
  new_options->~ColumnFamilyOptions();

  delete[] options_ptr;
  delete[] new_options_ptr;
}
#endif  // !__clang__
#endif  // OS_LINUX || OS_WIN
#endif  // !ROCKSDB_LITE

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
#ifdef GFLAGS
  ParseCommandLineFlags(&argc, &argv, true);
#endif  // GFLAGS
  return RUN_ALL_TESTS();
}