// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <cctype>
#include <cinttypes>
#include <cstring>
#include <unordered_map>

#include "cache/lru_cache.h"
#include "cache/sharded_cache.h"
#include "options/options_helper.h"
#include "options/options_parser.h"
#include "port/port.h"
#include "rocksdb/cache.h"
#include "rocksdb/convenience.h"
#include "rocksdb/file_checksum.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/utilities/leveldb_options.h"
#include "rocksdb/utilities/object_registry.h"
#include "rocksdb/utilities/options_type.h"
#include "table/block_based/filter_policy_internal.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/random.h"
#include "util/stderr_logger.h"
#include "util/string_util.h"
#include "utilities/merge_operators/bytesxor.h"
#include "utilities/merge_operators/sortlist.h"
#include "utilities/merge_operators/string_append/stringappend.h"
#include "utilities/merge_operators/string_append/stringappend2.h"

#ifndef GFLAGS
bool FLAGS_enable_print = false;
#else
#include "util/gflags_compat.h"
using GFLAGS_NAMESPACE::ParseCommandLineFlags;
DEFINE_bool(enable_print, false, "Print options generated to console.");
#endif  // GFLAGS

namespace ROCKSDB_NAMESPACE {

class OptionsTest : public testing::Test {};
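
// A stub TableFactory that is never registered with the object registry and
// whose hooks are inert: NewTableReader() returns Status::NotSupported() and
// NewTableBuilder() returns nullptr. Tests can use it to exercise
// options/serialization paths that encounter an unknown table factory.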
class UnregisteredTableFactory : public TableFactory {
 public:
  UnregisteredTableFactory() = default;
  const char* Name() const override { return "Unregistered"; }

  using TableFactory::NewTableReader;
  Status NewTableReader(const ReadOptions&, const TableReaderOptions&,
                        std::unique_ptr<RandomAccessFileReader>&&, uint64_t,
                        std::unique_ptr<TableReader>*, bool) const override {
    return Status::NotSupported();
  }

  TableBuilder* NewTableBuilder(const TableBuilderOptions&,
                                WritableFileWriter*) const override {
    return nullptr;
  }
};

TEST_F(OptionsTest, GetOptionsFromMapTest) {
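  // cf_options_map maps option names to the same string values an OPTIONS
  // file would carry; GetColumnFamilyOptionsFromMap() below parses them into
  // a ColumnFamilyOptions. Note the encodings exercised here (each verified
  // by the ASSERT_EQs further down): colon-separated lists such as
  // compression_per_level; colon-separated struct fields such as
  // compression_opts, where "4:5:6:7:8:2:true:100:false" sets window_bits=4,
  // level=5, strategy=6, max_dict_bytes=7, zstd_max_train_bytes=8,
  // parallel_threads=2, enabled=true, followed by dictionary-related fields;
  // and the {name=value;...} form for nested structs such as
  // compaction_options_fifo.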
  std::unordered_map<std::string, std::string> cf_options_map = {
      {"write_buffer_size", "1"},
      {"max_write_buffer_number", "2"},
      {"min_write_buffer_number_to_merge", "3"},
      {"max_write_buffer_number_to_maintain", "99"},
      {"max_write_buffer_size_to_maintain", "-99999"},
      {"compression", "kSnappyCompression"},
      {"compression_per_level",
       "kNoCompression:"
       "kSnappyCompression:"
       "kZlibCompression:"
       "kBZip2Compression:"
       "kLZ4Compression:"
       "kLZ4HCCompression:"
       "kXpressCompression:"
       "kZSTD:"
       "kZSTDNotFinalCompression"},
      {"bottommost_compression", "kLZ4Compression"},
      {"bottommost_compression_opts", "5:6:7:8:10:true"},
      {"compression_opts", "4:5:6:7:8:2:true:100:false"},
      {"num_levels", "8"},
      {"level0_file_num_compaction_trigger", "8"},
      {"level0_slowdown_writes_trigger", "9"},
      {"level0_stop_writes_trigger", "10"},
      {"target_file_size_base", "12"},
      {"target_file_size_multiplier", "13"},
      {"max_bytes_for_level_base", "14"},
      {"level_compaction_dynamic_level_bytes", "true"},
      {"max_bytes_for_level_multiplier", "15.0"},
      {"max_bytes_for_level_multiplier_additional", "16:17:18"},
      {"max_compaction_bytes", "21"},
      {"hard_pending_compaction_bytes_limit", "211"},
      {"arena_block_size", "22"},
      {"disable_auto_compactions", "true"},
      {"compaction_style", "kCompactionStyleLevel"},
      {"compaction_pri", "kOldestSmallestSeqFirst"},
      {"verify_checksums_in_compaction", "false"},
      {"compaction_options_fifo",
       "{allow_compaction=true;max_table_files_size=11002244;"
       "file_temperature_age_thresholds={{temperature=kCold;age=12345}}}"},
      {"max_sequential_skip_in_iterations", "24"},
      {"inplace_update_support", "true"},
      {"report_bg_io_stats", "true"},
      {"compaction_measure_io_stats", "false"},
      {"purge_redundant_kvs_while_flush", "false"},
      {"inplace_update_num_locks", "25"},
      {"memtable_prefix_bloom_size_ratio", "0.26"},
      {"memtable_whole_key_filtering", "true"},
      {"memtable_huge_page_size", "28"},
      {"bloom_locality", "29"},
      {"max_successive_merges", "30"},
      {"strict_max_successive_merges", "true"},
      {"min_partial_merge_operands", "31"},
      {"prefix_extractor", "fixed:31"},
      {"experimental_mempurge_threshold", "0.003"},
      {"optimize_filters_for_hits", "true"},
      {"enable_blob_files", "true"},
      {"min_blob_size", "1K"},
      {"blob_file_size", "1G"},
      {"blob_compression_type", "kZSTD"},
      {"enable_blob_garbage_collection", "true"},
      {"blob_garbage_collection_age_cutoff", "0.5"},
      {"blob_garbage_collection_force_threshold", "0.75"},
      {"blob_compaction_readahead_size", "256K"},
      {"blob_file_starting_level", "1"},
      {"prepopulate_blob_cache", "kDisable"},
      {"last_level_temperature", "kWarm"},
      {"default_write_temperature", "kCold"},
      {"default_temperature", "kHot"},
      {"persist_user_defined_timestamps", "true"},
      {"memtable_max_range_deletions", "0"},
  };
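
  // The DBOptions counterpart of the map above, using the same string
  // encodings; presumably consumed by a GetDBOptionsFromMap() call later in
  // this test.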
  std::unordered_map<std::string, std::string> db_options_map = {
      {"create_if_missing", "false"},
      {"create_missing_column_families", "true"},
      {"error_if_exists", "false"},
      {"paranoid_checks", "true"},
      {"track_and_verify_wals_in_manifest", "true"},
      {"verify_sst_unique_id_in_manifest", "true"},
      {"max_open_files", "32"},
      {"max_total_wal_size", "33"},
      {"use_fsync", "true"},
      {"db_log_dir", "/db_log_dir"},
      {"wal_dir", "/wal_dir"},
      {"delete_obsolete_files_period_micros", "34"},
      {"max_background_compactions", "35"},
      {"max_background_flushes", "36"},
      {"max_log_file_size", "37"},
      {"log_file_time_to_roll", "38"},
      {"keep_log_file_num", "39"},
      {"recycle_log_file_num", "5"},
      {"max_manifest_file_size", "40"},
      {"table_cache_numshardbits", "41"},
      {"WAL_ttl_seconds", "43"},
      {"WAL_size_limit_MB", "44"},
      {"manifest_preallocation_size", "45"},
      {"allow_mmap_reads", "true"},
      {"allow_mmap_writes", "false"},
      {"use_direct_reads", "false"},
      {"use_direct_io_for_flush_and_compaction", "false"},
      {"is_fd_close_on_exec", "true"},
      {"skip_log_error_on_recovery", "false"},
      {"stats_dump_period_sec", "46"},
      {"stats_persist_period_sec", "57"},
      {"persist_stats_to_disk", "false"},
      {"stats_history_buffer_size", "69"},
      {"advise_random_on_open", "true"},
      {"use_adaptive_mutex", "false"},
      {"compaction_readahead_size", "100"},
      {"random_access_max_buffer_size", "3145728"},
      {"writable_file_max_buffer_size", "314159"},
      {"bytes_per_sync", "47"},
      {"wal_bytes_per_sync", "48"},
      {"strict_bytes_per_sync", "true"},
      {"preserve_deletes", "false"},
      {"daily_offpeak_time_utc", ""},
  };

  ColumnFamilyOptions base_cf_opt;
  ColumnFamilyOptions new_cf_opt;
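  // Two parsing configurations: "exact" requires every option to be
  // recognized and to match exactly (kSanityLevelExactMatch), while "loose"
  // ignores unknown options and only demands loose compatibility
  // (kSanityLevelLooselyCompatible).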
  ConfigOptions exact, loose;
  exact.input_strings_escaped = false;
  exact.ignore_unknown_options = false;
  exact.sanity_level = ConfigOptions::kSanityLevelExactMatch;
  loose.sanity_level = ConfigOptions::kSanityLevelLooselyCompatible;
  loose.input_strings_escaped = false;
  loose.ignore_unknown_options = true;

  ASSERT_OK(GetColumnFamilyOptionsFromMap(exact, base_cf_opt, cf_options_map,
                                          &new_cf_opt));
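  // Illustrative aside (not part of the original test): the same kind of
  // parse can also be driven from a single delimited string via the
  // convenience API, e.g.
  //   ASSERT_OK(GetColumnFamilyOptionsFromString(
  //       exact, base_cf_opt,
  //       "write_buffer_size=1;max_write_buffer_number=2", &new_cf_opt));
  // Left as a comment here so it does not clobber new_cf_opt before the
  // assertions below.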
  ASSERT_EQ(new_cf_opt.write_buffer_size, 1U);
  ASSERT_EQ(new_cf_opt.max_write_buffer_number, 2);
  ASSERT_EQ(new_cf_opt.min_write_buffer_number_to_merge, 3);
  ASSERT_EQ(new_cf_opt.max_write_buffer_number_to_maintain, 99);
  ASSERT_EQ(new_cf_opt.max_write_buffer_size_to_maintain, -99999);
  ASSERT_EQ(new_cf_opt.compression, kSnappyCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level.size(), 9U);
  ASSERT_EQ(new_cf_opt.compression_per_level[0], kNoCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[1], kSnappyCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[2], kZlibCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[3], kBZip2Compression);
  ASSERT_EQ(new_cf_opt.compression_per_level[4], kLZ4Compression);
  ASSERT_EQ(new_cf_opt.compression_per_level[5], kLZ4HCCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[6], kXpressCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[7], kZSTD);
  ASSERT_EQ(new_cf_opt.compression_per_level[8], kZSTDNotFinalCompression);
  ASSERT_EQ(new_cf_opt.compression_opts.window_bits, 4);
  ASSERT_EQ(new_cf_opt.compression_opts.level, 5);
  ASSERT_EQ(new_cf_opt.compression_opts.strategy, 6);
  ASSERT_EQ(new_cf_opt.compression_opts.max_dict_bytes, 7u);
  ASSERT_EQ(new_cf_opt.compression_opts.zstd_max_train_bytes, 8u);
  ASSERT_EQ(new_cf_opt.compression_opts.parallel_threads, 2u);
  ASSERT_EQ(new_cf_opt.compression_opts.enabled, true);
|
ASSERT_EQ(new_cf_opt.compression_opts.max_dict_buffer_bytes, 100u);
ASSERT_EQ(new_cf_opt.compression_opts.use_zstd_dict_trainer, false);
ASSERT_EQ(new_cf_opt.bottommost_compression, kLZ4Compression);
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.window_bits, 5);
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.level, 6);
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.strategy, 7);
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_bytes, 8u);
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.zstd_max_train_bytes, 10u);
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.parallel_threads,
CompressionOptions().parallel_threads);
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled, true);
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer,
CompressionOptions().use_zstd_dict_trainer);
ASSERT_EQ(new_cf_opt.num_levels, 8);
ASSERT_EQ(new_cf_opt.level0_file_num_compaction_trigger, 8);
ASSERT_EQ(new_cf_opt.level0_slowdown_writes_trigger, 9);
ASSERT_EQ(new_cf_opt.level0_stop_writes_trigger, 10);
ASSERT_EQ(new_cf_opt.target_file_size_base, static_cast<uint64_t>(12));
ASSERT_EQ(new_cf_opt.target_file_size_multiplier, 13);
ASSERT_EQ(new_cf_opt.max_bytes_for_level_base, 14U);
ASSERT_EQ(new_cf_opt.level_compaction_dynamic_level_bytes, true);
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier, 15.0);
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional.size(), 3U);
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[0], 16);
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[1], 17);
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[2], 18);
ASSERT_EQ(new_cf_opt.max_compaction_bytes, 21);
ASSERT_EQ(new_cf_opt.hard_pending_compaction_bytes_limit, 211);
ASSERT_EQ(new_cf_opt.arena_block_size, 22U);
ASSERT_EQ(new_cf_opt.disable_auto_compactions, true);
ASSERT_EQ(new_cf_opt.compaction_style, kCompactionStyleLevel);
ASSERT_EQ(new_cf_opt.compaction_pri, kOldestSmallestSeqFirst);
ASSERT_EQ(new_cf_opt.compaction_options_fifo.max_table_files_size,
static_cast<uint64_t>(11002244));
ASSERT_EQ(new_cf_opt.compaction_options_fifo.allow_compaction, true);
ASSERT_EQ(
new_cf_opt.compaction_options_fifo.file_temperature_age_thresholds.size(),
1);
ASSERT_EQ(
new_cf_opt.compaction_options_fifo.file_temperature_age_thresholds[0]
.temperature,
Temperature::kCold);
ASSERT_EQ(
new_cf_opt.compaction_options_fifo.file_temperature_age_thresholds[0].age,
12345);
ASSERT_EQ(new_cf_opt.max_sequential_skip_in_iterations,
static_cast<uint64_t>(24));
ASSERT_EQ(new_cf_opt.inplace_update_support, true);
ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 25U);
ASSERT_EQ(new_cf_opt.memtable_prefix_bloom_size_ratio, 0.26);
ASSERT_EQ(new_cf_opt.memtable_whole_key_filtering, true);
ASSERT_EQ(new_cf_opt.memtable_huge_page_size, 28U);
ASSERT_EQ(new_cf_opt.bloom_locality, 29U);
ASSERT_EQ(new_cf_opt.max_successive_merges, 30U);
ASSERT_EQ(new_cf_opt.strict_max_successive_merges, true);
ASSERT_TRUE(new_cf_opt.prefix_extractor != nullptr);
ASSERT_EQ(new_cf_opt.optimize_filters_for_hits, true);
ASSERT_EQ(new_cf_opt.prefix_extractor->AsString(), "rocksdb.FixedPrefix.31");
ASSERT_EQ(new_cf_opt.experimental_mempurge_threshold, 0.003);
ASSERT_EQ(new_cf_opt.enable_blob_files, true);
ASSERT_EQ(new_cf_opt.min_blob_size, 1ULL << 10);
ASSERT_EQ(new_cf_opt.blob_file_size, 1ULL << 30);
ASSERT_EQ(new_cf_opt.blob_compression_type, kZSTD);
ASSERT_EQ(new_cf_opt.enable_blob_garbage_collection, true);
ASSERT_EQ(new_cf_opt.blob_garbage_collection_age_cutoff, 0.5);
Make it possible to force the garbage collection of the oldest blob files (#8994)
Summary:
The current BlobDB garbage collection logic works by relocating the valid
blobs from the oldest blob files as they are encountered during compaction,
and cleaning up blob files once they contain nothing but garbage. However,
with sufficiently skewed workloads, it is theoretically possible to end up in a
situation where few or no compactions get scheduled for the SST files that contain
references to the oldest blob files, which can lead to increased space amp due
to the lack of GC.
In order to efficiently handle such workloads, the patch adds a new BlobDB
configuration option called `blob_garbage_collection_force_threshold`,
which signals to BlobDB to schedule targeted compactions for the SST files
that keep alive the oldest batch of blob files if the overall ratio of garbage in
the given blob files meets the threshold *and* all the given blob files are
eligible for GC based on `blob_garbage_collection_age_cutoff`. (For example,
if the new option is set to 0.9, targeted compactions will get scheduled if the
sum of garbage bytes meets or exceeds 90% of the sum of total bytes in the
oldest blob files, assuming all affected blob files are below the age-based cutoff.)
The net result of these targeted compactions is that the valid blobs in the oldest
blob files are relocated and the oldest blob files themselves cleaned up (since
*all* SST files that rely on them get compacted away).
These targeted compactions are similar to periodic compactions in two ways:
they force SST files that would otherwise not get picked up to undergo
compaction, and instead of merging files from multiple levels, they target a
single file. (Note: such compactions might still include neighboring files
from the same level due to the need for a "clean cut" boundary, but they never
include files from any other level.)
This functionality is currently only supported with the leveled compaction style
and is inactive by default (since the default value is set to 1.0, i.e. 100%).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8994
Test Plan: Ran `make check` and tested using `db_bench` and the stress/crash tests.
Reviewed By: riversand963
Differential Revision: D31489850
Pulled By: ltamasi
fbshipit-source-id: 44057d511726a0e2a03c5d9313d7511b3f0c4eab
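As a concrete illustration of the knobs described above, here is a minimal sketch; the helper name and the 0.5/0.75 values are illustrative, not recommendations from the patch:
```
// Minimal sketch using only option names asserted in the tests below;
// helper name and values are illustrative.
#include "rocksdb/options.h"

ROCKSDB_NAMESPACE::ColumnFamilyOptions MakeBlobGcOptions() {
  ROCKSDB_NAMESPACE::ColumnFamilyOptions opts;
  opts.enable_blob_files = true;
  opts.enable_blob_garbage_collection = true;
  // Blob files in the oldest 50% (by age) are eligible for GC.
  opts.blob_garbage_collection_age_cutoff = 0.5;
  // Force targeted compactions once the eligible oldest blob files are
  // >= 75% garbage; 1.0 (the default) leaves forcing inactive.
  opts.blob_garbage_collection_force_threshold = 0.75;
  return opts;
}
```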
ASSERT_EQ(new_cf_opt.blob_garbage_collection_force_threshold, 0.75);
ASSERT_EQ(new_cf_opt.blob_compaction_readahead_size, 262144);
ASSERT_EQ(new_cf_opt.blob_file_starting_level, 1);
ASSERT_EQ(new_cf_opt.prepopulate_blob_cache, PrepopulateBlobCache::kDisable);
ASSERT_EQ(new_cf_opt.last_level_temperature, Temperature::kWarm);
ASSERT_EQ(new_cf_opt.default_write_temperature, Temperature::kCold);
ASSERT_EQ(new_cf_opt.default_temperature, Temperature::kHot);
ASSERT_EQ(new_cf_opt.persist_user_defined_timestamps, true);
ASSERT_EQ(new_cf_opt.memtable_max_range_deletions, 0);

cf_options_map["write_buffer_size"] = "hello";
ASSERT_NOK(GetColumnFamilyOptionsFromMap(exact, base_cf_opt, cf_options_map,
&new_cf_opt));
ASSERT_OK(
RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));

cf_options_map["write_buffer_size"] = "1";
ASSERT_OK(GetColumnFamilyOptionsFromMap(exact, base_cf_opt, cf_options_map,
&new_cf_opt));

cf_options_map["unknown_option"] = "1";
ASSERT_NOK(GetColumnFamilyOptionsFromMap(exact, base_cf_opt, cf_options_map,
&new_cf_opt));
ASSERT_OK(
RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));

// ignore_unknown_options=true;input_strings_escaped=false
ASSERT_OK(GetColumnFamilyOptionsFromMap(loose, base_cf_opt, cf_options_map,
&new_cf_opt));
ASSERT_OK(
RocksDBOptionsParser::VerifyCFOptions(loose, base_cf_opt, new_cf_opt));
ASSERT_NOK(
RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));

DBOptions base_db_opt;
DBOptions new_db_opt;
ASSERT_OK(
GetDBOptionsFromMap(exact, base_db_opt, db_options_map, &new_db_opt));
ASSERT_EQ(new_db_opt.create_if_missing, false);
ASSERT_EQ(new_db_opt.create_missing_column_families, true);
ASSERT_EQ(new_db_opt.error_if_exists, false);
ASSERT_EQ(new_db_opt.paranoid_checks, true);
ASSERT_EQ(new_db_opt.track_and_verify_wals_in_manifest, true);
ASSERT_EQ(new_db_opt.verify_sst_unique_id_in_manifest, true);
ASSERT_EQ(new_db_opt.max_open_files, 32);
ASSERT_EQ(new_db_opt.max_total_wal_size, static_cast<uint64_t>(33));
ASSERT_EQ(new_db_opt.use_fsync, true);
ASSERT_EQ(new_db_opt.db_log_dir, "/db_log_dir");
ASSERT_EQ(new_db_opt.wal_dir, "/wal_dir");
ASSERT_EQ(new_db_opt.delete_obsolete_files_period_micros,
static_cast<uint64_t>(34));
ASSERT_EQ(new_db_opt.max_background_compactions, 35);
ASSERT_EQ(new_db_opt.max_background_flushes, 36);
ASSERT_EQ(new_db_opt.max_log_file_size, 37U);
ASSERT_EQ(new_db_opt.log_file_time_to_roll, 38U);
ASSERT_EQ(new_db_opt.keep_log_file_num, 39U);
ASSERT_EQ(new_db_opt.recycle_log_file_num, 5U);
ASSERT_EQ(new_db_opt.max_manifest_file_size, static_cast<uint64_t>(40));
ASSERT_EQ(new_db_opt.table_cache_numshardbits, 41);
ASSERT_EQ(new_db_opt.WAL_ttl_seconds, static_cast<uint64_t>(43));
ASSERT_EQ(new_db_opt.WAL_size_limit_MB, static_cast<uint64_t>(44));
ASSERT_EQ(new_db_opt.manifest_preallocation_size, 45U);
ASSERT_EQ(new_db_opt.allow_mmap_reads, true);
ASSERT_EQ(new_db_opt.allow_mmap_writes, false);
ASSERT_EQ(new_db_opt.use_direct_reads, false);
ASSERT_EQ(new_db_opt.use_direct_io_for_flush_and_compaction, false);
ASSERT_EQ(new_db_opt.is_fd_close_on_exec, true);
ASSERT_EQ(new_db_opt.stats_dump_period_sec, 46U);
ASSERT_EQ(new_db_opt.stats_persist_period_sec, 57U);
ASSERT_EQ(new_db_opt.persist_stats_to_disk, false);
ASSERT_EQ(new_db_opt.stats_history_buffer_size, 69U);
ASSERT_EQ(new_db_opt.advise_random_on_open, true);
ASSERT_EQ(new_db_opt.use_adaptive_mutex, false);
ASSERT_EQ(new_db_opt.compaction_readahead_size, 100);
ASSERT_EQ(new_db_opt.random_access_max_buffer_size, 3145728);
ASSERT_EQ(new_db_opt.writable_file_max_buffer_size, 314159);
ASSERT_EQ(new_db_opt.bytes_per_sync, static_cast<uint64_t>(47));
ASSERT_EQ(new_db_opt.wal_bytes_per_sync, static_cast<uint64_t>(48));
ASSERT_EQ(new_db_opt.strict_bytes_per_sync, true);
Offpeak in db option (#11893)
Summary:
RocksDB's primary function is to facilitate read and write operations. Compactions, while essential for minimizing read amplification and optimizing storage, can sometimes compete with these primary tasks. Especially during periods of high read/write traffic, it's vital to ensure that primary operations receive priority, avoiding any potential disruptions or slowdowns. Conversely, off-peak times when traffic is minimal are an opportune moment to tackle low-priority tasks like TTL-based compactions, optimizing resource usage.
In this PR, we are incorporating the concept of off-peak time into RocksDB by introducing `daily_offpeak_time_utc` within the DBOptions. This setting is formatted as "HH:mm-HH:mm", where the time before the "-" is the start of the window and the time after it is the end, inclusive. It will later be used for resource optimization in subsequent PRs.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11893
Test Plan:
- New Unit Test Added - `DBOptionsTest::OffPeakTimes`
- Existing Unit Test Updated - `OptionsTest`, `OptionsSettableTest`
Reviewed By: pdillinger
Differential Revision: D49714553
Pulled By: jaykorean
fbshipit-source-id: fef51ea7c0fede6431c715bff116ddbb567c8752
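As a minimal sketch of the new option (the helper name and the particular window are illustrative):
```
// Minimal sketch: mark 23:30 through 05:15 UTC as off-peak. The format is
// "HH:mm-HH:mm" with the end time inclusive, per the summary above.
#include "rocksdb/options.h"

ROCKSDB_NAMESPACE::DBOptions MakeOffpeakDbOptions() {
  ROCKSDB_NAMESPACE::DBOptions db_opts;
  db_opts.daily_offpeak_time_utc = "23:30-05:15";
  return db_opts;
}
```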
ASSERT_EQ(new_db_opt.daily_offpeak_time_utc, "");

db_options_map["max_open_files"] = "hello";
Status s =
GetDBOptionsFromMap(exact, base_db_opt, db_options_map, &new_db_opt);
ASSERT_NOK(s);
ASSERT_TRUE(s.IsInvalidArgument());

ASSERT_OK(
RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
ASSERT_OK(
RocksDBOptionsParser::VerifyDBOptions(loose, base_db_opt, new_db_opt));

// unknown options should fail parsing without ignore_unknown_options = true
db_options_map["unknown_db_option"] = "1";
s = GetDBOptionsFromMap(exact, base_db_opt, db_options_map, &new_db_opt);
ASSERT_NOK(s);
ASSERT_TRUE(s.IsInvalidArgument());
ASSERT_OK(
RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));

ASSERT_OK(
GetDBOptionsFromMap(loose, base_db_opt, db_options_map, &new_db_opt));
ASSERT_OK(
RocksDBOptionsParser::VerifyDBOptions(loose, base_db_opt, new_db_opt));
ASSERT_NOK(
RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
}

TEST_F(OptionsTest, GetColumnFamilyOptionsFromStringTest) {
ColumnFamilyOptions base_cf_opt;
ColumnFamilyOptions new_cf_opt;
ConfigOptions config_options;
config_options.input_strings_escaped = false;
config_options.ignore_unknown_options = false;

base_cf_opt.table_factory.reset();
ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt, "",
&new_cf_opt));
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt, "write_buffer_size=5", &new_cf_opt));
ASSERT_EQ(new_cf_opt.write_buffer_size, 5U);
ASSERT_TRUE(new_cf_opt.table_factory == nullptr);
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt, "write_buffer_size=6;", &new_cf_opt));
ASSERT_EQ(new_cf_opt.write_buffer_size, 6U);
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt, " write_buffer_size = 7 ", &new_cf_opt));
ASSERT_EQ(new_cf_opt.write_buffer_size, 7U);
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt, " write_buffer_size = 8 ; ", &new_cf_opt));
ASSERT_EQ(new_cf_opt.write_buffer_size, 8U);
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=9;max_write_buffer_number=10", &new_cf_opt));
ASSERT_EQ(new_cf_opt.write_buffer_size, 9U);
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 10);
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=11; max_write_buffer_number = 12 ;", &new_cf_opt));
ASSERT_EQ(new_cf_opt.write_buffer_size, 11U);
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 12);
// Wrong name "max_write_buffer_number_"
ASSERT_NOK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=13;max_write_buffer_number_=14;", &new_cf_opt));
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
new_cf_opt));

// Comparator from object registry
std::string kCompName = "reverse_comp";
ObjectLibrary::Default()->AddFactory<const Comparator>(
kCompName,
[](const std::string& /*name*/,
std::unique_ptr<const Comparator>* /*guard*/,
std::string* /* errmsg */) { return ReverseBytewiseComparator(); });

ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
"comparator=" + kCompName + ";",
&new_cf_opt));
ASSERT_EQ(new_cf_opt.comparator, ReverseBytewiseComparator());

// MergeOperator from object registry
std::unique_ptr<BytesXOROperator> bxo(new BytesXOROperator());
std::string kMoName = bxo->Name();

ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
"merge_operator=" + kMoName + ";",
&new_cf_opt));
ASSERT_EQ(kMoName, std::string(new_cf_opt.merge_operator->Name()));

// Wrong key/value pair
Status s = GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=13;max_write_buffer_number;", &new_cf_opt);
ASSERT_NOK(s);
ASSERT_TRUE(s.IsInvalidArgument());

ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
new_cf_opt));

// Error parsing value
s = GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=13;max_write_buffer_number=;", &new_cf_opt);
ASSERT_NOK(s);
ASSERT_TRUE(s.IsInvalidArgument());
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
new_cf_opt));

// Missing option name
s = GetColumnFamilyOptionsFromString(
config_options, base_cf_opt, "write_buffer_size=13; =100;", &new_cf_opt);
ASSERT_NOK(s);
ASSERT_TRUE(s.IsInvalidArgument());
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
new_cf_opt));

const uint64_t kilo = 1024UL;
const uint64_t mega = 1024 * kilo;
const uint64_t giga = 1024 * mega;
const uint64_t tera = 1024 * giga;

// Units (k)
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt, "max_write_buffer_number=15K", &new_cf_opt));
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 15 * kilo);
// Units (m)
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"max_write_buffer_number=16m;inplace_update_num_locks=17M", &new_cf_opt));
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 16 * mega);
ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 17u * mega);
// Units (g)
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=18g;prefix_extractor=capped:8;"
"arena_block_size=19G",
&new_cf_opt));

ASSERT_EQ(new_cf_opt.write_buffer_size, 18 * giga);
ASSERT_EQ(new_cf_opt.arena_block_size, 19 * giga);
ASSERT_TRUE(new_cf_opt.prefix_extractor.get() != nullptr);
ASSERT_EQ(new_cf_opt.prefix_extractor->AsString(), "rocksdb.CappedPrefix.8");

// Units (t)
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt, "write_buffer_size=20t;arena_block_size=21T",
&new_cf_opt));
ASSERT_EQ(new_cf_opt.write_buffer_size, 20 * tera);
ASSERT_EQ(new_cf_opt.arena_block_size, 21 * tera);

// Nested block based table options
// Empty
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=10;max_write_buffer_number=16;"
"block_based_table_factory={};arena_block_size=1024",
&new_cf_opt));
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
// Non-empty
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=10;max_write_buffer_number=16;"
"block_based_table_factory={block_cache=1M;block_size=4;};"
"arena_block_size=1024",
&new_cf_opt));
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
// Last one
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=10;max_write_buffer_number=16;"
"block_based_table_factory={block_cache=1M;block_size=4;}",
&new_cf_opt));
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
// Mismatch curly braces
ASSERT_NOK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=10;max_write_buffer_number=16;"
"block_based_table_factory={{{block_size=4;};"
"arena_block_size=1024",
&new_cf_opt));
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
new_cf_opt));

// Unexpected chars after closing curly brace
ASSERT_NOK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=10;max_write_buffer_number=16;"
"block_based_table_factory={block_size=4;}};"
"arena_block_size=1024",
&new_cf_opt));
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
new_cf_opt));

ASSERT_NOK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=10;max_write_buffer_number=16;"
"block_based_table_factory={block_size=4;}xdfa;"
"arena_block_size=1024",
&new_cf_opt));
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
new_cf_opt));

ASSERT_NOK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=10;max_write_buffer_number=16;"
"block_based_table_factory={block_size=4;}xdfa",
&new_cf_opt));
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
new_cf_opt));

// Invalid block based table option
ASSERT_NOK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=10;max_write_buffer_number=16;"
"block_based_table_factory={xx_block_size=4;}",
&new_cf_opt));
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
new_cf_opt));

ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
"optimize_filters_for_hits=true",
&new_cf_opt));
ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
"optimize_filters_for_hits=false",
&new_cf_opt));

ASSERT_NOK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
"optimize_filters_for_hits=junk",
&new_cf_opt));
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opt,
new_cf_opt));

// Nested plain table options
// Empty
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=10;max_write_buffer_number=16;"
"plain_table_factory={};arena_block_size=1024",
&new_cf_opt));
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
ASSERT_EQ(std::string(new_cf_opt.table_factory->Name()), "PlainTable");
// Non-empty
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=10;max_write_buffer_number=16;"
"plain_table_factory={user_key_len=66;bloom_bits_per_key=20;};"
"arena_block_size=1024",
&new_cf_opt));
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
ASSERT_EQ(std::string(new_cf_opt.table_factory->Name()), "PlainTable");

// memtable factory
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"write_buffer_size=10;max_write_buffer_number=16;"
"memtable=skip_list:10;arena_block_size=1024",
&new_cf_opt));
ASSERT_TRUE(new_cf_opt.memtable_factory != nullptr);
ASSERT_EQ(std::string(new_cf_opt.memtable_factory->Name()), "SkipListFactory");
ASSERT_TRUE(new_cf_opt.memtable_factory->IsInstanceOf("SkipListFactory"));

// blob cache
ASSERT_OK(GetColumnFamilyOptionsFromString(
config_options, base_cf_opt,
"blob_cache={capacity=1M;num_shard_bits=4;"
"strict_capacity_limit=true;high_pri_pool_ratio=0.5;};",
&new_cf_opt));
ASSERT_NE(new_cf_opt.blob_cache, nullptr);
ASSERT_EQ(new_cf_opt.blob_cache->GetCapacity(), 1024UL * 1024UL);
ASSERT_EQ(static_cast<ShardedCacheBase*>(new_cf_opt.blob_cache.get())
->GetNumShardBits(),
4);
ASSERT_EQ(new_cf_opt.blob_cache->HasStrictCapacityLimit(), true);
ASSERT_EQ(static_cast<LRUCache*>(new_cf_opt.blob_cache.get())
->GetHighPriPoolRatio(),
0.5);
}

TEST_F(OptionsTest, CompressionOptionsFromString) {
ColumnFamilyOptions base_cf_opt;
ColumnFamilyOptions new_cf_opt;
ConfigOptions config_options;
std::string opts_str;
config_options.ignore_unknown_options = false;
CompressionOptions dflt;
// Test with some optional values removed....
ASSERT_OK(
GetColumnFamilyOptionsFromString(config_options, ColumnFamilyOptions(),
"compression_opts=3:4:5; "
"bottommost_compression_opts=4:5:6:7",
&base_cf_opt));
ASSERT_EQ(base_cf_opt.compression_opts.window_bits, 3);
ASSERT_EQ(base_cf_opt.compression_opts.level, 4);
ASSERT_EQ(base_cf_opt.compression_opts.strategy, 5);
ASSERT_EQ(base_cf_opt.compression_opts.max_dict_bytes, dflt.max_dict_bytes);
ASSERT_EQ(base_cf_opt.compression_opts.zstd_max_train_bytes,
dflt.zstd_max_train_bytes);
ASSERT_EQ(base_cf_opt.compression_opts.parallel_threads,
dflt.parallel_threads);
ASSERT_EQ(base_cf_opt.compression_opts.enabled, dflt.enabled);
ASSERT_EQ(base_cf_opt.compression_opts.use_zstd_dict_trainer,
dflt.use_zstd_dict_trainer);
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.window_bits, 4);
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.level, 5);
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.strategy, 6);
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.max_dict_bytes, 7u);
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.zstd_max_train_bytes,
dflt.zstd_max_train_bytes);
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.parallel_threads,
dflt.parallel_threads);
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.enabled, dflt.enabled);
|
Support using ZDICT_finalizeDictionary to generate zstd dictionary (#9857)
Summary:
An untrained dictionary is currently simply the concatenation of several samples. The ZSTD API, ZDICT_finalizeDictionary(), can improve such a dictionary's effectiveness at low cost. This PR changes how dictionary is created by calling the ZSTD ZDICT_finalizeDictionary() API instead of creating raw content dictionary (when max_dict_buffer_bytes > 0), and pass in all buffered uncompressed data blocks as samples.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9857
Test Plan:
#### db_bench test for cpu/memory of compression+decompression and space saving on synthetic data:
Set up: change the parameter [here](https://github.com/facebook/rocksdb/blob/fb9a167a55e0970b1ef6f67c1600c8d9c4c6114f/tools/db_bench_tool.cc#L1766) to 16384 to make synthetic data more compressible.
```
# linked local ZSTD with version 1.5.2
# DEBUG_LEVEL=0 ROCKSDB_NO_FBCODE=1 ROCKSDB_DISABLE_ZSTD=1 EXTRA_CXXFLAGS="-DZSTD_STATIC_LINKING_ONLY -DZSTD -I/data/users/changyubi/install/include/" EXTRA_LDFLAGS="-L/data/users/changyubi/install/lib/ -l:libzstd.a" make -j32 db_bench
dict_bytes=16384
train_bytes=1048576
echo "========== No Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== Raw Content Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench_main -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench_main -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== FinalizeDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== TrainDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
# Result: TrainDictionary is much better on space saving, but FinalizeDictionary seems to use less memory.
# before compression data size: 1.2GB
dict_bytes=16384
max_dict_buffer_bytes = 1048576
space cpu/memory
No Dictionary 468M 14.93user 1.00system 0:15.92elapsed 100%CPU (0avgtext+0avgdata 23904maxresident)k
Raw Dictionary 251M 15.81user 0.80system 0:16.56elapsed 100%CPU (0avgtext+0avgdata 156808maxresident)k
FinalizeDictionary 236M 11.93user 0.64system 0:12.56elapsed 100%CPU (0avgtext+0avgdata 89548maxresident)k
TrainDictionary 84M 7.29user 0.45system 0:07.75elapsed 100%CPU (0avgtext+0avgdata 97288maxresident)k
```
#### Benchmark on 10 sample SST files for space saving and CPU time on compression:
FinalizeDictionary is comparable to TrainDictionary in terms of space saving, and takes less compression time.
```
dict_bytes=16384
train_bytes=1048576
for sst_file in `ls ../temp/myrock-sst/`
do
echo "********** $sst_file **********"
echo "========== No Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD
echo "========== Raw Content Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes
echo "========== FinalizeDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes --compression_use_zstd_finalize_dict
echo "========== TrainDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes
done
010240.sst (Size/Time) 011029.sst 013184.sst 021552.sst 185054.sst 185137.sst 191666.sst 7560381.sst 7604174.sst 7635312.sst
No Dictionary 28165569 / 2614419 32899411 / 2976832 32977848 / 3055542 31966329 / 2004590 33614351 / 1755877 33429029 / 1717042 33611933 / 1776936 33634045 / 2771417 33789721 / 2205414 33592194 / 388254
Raw Content Dictionary 28019950 / 2697961 33748665 / 3572422 33896373 / 3534701 26418431 / 2259658 28560825 / 1839168 28455030 / 1846039 28494319 / 1861349 32391599 / 3095649 33772142 / 2407843 33592230 / 474523
FinalizeDictionary 27896012 / 2650029 33763886 / 3719427 33904283 / 3552793 26008225 / 2198033 28111872 / 1869530 28014374 / 1789771 28047706 / 1848300 32296254 / 3204027 33698698 / 2381468 33592344 / 517433
TrainDictionary 28046089 / 2740037 33706480 / 3679019 33885741 / 3629351 25087123 / 2204558 27194353 / 1970207 27234229 / 1896811 27166710 / 1903119 32011041 / 3322315 32730692 / 2406146 33608631 / 570593
```
#### Decompression/Read test:
With FinalizeDictionary/TrainDictionary, some data structures used for decompression are stored in the dictionary, so decompression/reads are expected to be faster.
```
dict_bytes=16384
train_bytes=1048576
echo "No Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=0 > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=0 2>&1 | grep MB/s
echo "Raw Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes 2>&1 | grep MB/s
echo "FinalizeDict"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false 2>&1 | grep MB/s
echo "Train Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes 2>&1 | grep MB/s
No Dictionary
readrandom : 12.183 micros/op 82082 ops/sec 12.183 seconds 1000000 operations; 9.1 MB/s (1000000 of 1000000 found)
Raw Dictionary
readrandom : 12.314 micros/op 81205 ops/sec 12.314 seconds 1000000 operations; 9.0 MB/s (1000000 of 1000000 found)
FinalizeDict
readrandom : 9.787 micros/op 102180 ops/sec 9.787 seconds 1000000 operations; 11.3 MB/s (1000000 of 1000000 found)
Train Dictionary
readrandom : 9.698 micros/op 103108 ops/sec 9.699 seconds 1000000 operations; 11.4 MB/s (1000000 of 1000000 found)
```
Reviewed By: ajkr
Differential Revision: D35720026
Pulled By: cbi42
fbshipit-source-id: 24d230fdff0fd28a1bb650658798f00dfcfb2a1f
2022-05-20 19:09:09 +00:00
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer,
|
|
|
|
dflt.use_zstd_dict_trainer);
|
2020-07-17 02:04:44 +00:00
|
|
|
|
|
|
|
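// The colon-separated compression_opts form is positional; with all nine
// fields present it reads as window_bits:level:strategy:max_dict_bytes:
// zstd_max_train_bytes:parallel_threads:enabled:max_dict_buffer_bytes:
// use_zstd_dict_trainer. Shorter legacy forms are also accepted (see
// bottommost_compression_opts below); fields they don't set stay at their
// defaults, which are compared against dflt.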
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, ColumnFamilyOptions(),
|
2022-05-20 19:09:09 +00:00
|
|
|
"compression_opts=4:5:6:7:8:9:true:10:false; "
|
2020-07-17 02:04:44 +00:00
|
|
|
"bottommost_compression_opts=5:6:7:8:9:false",
|
|
|
|
&base_cf_opt));
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.window_bits, 4);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.level, 5);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.strategy, 6);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.max_dict_bytes, 7u);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.zstd_max_train_bytes, 8u);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.parallel_threads, 9u);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.enabled, true);
|
2022-05-20 19:09:09 +00:00
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.max_dict_buffer_bytes, 10u);
|
|
|
|
ASSERT_EQ(base_cf_opt.compression_opts.use_zstd_dict_trainer, false);
|
2020-07-17 02:04:44 +00:00
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.window_bits, 5);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.level, 6);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.strategy, 7);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.max_dict_bytes, 8u);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.zstd_max_train_bytes, 9u);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.parallel_threads,
|
|
|
|
dflt.parallel_threads);
|
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.enabled, false);
|
2022-05-20 19:09:09 +00:00
|
|
|
ASSERT_EQ(base_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer,
|
|
|
|
dflt.use_zstd_dict_trainer);
|
2020-07-17 02:04:44 +00:00
|
|
|
|
|
|
|
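// Round-trip: serialize base_cf_opt back to a string, re-parse it into
// new_cf_opt, and verify the compression settings survive unchanged.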
ASSERT_OK(
|
|
|
|
GetStringFromColumnFamilyOptions(config_options, base_cf_opt, &opts_str));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, ColumnFamilyOptions(), opts_str, &new_cf_opt));
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.window_bits, 4);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.level, 5);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.strategy, 6);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.max_dict_bytes, 7u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.zstd_max_train_bytes, 8u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.parallel_threads, 9u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.enabled, true);
|
2022-05-20 19:09:09 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.max_dict_buffer_bytes, 10u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.use_zstd_dict_trainer, false);
|
2020-07-17 02:04:44 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.window_bits, 5);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.level, 6);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.strategy, 7);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_bytes, 8u);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.zstd_max_train_bytes, 9u);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.parallel_threads,
|
|
|
|
dflt.parallel_threads);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled, false);
|
2022-05-20 19:09:09 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer,
|
|
|
|
dflt.use_zstd_dict_trainer);
|
2020-07-17 02:04:44 +00:00
|
|
|
|
|
|
|
// Test as struct values
|
|
|
|
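// The braced struct form sets each field by name (name=value pairs
// separated by ';') instead of positionally.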
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, ColumnFamilyOptions(),
|
|
|
|
"compression_opts={window_bits=5; level=6; strategy=7; max_dict_bytes=8;"
|
      "zstd_max_train_bytes=9;parallel_threads=10;enabled=true;use_zstd_dict_"
      "trainer=false}; "
      "bottommost_compression_opts={window_bits=4; level=5; strategy=6;"
      " max_dict_bytes=7;zstd_max_train_bytes=8;parallel_threads=9;"
      "enabled=false;use_zstd_dict_trainer=true}; ",
      &new_cf_opt));
  ASSERT_EQ(new_cf_opt.compression_opts.window_bits, 5);
  ASSERT_EQ(new_cf_opt.compression_opts.level, 6);
  ASSERT_EQ(new_cf_opt.compression_opts.strategy, 7);
  ASSERT_EQ(new_cf_opt.compression_opts.max_dict_bytes, 8u);
  ASSERT_EQ(new_cf_opt.compression_opts.zstd_max_train_bytes, 9u);
  ASSERT_EQ(new_cf_opt.compression_opts.parallel_threads, 10u);
  ASSERT_EQ(new_cf_opt.compression_opts.enabled, true);
  ASSERT_EQ(new_cf_opt.compression_opts.use_zstd_dict_trainer, false);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.window_bits, 4);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.level, 5);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.strategy, 6);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_bytes, 7u);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.zstd_max_train_bytes, 8u);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.parallel_threads, 9u);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled, false);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer,
            true);

  ASSERT_OK(GetColumnFamilyOptionsFromString(
      config_options, base_cf_opt,
      "compression_opts={window_bits=4; strategy=5;};"
      "bottommost_compression_opts={level=6; strategy=7;}",
      &new_cf_opt));
  ASSERT_EQ(new_cf_opt.compression_opts.window_bits, 4);
  ASSERT_EQ(new_cf_opt.compression_opts.strategy, 5);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.level, 6);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.strategy, 7);

  ASSERT_EQ(new_cf_opt.compression_opts.level,
            base_cf_opt.compression_opts.level);
  ASSERT_EQ(new_cf_opt.compression_opts.max_dict_bytes,
            base_cf_opt.compression_opts.max_dict_bytes);
  ASSERT_EQ(new_cf_opt.compression_opts.zstd_max_train_bytes,
            base_cf_opt.compression_opts.zstd_max_train_bytes);
  ASSERT_EQ(new_cf_opt.compression_opts.parallel_threads,
            base_cf_opt.compression_opts.parallel_threads);
  ASSERT_EQ(new_cf_opt.compression_opts.enabled,
            base_cf_opt.compression_opts.enabled);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.window_bits,
            base_cf_opt.bottommost_compression_opts.window_bits);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_bytes,
            base_cf_opt.bottommost_compression_opts.max_dict_bytes);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.zstd_max_train_bytes,
            base_cf_opt.bottommost_compression_opts.zstd_max_train_bytes);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.parallel_threads,
            base_cf_opt.bottommost_compression_opts.parallel_threads);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled,
            base_cf_opt.bottommost_compression_opts.enabled);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer,
            base_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer);

  // Test a few individual struct values
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      config_options, base_cf_opt,
      "compression_opts.enabled=false; "
      "bottommost_compression_opts.enabled=true; ",
      &new_cf_opt));
  ASSERT_EQ(new_cf_opt.compression_opts.enabled, false);
  ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled, true);

  // Now test some illegal values
  ConfigOptions ignore;
  ignore.ignore_unknown_options = true;
  ASSERT_NOK(GetColumnFamilyOptionsFromString(
      config_options, ColumnFamilyOptions(),
      "compression_opts=5:6:7:8:9:x:false", &base_cf_opt));
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      ignore, ColumnFamilyOptions(), "compression_opts=5:6:7:8:9:x:false",
      &base_cf_opt));
Limit buffering for collecting samples for compression dictionary (#7970)
Summary:
For dictionary compression, we need to collect some representative samples of the data to be compressed, which we use to either generate or train (when `CompressionOptions::zstd_max_train_bytes > 0`) a dictionary. Previously, the strategy was to buffer all the data blocks during flush, and up to the target file size during compaction. That strategy allowed us to randomly pick samples from as wide a range as possible that'd be guaranteed to land in a single output file.
However, some users try to make huge files in memory-constrained environments, where this strategy can cause OOM. This PR introduces an option, `CompressionOptions::max_dict_buffer_bytes`, that limits how much data is buffered before we switch to unbuffered mode (which means creating the per-SST dictionary, writing out the buffered data, and compressing/writing new blocks as soon as they are built). The limit is not strict, since we currently buffer more than just data blocks -- keys are buffered too -- but it is a step toward giving users predictable memory usage.
Related changes include:
- Changed sampling for dictionary compression to select unique data blocks when there is limited availability of data blocks
- Made use of `BlockBuilder::SwapAndReset()` to save an allocation+memcpy when buffering data blocks for building a dictionary
- Changed `ParseBoolean()` to accept an input containing characters after the boolean. This is necessary since, with this PR, a value for `CompressionOptions::enabled` is no longer necessarily the final component in the `CompressionOptions` string.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7970
Test Plan:
- updated `CompressionOptions` unit tests to verify the limit is respected (to the extent expected in the current implementation) in various scenarios of flush/compaction to bottommost/non-bottommost level
- looked at jemalloc heap profiles right before and after switching to unbuffered mode during flush/compaction. Verified that memory usage during buffering is proportional to the limit set.
Reviewed By: pdillinger
Differential Revision: D26467994
Pulled By: ajkr
fbshipit-source-id: 3da4ef9fba59974e4ef40e40c01611002c861465
2021-02-19 22:06:59 +00:00
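As a usage illustration (not part of the test), a minimal sketch that caps sample buffering with the new option; the helper name and constants are made up for the example:
```
#include "rocksdb/options.h"

// Minimal sketch: cap the memory spent buffering dictionary samples.
// MakeDictCappedOptions is an illustrative helper, not RocksDB API.
rocksdb::ColumnFamilyOptions MakeDictCappedOptions() {
  rocksdb::ColumnFamilyOptions cf_opts;
  cf_opts.compression = rocksdb::kZSTD;
  cf_opts.compression_opts.max_dict_bytes = 16 * 1024;       // dictionary size
  cf_opts.compression_opts.zstd_max_train_bytes = 1 << 20;   // training input
  cf_opts.compression_opts.max_dict_buffer_bytes = 1 << 20;  // buffering cap
  return cf_opts;
}
```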
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      config_options, ColumnFamilyOptions(),
      "compression_opts=1:2:3:4:5:6:true:8", &base_cf_opt));
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      ignore, ColumnFamilyOptions(), "compression_opts=1:2:3:4:5:6:true:8",
      &base_cf_opt));
  ASSERT_NOK(GetColumnFamilyOptionsFromString(
      config_options, ColumnFamilyOptions(),
      "compression_opts=1:2:3:4:5:6:true:8:9", &base_cf_opt));
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      ignore, ColumnFamilyOptions(), "compression_opts=1:2:3:4:5:6:true:8:9",
      &base_cf_opt));

  ASSERT_NOK(GetColumnFamilyOptionsFromString(
      config_options, ColumnFamilyOptions(), "compression_opts={unknown=bad;}",
      &base_cf_opt));
  ASSERT_OK(GetColumnFamilyOptionsFromString(ignore, ColumnFamilyOptions(),
                                             "compression_opts={unknown=bad;}",
                                             &base_cf_opt));
  ASSERT_NOK(GetColumnFamilyOptionsFromString(
      config_options, ColumnFamilyOptions(), "compression_opts.unknown=bad",
      &base_cf_opt));
  ASSERT_OK(GetColumnFamilyOptionsFromString(ignore, ColumnFamilyOptions(),
                                             "compression_opts.unknown=bad",
                                             &base_cf_opt));
}
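The colon-separated and struct spellings of `compression_opts` exercised above are interchangeable. Here is a minimal standalone sketch; the positional field order in the comment is inferred from the struct-form asserts above, so treat it as an assumption:
```
#include <cassert>

#include "rocksdb/convenience.h"
#include "rocksdb/options.h"

// Sketch: both spellings below should yield the same CompressionOptions.
// Assumed positional order: window_bits:level:strategy:max_dict_bytes:
//   zstd_max_train_bytes:parallel_threads:enabled[:max_dict_buffer_bytes]
void ParseBothForms() {
  rocksdb::ConfigOptions config;
  rocksdb::ColumnFamilyOptions base, colon_out, struct_out;
  rocksdb::Status s = rocksdb::GetColumnFamilyOptionsFromString(
      config, base, "compression_opts=5:6:7:8:9:10:true", &colon_out);
  assert(s.ok());
  s = rocksdb::GetColumnFamilyOptionsFromString(
      config, base,
      "compression_opts={window_bits=5; level=6; strategy=7; max_dict_bytes=8;"
      " zstd_max_train_bytes=9; parallel_threads=10; enabled=true}",
      &struct_out);
  assert(s.ok());
  assert(colon_out.compression_opts.window_bits ==
         struct_out.compression_opts.window_bits);
}
```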
TEST_F(OptionsTest, OldInterfaceTest) {
  ColumnFamilyOptions base_cf_opt;
  ColumnFamilyOptions new_cf_opt;
  ConfigOptions exact;
  ConfigOptions cf_config_options;
  cf_config_options.input_strings_escaped = false;
  cf_config_options.ignore_unknown_options = false;
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      cf_config_options, base_cf_opt,
      "write_buffer_size=18;prefix_extractor=capped:8;"
      "arena_block_size=19",
      &new_cf_opt));

  ASSERT_EQ(new_cf_opt.write_buffer_size, 18);
  ASSERT_EQ(new_cf_opt.arena_block_size, 19);
  ASSERT_TRUE(new_cf_opt.prefix_extractor.get() != nullptr);

  // And with a bad option
  ASSERT_NOK(GetColumnFamilyOptionsFromString(
      cf_config_options, base_cf_opt,
      "write_buffer_size=10;max_write_buffer_number=16;"
      "block_based_table_factory={xx_block_size=4;}",
      &new_cf_opt));
  ASSERT_OK(
      RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));

  std::unordered_map<std::string, std::string> cf_options_map = {
      {"write_buffer_size", "1"},
      {"max_write_buffer_number", "2"},
      {"min_write_buffer_number_to_merge", "3"},
  };
  ASSERT_OK(GetColumnFamilyOptionsFromMap(cf_config_options, base_cf_opt,
                                          cf_options_map, &new_cf_opt));
  cf_options_map["unknown_option"] = "1";
  ASSERT_NOK(GetColumnFamilyOptionsFromMap(cf_config_options, base_cf_opt,
                                           cf_options_map, &new_cf_opt));
  ASSERT_OK(
      RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
  cf_config_options.input_strings_escaped = true;
  cf_config_options.ignore_unknown_options = true;
  ASSERT_OK(GetColumnFamilyOptionsFromMap(cf_config_options, base_cf_opt,
                                          cf_options_map, &new_cf_opt));
2020-04-22 00:35:28 +00:00
|
|
|
|
|
|
|
DBOptions base_db_opt;
|
|
|
|
DBOptions new_db_opt;
|
|
|
|
std::unordered_map<std::string, std::string> db_options_map = {
|
|
|
|
{"create_if_missing", "false"},
|
|
|
|
{"create_missing_column_families", "true"},
|
|
|
|
{"error_if_exists", "false"},
|
|
|
|
{"paranoid_checks", "true"},
|
2020-10-09 23:40:25 +00:00
|
|
|
{"track_and_verify_wals_in_manifest", "true"},
|
2022-05-19 18:04:21 +00:00
|
|
|
{"verify_sst_unique_id_in_manifest", "true"},
|
2020-04-22 00:35:28 +00:00
|
|
|
{"max_open_files", "32"},
|
Offpeak in db option (#11893)
Summary:
RocksDB's primary function is to facilitate read and write operations. Compactions, while essential for minimizing read amplification and optimizing storage, can sometimes compete with these primary tasks. Especially during periods of high read/write traffic, it's vital to ensure that primary operations receive priority, avoiding any potential disruptions or slowdowns. Conversely, during off-peak times when traffic is minimal, it's an opportune moment to tackle low-priority tasks like TTL-based compactions, optimizing resource usage.
In this PR, we are incorporating the concept of off-peak time into RocksDB by introducing `daily_offpeak_time_utc` within the DBOptions. This setting is formatted as "HH:mm-HH:mm", where the part before "-" is the start time and the part after it is the end time, inclusive. It will later be used for resource optimization in subsequent PRs.
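A minimal sketch of configuring the new option (the specific window here is illustrative):
```
#include "rocksdb/options.h"

rocksdb::DBOptions MakeOffpeakDBOptions() {
  rocksdb::DBOptions db_opts;
  // Treat 1:00am through 5:00am UTC, inclusive, as off-peak; low-priority
  // work such as TTL-based compactions may be scheduled in this window.
  db_opts.daily_offpeak_time_utc = "01:00-05:00";
  return db_opts;
}
```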
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11893
Test Plan:
- New Unit Test Added - `DBOptionsTest::OffPeakTimes`
- Existing Unit Test Updated - `OptionsTest`, `OptionsSettableTest`
Reviewed By: pdillinger
Differential Revision: D49714553
Pulled By: jaykorean
fbshipit-source-id: fef51ea7c0fede6431c715bff116ddbb567c8752
2023-09-29 20:03:39 +00:00
|
|
|
{"daily_offpeak_time_utc", "06:30-23:30"},
|
2020-04-22 00:35:28 +00:00
|
|
|
};
|
2023-02-07 22:11:53 +00:00
|
|
|
|
|
|
|
ConfigOptions db_config_options(base_db_opt);
|
|
|
|
db_config_options.input_strings_escaped = false;
|
|
|
|
db_config_options.ignore_unknown_options = false;
|
|
|
|
ASSERT_OK(GetDBOptionsFromMap(db_config_options, base_db_opt, db_options_map,
|
|
|
|
&new_db_opt));
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_EQ(new_db_opt.create_if_missing, false);
|
|
|
|
ASSERT_EQ(new_db_opt.create_missing_column_families, true);
|
|
|
|
ASSERT_EQ(new_db_opt.error_if_exists, false);
|
|
|
|
ASSERT_EQ(new_db_opt.paranoid_checks, true);
|
2020-10-09 23:40:25 +00:00
|
|
|
ASSERT_EQ(new_db_opt.track_and_verify_wals_in_manifest, true);
|
2022-05-19 18:04:21 +00:00
|
|
|
ASSERT_EQ(new_db_opt.verify_sst_unique_id_in_manifest, true);
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_EQ(new_db_opt.max_open_files, 32);
|
|
|
|
db_options_map["unknown_option"] = "1";
|
2023-02-07 22:11:53 +00:00
|
|
|
Status s = GetDBOptionsFromMap(db_config_options, base_db_opt, db_options_map,
|
|
|
|
&new_db_opt);
|
2020-10-20 18:51:51 +00:00
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
|
|
|
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
|
2023-02-07 22:11:53 +00:00
|
|
|
db_config_options.input_strings_escaped = true;
|
|
|
|
db_config_options.ignore_unknown_options = true;
|
|
|
|
ASSERT_OK(GetDBOptionsFromMap(db_config_options, base_db_opt, db_options_map,
|
|
|
|
&new_db_opt));
|
|
|
|
db_config_options.input_strings_escaped = false;
|
|
|
|
db_config_options.ignore_unknown_options = false;
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_OK(GetDBOptionsFromString(
|
2023-02-07 22:11:53 +00:00
|
|
|
db_config_options, base_db_opt,
|
Offpeak in db option (#11893)
2023-09-29 20:03:39 +00:00
|
|
|
"create_if_missing=false;error_if_exists=false;max_open_files=42;"
|
|
|
|
"daily_offpeak_time_utc=08:30-19:00;",
|
2020-04-22 00:35:28 +00:00
|
|
|
&new_db_opt));
|
|
|
|
ASSERT_EQ(new_db_opt.create_if_missing, false);
|
|
|
|
ASSERT_EQ(new_db_opt.error_if_exists, false);
|
|
|
|
ASSERT_EQ(new_db_opt.max_open_files, 42);
|
Offpeak in db option (#11893)
2023-09-29 20:03:39 +00:00
|
|
|
ASSERT_EQ(new_db_opt.daily_offpeak_time_utc, "08:30-19:00");
|
2020-10-20 18:51:51 +00:00
|
|
|
s = GetDBOptionsFromString(
|
2023-02-07 22:11:53 +00:00
|
|
|
db_config_options, base_db_opt,
|
2020-04-22 00:35:28 +00:00
|
|
|
"create_if_missing=false;error_if_exists=false;max_open_files=42;"
|
|
|
|
"unknown_option=1;",
|
2020-10-20 18:51:51 +00:00
|
|
|
&new_db_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) {
|
|
|
|
BlockBasedTableOptions table_opt;
|
|
|
|
BlockBasedTableOptions new_opt;
|
|
|
|
ConfigOptions config_options;
|
|
|
|
config_options.input_strings_escaped = false;
|
|
|
|
config_options.ignore_unknown_options = false;
|
2022-02-18 20:23:48 +00:00
|
|
|
config_options.ignore_unsupported_options = false;
|
2020-04-22 00:35:28 +00:00
|
|
|
|
|
|
|
// make sure default values are overwritten by something else
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"cache_index_and_filter_blocks=1;index_type=kHashSearch;"
|
2022-03-01 21:58:02 +00:00
|
|
|
"checksum=kxxHash;"
|
2020-04-22 00:35:28 +00:00
|
|
|
"block_cache=1M;block_cache_compressed=1k;block_size=1024;"
|
|
|
|
"block_size_deviation=8;block_restart_interval=4;"
|
|
|
|
"format_version=5;whole_key_filtering=1;"
|
Detect (new) Bloom/Ribbon Filter construction corruption (#9342)
Summary:
Note: rebase on and merge after https://github.com/facebook/rocksdb/pull/9349, https://github.com/facebook/rocksdb/pull/9345, (optional) https://github.com/facebook/rocksdb/pull/9393
**Context:**
(Quoted from pdillinger) Layers of information during new Bloom/Ribbon Filter construction in building block-based tables include the following:
a) set of keys to add to filter
b) set of hashes to add to filter (64-bit hash applied to each key)
c) set of Bloom indices to set in filter, with duplicates
d) set of Bloom indices to set in filter, deduplicated
e) final filter and its checksum
This PR aims to detect corruption (e.g., unexpected hardware/software corruption of data structures residing in memory for a long time) from b) to e), and leaves a) as future work at the application level.
- b)'s corruption is detected by verifying the xor checksum of the hash entries, calculated as the entries accumulate before being added to the filter (i.e., `XXPH3FilterBitsBuilder::MaybeVerifyHashEntriesChecksum()`).
- c) - e)'s corruption is detected by verifying that the hash entries indeed exist in the constructed filter, re-querying them in the filter (i.e., `FilterBitsBuilder::MaybePostVerify()`) after computing the block checksum (except for PartitionFilter, where verification is done right after each `FilterBitsBuilder::Finish` for impl simplicity - see the code comment for more). For this stage of detection, we assume hash entries are not corrupted after the check in b), since the time interval from b) to c) is relatively short.
The option enabling this detection is `BlockBasedTableOptions::detect_filter_construct_corruption`, which is false by default.
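A minimal sketch of opting in (the surrounding setup is illustrative; only the option name comes from this PR):
```
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

rocksdb::Options MakeCorruptionDetectingOptions() {
  rocksdb::BlockBasedTableOptions bbto;
  bbto.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
  // Off by default; enabling trades extra filter-construction time (roughly
  // +20-30% in the filter_bench numbers below) for corruption detection.
  bbto.detect_filter_construct_corruption = true;
  rocksdb::Options options;
  options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(bbto));
  return options;
}
```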
**Summary:**
- Implemented new functions `XXPH3FilterBitsBuilder::MaybeVerifyHashEntriesChecksum()` and `FilterBitsBuilder::MaybePostVerify()`
- Ensured hash entries, final filter and banding and their [cache reservation](https://github.com/facebook/rocksdb/issues/9073) are released properly despite corruption
- See [Filter.construction.artifacts.release.point.pdf](https://github.com/facebook/rocksdb/files/7923487/Design.Filter.construction.artifacts.release.point.pdf) for high-level design
- Bundled and refactored the hash entries' related artifacts in XXPH3FilterBitsBuilder into `HashEntriesInfo` for better control over the lifetime of these artifacts during `SwapEntires`, `ResetEntries`
- Ensured RocksDB block-based table builder calls `FilterBitsBuilder::MaybePostVerify()` after constructing the filter by `FilterBitsBuilder::Finish()`
- When encountering such filter construction corruption, stop writing the filter content to files and mark such a block-based table building non-ok by storing the corruption status in the builder.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9342
Test Plan:
- Added new unit test `DBFilterConstructionCorruptionTestWithParam.DetectCorruption`
- Included this new feature in `DBFilterConstructionReserveMemoryTestWithParam.ReserveMemory`, as this feature heavily touches ReserveMemory's impl
- For the fallback case, I ran `./filter_bench -impl=3 -detect_filter_construct_corruption=true -reserve_table_builder_memory=true -strict_capacity_limit=true -quick -runs 10 | grep 'Build avg'` to make sure nothing breaks.
- Added to `filter_bench`: increased filter construction time by **30%**, mostly by `MaybePostVerify()`
- FastLocalBloom
- Before change: `./filter_bench -impl=2 -quick -runs 10 | grep 'Build avg'`: **28.86643s**
- After change:
- `./filter_bench -impl=2 -detect_filter_construct_corruption=false -quick -runs 10 | grep 'Build avg'` (expect a tiny increase because MaybePostVerify is always called regardless): **27.6644s (the -4% perf improvement might be because we now drop bloom hash entries in `AddAllEntries` in bulk afterwards rather than one-by-one during iteration; same with the bypassing-MaybePostVerify case below)**
- `./filter_bench -impl=2 -detect_filter_construct_corruption=true -quick -runs 10 | grep 'Build avg'` (expect acceptable increase): **34.41159s (+20%)**
- `./filter_bench -impl=2 -detect_filter_construct_corruption=true -quick -runs 10 | grep 'Build avg'` (by-passing MaybePostVerify, expect minor increase): **27.13431s (-6%)**
- Standard128Ribbon
- Before change: `./filter_bench -impl=3 -quick -runs 10 | grep 'Build avg'`: **122.5384s**
- After change:
- `./filter_bench -impl=3 -detect_filter_construct_corruption=false -quick -runs 10 | grep 'Build avg'` (expect a tiny increase because MaybePostVerify is always called regardless - verified by removing MaybePostVerify in this case and finding only a +-1ns difference): **124.3588s (+2%)**
- `./filter_bench -impl=3 -detect_filter_construct_corruption=true -quick -runs 10 | grep 'Build avg'`(expect acceptable increase): **159.4946s (+30%)**
- `./filter_bench -impl=3 -detect_filter_construct_corruption=true -quick -runs 10 | grep 'Build avg'`(by-passing MaybePostVerify, expect minor increase) : **125.258s (+2%)**
- Added to `db_stress`: `make crash_test`, `./db_stress --detect_filter_construct_corruption=true`
- Manually smoke-tested: manually corrupted the filter construction in some db level tests with basic PUT and background flush. As expected, the error did get returned to users in subsequent PUT and Flush status.
Reviewed By: pdillinger
Differential Revision: D33746928
Pulled By: hx235
fbshipit-source-id: cb056426be5a7debc1cd16f23bc250f36a08ca57
2022-02-02 01:41:20 +00:00
|
|
|
"filter_policy=bloomfilter:4.567:false;detect_filter_construct_"
|
|
|
|
"corruption=true;"
|
2020-11-13 19:51:24 +00:00
|
|
|
// A bug caused read_amp_bytes_per_bit to be a large integer in OPTIONS
|
|
|
|
// file generated by 6.10 to 6.14. Though the bug is fixed in those releases,
|
|
|
|
// we need to handle the case of loading OPTIONS file generated before the
|
|
|
|
// fix.
|
|
|
|
"read_amp_bytes_per_bit=17179869185;",
|
2020-04-22 00:35:28 +00:00
|
|
|
&new_opt));
|
|
|
|
ASSERT_TRUE(new_opt.cache_index_and_filter_blocks);
|
|
|
|
ASSERT_EQ(new_opt.index_type, BlockBasedTableOptions::kHashSearch);
|
|
|
|
ASSERT_EQ(new_opt.checksum, ChecksumType::kxxHash);
|
|
|
|
ASSERT_TRUE(new_opt.block_cache != nullptr);
|
|
|
|
ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL * 1024UL);
|
|
|
|
ASSERT_EQ(new_opt.block_size, 1024UL);
|
|
|
|
ASSERT_EQ(new_opt.block_size_deviation, 8);
|
|
|
|
ASSERT_EQ(new_opt.block_restart_interval, 4);
|
|
|
|
ASSERT_EQ(new_opt.format_version, 5U);
|
|
|
|
ASSERT_EQ(new_opt.whole_key_filtering, true);
|
Detect (new) Bloom/Ribbon Filter construction corruption (#9342)
2022-02-02 01:41:20 +00:00
|
|
|
ASSERT_EQ(new_opt.detect_filter_construct_corruption, true);
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_TRUE(new_opt.filter_policy != nullptr);
|
2022-02-18 20:23:48 +00:00
|
|
|
auto bfp = new_opt.filter_policy->CheckedCast<BloomFilterPolicy>();
|
|
|
|
ASSERT_NE(bfp, nullptr);
|
Experimental (production candidate) SST schema for Ribbon filter (#7658)
Summary:
Added experimental public API for Ribbon filter:
NewExperimentalRibbonFilterPolicy(). This experimental API will
take a "Bloom equivalent" bits per key, and configure the Ribbon
filter for the same FP rate as Bloom would have but ~30% space
savings. (Note: optimize_filters_for_memory is not yet implemented
for Ribbon filter. That can be added with no effect on schema.)
Internally, the Ribbon filter is configured using a "one_in_fp_rate"
value, which is 1 over desired FP rate. For example, use 100 for 1%
FP rate. I'm expecting this will be used in the future for configuring
Bloom-like filters, as I expect people to more commonly hold constant
the filter accuracy and change the space vs. time trade-off, rather than
hold constant the space (per key) and change the accuracy vs. time
trade-off, though we might make that available.
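Put as arithmetic (a trivial standalone sketch of the stated relationship):
```
// one_in_fp_rate is simply the reciprocal of the target false-positive rate.
constexpr double kDesiredFpRate = 0.01;                // 1% FP rate
constexpr double kOneInFpRate = 1.0 / kDesiredFpRate;  // == 100
static_assert(kOneInFpRate == 100.0, "1% FP rate -> one_in_fp_rate of 100");
```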
### Benchmarking
```
$ ./filter_bench -impl=2 -quick -m_keys_total_max=200 -average_keys_per_filter=100000 -net_includes_hashing
Building...
Build avg ns/key: 34.1341
Number of filters: 1993
Total size (MB): 238.488
Reported total allocated memory (MB): 262.875
Reported internal fragmentation: 10.2255%
Bits/key stored: 10.0029
----------------------------
Mixed inside/outside queries...
Single filter net ns/op: 18.7508
Random filter net ns/op: 258.246
Average FP rate %: 0.968672
----------------------------
Done. (For more info, run with -legend or -help.)
$ ./filter_bench -impl=3 -quick -m_keys_total_max=200 -average_keys_per_filter=100000 -net_includes_hashing
Building...
Build avg ns/key: 130.851
Number of filters: 1993
Total size (MB): 168.166
Reported total allocated memory (MB): 183.211
Reported internal fragmentation: 8.94626%
Bits/key stored: 7.05341
----------------------------
Mixed inside/outside queries...
Single filter net ns/op: 58.4523
Random filter net ns/op: 363.717
Average FP rate %: 0.952978
----------------------------
Done. (For more info, run with -legend or -help.)
```
168.166 / 238.488 = 0.705 -> 29.5% space reduction
130.851 / 34.1341 = 3.83x construction time for this Ribbon filter vs. latest Bloom filter (could make that as little as about 2.5x for less space reduction)
### Working around a hashing "flaw"
bloom_test discovered a flaw in the simple hashing applied in
StandardHasher when num_starts == 1 (num_slots == 128), showing an
excessively high FP rate. The problem is that when many entries, on the
order of number of hash bits or kCoeffBits, are associated with the same
start location, the correlation between the CoeffRow and ResultRow (for
efficiency) can lead to a solution that is "universal," or nearly so, for
entries mapping to that start location. (Normally, variance in start
location breaks the effective association between CoeffRow and
ResultRow; the same value for CoeffRow is effectively different if start
locations are different.) Without kUseSmash and with num_starts > 1 (thus
num_starts ~= num_slots), this flaw should be completely irrelevant. Even
with 10M slots, the chances of a single slot having just 16 (or more)
entries map to it--not enough to cause an FP problem, which would be local
to that slot if it happened--is 1 in millions. This spreadsheet formula
shows that: =1/(10000000*(1 - POISSON(15, 1, TRUE)))
As kUseSmash==false (the setting for Standard128RibbonBitsBuilder) is
intended for CPU efficiency of filters with many more entries/slots than
kCoeffBits, a very reasonable work-around is to disallow num_starts==1
when !kUseSmash, by making the minimum non-zero number of slots
2*kCoeffBits. This is the work-around I've applied. This also means that
the new Ribbon filter schema (Standard128RibbonBitsBuilder) is not
space-efficient for less than a few hundred entries. Because of this, I
have made it fall back on constructing a Bloom filter, under existing
schema, when that is more space efficient for small filters. (We can
change this in the future if we want.)
TODO: better unit tests for this case in ribbon_test, and probably
update StandardHasher for kUseSmash case so that it can scale nicely to
small filters.
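A small standalone sketch (not part of the test) double-checking that spreadsheet arithmetic; since the Poisson tail sits near the limits of double precision, read the output as an order of magnitude:
```
#include <cmath>
#include <cstdio>

int main() {
  // CDF of Poisson(mean=1) at 15, i.e. P[a slot receives <= 15 entries].
  double lambda = 1.0, term = std::exp(-lambda), cdf15 = 0.0;
  for (int k = 0; k <= 15; ++k) {
    cdf15 += term;
    term *= lambda / (k + 1);
  }
  // Mirrors =1/(10000000*(1 - POISSON(15, 1, TRUE))) -> "1 in millions".
  std::printf("%g\n", 1.0 / (1e7 * (1.0 - cdf15)));
  return 0;
}
```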
### Other related changes
* Add Ribbon filter to stress/crash test
* Add Ribbon filter to filter_bench as -impl=3
* Add option string support, as in "filter_policy=experimental_ribbon:5.678;"
where 5.678 is the Bloom equivalent bits per key.
* Rename internal mode BloomFilterPolicy::kAuto to kAutoBloom
* Add a general BuiltinFilterBitsBuilder::CalculateNumEntry based on
binary searching CalculateSpace (inefficient), so that subclasses
(especially experimental ones) don't have to provide an efficient
implementation inverting CalculateSpace.
* Minor refactor FastLocalBloomBitsBuilder for new base class
XXH3pFilterBitsBuilder shared with new Standard128RibbonBitsBuilder,
which allows the latter to fall back on Bloom construction in some
extreme cases.
* Mostly updated bloom_test for Ribbon filter, though a test like
FullBloomTest::Schema is a next TODO to ensure schema stability
(in case this becomes production-ready schema as it is).
* Add some APIs to ribbon_impl.h for configuring Ribbon filters.
Although these are reasonably covered by bloom_test, TODO more unit
tests in ribbon_test
* Added a "tool" FindOccupancyForSuccessRate to ribbon_test to get data
for constructing the linear approximations in GetNumSlotsFor95PctSuccess.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7658
Test Plan:
Some unit tests updated but other testing is left TODO. This
is considered experimental but laying down schema compatibility as early
as possible in case it proves production-quality. Also tested in
stress/crash test.
Reviewed By: jay-zhuang
Differential Revision: D24899349
Pulled By: pdillinger
fbshipit-source-id: 9715f3e6371c959d923aea8077c9423c7a9f82b8
2020-11-13 04:45:02 +00:00
|
|
|
EXPECT_EQ(bfp->GetMillibitsPerKey(), 4567);
|
|
|
|
EXPECT_EQ(bfp->GetWholeBitsPerKey(), 5);
|
2020-11-13 19:51:24 +00:00
|
|
|
// Verify that only the lower 32 bits are stored in
|
|
|
|
// new_opt.read_amp_bytes_per_bit (17179869185 == 4 * 2^32 + 1, so 1 remains).
|
|
|
|
EXPECT_EQ(1U, new_opt.read_amp_bytes_per_bit);
|
2020-04-22 00:35:28 +00:00
|
|
|
|
|
|
|
// unknown option
|
2020-10-20 18:51:51 +00:00
|
|
|
Status s = GetBlockBasedTableOptionsFromString(
|
2020-04-22 00:35:28 +00:00
|
|
|
config_options, table_opt,
|
|
|
|
"cache_index_and_filter_blocks=1;index_type=kBinarySearch;"
|
|
|
|
"bad_option=1",
|
2020-10-20 18:51:51 +00:00
|
|
|
&new_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_EQ(static_cast<bool>(table_opt.cache_index_and_filter_blocks),
|
|
|
|
new_opt.cache_index_and_filter_blocks);
|
|
|
|
ASSERT_EQ(table_opt.index_type, new_opt.index_type);
|
|
|
|
|
|
|
|
// unrecognized index type
|
2020-10-20 18:51:51 +00:00
|
|
|
s = GetBlockBasedTableOptionsFromString(
|
2020-04-22 00:35:28 +00:00
|
|
|
config_options, table_opt,
|
2020-10-20 18:51:51 +00:00
|
|
|
"cache_index_and_filter_blocks=1;index_type=kBinarySearchXX", &new_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
|
|
|
|
new_opt.cache_index_and_filter_blocks);
|
|
|
|
ASSERT_EQ(table_opt.index_type, new_opt.index_type);
|
|
|
|
|
|
|
|
// unrecognized checksum type
|
|
|
|
ASSERT_NOK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"cache_index_and_filter_blocks=1;checksum=kxxHashXX", &new_opt));
|
|
|
|
ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
|
|
|
|
new_opt.cache_index_and_filter_blocks);
|
|
|
|
ASSERT_EQ(table_opt.index_type, new_opt.index_type);
|
|
|
|
|
|
|
|
// unrecognized filter policy name
|
2020-10-20 18:51:51 +00:00
|
|
|
s = GetBlockBasedTableOptionsFromString(config_options, table_opt,
|
2020-04-22 00:35:28 +00:00
|
|
|
"filter_policy=bloomfilterxx:4:true",
|
2020-10-20 18:51:51 +00:00
|
|
|
&new_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
Hide deprecated, inefficient block-based filter from public API (#9535)
Summary:
This change removes the ability to configure the deprecated,
inefficient block-based filter in the public API. Options that would
have enabled it now use "full" (and optionally partitioned) filters.
Existing block-based filters can still be read and used, and a "back
door" way to build them still exists, for testing and in case of trouble.
About the only way this removal would cause an issue for users is if
temporary memory for filter construction greatly increases. In
HISTORY.md we suggest a few possible mitigations: partitioned filters,
smaller SST files, or setting reserve_table_builder_memory=true.
Or users who have customized a FilterPolicy using the
CreateFilter/KeyMayMatch mechanism removed in https://github.com/facebook/rocksdb/issues/9501 will have to upgrade
their code. (It's long past time for people to move to the new
builder/reader customization interface.)
This change also introduces some internal-use-only configuration strings
for testing specific filter implementations while bypassing some
compatibility / intelligence logic. This is intended to hint at a path
toward making FilterPolicy Customizable, but it also gives us a "back
door" way to configure block-based filter.
Aside: updated db_bench so that -readonly implies -use_existing_db
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9535
Test Plan:
Unit tests updated. Specifically,
* BlockBasedTableTest.BlockReadCountTest is tweaked to validate the back
door configuration interface and ignoring of `use_block_based_builder`.
* BlockBasedTableTest.TracingGetTest is migrated from testing
block-based filter access pattern to full filter access patter, by
re-ordering some things.
* Options test (pretty self-explanatory)
Performance test - create with `./db_bench -db=/dev/shm/rocksdb1 -bloom_bits=10 -cache_index_and_filter_blocks=1 -benchmarks=fillrandom -num=10000000 -compaction_style=2 -fifo_compaction_max_table_files_size_mb=10000 -fifo_compaction_allow_compaction=0` with and without `-use_block_based_filter`, which creates a DB with 21 SST files in L0. Read with `./db_bench -db=/dev/shm/rocksdb1 -readonly -bloom_bits=10 -cache_index_and_filter_blocks=1 -benchmarks=readrandom -num=10000000 -compaction_style=2 -fifo_compaction_max_table_files_size_mb=10000 -fifo_compaction_allow_compaction=0 -duration=30`
Without -use_block_based_filter: readrandom 464 ops/sec, 689280 KB DB
With -use_block_based_filter: readrandom 169 ops/sec, 690996 KB DB
No consistent difference with fillrandom
Reviewed By: jay-zhuang
Differential Revision: D34153871
Pulled By: pdillinger
fbshipit-source-id: 31f4a933c542f8f09aca47fa64aec67832a69738
2022-02-12 15:04:09 +00:00
|
|
|
|
|
|
|
// missing bits per key
|
|
|
|
s = GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt, "filter_policy=bloomfilter", &new_opt);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
ASSERT_TRUE(s.IsInvalidArgument());
|
2020-04-22 00:35:28 +00:00
|
|
|
|
2022-02-08 21:54:29 +00:00
|
|
|
// Used to be rejected, now accepted
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt, "filter_policy=bloomfilter:4", &new_opt));
|
|
|
|
bfp = dynamic_cast<const BloomFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(bfp->GetMillibitsPerKey(), 4000);
|
|
|
|
EXPECT_EQ(bfp->GetWholeBitsPerKey(), 4);
|
2020-04-22 00:35:28 +00:00
|
|
|
|
Hide deprecated, inefficient block-based filter from public API (#9535)
2022-02-12 15:04:09 +00:00
|
|
|
// use_block_based_builder=true now ignored in public API (same as false)
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt, "filter_policy=bloomfilter:4:true", &new_opt));
|
|
|
|
bfp = dynamic_cast<const BloomFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(bfp->GetMillibitsPerKey(), 4000);
|
|
|
|
EXPECT_EQ(bfp->GetWholeBitsPerKey(), 4);
|
|
|
|
|
|
|
|
// Test configuring using other internal names
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"filter_policy=rocksdb.internal.LegacyBloomFilter:3", &new_opt));
|
Remove deprecated block-based filter (#10184)
Summary:
In https://github.com/facebook/rocksdb/issues/9535, release 7.0, we hid the old block-based filter from being created using
the public API, because of its inefficiency. Although we normally maintain read compatibility
on old DBs forever, filters are not required for reading a DB, only for optimizing read
performance. Thus, it should be acceptable to remove this code and the substantial
maintenance burden it carries as useful features are developed and validated (such
as user timestamp).
This change completely removes the code for reading and writing the old block-based
filters, net removing about 1370 lines of code no longer needed. Options removed from
testing / benchmarking tools. The prior existence is only evident in a couple of places:
* `CacheEntryRole::kDeprecatedFilterBlock` - We can update this public API enum in
a major release to minimize source code incompatibilities.
* A warning is logged when an old table file is opened that used the old block-based
filter. This is provided as a courtesy, and would be a pain to unit test, so manual testing
should suffice. Unfortunately, sst_dump does not tell you whether a file uses
block-based filter, and the structure of the code makes it very difficult to fix.
* To detect that case, `kObsoleteFilterBlockPrefix` (renamed from `kFilterBlockPrefix`)
for metaindex is maintained (for now).
Other notes:
* In some cases where numbers are associated with filter configurations, we have had to
update the assigned numbers so that they all correspond to something that exists.
* Fixed potential stat counting bug by assuming `filter_checked = false` for cases
like `filter == nullptr` rather than assuming `filter_checked = true`
* Removed obsolete `block_offset` and `prefix_extractor` parameters from several
functions.
* Removed some unnecessary checks `if (!table_prefix_extractor() && !prefix_extractor)`
because the caller guarantees the prefix extractor exists and is compatible
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10184
Test Plan:
tests updated, manually test new warning in LOG using base version to
generate a DB
Reviewed By: riversand963
Differential Revision: D37212647
Pulled By: pdillinger
fbshipit-source-id: 06ee020d8de3b81260ffc36ad0c1202cbf463a80
2022-06-16 22:51:33 +00:00
|
|
|
auto builtin =
|
2022-02-16 16:27:37 +00:00
|
|
|
dynamic_cast<const BuiltinFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(builtin->GetId(), "rocksdb.internal.LegacyBloomFilter:3");
|
Hide deprecated, inefficient block-based filter from public API (#9535)
2022-02-12 15:04:09 +00:00
|
|
|
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"filter_policy=rocksdb.internal.FastLocalBloomFilter:1.234", &new_opt));
|
2022-02-16 16:27:37 +00:00
|
|
|
builtin =
|
|
|
|
dynamic_cast<const BuiltinFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(builtin->GetId(), "rocksdb.internal.FastLocalBloomFilter:1.234");
|
Hide deprecated, inefficient block-based filter from public API (#9535)
2022-02-12 15:04:09 +00:00
|
|
|
|
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
|
|
|
config_options, table_opt,
|
|
|
|
"filter_policy=rocksdb.internal.Standard128RibbonFilter:1.234",
|
|
|
|
&new_opt));
|
2022-02-16 16:27:37 +00:00
|
|
|
builtin =
|
|
|
|
dynamic_cast<const BuiltinFilterPolicy*>(new_opt.filter_policy.get());
|
|
|
|
EXPECT_EQ(builtin->GetId(), "rocksdb.internal.Standard128RibbonFilter:1.234");
|
Hide deprecated, inefficient block-based filter from public API (#9535)
2022-02-12 15:04:09 +00:00
|
|
|
|
Add Bloom/Ribbon hybrid API support (#8679)
Summary:
This is essentially resurrection and fixing of the part of
https://github.com/facebook/rocksdb/issues/8198 that was reverted in https://github.com/facebook/rocksdb/issues/8212, using data added in https://github.com/facebook/rocksdb/issues/8246. Basically,
when configuring Ribbon filter, you can specify an LSM level before which
Bloom will be used instead of Ribbon. But Bloom is only considered for
Leveled and Universal compaction styles and file going into a known LSM
level. This way, SST file writer, FIFO compaction, etc. use Ribbon filter as
you would expect with NewRibbonFilterPolicy.
So that this can be controlled with a single int value and so that flushes
can be distinguished from intra-L0, we consider flush to go to level -1 for
the purposes of this option. (Explained in API comment.)
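A minimal sketch of such a hybrid setup (the level threshold here is chosen for illustration):
```
#include "rocksdb/filter_policy.h"
#include "rocksdb/table.h"

rocksdb::BlockBasedTableOptions MakeHybridFilterTableOptions() {
  rocksdb::BlockBasedTableOptions bbto;
  // ~5.678 Bloom-equivalent bits/key; Bloom for levels below 1 (flush counts
  // as level -1), Ribbon from L1 on. Option-string form: "ribbonfilter:5.678:1".
  bbto.filter_policy.reset(
      rocksdb::NewRibbonFilterPolicy(5.678, /*bloom_before_level=*/1));
  return bbto;
}
```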
I also expect the most common and recommended Ribbon configuration to
use Bloom during flush, to minimize slowing down writes and because according
to my estimates, Ribbon only pays off if the structure lives in memory for
more than an hour. Thus, I have changed the default for NewRibbonFilterPolicy
to be this mild hybrid configuration. I don't really want to add something like
NewHybridFilterPolicy because at least the mild hybrid configuration (Bloom for
flush, Ribbon otherwise) should be considered a natural choice.
C APIs also updated, but because they don't support overloading,
rocksdb_filterpolicy_create_ribbon is kept pure ribbon for clarity and
rocksdb_filterpolicy_create_ribbon_hybrid must be called for a hybrid
configuration. While touching C API, I changed bits per key options from
int to double.
BuiltinFilterPolicy is needed so that LevelThresholdFilterPolicy doesn't inherit
unused fields from BloomFilterPolicy.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8679
Test Plan: new + updated tests, including crash test
Reviewed By: jay-zhuang
Differential Revision: D30445797
Pulled By: pdillinger
fbshipit-source-id: 6f5aeddfd6d79f7e55493b563c2d1d2d568892e1
2021-08-21 00:59:24 +00:00
|
|
|
// Ribbon filter policy (no Bloom hybrid)
|
Experimental (production candidate) SST schema for Ribbon filter (#7658)
2020-11-13 04:45:02 +00:00
|
|
|
ASSERT_OK(GetBlockBasedTableOptionsFromString(
|
Add Bloom/Ribbon hybrid API support (#8679)
2021-08-21 00:59:24 +00:00
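
As a cross-check on the API described above, here is a minimal C++ sketch of
configuring the hybrid programmatically (the level threshold and bits-per-key
values are arbitrary examples, not recommendations):

```cpp
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

// Sketch: Bloom-equivalent 9.9 bits/key; Bloom is used for levels below 1
// (flush counts as level -1, so flushes and L0), Ribbon from level 1 down.
// bloom_before_level=-1 would mean pure Ribbon everywhere.
rocksdb::Options MakeRibbonHybridOptions() {
  rocksdb::BlockBasedTableOptions bbto;
  bbto.filter_policy.reset(rocksdb::NewRibbonFilterPolicy(
      /*bloom_equivalent_bits_per_key=*/9.9, /*bloom_before_level=*/1));
  rocksdb::Options options;
  options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(bbto));
  return options;
}
```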

  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt, "filter_policy=ribbonfilter:5.678:-1;",
      &new_opt));
  ASSERT_TRUE(new_opt.filter_policy != nullptr);
  auto rfp =
      dynamic_cast<const RibbonFilterPolicy*>(new_opt.filter_policy.get());
  EXPECT_EQ(rfp->GetMillibitsPerKey(), 5678);
  EXPECT_EQ(rfp->GetBloomBeforeLevel(), -1);

  // Ribbon filter policy (default Bloom hybrid)
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt, "filter_policy=ribbonfilter:6.789;",
      &new_opt));
  ASSERT_TRUE(new_opt.filter_policy != nullptr);
  rfp = dynamic_cast<const RibbonFilterPolicy*>(new_opt.filter_policy.get());
  EXPECT_EQ(rfp->GetMillibitsPerKey(), 6789);
  EXPECT_EQ(rfp->GetBloomBeforeLevel(), 0);

  // Ribbon filter policy (custom Bloom hybrid)
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt, "filter_policy=ribbonfilter:6.789:5;",
      &new_opt));
  ASSERT_TRUE(new_opt.filter_policy != nullptr);
  rfp = dynamic_cast<const RibbonFilterPolicy*>(new_opt.filter_policy.get());
  EXPECT_EQ(rfp->GetMillibitsPerKey(), 6789);
  EXPECT_EQ(rfp->GetBloomBeforeLevel(), 5);

  // Check block cache options are overwritten when specified
  // in new format as a struct.
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt,
      "block_cache={capacity=1M;num_shard_bits=4;"
      "strict_capacity_limit=true;high_pri_pool_ratio=0.5;};"
      "block_cache_compressed={capacity=1M;num_shard_bits=4;"
      "strict_capacity_limit=true;high_pri_pool_ratio=0.5;}",
      &new_opt));
  ASSERT_TRUE(new_opt.block_cache != nullptr);
  ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL*1024UL);
  ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
                ->GetNumShardBits(),
            4);
  ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), true);
  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
                new_opt.block_cache)->GetHighPriPoolRatio(), 0.5);
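
For comparison, a sketch of building the same cache programmatically (using
the classic NewLRUCache overload from rocksdb/cache.h; the values mirror the
option string above):

```cpp
#include <memory>

#include "rocksdb/cache.h"

// Sketch: programmatic equivalent of
// block_cache={capacity=1M;num_shard_bits=4;strict_capacity_limit=true;
//              high_pri_pool_ratio=0.5;}
std::shared_ptr<rocksdb::Cache> MakeBlockCache() {
  return rocksdb::NewLRUCache(/*capacity=*/1024 * 1024,
                              /*num_shard_bits=*/4,
                              /*strict_capacity_limit=*/true,
                              /*high_pri_pool_ratio=*/0.5);
}
```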

  // Set only block cache capacity. Check other values are
  // reset to default values.
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt,
      "block_cache={capacity=2M};"
      "block_cache_compressed={capacity=2M}",
      &new_opt));
  ASSERT_TRUE(new_opt.block_cache != nullptr);
  ASSERT_EQ(new_opt.block_cache->GetCapacity(), 2*1024UL*1024UL);
  // Default values
  ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
                ->GetNumShardBits(),
            GetDefaultCacheShardBits(new_opt.block_cache->GetCapacity()));
  ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), false);
  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
                ->GetHighPriPoolRatio(),
            0.5);

  // Set a couple of block cache options.
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt,
      "block_cache={num_shard_bits=5;high_pri_pool_ratio=0.5;};"
      "block_cache_compressed={num_shard_bits=5;"
      "high_pri_pool_ratio=0.0;}",
      &new_opt));
  ASSERT_EQ(new_opt.block_cache->GetCapacity(), 0);
  ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
                ->GetNumShardBits(),
            5);
  ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), false);
  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(
                new_opt.block_cache)->GetHighPriPoolRatio(), 0.5);

  // Set a couple of block cache options.
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt,
      "block_cache={capacity=1M;num_shard_bits=4;"
      "strict_capacity_limit=true;};"
      "block_cache_compressed={capacity=1M;num_shard_bits=4;"
      "strict_capacity_limit=true;}",
      &new_opt));
  ASSERT_TRUE(new_opt.block_cache != nullptr);
  ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL*1024UL);
  ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
                ->GetNumShardBits(),
            4);
  ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), true);
  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
                ->GetHighPriPoolRatio(),
            0.5);

  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt, "filter_policy=rocksdb.BloomFilter:1.234",
      &new_opt));
  ASSERT_TRUE(new_opt.filter_policy != nullptr);
  ASSERT_TRUE(
      new_opt.filter_policy->IsInstanceOf(BloomFilterPolicy::kClassName()));
  ASSERT_TRUE(
      new_opt.filter_policy->IsInstanceOf(BloomFilterPolicy::kNickName()));

  // Ribbon filter policy alternative name
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt, "filter_policy=rocksdb.RibbonFilter:6.789:5;",
      &new_opt));
  ASSERT_TRUE(new_opt.filter_policy != nullptr);
  ASSERT_TRUE(
      new_opt.filter_policy->IsInstanceOf(RibbonFilterPolicy::kClassName()));
  ASSERT_TRUE(
      new_opt.filter_policy->IsInstanceOf(RibbonFilterPolicy::kNickName()));
}
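
Outside of tests, the same name-based syntax can be applied to a table factory
in application code; a short sketch (the filter spec is an arbitrary example):

```cpp
#include "rocksdb/convenience.h"
#include "rocksdb/table.h"

// Sketch: parse a filter policy by its registered name into fresh
// BlockBasedTableOptions.
rocksdb::Status MakeTableOptions(rocksdb::BlockBasedTableOptions* out) {
  rocksdb::ConfigOptions config_options;
  rocksdb::BlockBasedTableOptions base;
  return rocksdb::GetBlockBasedTableOptionsFromString(
      config_options, base, "filter_policy=rocksdb.RibbonFilter:10:0;", out);
}
```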

TEST_F(OptionsTest, GetPlainTableOptionsFromString) {
  PlainTableOptions table_opt;
  PlainTableOptions new_opt;
  ConfigOptions config_options;
  config_options.input_strings_escaped = false;
  config_options.ignore_unknown_options = false;
  // make sure default values are overwritten by something else
  ASSERT_OK(GetPlainTableOptionsFromString(
      config_options, table_opt,
      "user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
      "index_sparseness=8;huge_page_tlb_size=4;encoding_type=kPrefix;"
      "full_scan_mode=true;store_index_in_file=true",
      &new_opt));
  ASSERT_EQ(new_opt.user_key_len, 66u);
  ASSERT_EQ(new_opt.bloom_bits_per_key, 20);
  ASSERT_EQ(new_opt.hash_table_ratio, 0.5);
  ASSERT_EQ(new_opt.index_sparseness, 8);
  ASSERT_EQ(new_opt.huge_page_tlb_size, 4);
  ASSERT_EQ(new_opt.encoding_type, EncodingType::kPrefix);
  ASSERT_TRUE(new_opt.full_scan_mode);
  ASSERT_TRUE(new_opt.store_index_in_file);

  // unknown option
  Status s = GetPlainTableOptionsFromString(
      config_options, table_opt,
      "user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
      "bad_option=1",
      &new_opt);
  ASSERT_NOK(s);
  ASSERT_TRUE(s.IsInvalidArgument());

  // unrecognized EncodingType
  s = GetPlainTableOptionsFromString(
      config_options, table_opt,
      "user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
      "encoding_type=kPrefixXX",
      &new_opt);
  ASSERT_NOK(s);
  ASSERT_TRUE(s.IsInvalidArgument());
}
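
A sketch of the programmatic counterpart of the option string above
(PlainTableOptions and NewPlainTableFactory from rocksdb/table.h; the prefix
extractor is an assumption of typical PlainTable use, not part of the test):

```cpp
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/table.h"

// Sketch: PlainTable configuration mirroring the option string above.
rocksdb::Options MakePlainTableOptions() {
  rocksdb::PlainTableOptions pto;
  pto.user_key_len = 66;
  pto.bloom_bits_per_key = 20;
  pto.hash_table_ratio = 0.5;
  pto.index_sparseness = 8;
  pto.encoding_type = rocksdb::kPrefix;
  pto.store_index_in_file = true;
  rocksdb::Options options;
  options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(8));
  options.table_factory.reset(rocksdb::NewPlainTableFactory(pto));
  return options;
}
```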

TEST_F(OptionsTest, GetMemTableRepFactoryFromString) {
  std::unique_ptr<MemTableRepFactory> new_mem_factory = nullptr;

  ASSERT_OK(GetMemTableRepFactoryFromString("skip_list", &new_mem_factory));
  ASSERT_OK(GetMemTableRepFactoryFromString("skip_list:16", &new_mem_factory));
  ASSERT_STREQ(new_mem_factory->Name(), "SkipListFactory");
  ASSERT_NOK(GetMemTableRepFactoryFromString("skip_list:16:invalid_opt",
                                             &new_mem_factory));

  ASSERT_OK(GetMemTableRepFactoryFromString("prefix_hash", &new_mem_factory));
  ASSERT_OK(GetMemTableRepFactoryFromString("prefix_hash:1000",
                                            &new_mem_factory));
  ASSERT_STREQ(new_mem_factory->Name(), "HashSkipListRepFactory");
  ASSERT_NOK(GetMemTableRepFactoryFromString("prefix_hash:1000:invalid_opt",
                                             &new_mem_factory));

  ASSERT_OK(GetMemTableRepFactoryFromString("hash_linkedlist",
                                            &new_mem_factory));
  ASSERT_OK(GetMemTableRepFactoryFromString("hash_linkedlist:1000",
                                            &new_mem_factory));
  ASSERT_EQ(std::string(new_mem_factory->Name()), "HashLinkListRepFactory");
  ASSERT_NOK(GetMemTableRepFactoryFromString("hash_linkedlist:1000:invalid_opt",
                                             &new_mem_factory));

  ASSERT_OK(GetMemTableRepFactoryFromString("vector", &new_mem_factory));
  ASSERT_OK(GetMemTableRepFactoryFromString("vector:1024", &new_mem_factory));
  ASSERT_EQ(std::string(new_mem_factory->Name()), "VectorRepFactory");
  ASSERT_NOK(GetMemTableRepFactoryFromString("vector:1024:invalid_opt",
                                             &new_mem_factory));

  ASSERT_NOK(GetMemTableRepFactoryFromString("cuckoo", &new_mem_factory));
  // CuckooHash memtable is already removed.
  ASSERT_NOK(GetMemTableRepFactoryFromString("cuckoo:1024", &new_mem_factory));

  ASSERT_NOK(GetMemTableRepFactoryFromString("bad_factory", &new_mem_factory));
}
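
For reference, a sketch of installing one of these memtable factories directly
(constructors from rocksdb/memtablerep.h; the bucket count mirrors the
"prefix_hash:1000" form above, and prefix_hash also needs a prefix extractor):

```cpp
#include "rocksdb/memtablerep.h"
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"

// Sketch: equivalent of "prefix_hash:1000" -- a HashSkipList memtable with
// 1000 buckets; a prefix extractor is required for prefix_hash to be useful.
rocksdb::Options MakePrefixHashOptions() {
  rocksdb::Options options;
  options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(4));
  options.memtable_factory.reset(
      rocksdb::NewHashSkipListRepFactory(/*bucket_count=*/1000));
  return options;
}
```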

TEST_F(OptionsTest, MemTableRepFactoryCreateFromString) {
  std::unique_ptr<MemTableRepFactory> new_mem_factory = nullptr;
  ConfigOptions config_options;
  config_options.ignore_unsupported_options = false;
  config_options.ignore_unknown_options = false;

  ASSERT_OK(MemTableRepFactory::CreateFromString(config_options, "skip_list",
                                                 &new_mem_factory));
  ASSERT_OK(MemTableRepFactory::CreateFromString(config_options, "skip_list:16",
                                                 &new_mem_factory));
  ASSERT_STREQ(new_mem_factory->Name(), "SkipListFactory");
  ASSERT_TRUE(new_mem_factory->IsInstanceOf("skip_list"));
  ASSERT_TRUE(new_mem_factory->IsInstanceOf("SkipListFactory"));
  ASSERT_NOK(MemTableRepFactory::CreateFromString(
      config_options, "skip_list:16:invalid_opt", &new_mem_factory));

  ASSERT_NOK(MemTableRepFactory::CreateFromString(
      config_options, "invalid_opt=10", &new_mem_factory));

  // Test a reset
  ASSERT_OK(MemTableRepFactory::CreateFromString(config_options, "",
                                                 &new_mem_factory));
  ASSERT_EQ(new_mem_factory, nullptr);
  ASSERT_NOK(MemTableRepFactory::CreateFromString(
      config_options, "invalid_opt=10", &new_mem_factory));

  ASSERT_OK(MemTableRepFactory::CreateFromString(
      config_options, "id=skip_list; lookahead=32", &new_mem_factory));
  ASSERT_OK(MemTableRepFactory::CreateFromString(config_options, "prefix_hash",
                                                 &new_mem_factory));
  ASSERT_OK(MemTableRepFactory::CreateFromString(
      config_options, "prefix_hash:1000", &new_mem_factory));
  ASSERT_STREQ(new_mem_factory->Name(), "HashSkipListRepFactory");
  ASSERT_TRUE(new_mem_factory->IsInstanceOf("prefix_hash"));
  ASSERT_TRUE(new_mem_factory->IsInstanceOf("HashSkipListRepFactory"));
  ASSERT_NOK(MemTableRepFactory::CreateFromString(
      config_options, "prefix_hash:1000:invalid_opt", &new_mem_factory));
  ASSERT_OK(MemTableRepFactory::CreateFromString(
      config_options,
      "id=prefix_hash; bucket_count=32; skiplist_height=64; "
      "branching_factor=16",
      &new_mem_factory));
  ASSERT_NOK(MemTableRepFactory::CreateFromString(
      config_options,
      "id=prefix_hash; bucket_count=32; skiplist_height=64; "
      "branching_factor=16; invalid=unknown",
      &new_mem_factory));

  ASSERT_OK(MemTableRepFactory::CreateFromString(
      config_options, "hash_linkedlist", &new_mem_factory));
  ASSERT_OK(MemTableRepFactory::CreateFromString(
      config_options, "hash_linkedlist:1000", &new_mem_factory));
  ASSERT_STREQ(new_mem_factory->Name(), "HashLinkListRepFactory");
  ASSERT_TRUE(new_mem_factory->IsInstanceOf("hash_linkedlist"));
  ASSERT_TRUE(new_mem_factory->IsInstanceOf("HashLinkListRepFactory"));
  ASSERT_NOK(MemTableRepFactory::CreateFromString(
      config_options, "hash_linkedlist:1000:invalid_opt", &new_mem_factory));
  ASSERT_OK(MemTableRepFactory::CreateFromString(
      config_options,
      "id=hash_linkedlist; bucket_count=32; threshold=64; huge_page_size=16; "
      "logging_threshold=12; log_when_flash=true",
      &new_mem_factory));
  ASSERT_NOK(MemTableRepFactory::CreateFromString(
      config_options,
      "id=hash_linkedlist; bucket_count=32; threshold=64; huge_page_size=16; "
      "logging_threshold=12; log_when_flash=true; invalid=unknown",
      &new_mem_factory));

  ASSERT_OK(MemTableRepFactory::CreateFromString(config_options, "vector",
                                                 &new_mem_factory));
  ASSERT_OK(MemTableRepFactory::CreateFromString(config_options, "vector:1024",
                                                 &new_mem_factory));
  ASSERT_STREQ(new_mem_factory->Name(), "VectorRepFactory");
  ASSERT_TRUE(new_mem_factory->IsInstanceOf("vector"));
  ASSERT_TRUE(new_mem_factory->IsInstanceOf("VectorRepFactory"));
  ASSERT_NOK(MemTableRepFactory::CreateFromString(
      config_options, "vector:1024:invalid_opt", &new_mem_factory));
  ASSERT_OK(MemTableRepFactory::CreateFromString(
      config_options, "id=vector; count=42", &new_mem_factory));
  ASSERT_NOK(MemTableRepFactory::CreateFromString(
      config_options, "id=vector; invalid=unknown", &new_mem_factory));
  ASSERT_NOK(MemTableRepFactory::CreateFromString(config_options, "cuckoo",
                                                  &new_mem_factory));
  // CuckooHash memtable is already removed.
  ASSERT_NOK(MemTableRepFactory::CreateFromString(config_options, "cuckoo:1024",
                                                  &new_mem_factory));

  ASSERT_NOK(MemTableRepFactory::CreateFromString(config_options, "bad_factory",
                                                  &new_mem_factory));
}
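
A sketch of using this factory interface from application code (same
CreateFromString API as the test; moving the unique_ptr into
Options::memtable_factory, a shared_ptr, is one reasonable way to install it):

```cpp
#include <memory>
#include <utility>

#include "rocksdb/convenience.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/options.h"

// Sketch: build a memtable factory from its string form and install it.
rocksdb::Status InstallMemTableFactory(rocksdb::Options* options) {
  rocksdb::ConfigOptions config_options;
  std::unique_ptr<rocksdb::MemTableRepFactory> factory;
  rocksdb::Status s = rocksdb::MemTableRepFactory::CreateFromString(
      config_options, "id=skip_list; lookahead=32", &factory);
  if (s.ok()) {
    options->memtable_factory = std::move(factory);
  }
  return s;
}
```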

class CustomEnv : public EnvWrapper {
 public:
  explicit CustomEnv(Env* _target) : EnvWrapper(_target) {}
  static const char* kClassName() { return "CustomEnv"; }
  const char* Name() const override { return kClassName(); }
};

TEST_F(OptionsTest, GetOptionsFromStringTest) {
  Options base_options, new_options;
  ConfigOptions config_options;
  config_options.input_strings_escaped = false;
  config_options.ignore_unknown_options = false;

  base_options.write_buffer_size = 20;
  base_options.min_write_buffer_number_to_merge = 15;
  BlockBasedTableOptions block_based_table_options;
  block_based_table_options.cache_index_and_filter_blocks = true;
  base_options.table_factory.reset(
      NewBlockBasedTableFactory(block_based_table_options));

  // Register an Env with object registry.
  ObjectLibrary::Default()->AddFactory<Env>(
      CustomEnv::kClassName(),
      [](const std::string& /*name*/, std::unique_ptr<Env>* /*env_guard*/,
         std::string* /* errmsg */) {
        static CustomEnv env(Env::Default());
        return &env;
      });

  ASSERT_OK(GetOptionsFromString(
      config_options, base_options,
      "write_buffer_size=10;max_write_buffer_number=16;"
      "block_based_table_factory={block_cache=1M;block_size=4;};"
      "compression_opts=4:5:6;create_if_missing=true;max_open_files=1;"
      "bottommost_compression_opts=5:6:7;create_if_missing=true;max_open_files="
      "1;"
      "rate_limiter_bytes_per_sec=1024;env=CustomEnv",
      &new_options));

  ASSERT_EQ(new_options.compression_opts.window_bits, 4);
  ASSERT_EQ(new_options.compression_opts.level, 5);
  ASSERT_EQ(new_options.compression_opts.strategy, 6);
  ASSERT_EQ(new_options.compression_opts.max_dict_bytes, 0u);
  ASSERT_EQ(new_options.compression_opts.zstd_max_train_bytes, 0u);
  ASSERT_EQ(new_options.compression_opts.parallel_threads, 1u);
  ASSERT_EQ(new_options.compression_opts.enabled, false);

Support using ZDICT_finalizeDictionary to generate zstd dictionary (#9857)
Summary:
An untrained dictionary is currently simply the concatenation of several samples. The ZSTD API ZDICT_finalizeDictionary() can improve such a dictionary's effectiveness at low cost. This PR changes how the dictionary is created: it calls the ZSTD ZDICT_finalizeDictionary() API instead of building a raw content dictionary (when max_dict_buffer_bytes > 0), and passes in all buffered uncompressed data blocks as samples.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9857
Test Plan:
#### db_bench test for cpu/memory of compression+decompression and space saving on synthetic data:
Set up: change the parameter [here](https://github.com/facebook/rocksdb/blob/fb9a167a55e0970b1ef6f67c1600c8d9c4c6114f/tools/db_bench_tool.cc#L1766) to 16384 to make synthetic data more compressible.
```
# linked local ZSTD with version 1.5.2
# DEBUG_LEVEL=0 ROCKSDB_NO_FBCODE=1 ROCKSDB_DISABLE_ZSTD=1 EXTRA_CXXFLAGS="-DZSTD_STATIC_LINKING_ONLY -DZSTD -I/data/users/changyubi/install/include/" EXTRA_LDFLAGS="-L/data/users/changyubi/install/lib/ -l:libzstd.a" make -j32 db_bench
dict_bytes=16384
train_bytes=1048576
echo "========== No Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== Raw Content Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench_main -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench_main -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== FinalizeDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== TrainDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
# Result: TrainDictionary is much better on space saving, but FinalizeDictionary seems to use less memory.
# before compression data size: 1.2GB
dict_bytes=16384
max_dict_buffer_bytes = 1048576
space cpu/memory
No Dictionary 468M 14.93user 1.00system 0:15.92elapsed 100%CPU (0avgtext+0avgdata 23904maxresident)k
Raw Dictionary 251M 15.81user 0.80system 0:16.56elapsed 100%CPU (0avgtext+0avgdata 156808maxresident)k
FinalizeDictionary 236M 11.93user 0.64system 0:12.56elapsed 100%CPU (0avgtext+0avgdata 89548maxresident)k
TrainDictionary 84M 7.29user 0.45system 0:07.75elapsed 100%CPU (0avgtext+0avgdata 97288maxresident)k
```
#### Benchmark on 10 sample SST files for space saving and CPU time on compression:
FinalizeDictionary is comparable to TrainDictionary in terms of space saving, and takes less time in compression.
```
dict_bytes=16384
train_bytes=1048576
for sst_file in `ls ../temp/myrock-sst/`
do
echo "********** $sst_file **********"
echo "========== No Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD
echo "========== Raw Content Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes
echo "========== FinalizeDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes --compression_use_zstd_finalize_dict
echo "========== TrainDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes
done
010240.sst (Size/Time) 011029.sst 013184.sst 021552.sst 185054.sst 185137.sst 191666.sst 7560381.sst 7604174.sst 7635312.sst
No Dictionary 28165569 / 2614419 32899411 / 2976832 32977848 / 3055542 31966329 / 2004590 33614351 / 1755877 33429029 / 1717042 33611933 / 1776936 33634045 / 2771417 33789721 / 2205414 33592194 / 388254
Raw Content Dictionary 28019950 / 2697961 33748665 / 3572422 33896373 / 3534701 26418431 / 2259658 28560825 / 1839168 28455030 / 1846039 28494319 / 1861349 32391599 / 3095649 33772142 / 2407843 33592230 / 474523
FinalizeDictionary 27896012 / 2650029 33763886 / 3719427 33904283 / 3552793 26008225 / 2198033 28111872 / 1869530 28014374 / 1789771 28047706 / 1848300 32296254 / 3204027 33698698 / 2381468 33592344 / 517433
TrainDictionary 28046089 / 2740037 33706480 / 3679019 33885741 / 3629351 25087123 / 2204558 27194353 / 1970207 27234229 / 1896811 27166710 / 1903119 32011041 / 3322315 32730692 / 2406146 33608631 / 570593
```
#### Decompression/Read test:
With FinalizeDictionary/TrainDictionary, some data structures used for decompression are stored in the dictionary, so decompression/reads are expected to be faster.
```
dict_bytes=16384
train_bytes=1048576
echo "No Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=0 > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=0 2>&1 | grep MB/s
echo "Raw Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes 2>&1 | grep MB/s
echo "FinalizeDict"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false 2>&1 | grep MB/s
echo "Train Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes 2>&1 | grep MB/s
No Dictionary
readrandom : 12.183 micros/op 82082 ops/sec 12.183 seconds 1000000 operations; 9.1 MB/s (1000000 of 1000000 found)
Raw Dictionary
readrandom : 12.314 micros/op 81205 ops/sec 12.314 seconds 1000000 operations; 9.0 MB/s (1000000 of 1000000 found)
FinalizeDict
readrandom : 9.787 micros/op 102180 ops/sec 9.787 seconds 1000000 operations; 11.3 MB/s (1000000 of 1000000 found)
Train Dictionary
readrandom : 9.698 micros/op 103108 ops/sec 9.699 seconds 1000000 operations; 11.4 MB/s (1000000 of 1000000 found)
```
Reviewed By: ajkr
Differential Revision: D35720026
Pulled By: cbi42
fbshipit-source-id: 24d230fdff0fd28a1bb650658798f00dfcfb2a1f
2022-05-20 19:09:09 +00:00
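
For context, a minimal sketch of enabling this behavior programmatically (the
CompressionOptions fields are the ones the assertions below exercise; the
sizes are arbitrary examples):

```cpp
#include "rocksdb/options.h"

// Sketch: ZSTD dictionary compression using the ZDICT_finalizeDictionary
// path (use_zstd_dict_trainer=false) instead of the ZDICT trainer.
rocksdb::Options MakeZstdDictOptions() {
  rocksdb::Options options;
  options.compression = rocksdb::kZSTD;
  options.compression_opts.max_dict_bytes = 16384;          // dictionary size
  options.compression_opts.zstd_max_train_bytes = 1 << 20;  // sample budget
  options.compression_opts.use_zstd_dict_trainer = false;   // finalize path
  return options;
}
```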

  ASSERT_EQ(new_options.compression_opts.use_zstd_dict_trainer, true);
  ASSERT_EQ(new_options.bottommost_compression, kDisableCompressionOption);
  ASSERT_EQ(new_options.bottommost_compression_opts.window_bits, 5);
  ASSERT_EQ(new_options.bottommost_compression_opts.level, 6);
  ASSERT_EQ(new_options.bottommost_compression_opts.strategy, 7);
  ASSERT_EQ(new_options.bottommost_compression_opts.max_dict_bytes, 0u);
  ASSERT_EQ(new_options.bottommost_compression_opts.zstd_max_train_bytes, 0u);
  ASSERT_EQ(new_options.bottommost_compression_opts.parallel_threads, 1u);
  ASSERT_EQ(new_options.bottommost_compression_opts.enabled, false);
  ASSERT_EQ(new_options.bottommost_compression_opts.use_zstd_dict_trainer,
            true);
  ASSERT_EQ(new_options.write_buffer_size, 10U);
  ASSERT_EQ(new_options.max_write_buffer_number, 16);
  const auto new_bbto =
      new_options.table_factory->GetOptions<BlockBasedTableOptions>();
  ASSERT_NE(new_bbto, nullptr);
  ASSERT_EQ(new_bbto->block_cache->GetCapacity(), 1U << 20);
  ASSERT_EQ(new_bbto->block_size, 4U);
  // don't overwrite block based table options
  ASSERT_TRUE(new_bbto->cache_index_and_filter_blocks);

  ASSERT_EQ(new_options.create_if_missing, true);
  ASSERT_EQ(new_options.max_open_files, 1);
  ASSERT_TRUE(new_options.rate_limiter.get() != nullptr);
  Env* newEnv = new_options.env;
  ASSERT_OK(Env::CreateFromString({}, CustomEnv::kClassName(), &newEnv));
  ASSERT_EQ(newEnv, new_options.env);

  config_options.ignore_unknown_options = false;
  // Test a bad value for a DBOption returns a failure
  base_options.dump_malloc_stats = false;
  base_options.write_buffer_size = 1024;
  Options bad_options = new_options;
  Status s = GetOptionsFromString(config_options, base_options,
                                  "create_if_missing=XX;dump_malloc_stats=true",
                                  &bad_options);
  ASSERT_NOK(s);
  ASSERT_TRUE(s.IsInvalidArgument());
  ASSERT_EQ(bad_options.dump_malloc_stats, false);

  bad_options = new_options;
  s = GetOptionsFromString(config_options, base_options,
                           "write_buffer_size=XX;dump_malloc_stats=true",
                           &bad_options);
  ASSERT_NOK(s);
  ASSERT_TRUE(s.IsInvalidArgument());
  ASSERT_EQ(bad_options.dump_malloc_stats, false);

  // Test a bad value for a TableFactory Option returns a failure
  bad_options = new_options;
  s = GetOptionsFromString(config_options, base_options,
                           "write_buffer_size=16;dump_malloc_stats=true"
                           "block_based_table_factory={block_size=XX;};",
                           &bad_options);
  ASSERT_TRUE(s.IsInvalidArgument());
  ASSERT_EQ(bad_options.dump_malloc_stats, false);
  ASSERT_EQ(bad_options.write_buffer_size, 1024);

  config_options.ignore_unknown_options = true;
  ASSERT_OK(GetOptionsFromString(config_options, base_options,
                                 "create_if_missing=XX;dump_malloc_stats=true;"
                                 "write_buffer_size=XX;"
                                 "block_based_table_factory={block_size=XX;};",
                                 &bad_options));
  ASSERT_EQ(bad_options.create_if_missing, base_options.create_if_missing);
  ASSERT_EQ(bad_options.dump_malloc_stats, true);
  ASSERT_EQ(bad_options.write_buffer_size, base_options.write_buffer_size);

  // Test the old interface
  ASSERT_OK(GetOptionsFromString(
      base_options,
      "write_buffer_size=22;max_write_buffer_number=33;max_open_files=44;",
      &new_options));
  ASSERT_EQ(new_options.write_buffer_size, 22U);
  ASSERT_EQ(new_options.max_write_buffer_number, 33);
  ASSERT_EQ(new_options.max_open_files, 44);
}

TEST_F(OptionsTest, DBOptionsSerialization) {
  Options base_options, new_options;
  Random rnd(301);
  ConfigOptions config_options;
  config_options.input_strings_escaped = false;
  config_options.ignore_unknown_options = false;

  // Phase 1: Make big change in base_options
  test::RandomInitDBOptions(&base_options, &rnd);

  // Phase 2: obtain a string from base_options
  std::string base_options_file_content;
  ASSERT_OK(GetStringFromDBOptions(config_options, base_options,
                                   &base_options_file_content));

  // Phase 3: Set new_options from the derived string and expect
  // new_options == base_options
  ASSERT_OK(GetDBOptionsFromString(config_options, DBOptions(),
                                   base_options_file_content, &new_options));
  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(config_options, base_options,
                                                  new_options));
}

TEST_F(OptionsTest, OptionsComposeDecompose) {
  // Build an Options from DBOptions + CFOptions, then decompose it to verify
  // we get the same constituent options.
  DBOptions base_db_opts;
  ColumnFamilyOptions base_cf_opts;
  ConfigOptions config_options;  // Use default for ignore (false) and check (exact)
  config_options.input_strings_escaped = false;

  Random rnd(301);
  test::RandomInitDBOptions(&base_db_opts, &rnd);
  test::RandomInitCFOptions(&base_cf_opts, base_db_opts, &rnd);

  Options base_opts(base_db_opts, base_cf_opts);
  DBOptions new_db_opts(base_opts);
  ColumnFamilyOptions new_cf_opts(base_opts);

  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(config_options, base_db_opts,
                                                  new_db_opts));
  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_cf_opts,
                                                  new_cf_opts));
  delete new_cf_opts.compaction_filter;
}

TEST_F(OptionsTest, DBOptionsComposeImmutable) {
  // Build a DBOptions from an Immutable/Mutable one and verify that
  // we get the same constituent options.
  ConfigOptions config_options;
  Random rnd(301);
  DBOptions base_opts, new_opts;
  test::RandomInitDBOptions(&base_opts, &rnd);
  MutableDBOptions m_opts(base_opts);
  ImmutableDBOptions i_opts(base_opts);
  new_opts = BuildDBOptions(i_opts, m_opts);
  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(config_options, base_opts,
                                                  new_opts));
}

TEST_F(OptionsTest, GetMutableDBOptions) {
  Random rnd(228);
  DBOptions base_opts;
  std::string opts_str;
  std::unordered_map<std::string, std::string> opts_map;
  ConfigOptions config_options;

  test::RandomInitDBOptions(&base_opts, &rnd);
  ImmutableDBOptions i_opts(base_opts);
  MutableDBOptions m_opts(base_opts);
  MutableDBOptions new_opts;
  ASSERT_OK(GetStringFromMutableDBOptions(config_options, m_opts, &opts_str));
  ASSERT_OK(StringToMap(opts_str, &opts_map));
  ASSERT_OK(GetMutableDBOptionsFromStrings(m_opts, opts_map, &new_opts));
  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(
      config_options, base_opts, BuildDBOptions(i_opts, new_opts)));
}
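
The point of the Mutable/Immutable split exercised here is runtime
reconfiguration; a sketch of the corresponding application-side calls (option
names and values are arbitrary examples):

```cpp
#include "rocksdb/db.h"

// Sketch: mutable DB options can be changed at runtime via SetDBOptions;
// mutable column family options go through SetOptions.
rocksdb::Status Retune(rocksdb::DB* db) {
  rocksdb::Status s = db->SetDBOptions({{"max_background_jobs", "8"}});
  if (s.ok()) {
    s = db->SetOptions({{"write_buffer_size", "67108864"}});
  }
  return s;
}
```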

TEST_F(OptionsTest, CFOptionsComposeImmutable) {
  // Build a ColumnFamilyOptions from an Immutable/Mutable one and verify that
  // we get the same constituent options.
  ConfigOptions config_options;
  Random rnd(301);
  ColumnFamilyOptions base_opts, new_opts;
  DBOptions dummy;  // Needed to create ImmutableCFOptions
  test::RandomInitCFOptions(&base_opts, dummy, &rnd);
  MutableCFOptions m_opts(base_opts);
  ImmutableCFOptions i_opts(base_opts);
  UpdateColumnFamilyOptions(i_opts, &new_opts);
  UpdateColumnFamilyOptions(m_opts, &new_opts);
  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base_opts,
                                                  new_opts));
  delete new_opts.compaction_filter;
}

TEST_F(OptionsTest, GetMutableCFOptions) {
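  // Round-trip the mutable subset of a randomized ColumnFamilyOptions
  // through its string form, apply it on top of the immutable subset, and
  // verify the result matches the original options.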
  Random rnd(228);
  ColumnFamilyOptions base, copy;
  std::string opts_str;
  std::unordered_map<std::string, std::string> opts_map;
  ConfigOptions config_options;
  DBOptions dummy;  // Needed to create ImmutableCFOptions

  test::RandomInitCFOptions(&base, dummy, &rnd);
  ColumnFamilyOptions result;
  MutableCFOptions m_opts(base), new_opts;

  ASSERT_OK(GetStringFromMutableCFOptions(config_options, m_opts, &opts_str));
  ASSERT_OK(StringToMap(opts_str, &opts_map));
  ASSERT_OK(GetMutableOptionsFromStrings(m_opts, opts_map, nullptr, &new_opts));
  UpdateColumnFamilyOptions(ImmutableCFOptions(base), &copy);
  UpdateColumnFamilyOptions(new_opts, &copy);

  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, base, copy));
  delete copy.compaction_filter;
}

TEST_F(OptionsTest, ColumnFamilyOptionsSerialization) {
  Options options;
  ColumnFamilyOptions base_opt, new_opt;
  base_opt.comparator = test::BytewiseComparatorWithU64TsWrapper();
  Random rnd(302);
  ConfigOptions config_options;
  config_options.input_strings_escaped = false;

  // Phase 1: randomly assign base_opt, including custom type options
  test::RandomInitCFOptions(&base_opt, options, &rnd);

  // Phase 2: obtain a string from base_opt
  std::string base_options_file_content;
  ASSERT_OK(GetStringFromColumnFamilyOptions(config_options, base_opt,
                                             &base_options_file_content));

  // Phase 3: Set new_opt from the derived string and expect
  // new_opt == base_opt
  ASSERT_OK(
      GetColumnFamilyOptionsFromString(config_options, ColumnFamilyOptions(),
                                       base_options_file_content, &new_opt));
  ASSERT_OK(
      RocksDBOptionsParser::VerifyCFOptions(config_options, base_opt, new_opt));
  ASSERT_EQ(base_opt.comparator, new_opt.comparator);
  if (base_opt.compaction_filter) {
    delete base_opt.compaction_filter;
  }
}

TEST_F(OptionsTest, CheckBlockBasedTableOptions) {
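  // Configure a BlockBasedTableFactory from option strings and verify that
  // unsupported or out-of-range settings (a block cache with no_block_cache,
  // block_size_deviation, restart intervals, partition_filters with hash
  // search) are sanitized to safe values when the options are applied.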
  ColumnFamilyOptions cf_opts;
  DBOptions db_opts;
  ConfigOptions config_opts;

  ASSERT_OK(GetColumnFamilyOptionsFromString(
      config_opts, cf_opts, "prefix_extractor=capped:8", &cf_opts));
  ASSERT_OK(TableFactory::CreateFromString(config_opts, "BlockBasedTable",
                                           &cf_opts.table_factory));
  ASSERT_NE(cf_opts.table_factory.get(), nullptr);
  ASSERT_TRUE(cf_opts.table_factory->IsInstanceOf(
      TableFactory::kBlockBasedTableName()));
  auto bbto = cf_opts.table_factory->GetOptions<BlockBasedTableOptions>();
  ASSERT_OK(cf_opts.table_factory->ConfigureFromString(
      config_opts,
      "block_cache={capacity=1M;num_shard_bits=4;};"
      "block_size_deviation=101;"
      "block_restart_interval=0;"
      "index_block_restart_interval=5;"
      "partition_filters=true;"
      "index_type=kHashSearch;"
      "no_block_cache=1;"));
  ASSERT_NE(bbto, nullptr);
  ASSERT_EQ(bbto->block_cache.get(), nullptr);
  ASSERT_EQ(bbto->block_size_deviation, 0);
  ASSERT_EQ(bbto->block_restart_interval, 1);
  ASSERT_EQ(bbto->index_block_restart_interval, 1);
  ASSERT_FALSE(bbto->partition_filters);
  ASSERT_OK(TableFactory::CreateFromString(config_opts, "BlockBasedTable",
                                           &cf_opts.table_factory));
  bbto = cf_opts.table_factory->GetOptions<BlockBasedTableOptions>();

  ASSERT_OK(cf_opts.table_factory->ConfigureFromString(config_opts,
                                                       "no_block_cache=0;"));
  ASSERT_NE(bbto->block_cache.get(), nullptr);
  ASSERT_OK(cf_opts.table_factory->ValidateOptions(db_opts, cf_opts));
}

TEST_F(OptionsTest, MutableTableOptions) {
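  // With mutable_options_only set, only options registered as mutable
  // (such as block_size) may be changed; immutable ones such as
  // no_block_cache must be rejected and left at their configured values.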
  ConfigOptions config_options;
  std::shared_ptr<TableFactory> bbtf;
  bbtf.reset(NewBlockBasedTableFactory());
  auto bbto = bbtf->GetOptions<BlockBasedTableOptions>();
  ASSERT_NE(bbto, nullptr);
  ASSERT_OK(bbtf->ConfigureOption(config_options, "no_block_cache", "true"));
  ASSERT_OK(bbtf->ConfigureOption(config_options, "block_size", "1024"));
  ASSERT_EQ(bbto->no_block_cache, true);
  ASSERT_EQ(bbto->block_size, 1024);
  ASSERT_OK(bbtf->PrepareOptions(config_options));
  config_options.mutable_options_only = true;
  ASSERT_OK(bbtf->ConfigureOption(config_options, "block_size", "1024"));
  ASSERT_EQ(bbto->no_block_cache, true);
  ASSERT_NOK(bbtf->ConfigureOption(config_options, "no_block_cache", "false"));
  ASSERT_OK(bbtf->ConfigureOption(config_options, "block_size", "2048"));
  ASSERT_EQ(bbto->no_block_cache, true);
  ASSERT_EQ(bbto->block_size, 2048);

  ColumnFamilyOptions cf_opts;
  cf_opts.table_factory = bbtf;
  ASSERT_NOK(GetColumnFamilyOptionsFromString(
      config_options, cf_opts, "block_based_table_factory.no_block_cache=false",
      &cf_opts));
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      config_options, cf_opts, "block_based_table_factory.block_size=8192",
      &cf_opts));
  ASSERT_EQ(bbto->no_block_cache, true);
  ASSERT_EQ(bbto->block_size, 8192);
}

TEST_F(OptionsTest, MutableCFOptions) {
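  // Exercise updating ColumnFamilyOptions via strings and maps, both with
  // and without mutable_options_only: mutable options update in place on
  // the existing table factory, immutable ones are rejected, and a failed
  // update must leave the previous values intact.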
  ConfigOptions config_options;
  ColumnFamilyOptions cf_opts;

  ASSERT_OK(GetColumnFamilyOptionsFromString(
      config_options, cf_opts,
      "paranoid_file_checks=true; block_based_table_factory.block_align=false; "
      "block_based_table_factory.block_size=8192;",
      &cf_opts));
  ASSERT_TRUE(cf_opts.paranoid_file_checks);
  ASSERT_NE(cf_opts.table_factory.get(), nullptr);
  const auto bbto = cf_opts.table_factory->GetOptions<BlockBasedTableOptions>();
  ASSERT_NE(bbto, nullptr);
  ASSERT_EQ(bbto->block_size, 8192);
  ASSERT_EQ(bbto->block_align, false);
  std::unordered_map<std::string, std::string> unused_opts;
  ASSERT_OK(GetColumnFamilyOptionsFromMap(
      config_options, cf_opts, {{"paranoid_file_checks", "false"}}, &cf_opts));
  ASSERT_EQ(cf_opts.paranoid_file_checks, false);

  ASSERT_OK(GetColumnFamilyOptionsFromMap(
      config_options, cf_opts,
      {{"block_based_table_factory.block_size", "16384"}}, &cf_opts));
  ASSERT_EQ(bbto, cf_opts.table_factory->GetOptions<BlockBasedTableOptions>());
  ASSERT_EQ(bbto->block_size, 16384);

  config_options.mutable_options_only = true;
  // force_consistency_checks is not mutable
  ASSERT_NOK(GetColumnFamilyOptionsFromMap(
      config_options, cf_opts, {{"force_consistency_checks", "true"}},
      &cf_opts));

  // Attempt to change the table factory. It is not mutable, so this should
  // fail and leave the original intact
  ASSERT_NOK(GetColumnFamilyOptionsFromMap(
      config_options, cf_opts, {{"table_factory", "PlainTable"}}, &cf_opts));
  ASSERT_NOK(GetColumnFamilyOptionsFromMap(
      config_options, cf_opts, {{"table_factory.id", "PlainTable"}}, &cf_opts));
  ASSERT_NE(cf_opts.table_factory.get(), nullptr);
  ASSERT_EQ(bbto, cf_opts.table_factory->GetOptions<BlockBasedTableOptions>());

  // Change the block size. Should update the value in the current table
  ASSERT_OK(GetColumnFamilyOptionsFromMap(
      config_options, cf_opts,
      {{"block_based_table_factory.block_size", "8192"}}, &cf_opts));
  ASSERT_EQ(bbto, cf_opts.table_factory->GetOptions<BlockBasedTableOptions>());
  ASSERT_EQ(bbto->block_size, 8192);

  // Attempting to turn off the block cache fails, as this option is not
  // mutable
  ASSERT_NOK(GetColumnFamilyOptionsFromMap(
      config_options, cf_opts,
      {{"block_based_table_factory.no_block_cache", "true"}}, &cf_opts));
  ASSERT_EQ(bbto, cf_opts.table_factory->GetOptions<BlockBasedTableOptions>());

  // Attempt to change the block size via a config string/map. Should update
  // the current value
  ASSERT_OK(GetColumnFamilyOptionsFromMap(
      config_options, cf_opts,
      {{"block_based_table_factory", "{block_size=32768}"}}, &cf_opts));
  ASSERT_EQ(bbto, cf_opts.table_factory->GetOptions<BlockBasedTableOptions>());
  ASSERT_EQ(bbto->block_size, 32768);

  // Attempt to change both the block size and no_block_cache through the
  // map. Should fail, leaving the old values intact
  ASSERT_NOK(GetColumnFamilyOptionsFromMap(
      config_options, cf_opts,
      {{"block_based_table_factory",
        "{block_size=16384; no_block_cache=true}"}},
      &cf_opts));
  ASSERT_EQ(bbto, cf_opts.table_factory->GetOptions<BlockBasedTableOptions>());
  ASSERT_EQ(bbto->block_size, 32768);
}

Status StringToMap(
    const std::string& opts_str,
    std::unordered_map<std::string, std::string>* opts_map);
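
// StringToMap parses a string of "key=value" pairs separated by ';', where
// a value may itself be a nested "{...}" map. The tests below cover
// overwrites, empty values, nesting, stray whitespace, and malformed input.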

TEST_F(OptionsTest, StringToMapTest) {
  std::unordered_map<std::string, std::string> opts_map;
  // Regular options
  ASSERT_OK(StringToMap("k1=v1;k2=v2;k3=v3", &opts_map));
  ASSERT_EQ(opts_map["k1"], "v1");
  ASSERT_EQ(opts_map["k2"], "v2");
  ASSERT_EQ(opts_map["k3"], "v3");
  // Value with '='
  opts_map.clear();
  ASSERT_OK(StringToMap("k1==v1;k2=v2=;", &opts_map));
  ASSERT_EQ(opts_map["k1"], "=v1");
  ASSERT_EQ(opts_map["k2"], "v2=");
  // Overwritten option
  opts_map.clear();
  ASSERT_OK(StringToMap("k1=v1;k1=v2;k3=v3", &opts_map));
  ASSERT_EQ(opts_map["k1"], "v2");
  ASSERT_EQ(opts_map["k3"], "v3");
  // Empty value
  opts_map.clear();
  ASSERT_OK(StringToMap("k1=v1;k2=;k3=v3;k4=", &opts_map));
  ASSERT_EQ(opts_map["k1"], "v1");
  ASSERT_TRUE(opts_map.find("k2") != opts_map.end());
  ASSERT_EQ(opts_map["k2"], "");
  ASSERT_EQ(opts_map["k3"], "v3");
  ASSERT_TRUE(opts_map.find("k4") != opts_map.end());
  ASSERT_EQ(opts_map["k4"], "");
  opts_map.clear();
  ASSERT_OK(StringToMap("k1=v1;k2=;k3=v3;k4= ", &opts_map));
  ASSERT_EQ(opts_map["k1"], "v1");
  ASSERT_TRUE(opts_map.find("k2") != opts_map.end());
  ASSERT_EQ(opts_map["k2"], "");
  ASSERT_EQ(opts_map["k3"], "v3");
  ASSERT_TRUE(opts_map.find("k4") != opts_map.end());
  ASSERT_EQ(opts_map["k4"], "");
  opts_map.clear();
  ASSERT_OK(StringToMap("k1=v1;k2=;k3=", &opts_map));
  ASSERT_EQ(opts_map["k1"], "v1");
  ASSERT_TRUE(opts_map.find("k2") != opts_map.end());
  ASSERT_EQ(opts_map["k2"], "");
  ASSERT_TRUE(opts_map.find("k3") != opts_map.end());
  ASSERT_EQ(opts_map["k3"], "");
  opts_map.clear();
  ASSERT_OK(StringToMap("k1=v1;k2=;k3=;", &opts_map));
  ASSERT_EQ(opts_map["k1"], "v1");
  ASSERT_TRUE(opts_map.find("k2") != opts_map.end());
  ASSERT_EQ(opts_map["k2"], "");
  ASSERT_TRUE(opts_map.find("k3") != opts_map.end());
  ASSERT_EQ(opts_map["k3"], "");
  // Regular nested options
  opts_map.clear();
  ASSERT_OK(StringToMap("k1=v1;k2={nk1=nv1;nk2=nv2};k3=v3", &opts_map));
  ASSERT_EQ(opts_map["k1"], "v1");
  ASSERT_EQ(opts_map["k2"], "nk1=nv1;nk2=nv2");
  ASSERT_EQ(opts_map["k3"], "v3");
  // Multi-level nested options
  opts_map.clear();
  ASSERT_OK(StringToMap("k1=v1;k2={nk1=nv1;nk2={nnk1=nnk2}};"
                        "k3={nk1={nnk1={nnnk1=nnnv1;nnnk2;nnnv2}}};k4=v4",
                        &opts_map));
  ASSERT_EQ(opts_map["k1"], "v1");
  ASSERT_EQ(opts_map["k2"], "nk1=nv1;nk2={nnk1=nnk2}");
  ASSERT_EQ(opts_map["k3"], "nk1={nnk1={nnnk1=nnnv1;nnnk2;nnnv2}}");
  ASSERT_EQ(opts_map["k4"], "v4");
  // Garbage inside curly braces
  opts_map.clear();
  ASSERT_OK(StringToMap("k1=v1;k2={dfad=};k3={=};k4=v4", &opts_map));
  ASSERT_EQ(opts_map["k1"], "v1");
  ASSERT_EQ(opts_map["k2"], "dfad=");
  ASSERT_EQ(opts_map["k3"], "=");
  ASSERT_EQ(opts_map["k4"], "v4");
  // Empty nested options
  opts_map.clear();
  ASSERT_OK(StringToMap("k1=v1;k2={};", &opts_map));
  ASSERT_EQ(opts_map["k1"], "v1");
  ASSERT_EQ(opts_map["k2"], "");
  opts_map.clear();
  ASSERT_OK(StringToMap("k1=v1;k2={{{{}}}{}{}};", &opts_map));
  ASSERT_EQ(opts_map["k1"], "v1");
  ASSERT_EQ(opts_map["k2"], "{{{}}}{}{}");
  // With random spaces
  opts_map.clear();
  ASSERT_OK(StringToMap(" k1 = v1 ; k2= {nk1=nv1; nk2={nnk1=nnk2}} ; "
                        "k3={ { } }; k4= v4 ",
                        &opts_map));
  ASSERT_EQ(opts_map["k1"], "v1");
  ASSERT_EQ(opts_map["k2"], "nk1=nv1; nk2={nnk1=nnk2}");
  ASSERT_EQ(opts_map["k3"], "{ }");
  ASSERT_EQ(opts_map["k4"], "v4");

  // Empty key
  ASSERT_NOK(StringToMap("k1=v1;k2=v2;=", &opts_map));
  ASSERT_NOK(StringToMap("=v1;k2=v2", &opts_map));
  ASSERT_NOK(StringToMap("k1=v1;k2v2;", &opts_map));
  ASSERT_NOK(StringToMap("k1=v1;k2=v2;fadfa", &opts_map));
  ASSERT_NOK(StringToMap("k1=v1;k2=v2;;", &opts_map));
  // Mismatched curly braces
  ASSERT_NOK(StringToMap("k1=v1;k2={;k3=v3", &opts_map));
  ASSERT_NOK(StringToMap("k1=v1;k2={{};k3=v3", &opts_map));
  ASSERT_NOK(StringToMap("k1=v1;k2={}};k3=v3", &opts_map));
  ASSERT_NOK(StringToMap("k1=v1;k2={{}{}}};k3=v3", &opts_map));
  // However this is valid!
  opts_map.clear();
  ASSERT_OK(StringToMap("k1=v1;k2=};k3=v3", &opts_map));
  ASSERT_EQ(opts_map["k1"], "v1");
  ASSERT_EQ(opts_map["k2"], "}");
  ASSERT_EQ(opts_map["k3"], "v3");

  // Invalid chars after closing curly brace
  ASSERT_NOK(StringToMap("k1=v1;k2={{}}{};k3=v3", &opts_map));
  ASSERT_NOK(StringToMap("k1=v1;k2={{}}cfda;k3=v3", &opts_map));
  ASSERT_NOK(StringToMap("k1=v1;k2={{}} cfda;k3=v3", &opts_map));
  ASSERT_NOK(StringToMap("k1=v1;k2={{}} cfda", &opts_map));
  ASSERT_NOK(StringToMap("k1=v1;k2={{}}{}", &opts_map));
  ASSERT_NOK(StringToMap("k1=v1;k2={{dfdl}adfa}{}", &opts_map));
}

TEST_F(OptionsTest, StringToMapRandomTest) {
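  // Fuzz StringToMap with mutated and randomly generated inputs: the parser
  // must either succeed or return InvalidArgument, and never crash.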
  std::unordered_map<std::string, std::string> opts_map;
  // Make sure semi-random strings do not trigger a segfault

  std::vector<std::string> bases = {
      "a={aa={};tt={xxx={}}};c=defff",
      "a={aa={};tt={xxx={}}};c=defff;d={{}yxx{}3{xx}}",
      "abc={{}{}{}{{{}}}{{}{}{}{}{}{}{}"};

  for (const std::string& base : bases) {
    for (int rand_seed = 301; rand_seed < 401; rand_seed++) {
      Random rnd(rand_seed);
      for (int attempt = 0; attempt < 10; attempt++) {
        std::string str = base;
        // Replace a random position with a space
        size_t pos = static_cast<size_t>(
            rnd.Uniform(static_cast<int>(base.size())));
        str[pos] = ' ';
        Status s = StringToMap(str, &opts_map);
        ASSERT_TRUE(s.ok() || s.IsInvalidArgument());
        opts_map.clear();
      }
    }
  }

  // Randomly construct a string
  std::vector<char> chars = {'{', '}', ' ', '=', ';', 'c'};
  for (int rand_seed = 301; rand_seed < 1301; rand_seed++) {
    Random rnd(rand_seed);
    int len = rnd.Uniform(30);
    std::string str;
    for (int attempt = 0; attempt < len; attempt++) {
      // Add a random character
      size_t pos = static_cast<size_t>(
          rnd.Uniform(static_cast<int>(chars.size())));
      str.append(1, chars[pos]);
    }
    Status s = StringToMap(str, &opts_map);
    ASSERT_TRUE(s.ok() || s.IsInvalidArgument());
    s = StringToMap("name=" + str, &opts_map);
    ASSERT_TRUE(s.ok() || s.IsInvalidArgument());
    opts_map.clear();
  }
}

TEST_F(OptionsTest, GetStringFromCompressionType) {
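  // Known CompressionType values serialize to their enum names; values
  // outside the enum are rejected.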
  std::string res;

  ASSERT_OK(GetStringFromCompressionType(&res, kNoCompression));
  ASSERT_EQ(res, "kNoCompression");

  ASSERT_OK(GetStringFromCompressionType(&res, kSnappyCompression));
  ASSERT_EQ(res, "kSnappyCompression");

  ASSERT_OK(GetStringFromCompressionType(&res, kDisableCompressionOption));
  ASSERT_EQ(res, "kDisableCompressionOption");

  ASSERT_OK(GetStringFromCompressionType(&res, kLZ4Compression));
  ASSERT_EQ(res, "kLZ4Compression");

  ASSERT_OK(GetStringFromCompressionType(&res, kZlibCompression));
  ASSERT_EQ(res, "kZlibCompression");

  ASSERT_NOK(
      GetStringFromCompressionType(&res, static_cast<CompressionType>(-10)));
}

TEST_F(OptionsTest, OnlyMutableDBOptions) {
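  // Serialize only the mutable DB options and verify that the resulting
  // string configures a DBOptions that matches the original on every
  // mutable option, and only on those.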
  std::string opt_str;
  Random rnd(302);
  ConfigOptions cfg_opts;
  DBOptions db_opts;
  DBOptions mdb_opts;
  std::unordered_set<std::string> m_names;
  std::unordered_set<std::string> a_names;

  test::RandomInitDBOptions(&db_opts, &rnd);
  auto db_config = DBOptionsAsConfigurable(db_opts);

  // Get all of the DB Option names (mutable or not)
  ASSERT_OK(db_config->GetOptionNames(cfg_opts, &a_names));

  // Get only the mutable options from db_opts and set those in mdb_opts
  cfg_opts.mutable_options_only = true;

  // Get only the Mutable DB Option names
  ASSERT_OK(db_config->GetOptionNames(cfg_opts, &m_names));
  ASSERT_OK(GetStringFromDBOptions(cfg_opts, db_opts, &opt_str));
  ASSERT_OK(GetDBOptionsFromString(cfg_opts, mdb_opts, opt_str, &mdb_opts));
  std::string mismatch;
  // Comparing only the mutable options, the two are equivalent
  auto mdb_config = DBOptionsAsConfigurable(mdb_opts);
  ASSERT_TRUE(mdb_config->AreEquivalent(cfg_opts, db_config.get(), &mismatch));
  ASSERT_TRUE(db_config->AreEquivalent(cfg_opts, mdb_config.get(), &mismatch));

  ASSERT_GT(a_names.size(), m_names.size());
  for (const auto& n : m_names) {
    std::string m, d;
    ASSERT_OK(mdb_config->GetOption(cfg_opts, n, &m));
    ASSERT_OK(db_config->GetOption(cfg_opts, n, &d));
    ASSERT_EQ(m, d);
  }

  cfg_opts.mutable_options_only = false;
  // Comparing all of the options, the two are not equivalent
  ASSERT_FALSE(mdb_config->AreEquivalent(cfg_opts, db_config.get(), &mismatch));
  ASSERT_FALSE(db_config->AreEquivalent(cfg_opts, mdb_config.get(), &mismatch));

  // Make sure there are only mutable options being configured
  ASSERT_OK(GetDBOptionsFromString(cfg_opts, DBOptions(), opt_str, &db_opts));
}

TEST_F(OptionsTest, OnlyMutableCFOptions) {
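  // Same as OnlyMutableDBOptions, but for the column family options.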
  std::string opt_str;
  Random rnd(302);
  ConfigOptions cfg_opts;
  DBOptions db_opts;
  ColumnFamilyOptions mcf_opts;
  ColumnFamilyOptions cf_opts;
  std::unordered_set<std::string> m_names;
  std::unordered_set<std::string> a_names;

  test::RandomInitCFOptions(&cf_opts, db_opts, &rnd);
  cf_opts.comparator = ReverseBytewiseComparator();
  auto cf_config = CFOptionsAsConfigurable(cf_opts);

  // Get all of the CF Option names (mutable or not)
  ASSERT_OK(cf_config->GetOptionNames(cfg_opts, &a_names));

  // Get only the mutable options from cf_opts and set those in mcf_opts
  cfg_opts.mutable_options_only = true;
  // Get only the Mutable CF Option names
  ASSERT_OK(cf_config->GetOptionNames(cfg_opts, &m_names));
  ASSERT_OK(GetStringFromColumnFamilyOptions(cfg_opts, cf_opts, &opt_str));
  ASSERT_OK(
      GetColumnFamilyOptionsFromString(cfg_opts, mcf_opts, opt_str, &mcf_opts));
  std::string mismatch;

  auto mcf_config = CFOptionsAsConfigurable(mcf_opts);
  // Comparing only the mutable options, the two are equivalent
  ASSERT_TRUE(mcf_config->AreEquivalent(cfg_opts, cf_config.get(), &mismatch));
  ASSERT_TRUE(cf_config->AreEquivalent(cfg_opts, mcf_config.get(), &mismatch));

  ASSERT_GT(a_names.size(), m_names.size());
  for (const auto& n : m_names) {
    std::string m, d;
    ASSERT_OK(mcf_config->GetOption(cfg_opts, n, &m));
    ASSERT_OK(cf_config->GetOption(cfg_opts, n, &d));
    ASSERT_EQ(m, d);
  }

  cfg_opts.mutable_options_only = false;
  // Comparing all of the options, the two are not equivalent
  ASSERT_FALSE(mcf_config->AreEquivalent(cfg_opts, cf_config.get(), &mismatch));
  ASSERT_FALSE(cf_config->AreEquivalent(cfg_opts, mcf_config.get(), &mismatch));
  delete cf_opts.compaction_filter;

  // Make sure the options string contains only mutable options
  ASSERT_OK(GetColumnFamilyOptionsFromString(cfg_opts, ColumnFamilyOptions(),
                                             opt_str, &cf_opts));
  delete cf_opts.compaction_filter;
}

TEST_F(OptionsTest, SstPartitionerTest) {
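  // Create an SstPartitionerFactory by name, reject unknown options in its
  // configuration string, and round-trip the configured factory through the
  // CF options string.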
  ConfigOptions cfg_opts;
  ColumnFamilyOptions cf_opts, new_opt;
  std::string opts_str, mismatch;

  ASSERT_OK(SstPartitionerFactory::CreateFromString(
      cfg_opts, SstPartitionerFixedPrefixFactory::kClassName(),
      &cf_opts.sst_partitioner_factory));
  ASSERT_NE(cf_opts.sst_partitioner_factory, nullptr);
  ASSERT_STREQ(cf_opts.sst_partitioner_factory->Name(),
               SstPartitionerFixedPrefixFactory::kClassName());
  ASSERT_NOK(GetColumnFamilyOptionsFromString(
      cfg_opts, ColumnFamilyOptions(),
      std::string("sst_partitioner_factory={id=") +
          SstPartitionerFixedPrefixFactory::kClassName() + "; unknown=10;}",
      &cf_opts));
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      cfg_opts, ColumnFamilyOptions(),
      std::string("sst_partitioner_factory={id=") +
          SstPartitionerFixedPrefixFactory::kClassName() + "; length=10;}",
      &cf_opts));
  ASSERT_NE(cf_opts.sst_partitioner_factory, nullptr);
  ASSERT_STREQ(cf_opts.sst_partitioner_factory->Name(),
               SstPartitionerFixedPrefixFactory::kClassName());
  ASSERT_OK(GetStringFromColumnFamilyOptions(cfg_opts, cf_opts, &opts_str));
  ASSERT_OK(
      GetColumnFamilyOptionsFromString(cfg_opts, cf_opts, opts_str, &new_opt));
  ASSERT_NE(new_opt.sst_partitioner_factory, nullptr);
  ASSERT_STREQ(new_opt.sst_partitioner_factory->Name(),
               SstPartitionerFixedPrefixFactory::kClassName());
  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(cfg_opts, cf_opts, new_opt));
  ASSERT_TRUE(cf_opts.sst_partitioner_factory->AreEquivalent(
      cfg_opts, new_opt.sst_partitioner_factory.get(), &mismatch));
}

TEST_F(OptionsTest, FileChecksumGenFactoryTest) {
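  // The built-in CRC32c checksum factory can be created by name, serialized
  // as part of the DBOptions, and parsed back to an equivalent factory;
  // unknown factory names are rejected.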
  ConfigOptions cfg_opts;
  DBOptions db_opts, new_opt;
  std::string opts_str, mismatch;
  auto factory = GetFileChecksumGenCrc32cFactory();

  cfg_opts.ignore_unsupported_options = false;

  ASSERT_OK(GetStringFromDBOptions(cfg_opts, db_opts, &opts_str));
  ASSERT_OK(GetDBOptionsFromString(cfg_opts, db_opts, opts_str, &new_opt));

  ASSERT_NE(factory, nullptr);
  ASSERT_OK(FileChecksumGenFactory::CreateFromString(
      cfg_opts, factory->Name(), &db_opts.file_checksum_gen_factory));
  ASSERT_NE(db_opts.file_checksum_gen_factory, nullptr);
  ASSERT_STREQ(db_opts.file_checksum_gen_factory->Name(), factory->Name());
  ASSERT_NOK(GetDBOptionsFromString(
      cfg_opts, DBOptions(), "file_checksum_gen_factory=unknown", &db_opts));
  ASSERT_OK(GetDBOptionsFromString(
      cfg_opts, DBOptions(),
      std::string("file_checksum_gen_factory=") + factory->Name(), &db_opts));
  ASSERT_NE(db_opts.file_checksum_gen_factory, nullptr);
  ASSERT_STREQ(db_opts.file_checksum_gen_factory->Name(), factory->Name());

  ASSERT_OK(GetStringFromDBOptions(cfg_opts, db_opts, &opts_str));
  ASSERT_OK(GetDBOptionsFromString(cfg_opts, db_opts, opts_str, &new_opt));
  ASSERT_NE(new_opt.file_checksum_gen_factory, nullptr);
  ASSERT_STREQ(new_opt.file_checksum_gen_factory->Name(), factory->Name());
  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(cfg_opts, db_opts, new_opt));
  ASSERT_TRUE(factory->AreEquivalent(
      cfg_opts, new_opt.file_checksum_gen_factory.get(), &mismatch));
  ASSERT_TRUE(db_opts.file_checksum_gen_factory->AreEquivalent(
      cfg_opts, new_opt.file_checksum_gen_factory.get(), &mismatch));
}
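
// A minimal TablePropertiesCollectorFactory whose GetId() embeds an
// instance id, used to exercise loading customizable objects by id
// through the ObjectRegistry.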
class TestTablePropertiesCollectorFactory
    : public TablePropertiesCollectorFactory {
 private:
  std::string id_;

 public:
  explicit TestTablePropertiesCollectorFactory(const std::string& id)
      : id_(id) {}
  TablePropertiesCollector* CreateTablePropertiesCollector(
      TablePropertiesCollectorFactory::Context /*context*/) override {
    return nullptr;
  }
  static const char* kClassName() { return "TestCollector"; }
  const char* Name() const override { return kClassName(); }
  std::string GetId() const override {
    return std::string(kClassName()) + ":" + id_;
  }
};

TEST_F(OptionsTest, OptionTablePropertiesTest) {
  ConfigOptions cfg_opts;
  ColumnFamilyOptions orig, copy;
  orig.table_properties_collector_factories.push_back(
      std::make_shared<TestTablePropertiesCollectorFactory>("1"));
  orig.table_properties_collector_factories.push_back(
      std::make_shared<TestTablePropertiesCollectorFactory>("2"));

  // Push two TablePropertiesCollectorFactories, then create a new
  // ColumnFamilyOptions based on those settings. The copy should
  // have no properties but still match the original
  std::string opts_str;
  ASSERT_OK(GetStringFromColumnFamilyOptions(cfg_opts, orig, &opts_str));
  ASSERT_OK(GetColumnFamilyOptionsFromString(cfg_opts, orig, opts_str, &copy));
  ASSERT_EQ(copy.table_properties_collector_factories.size(), 0);
  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(cfg_opts, orig, copy));

  // Now register a TablePropertiesCollectorFactory and repeat the
  // experiment. The copy should have the same properties as the original
  cfg_opts.registry->AddLibrary("collector")
      ->AddFactory<TablePropertiesCollectorFactory>(
          ObjectLibrary::PatternEntry(
              TestTablePropertiesCollectorFactory::kClassName(), false)
              .AddSeparator(":"),
          [](const std::string& name,
             std::unique_ptr<TablePropertiesCollectorFactory>* guard,
             std::string* /* errmsg */) {
            std::string id = name.substr(
                strlen(TestTablePropertiesCollectorFactory::kClassName()) + 1);
            guard->reset(new TestTablePropertiesCollectorFactory(id));
            return guard->get();
          });

  ASSERT_OK(GetColumnFamilyOptionsFromString(cfg_opts, orig, opts_str, &copy));
  ASSERT_EQ(copy.table_properties_collector_factories.size(), 2);
  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(cfg_opts, orig, copy));
}

TEST_F(OptionsTest, ConvertOptionsTest) {
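  // ConvertOptions maps a LevelDBOptions to an equivalent rocksdb::Options,
  // including translating the block-based table settings.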
  LevelDBOptions leveldb_opt;
  Options converted_opt = ConvertOptions(leveldb_opt);

  ASSERT_EQ(converted_opt.create_if_missing, leveldb_opt.create_if_missing);
  ASSERT_EQ(converted_opt.error_if_exists, leveldb_opt.error_if_exists);
  ASSERT_EQ(converted_opt.paranoid_checks, leveldb_opt.paranoid_checks);
  ASSERT_EQ(converted_opt.env, leveldb_opt.env);
  ASSERT_EQ(converted_opt.info_log.get(), leveldb_opt.info_log);
  ASSERT_EQ(converted_opt.write_buffer_size, leveldb_opt.write_buffer_size);
  ASSERT_EQ(converted_opt.max_open_files, leveldb_opt.max_open_files);
  ASSERT_EQ(converted_opt.compression, leveldb_opt.compression);

  std::shared_ptr<TableFactory> table_factory = converted_opt.table_factory;
  const auto table_opt = table_factory->GetOptions<BlockBasedTableOptions>();
  ASSERT_NE(table_opt, nullptr);

  ASSERT_EQ(table_opt->block_cache->GetCapacity(), 32UL << 20);
  ASSERT_EQ(table_opt->block_size, leveldb_opt.block_size);
  ASSERT_EQ(table_opt->block_restart_interval,
            leveldb_opt.block_restart_interval);
  ASSERT_EQ(table_opt->filter_policy.get(), leveldb_opt.filter_policy);
}

class TestEventListener : public EventListener {
 private:
  std::string id_;

 public:
  explicit TestEventListener(const std::string& id) : id_("Test" + id) {}
  const char* Name() const override { return id_.c_str(); }
};

static std::unordered_map<std::string, OptionTypeInfo>
    test_listener_option_info = {
        {"s",
         {0, OptionType::kString, OptionVerificationType::kNormal,
          OptionTypeFlags::kNone}},
};

class TestConfigEventListener : public TestEventListener {
 private:
  std::string s_;

 public:
  explicit TestConfigEventListener(const std::string& id)
      : TestEventListener("Config" + id) {
    s_ = id;
    RegisterOptions("Test", &s_, &test_listener_option_info);
  }
};

static int RegisterTestEventListener(ObjectLibrary& library,
                                     const std::string& arg) {
  library.AddFactory<EventListener>(
      "Test" + arg,
      [](const std::string& name, std::unique_ptr<EventListener>* guard,
         std::string* /* errmsg */) {
        guard->reset(new TestEventListener(name.substr(4)));
        return guard->get();
      });
  library.AddFactory<EventListener>(
      "TestConfig" + arg,
      [](const std::string& name, std::unique_ptr<EventListener>* guard,
         std::string* /* errmsg */) {
        guard->reset(new TestConfigEventListener(name.substr(10)));
        return guard->get();
      });
  return 1;
}

TEST_F(OptionsTest, OptionsListenerTest) {
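  // Only listeners created through factories registered with the
  // ObjectRegistry (here the "1" variants) survive a round trip through
  // the options string.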
  DBOptions orig, copy;
  orig.listeners.push_back(std::make_shared<TestEventListener>("1"));
  orig.listeners.push_back(std::make_shared<TestEventListener>("2"));
  orig.listeners.push_back(std::make_shared<TestEventListener>(""));
  orig.listeners.push_back(std::make_shared<TestConfigEventListener>("1"));
  orig.listeners.push_back(std::make_shared<TestConfigEventListener>("2"));
  orig.listeners.push_back(std::make_shared<TestConfigEventListener>(""));
  ConfigOptions config_opts(orig);
  config_opts.registry->AddLibrary("listener", RegisterTestEventListener, "1");
  std::string opts_str;
  ASSERT_OK(GetStringFromDBOptions(config_opts, orig, &opts_str));
  ASSERT_OK(GetDBOptionsFromString(config_opts, orig, opts_str, &copy));
  ASSERT_OK(GetStringFromDBOptions(config_opts, copy, &opts_str));
  ASSERT_EQ(
      copy.listeners.size(),
      2);  // The Test{Config}1 listeners could be loaded but not the others
  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(config_opts, orig, copy));
}

const static std::string kCustomEnvName = "Custom";
const static std::string kCustomEnvProp = "env=" + kCustomEnvName;

static int RegisterCustomEnv(ObjectLibrary& library, const std::string& arg) {
  library.AddFactory<Env>(
      arg, [](const std::string& /*name*/, std::unique_ptr<Env>* /*env_guard*/,
              std::string* /* errmsg */) {
        static CustomEnv env(Env::Default());
        return &env;
      });
  return 1;
}

// This test suite tests the old APIs for configuring options.
// Once those APIs are officially deprecated, this test suite can be deleted.
class OptionsOldApiTest : public testing::Test {};

TEST_F(OptionsOldApiTest, GetOptionsFromMapTest) {
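  // Build options from string maps using the legacy
  // GetColumnFamilyOptionsFromMap API and check that every field lands
  // with the expected value.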
  std::unordered_map<std::string, std::string> cf_options_map = {
      {"write_buffer_size", "1"},
      {"max_write_buffer_number", "2"},
      {"min_write_buffer_number_to_merge", "3"},
      {"max_write_buffer_number_to_maintain", "99"},
      {"max_write_buffer_size_to_maintain", "-99999"},
      {"compression", "kSnappyCompression"},
      {"compression_per_level",
       "kNoCompression:"
       "kSnappyCompression:"
       "kZlibCompression:"
       "kBZip2Compression:"
       "kLZ4Compression:"
       "kLZ4HCCompression:"
       "kXpressCompression:"
       "kZSTD:"
       "kZSTDNotFinalCompression"},
      {"bottommost_compression", "kLZ4Compression"},
      {"bottommost_compression_opts", "5:6:7:8:9:true"},
{"compression_opts", "4:5:6:7:8:9:true:10:false"},
|
2020-04-22 00:35:28 +00:00
|
|
|
{"num_levels", "8"},
|
|
|
|
{"level0_file_num_compaction_trigger", "8"},
|
|
|
|
{"level0_slowdown_writes_trigger", "9"},
|
|
|
|
{"level0_stop_writes_trigger", "10"},
|
|
|
|
{"target_file_size_base", "12"},
|
|
|
|
{"target_file_size_multiplier", "13"},
|
|
|
|
{"max_bytes_for_level_base", "14"},
|
|
|
|
{"level_compaction_dynamic_level_bytes", "true"},
|
{"level_compaction_dynamic_file_size", "true"},
|
2020-04-22 00:35:28 +00:00
|
|
|
{"max_bytes_for_level_multiplier", "15.0"},
|
|
|
|
{"max_bytes_for_level_multiplier_additional", "16:17:18"},
|
|
|
|
{"max_compaction_bytes", "21"},
|
|
|
|
{"soft_rate_limit", "1.1"},
|
|
|
|
{"hard_rate_limit", "2.1"},
|
2022-01-29 00:10:59 +00:00
|
|
|
{"rate_limit_delay_max_milliseconds", "100"},
|
2020-04-22 00:35:28 +00:00
|
|
|
{"hard_pending_compaction_bytes_limit", "211"},
|
|
|
|
{"arena_block_size", "22"},
|
|
|
|
{"disable_auto_compactions", "true"},
|
|
|
|
{"compaction_style", "kCompactionStyleLevel"},
|
|
|
|
{"compaction_pri", "kOldestSmallestSeqFirst"},
|
|
|
|
{"verify_checksums_in_compaction", "false"},
|
2023-05-11 23:40:59 +00:00
|
|
|
{"compaction_options_fifo",
|
|
|
|
"{allow_compaction=true;max_table_files_size=11002244;"
|
|
|
|
"file_temperature_age_thresholds={{temperature=kCold;age=12345}}}"},
|
2020-04-22 00:35:28 +00:00
|
|
|
{"max_sequential_skip_in_iterations", "24"},
|
|
|
|
{"inplace_update_support", "true"},
|
|
|
|
{"report_bg_io_stats", "true"},
|
|
|
|
{"compaction_measure_io_stats", "false"},
|
2022-01-27 06:02:02 +00:00
|
|
|
{"purge_redundant_kvs_while_flush", "false"},
|
2020-04-22 00:35:28 +00:00
|
|
|
{"inplace_update_num_locks", "25"},
|
|
|
|
{"memtable_prefix_bloom_size_ratio", "0.26"},
|
|
|
|
{"memtable_whole_key_filtering", "true"},
|
|
|
|
{"memtable_huge_page_size", "28"},
|
|
|
|
{"bloom_locality", "29"},
|
|
|
|
{"max_successive_merges", "30"},
|
2024-02-21 21:15:27 +00:00
|
|
|
{"strict_max_successive_merges", "true"},
|
2020-04-22 00:35:28 +00:00
|
|
|
{"min_partial_merge_operands", "31"},
|
|
|
|
{"prefix_extractor", "fixed:31"},
|
2022-06-23 16:42:18 +00:00
|
|
|
{"experimental_mempurge_threshold", "0.003"},
|
2020-04-22 00:35:28 +00:00
|
|
|
{"optimize_filters_for_hits", "true"},
|
2020-08-19 01:31:31 +00:00
|
|
|
{"enable_blob_files", "true"},
|
|
|
|
{"min_blob_size", "1K"},
|
|
|
|
{"blob_file_size", "1G"},
|
|
|
|
{"blob_compression_type", "kZSTD"},
|
2020-11-13 02:57:20 +00:00
|
|
|
{"enable_blob_garbage_collection", "true"},
|
|
|
|
{"blob_garbage_collection_age_cutoff", "0.5"},
|
{"blob_garbage_collection_force_threshold", "0.75"},
|
2021-11-20 01:52:42 +00:00
|
|
|
{"blob_compaction_readahead_size", "256K"},
|
2022-06-03 03:04:33 +00:00
|
|
|
{"blob_file_starting_level", "1"},
|
2022-07-17 14:13:59 +00:00
|
|
|
{"prepopulate_blob_cache", "kDisable"},
|
2022-08-08 21:36:34 +00:00
|
|
|
{"last_level_temperature", "kWarm"},
|
2024-02-28 22:36:13 +00:00
|
|
|
{"default_write_temperature", "kCold"},
|
2023-08-18 00:06:57 +00:00
|
|
|
{"default_temperature", "kHot"},
|
2023-04-12 00:50:34 +00:00
|
|
|
{"persist_user_defined_timestamps", "true"},
|
2023-08-03 02:58:56 +00:00
|
|
|
{"memtable_max_range_deletions", "0"},
|
2020-04-22 00:35:28 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
std::unordered_map<std::string, std::string> db_options_map = {
|
|
|
|
{"create_if_missing", "false"},
|
|
|
|
{"create_missing_column_families", "true"},
|
|
|
|
{"error_if_exists", "false"},
|
|
|
|
{"paranoid_checks", "true"},
|
2020-10-09 23:40:25 +00:00
|
|
|
{"track_and_verify_wals_in_manifest", "true"},
|
2022-05-19 18:04:21 +00:00
|
|
|
{"verify_sst_unique_id_in_manifest", "true"},
|
2020-04-22 00:35:28 +00:00
|
|
|
{"max_open_files", "32"},
|
|
|
|
{"max_total_wal_size", "33"},
|
|
|
|
{"use_fsync", "true"},
|
|
|
|
{"db_log_dir", "/db_log_dir"},
|
|
|
|
{"wal_dir", "/wal_dir"},
|
|
|
|
{"delete_obsolete_files_period_micros", "34"},
|
|
|
|
{"max_background_compactions", "35"},
|
|
|
|
{"max_background_flushes", "36"},
|
|
|
|
{"max_log_file_size", "37"},
|
|
|
|
{"log_file_time_to_roll", "38"},
|
|
|
|
{"keep_log_file_num", "39"},
|
|
|
|
{"recycle_log_file_num", "5"},
|
|
|
|
{"max_manifest_file_size", "40"},
|
|
|
|
{"table_cache_numshardbits", "41"},
|
|
|
|
{"WAL_ttl_seconds", "43"},
|
|
|
|
{"WAL_size_limit_MB", "44"},
|
|
|
|
{"manifest_preallocation_size", "45"},
|
|
|
|
{"allow_mmap_reads", "true"},
|
|
|
|
{"allow_mmap_writes", "false"},
|
|
|
|
{"use_direct_reads", "false"},
|
|
|
|
{"use_direct_io_for_flush_and_compaction", "false"},
|
|
|
|
{"is_fd_close_on_exec", "true"},
|
|
|
|
{"skip_log_error_on_recovery", "false"},
|
|
|
|
{"stats_dump_period_sec", "46"},
|
|
|
|
{"stats_persist_period_sec", "57"},
|
|
|
|
{"persist_stats_to_disk", "false"},
|
|
|
|
{"stats_history_buffer_size", "69"},
|
|
|
|
{"advise_random_on_open", "true"},
|
|
|
|
{"use_adaptive_mutex", "false"},
|
|
|
|
{"compaction_readahead_size", "100"},
|
|
|
|
{"random_access_max_buffer_size", "3145728"},
|
|
|
|
{"writable_file_max_buffer_size", "314159"},
|
|
|
|
{"bytes_per_sync", "47"},
|
|
|
|
{"wal_bytes_per_sync", "48"},
|
|
|
|
{"strict_bytes_per_sync", "true"},
|
2022-01-28 21:26:32 +00:00
|
|
|
{"preserve_deletes", "false"},
|
2020-04-22 00:35:28 +00:00
|
|
|
};
|
|
|
|
|
2014-10-10 17:00:12 +00:00
|
|
|
ColumnFamilyOptions base_cf_opt;
|
|
|
|
ColumnFamilyOptions new_cf_opt;
|
2023-02-07 22:11:53 +00:00
|
|
|
ConfigOptions cf_config_options;
|
|
|
|
cf_config_options.ignore_unknown_options = false;
|
|
|
|
cf_config_options.input_strings_escaped = false;
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromMap(cf_config_options, base_cf_opt,
|
|
|
|
cf_options_map, &new_cf_opt));
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 1U);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 2);
|
|
|
|
ASSERT_EQ(new_cf_opt.min_write_buffer_number_to_merge, 3);
|
2015-07-03 00:23:41 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number_to_maintain, 99);
|
  ASSERT_EQ(new_cf_opt.max_write_buffer_size_to_maintain, -99999);
  ASSERT_EQ(new_cf_opt.compression, kSnappyCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level.size(), 9U);
  ASSERT_EQ(new_cf_opt.compression_per_level[0], kNoCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[1], kSnappyCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[2], kZlibCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[3], kBZip2Compression);
  ASSERT_EQ(new_cf_opt.compression_per_level[4], kLZ4Compression);
  ASSERT_EQ(new_cf_opt.compression_per_level[5], kLZ4HCCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[6], kXpressCompression);
  ASSERT_EQ(new_cf_opt.compression_per_level[7], kZSTD);
  ASSERT_EQ(new_cf_opt.compression_per_level[8], kZSTDNotFinalCompression);
  ASSERT_EQ(new_cf_opt.compression_opts.window_bits, 4);
  ASSERT_EQ(new_cf_opt.compression_opts.level, 5);
  ASSERT_EQ(new_cf_opt.compression_opts.strategy, 6);
  ASSERT_EQ(new_cf_opt.compression_opts.max_dict_bytes, 7u);
  ASSERT_EQ(new_cf_opt.compression_opts.zstd_max_train_bytes, 8u);
Support using ZDICT_finalizeDictionary to generate zstd dictionary (#9857)
Summary:
An untrained dictionary is currently simply the concatenation of several samples. The ZSTD API ZDICT_finalizeDictionary() can improve such a dictionary's effectiveness at low cost. This PR changes how the dictionary is created: it calls the ZSTD ZDICT_finalizeDictionary() API instead of creating a raw content dictionary (when max_dict_buffer_bytes > 0), and passes in all buffered uncompressed data blocks as samples.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9857
Test Plan:
#### db_bench test for cpu/memory of compression+decompression and space saving on synthetic data:
Set up: change the parameter [here](https://github.com/facebook/rocksdb/blob/fb9a167a55e0970b1ef6f67c1600c8d9c4c6114f/tools/db_bench_tool.cc#L1766) to 16384 to make synthetic data more compressible.
```
# linked local ZSTD with version 1.5.2
# DEBUG_LEVEL=0 ROCKSDB_NO_FBCODE=1 ROCKSDB_DISABLE_ZSTD=1 EXTRA_CXXFLAGS="-DZSTD_STATIC_LINKING_ONLY -DZSTD -I/data/users/changyubi/install/include/" EXTRA_LDFLAGS="-L/data/users/changyubi/install/lib/ -l:libzstd.a" make -j32 db_bench
dict_bytes=16384
train_bytes=1048576
echo "========== No Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=0 -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== Raw Content Dictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench_main -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench_main -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== FinalizeDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
echo "========== TrainDictionary =========="
TEST_TMPDIR=/dev/shm ./db_bench -benchmarks=filluniquerandom,compact -num=10000000 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 -max_background_jobs=24 -memtablerep=vector -allow_concurrent_memtable_write=false -disable_wal=true -max_write_buffer_number=8 >/dev/null 2>&1
TEST_TMPDIR=/dev/shm /usr/bin/time ./db_bench -use_existing_db=true -benchmarks=compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -block_size=4096 2>&1 | grep elapsed
du -hc /dev/shm/dbbench/*sst | grep total
# Result: TrainDictionary is much better on space saving, but FinalizeDictionary seems to use less memory.
# before compression data size: 1.2GB
dict_bytes=16384
max_dict_buffer_bytes = 1048576
space cpu/memory
No Dictionary 468M 14.93user 1.00system 0:15.92elapsed 100%CPU (0avgtext+0avgdata 23904maxresident)k
Raw Dictionary 251M 15.81user 0.80system 0:16.56elapsed 100%CPU (0avgtext+0avgdata 156808maxresident)k
FinalizeDictionary 236M 11.93user 0.64system 0:12.56elapsed 100%CPU (0avgtext+0avgdata 89548maxresident)k
TrainDictionary 84M 7.29user 0.45system 0:07.75elapsed 100%CPU (0avgtext+0avgdata 97288maxresident)k
```
#### Benchmark on 10 sample SST files for space saving and CPU time on compression:
FinalizeDictionary is comparable to TrainDictionary in terms of space saving, and takes less time in compression.
```
dict_bytes=16384
train_bytes=1048576
for sst_file in `ls ../temp/myrock-sst/`
do
echo "********** $sst_file **********"
echo "========== No Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD
echo "========== Raw Content Dictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes
echo "========== FinalizeDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes --compression_use_zstd_finalize_dict
echo "========== TrainDictionary =========="
./sst_dump --file="../temp/myrock-sst/$sst_file" --command=recompress --compression_level_from=6 --compression_level_to=6 --compression_types=kZSTD --compression_max_dict_bytes=$dict_bytes --compression_zstd_max_train_bytes=$train_bytes
done
010240.sst (Size/Time) 011029.sst 013184.sst 021552.sst 185054.sst 185137.sst 191666.sst 7560381.sst 7604174.sst 7635312.sst
No Dictionary 28165569 / 2614419 32899411 / 2976832 32977848 / 3055542 31966329 / 2004590 33614351 / 1755877 33429029 / 1717042 33611933 / 1776936 33634045 / 2771417 33789721 / 2205414 33592194 / 388254
Raw Content Dictionary 28019950 / 2697961 33748665 / 3572422 33896373 / 3534701 26418431 / 2259658 28560825 / 1839168 28455030 / 1846039 28494319 / 1861349 32391599 / 3095649 33772142 / 2407843 33592230 / 474523
FinalizeDictionary 27896012 / 2650029 33763886 / 3719427 33904283 / 3552793 26008225 / 2198033 28111872 / 1869530 28014374 / 1789771 28047706 / 1848300 32296254 / 3204027 33698698 / 2381468 33592344 / 517433
TrainDictionary 28046089 / 2740037 33706480 / 3679019 33885741 / 3629351 25087123 / 2204558 27194353 / 1970207 27234229 / 1896811 27166710 / 1903119 32011041 / 3322315 32730692 / 2406146 33608631 / 570593
```
#### Decompression/Read test:
With FinalizeDictionary/TrainDictionary, some data structures used for decompression are stored in the dictionary, so decompression/reads are expected to be faster.
```
dict_bytes=16384
train_bytes=1048576
echo "No Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=0 > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=0 2>&1 | grep MB/s
echo "Raw Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes 2>&1 | grep MB/s
echo "FinalizeDict"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes -compression_use_zstd_dict_trainer=false 2>&1 | grep MB/s
echo "Train Dictionary"
TEST_TMPDIR=/dev/shm/ ./db_bench -benchmarks=filluniquerandom,compact -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes > /dev/null 2>&1
TEST_TMPDIR=/dev/shm/ ./db_bench -use_existing_db=true -benchmarks=readrandom -cache_size=0 -compression_type=zstd -compression_max_dict_bytes=$dict_bytes -compression_zstd_max_train_bytes=$train_bytes 2>&1 | grep MB/s
No Dictionary
readrandom : 12.183 micros/op 82082 ops/sec 12.183 seconds 1000000 operations; 9.1 MB/s (1000000 of 1000000 found)
Raw Dictionary
readrandom : 12.314 micros/op 81205 ops/sec 12.314 seconds 1000000 operations; 9.0 MB/s (1000000 of 1000000 found)
FinalizeDict
readrandom : 9.787 micros/op 102180 ops/sec 9.787 seconds 1000000 operations; 11.3 MB/s (1000000 of 1000000 found)
Train Dictionary
readrandom : 9.698 micros/op 103108 ops/sec 9.699 seconds 1000000 operations; 11.4 MB/s (1000000 of 1000000 found)
```
Reviewed By: ajkr
Differential Revision: D35720026
Pulled By: cbi42
fbshipit-source-id: 24d230fdff0fd28a1bb650658798f00dfcfb2a1f
2022-05-20 19:09:09 +00:00
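As a usage sketch, the three `CompressionOptions` fields exercised by the assertions below are what select between the modes benchmarked above; the helper function and the concrete values here are illustrative assumptions.
```
#include "rocksdb/options.h"

// Illustrative helper (the function itself is not part of RocksDB):
// configure ZSTD so the dictionary is produced via
// ZDICT_finalizeDictionary() rather than the trainer or raw samples.
rocksdb::ColumnFamilyOptions FinalizeDictCFOptions() {
  rocksdb::ColumnFamilyOptions cf_opts;
  cf_opts.compression = rocksdb::kZSTD;
  cf_opts.compression_opts.max_dict_bytes = 16384;         // 0 disables dicts
  cf_opts.compression_opts.zstd_max_train_bytes = 1048576;  // sample budget
  // false selects ZDICT_finalizeDictionary(); true selects the trainer.
  cf_opts.compression_opts.use_zstd_dict_trainer = false;
  return cf_opts;
}
```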
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.parallel_threads, 9u);
|
2018-06-28 00:34:07 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.enabled, true);
|
Support using ZDICT_finalizeDictionary to generate zstd dictionary (#9857)
2022-05-20 19:09:09 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.max_dict_buffer_bytes, 10u);
|
|
|
|
ASSERT_EQ(new_cf_opt.compression_opts.use_zstd_dict_trainer, false);
|
2016-05-09 22:57:19 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression, kLZ4Compression);
|
2018-06-28 00:34:07 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.window_bits, 5);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.level, 6);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.strategy, 7);
|
2019-09-09 18:22:28 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_bytes, 8u);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.zstd_max_train_bytes, 9u);
|
2020-04-30 23:59:16 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.parallel_threads,
|
|
|
|
CompressionOptions().parallel_threads);
|
2018-06-28 00:34:07 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled, true);
|
Support using ZDICT_finalizeDictionary to generate zstd dictionary (#9857)
2022-05-20 19:09:09 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_buffer_bytes,
|
|
|
|
CompressionOptions().max_dict_buffer_bytes);
|
|
|
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.use_zstd_dict_trainer,
|
|
|
|
CompressionOptions().use_zstd_dict_trainer);
|
2016-04-20 05:54:24 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.num_levels, 8);
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.level0_file_num_compaction_trigger, 8);
|
|
|
|
ASSERT_EQ(new_cf_opt.level0_slowdown_writes_trigger, 9);
|
|
|
|
ASSERT_EQ(new_cf_opt.level0_stop_writes_trigger, 10);
|
|
|
|
ASSERT_EQ(new_cf_opt.target_file_size_base, static_cast<uint64_t>(12));
|
|
|
|
ASSERT_EQ(new_cf_opt.target_file_size_multiplier, 13);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_base, 14U);
|
options.level_compaction_dynamic_level_bytes to allow RocksDB to pick size bases of levels dynamically.
Summary:
When max_bytes_for_level_base is fixed, the ratio between the size of the largest level and the second-largest one can range from 0 to the multiplier. This frequently makes the LSM tree irregular and unpredictable, and it can also cause poor space amplification in some cases.
In this improvement (proposed by Igor Kabiljo), we introduce a parameter, options.level_compaction_dynamic_level_bytes. When it is turned on, RocksDB is free to pick a level base in the range (options.max_bytes_for_level_base/options.max_bytes_for_level_multiplier, options.max_bytes_for_level_base] so that real level ratios stay close to options.max_bytes_for_level_multiplier.
Test Plan: New unit tests and pass tests suites including valgrind.
Reviewers: MarkCallaghan, rven, yhchiang, igor, ikabiljo
Reviewed By: ikabiljo
Subscribers: yoshinorim, ikabiljo, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D31437
2015-02-05 19:44:17 +00:00
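A rough sketch of the range constraint described above; this is a hypothetical helper under stated assumptions, not RocksDB's actual level-targeting code.
```
#include <cstdint>

// Hypothetical helper: clamp a candidate base into the permitted interval
// (max_base / multiplier, max_base] described above. `candidate` stands
// for whatever base would make real level ratios close to the multiplier.
uint64_t ClampDynamicLevelBase(double candidate, uint64_t max_base,
                               double multiplier) {
  const double lo = static_cast<double>(max_base) / multiplier;
  const double hi = static_cast<double>(max_base);
  if (candidate <= lo) {
    candidate = lo + 1;  // stay strictly above the open lower bound
  } else if (candidate > hi) {
    candidate = hi;
  }
  return static_cast<uint64_t>(candidate);
}
```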
|
|
|
ASSERT_EQ(new_cf_opt.level_compaction_dynamic_level_bytes, true);
|
2016-11-02 04:05:32 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier, 15.0);
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional.size(), 3U);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[0], 16);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[1], 17);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[2], 18);
|
2016-06-16 23:02:52 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.max_compaction_bytes, 21);
|
2015-09-11 21:31:23 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.hard_pending_compaction_bytes_limit, 211);
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.arena_block_size, 22U);
|
|
|
|
ASSERT_EQ(new_cf_opt.disable_auto_compactions, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.compaction_style, kCompactionStyleLevel);
|
2017-03-02 18:08:49 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.compaction_pri, kOldestSmallestSeqFirst);
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.compaction_options_fifo.max_table_files_size,
|
2023-05-11 23:40:59 +00:00
|
|
|
static_cast<uint64_t>(11002244));
|
|
|
|
ASSERT_EQ(new_cf_opt.compaction_options_fifo.allow_compaction, true);
|
|
|
|
ASSERT_EQ(
|
|
|
|
new_cf_opt.compaction_options_fifo.file_temperature_age_thresholds.size(),
|
|
|
|
1);
|
|
|
|
ASSERT_EQ(
|
|
|
|
new_cf_opt.compaction_options_fifo.file_temperature_age_thresholds[0]
|
|
|
|
.temperature,
|
|
|
|
Temperature::kCold);
|
|
|
|
ASSERT_EQ(
|
|
|
|
new_cf_opt.compaction_options_fifo.file_temperature_age_thresholds[0].age,
|
|
|
|
12345);
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.max_sequential_skip_in_iterations,
|
2014-09-17 22:40:25 +00:00
|
|
|
static_cast<uint64_t>(24));
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.inplace_update_support, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 25U);
|
2016-06-04 00:02:10 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.memtable_prefix_bloom_size_ratio, 0.26);
|
2019-02-19 20:12:25 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.memtable_whole_key_filtering, true);
|
2016-07-27 01:05:30 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.memtable_huge_page_size, 28U);
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.bloom_locality, 29U);
|
|
|
|
ASSERT_EQ(new_cf_opt.max_successive_merges, 30U);
|
2024-02-21 21:15:27 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.strict_max_successive_merges, true);
|
2015-01-15 23:33:12 +00:00
|
|
|
ASSERT_TRUE(new_cf_opt.prefix_extractor != nullptr);
|
2015-02-17 16:03:45 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.optimize_filters_for_hits, true);
|
2021-09-27 14:42:36 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.prefix_extractor->AsString(), "rocksdb.FixedPrefix.31");
|
2022-06-23 16:42:18 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.experimental_mempurge_threshold, 0.003);
|
2020-08-19 01:31:31 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.enable_blob_files, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.min_blob_size, 1ULL << 10);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_file_size, 1ULL << 30);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_compression_type, kZSTD);
|
2020-11-13 02:57:20 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.enable_blob_garbage_collection, true);
|
|
|
|
ASSERT_EQ(new_cf_opt.blob_garbage_collection_age_cutoff, 0.5);
|
Make it possible to force the garbage collection of the oldest blob files (#8994)
Summary:
The current BlobDB garbage collection logic works by relocating the valid
blobs from the oldest blob files as they are encountered during compaction,
and cleaning up blob files once they contain nothing but garbage. However,
with sufficiently skewed workloads, it is theoretically possible to end up in a
situation where few or no compactions get scheduled for the SST files that contain
references to the oldest blob files, which can lead to increased space amp due
to the lack of GC.
In order to efficiently handle such workloads, the patch adds a new BlobDB
configuration option called `blob_garbage_collection_force_threshold`,
which signals to BlobDB to schedule targeted compactions for the SST files
that keep alive the oldest batch of blob files if the overall ratio of garbage in
the given blob files meets the threshold *and* all the given blob files are
eligible for GC based on `blob_garbage_collection_age_cutoff`. (For example,
if the new option is set to 0.9, targeted compactions will get scheduled if the
sum of garbage bytes meets or exceeds 90% of the sum of total bytes in the
oldest blob files, assuming all affected blob files are below the age-based cutoff.)
The net result of these targeted compactions is that the valid blobs in the oldest
blob files are relocated and the oldest blob files themselves cleaned up (since
*all* SST files that rely on them get compacted away).
These targeted compactions are similar to periodic compactions in the sense
that they force certain SST files that otherwise would not get picked up to undergo
compaction and also in the sense that instead of merging files from multiple levels,
they target a single file. (Note: such compactions might still include neighboring files
from the same level due to the need of having a "clean cut" boundary but they never
include any files from any other level.)
This functionality is currently only supported with the leveled compaction style
and is inactive by default (since the default value is set to 1.0, i.e. 100%).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8994
Test Plan: Ran `make check` and tested using `db_bench` and the stress/crash tests.
Reviewed By: riversand963
Differential Revision: D31489850
Pulled By: ltamasi
fbshipit-source-id: 44057d511726a0e2a03c5d9313d7511b3f0c4eab
2021-10-12 01:00:44 +00:00
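A minimal sketch of the trigger condition described above; the struct and function names are hypothetical, and the aggregation over blob files is assumed to have happened elsewhere.
```
#include <cstdint>

// Hypothetical aggregate over the oldest batch of blob files, all of
// which are assumed to already satisfy the age-based cutoff.
struct OldestBlobBatch {
  uint64_t total_bytes;
  uint64_t garbage_bytes;
};

// Not RocksDB internals: with force_threshold = 0.9, targeted compactions
// are scheduled once garbage meets or exceeds 90% of the batch's total.
bool ShouldForceBlobGC(const OldestBlobBatch& batch, double force_threshold) {
  return static_cast<double>(batch.garbage_bytes) >=
         force_threshold * static_cast<double>(batch.total_bytes);
}
```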
|
|
|
ASSERT_EQ(new_cf_opt.blob_garbage_collection_force_threshold, 0.75);
|
2021-11-20 01:52:42 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.blob_compaction_readahead_size, 262144);
|
2022-06-03 03:04:33 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.blob_file_starting_level, 1);
|
2022-07-17 14:13:59 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.prepopulate_blob_cache, PrepopulateBlobCache::kDisable);
|
2022-08-08 21:36:34 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.last_level_temperature, Temperature::kWarm);
|
2024-02-28 22:36:13 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.default_write_temperature, Temperature::kCold);
|
2023-08-18 00:06:57 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.default_temperature, Temperature::kHot);
|
2023-04-12 00:50:34 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.persist_user_defined_timestamps, true);
|
2023-08-03 02:58:56 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.memtable_max_range_deletions, 0);
|
2014-10-10 17:00:12 +00:00
|
|
|
|
|
|
|
cf_options_map["write_buffer_size"] = "hello";
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromMap(cf_config_options, base_cf_opt,
|
|
|
|
cf_options_map, &new_cf_opt));
|
2020-04-22 00:35:28 +00:00
|
|
|
ConfigOptions exact, loose;
|
|
|
|
exact.sanity_level = ConfigOptions::kSanityLevelExactMatch;
|
|
|
|
loose.sanity_level = ConfigOptions::kSanityLevelLooselyCompatible;
|
|
|
|
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
2016-08-11 21:54:29 +00:00
|
|
|
|
2014-10-10 17:00:12 +00:00
|
|
|
cf_options_map["write_buffer_size"] = "1";
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromMap(cf_config_options, base_cf_opt,
|
|
|
|
cf_options_map, &new_cf_opt));
|
2016-08-11 21:54:29 +00:00
|
|
|
|
2017-06-13 23:55:08 +00:00
|
|
|
cf_options_map["unknown_option"] = "1";
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromMap(cf_config_options, base_cf_opt,
|
|
|
|
cf_options_map, &new_cf_opt));
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
2014-10-10 17:00:12 +00:00
|
|
|
|
2023-02-07 22:11:53 +00:00
|
|
|
cf_config_options.input_strings_escaped = false;
|
|
|
|
cf_config_options.ignore_unknown_options = true;
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromMap(cf_config_options, base_cf_opt,
|
|
|
|
cf_options_map, &new_cf_opt));
|
2017-06-13 23:55:08 +00:00
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
|
2020-04-22 00:35:28 +00:00
|
|
|
loose, base_cf_opt, new_cf_opt, nullptr /* new_opt_map */));
|
2017-06-13 23:55:08 +00:00
|
|
|
ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
|
2020-04-22 00:35:28 +00:00
|
|
|
exact /* default for VerifyCFOptions */, base_cf_opt, new_cf_opt, nullptr));
|
2017-06-13 23:55:08 +00:00
|
|
|
|
2014-10-10 17:00:12 +00:00
|
|
|
DBOptions base_db_opt;
|
|
|
|
DBOptions new_db_opt;
|
2023-02-07 22:11:53 +00:00
|
|
|
ConfigOptions db_config_options(base_db_opt);
|
|
|
|
db_config_options.input_strings_escaped = false;
|
|
|
|
db_config_options.ignore_unknown_options = false;
|
|
|
|
ASSERT_OK(GetDBOptionsFromMap(db_config_options, base_db_opt, db_options_map,
|
|
|
|
&new_db_opt));
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_db_opt.create_if_missing, false);
|
|
|
|
ASSERT_EQ(new_db_opt.create_missing_column_families, true);
|
|
|
|
ASSERT_EQ(new_db_opt.error_if_exists, false);
|
|
|
|
ASSERT_EQ(new_db_opt.paranoid_checks, true);
|
2020-10-09 23:40:25 +00:00
|
|
|
ASSERT_EQ(new_db_opt.track_and_verify_wals_in_manifest, true);
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_db_opt.max_open_files, 32);
|
|
|
|
ASSERT_EQ(new_db_opt.max_total_wal_size, static_cast<uint64_t>(33));
|
|
|
|
ASSERT_EQ(new_db_opt.use_fsync, true);
|
|
|
|
ASSERT_EQ(new_db_opt.db_log_dir, "/db_log_dir");
|
|
|
|
ASSERT_EQ(new_db_opt.wal_dir, "/wal_dir");
|
|
|
|
ASSERT_EQ(new_db_opt.delete_obsolete_files_period_micros,
|
2014-09-17 22:40:25 +00:00
|
|
|
static_cast<uint64_t>(34));
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_db_opt.max_background_compactions, 35);
|
|
|
|
ASSERT_EQ(new_db_opt.max_background_flushes, 36);
|
|
|
|
ASSERT_EQ(new_db_opt.max_log_file_size, 37U);
|
|
|
|
ASSERT_EQ(new_db_opt.log_file_time_to_roll, 38U);
|
|
|
|
ASSERT_EQ(new_db_opt.keep_log_file_num, 39U);
|
2015-10-08 01:06:28 +00:00
|
|
|
ASSERT_EQ(new_db_opt.recycle_log_file_num, 5U);
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_db_opt.max_manifest_file_size, static_cast<uint64_t>(40));
|
|
|
|
ASSERT_EQ(new_db_opt.table_cache_numshardbits, 41);
|
|
|
|
ASSERT_EQ(new_db_opt.WAL_ttl_seconds, static_cast<uint64_t>(43));
|
|
|
|
ASSERT_EQ(new_db_opt.WAL_size_limit_MB, static_cast<uint64_t>(44));
|
|
|
|
ASSERT_EQ(new_db_opt.manifest_preallocation_size, 45U);
|
|
|
|
ASSERT_EQ(new_db_opt.allow_mmap_reads, true);
|
|
|
|
ASSERT_EQ(new_db_opt.allow_mmap_writes, false);
|
2016-12-22 20:51:29 +00:00
|
|
|
ASSERT_EQ(new_db_opt.use_direct_reads, false);
|
2017-04-13 20:07:33 +00:00
|
|
|
ASSERT_EQ(new_db_opt.use_direct_io_for_flush_and_compaction, false);
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_db_opt.is_fd_close_on_exec, true);
|
|
|
|
ASSERT_EQ(new_db_opt.stats_dump_period_sec, 46U);
|
2019-02-20 23:46:59 +00:00
|
|
|
ASSERT_EQ(new_db_opt.stats_persist_period_sec, 57U);
|
2019-06-17 22:17:43 +00:00
|
|
|
ASSERT_EQ(new_db_opt.persist_stats_to_disk, false);
|
2019-02-20 23:46:59 +00:00
|
|
|
ASSERT_EQ(new_db_opt.stats_history_buffer_size, 69U);
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_db_opt.advise_random_on_open, true);
|
|
|
|
ASSERT_EQ(new_db_opt.use_adaptive_mutex, false);
|
2015-08-26 22:25:59 +00:00
|
|
|
ASSERT_EQ(new_db_opt.compaction_readahead_size, 100);
|
2015-10-27 21:44:16 +00:00
|
|
|
ASSERT_EQ(new_db_opt.random_access_max_buffer_size, 3145728);
|
2015-10-30 05:10:25 +00:00
|
|
|
ASSERT_EQ(new_db_opt.writable_file_max_buffer_size, 314159);
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_db_opt.bytes_per_sync, static_cast<uint64_t>(47));
|
2015-05-19 00:03:59 +00:00
|
|
|
ASSERT_EQ(new_db_opt.wal_bytes_per_sync, static_cast<uint64_t>(48));
|
Optionally wait on bytes_per_sync to smooth I/O (#5183)
Summary:
The existing implementation does not guarantee that bytes reach disk every `bytes_per_sync` when writing SST files, or every `wal_bytes_per_sync` when writing WALs. This can cause confusing behavior for users who enable this feature to avoid large syncs during flush and compaction, but then end up hitting them anyway.
My understanding of the existing behavior is we used `sync_file_range` with `SYNC_FILE_RANGE_WRITE` to submit ranges for async writeback, such that we could continue processing the next range of bytes while that I/O is happening. I believe we can preserve that benefit while also limiting how far the processing can get ahead of the I/O, which prevents huge syncs from happening when the file finishes.
Consider this `sync_file_range` usage: `sync_file_range(fd_, 0, static_cast<off_t>(offset + nbytes), SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE)`. Expanding the range to start at 0 and adding the `SYNC_FILE_RANGE_WAIT_BEFORE` flag causes any pending writeback (like from a previous call to `sync_file_range`) to finish before it proceeds to submit the latest `nbytes` for writeback. The latest `nbytes` are still written back asynchronously, unless processing exceeds I/O speed, in which case the following `sync_file_range` will need to wait on it.
There is a second change in this PR to use `fdatasync` when `sync_file_range` is unavailable (determined statically) or has some known problem with the underlying filesystem (determined dynamically).
The above two changes only apply when the user enables a new option, `strict_bytes_per_sync`.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5183
Differential Revision: D14953553
Pulled By: siying
fbshipit-source-id: 445c3862e019fb7b470f9c7f314fc231b62706e9
2019-04-22 18:48:45 +00:00
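A condensed sketch of the call pattern the commit describes, assuming Linux/glibc; the wrapper name is hypothetical, and error handling plus the fdatasync fallback are omitted.
```
#define _GNU_SOURCE
#include <fcntl.h>  // sync_file_range(), SYNC_FILE_RANGE_* flags

// Hypothetical wrapper: after appending `nbytes` at `offset`, wait for
// previously submitted writeback to finish, then submit the entire
// written prefix for asynchronous writeback. This caps how far
// processing can run ahead of the disk, keeping the final sync small.
int BoundedWriteback(int fd, off_t offset, off_t nbytes) {
  return sync_file_range(fd, 0, offset + nbytes,
                         SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE);
}
```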
|
|
|
ASSERT_EQ(new_db_opt.strict_bytes_per_sync, true);
|
2017-06-13 23:55:08 +00:00
|
|
|
|
|
|
|
db_options_map["max_open_files"] = "hello";
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_NOK(GetDBOptionsFromMap(db_config_options, base_db_opt, db_options_map,
|
|
|
|
&new_db_opt));
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(loose, base_db_opt, new_db_opt));
|
2017-06-13 23:55:08 +00:00
|
|
|
|
|
|
|
// Unknown options should fail parsing without ignore_unknown_options = true
|
|
|
|
db_options_map["unknown_db_option"] = "1";
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_NOK(GetDBOptionsFromMap(db_config_options, base_db_opt, db_options_map,
|
|
|
|
&new_db_opt));
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
|
2017-06-13 23:55:08 +00:00
|
|
|
|
2023-02-07 22:11:53 +00:00
|
|
|
db_config_options.input_strings_escaped = false;
|
|
|
|
db_config_options.ignore_unknown_options = true;
|
|
|
|
ASSERT_OK(GetDBOptionsFromMap(db_config_options, base_db_opt, db_options_map,
|
|
|
|
&new_db_opt));
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(loose, base_db_opt, new_db_opt));
|
|
|
|
ASSERT_NOK(RocksDBOptionsParser::VerifyDBOptions(exact, base_db_opt, new_db_opt));
|
2014-10-10 17:00:12 +00:00
|
|
|
}
|
2014-09-17 19:46:32 +00:00
|
|
|
|
2020-04-22 00:35:28 +00:00
|
|
|
TEST_F(OptionsOldApiTest, GetColumnFamilyOptionsFromStringTest) {
|
2014-10-10 17:00:12 +00:00
|
|
|
ColumnFamilyOptions base_cf_opt;
|
|
|
|
ColumnFamilyOptions new_cf_opt;
|
2014-12-22 21:18:57 +00:00
|
|
|
base_cf_opt.table_factory.reset();
|
2023-02-07 22:11:53 +00:00
|
|
|
ConfigOptions config_options;
|
|
|
|
config_options.input_strings_escaped = false;
|
|
|
|
config_options.ignore_unknown_options = false;
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt, "",
|
|
|
|
&new_cf_opt));
|
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt, "write_buffer_size=5", &new_cf_opt));
|
2014-10-10 21:19:51 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 5U);
|
2014-12-22 21:18:57 +00:00
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory == nullptr);
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt, "write_buffer_size=6;", &new_cf_opt));
|
2014-10-10 21:19:51 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 6U);
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt, " write_buffer_size = 7 ", &new_cf_opt));
|
2014-10-10 21:19:51 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 7U);
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt, " write_buffer_size = 8 ; ", &new_cf_opt));
|
2014-10-10 21:19:51 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 8U);
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=9;max_write_buffer_number=10", &new_cf_opt));
|
2014-10-10 21:19:51 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 9U);
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 10);
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=11; max_write_buffer_number = 12 ;", &new_cf_opt));
|
2014-10-10 21:19:51 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 11U);
|
2014-10-10 17:00:12 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 12);
|
|
|
|
// Wrong name "max_write_buffer_number_"
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=13;max_write_buffer_number_=14;", &new_cf_opt));
|
2020-04-22 00:35:28 +00:00
|
|
|
ConfigOptions exact;
|
|
|
|
exact.sanity_level = ConfigOptions::kSanityLevelExactMatch;
|
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
2016-08-11 21:54:29 +00:00
|
|
|
|
2019-03-26 21:15:26 +00:00
|
|
|
// Comparator from object registry
|
|
|
|
std::string kCompName = "reverse_comp";
|
2022-01-11 14:32:42 +00:00
|
|
|
ObjectLibrary::Default()->AddFactory<const Comparator>(
|
2019-07-24 00:08:26 +00:00
|
|
|
kCompName,
|
|
|
|
[](const std::string& /*name*/,
|
|
|
|
std::unique_ptr<const Comparator>* /*guard*/,
|
|
|
|
std::string* /* errmsg */) { return ReverseBytewiseComparator(); });
|
2019-03-26 21:15:26 +00:00
|
|
|
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
|
|
|
|
"comparator=" + kCompName + ";",
|
|
|
|
&new_cf_opt));
|
2019-03-26 21:15:26 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.comparator, ReverseBytewiseComparator());
|
|
|
|
|
2019-03-28 21:50:06 +00:00
|
|
|
// MergeOperator from object registry
|
|
|
|
std::unique_ptr<BytesXOROperator> bxo(new BytesXOROperator());
|
|
|
|
std::string kMoName = bxo->Name();
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
|
|
|
|
"merge_operator=" + kMoName + ";",
|
|
|
|
&new_cf_opt));
|
2019-03-28 21:50:06 +00:00
|
|
|
ASSERT_EQ(kMoName, std::string(new_cf_opt.merge_operator->Name()));
|
|
|
|
|
2014-10-10 17:00:12 +00:00
|
|
|
// Wrong key/value pair
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=13;max_write_buffer_number;", &new_cf_opt));
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
2016-08-11 21:54:29 +00:00
|
|
|
|
2014-10-10 17:00:12 +00:00
|
|
|
// Error parsing value
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=13;max_write_buffer_number=;", &new_cf_opt));
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
2016-08-11 21:54:29 +00:00
|
|
|
|
2014-10-10 17:00:12 +00:00
|
|
|
// Missing option name
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt, "write_buffer_size=13; =100;", &new_cf_opt));
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));
|
2015-07-01 23:13:49 +00:00
|
|
|
|
2019-09-09 18:22:28 +00:00
|
|
|
const uint64_t kilo = 1024UL;
|
|
|
|
const uint64_t mega = 1024 * kilo;
|
|
|
|
const uint64_t giga = 1024 * mega;
|
|
|
|
const uint64_t tera = 1024 * giga;
|
2015-07-01 23:13:49 +00:00
|
|
|
|
2014-11-17 21:47:51 +00:00
|
|
|
// Units (k)
|
2016-06-04 00:02:10 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
2023-02-07 22:11:53 +00:00
|
|
|
config_options, base_cf_opt, "max_write_buffer_number=15K", &new_cf_opt));
|
2016-11-29 00:35:21 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 15 * kilo);
|
2014-11-17 21:47:51 +00:00
|
|
|
// Units (m)
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"max_write_buffer_number=16m;inplace_update_num_locks=17M", &new_cf_opt));
|
2015-07-13 19:11:05 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 16 * mega);
|
2019-09-09 18:22:28 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 17u * mega);
|
2014-11-17 21:47:51 +00:00
|
|
|
// Units (g)
|
2015-01-21 19:09:56 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
2023-02-07 22:11:53 +00:00
|
|
|
config_options, base_cf_opt,
|
2015-01-21 19:09:56 +00:00
|
|
|
"write_buffer_size=18g;prefix_extractor=capped:8;"
|
|
|
|
"arena_block_size=19G",
|
|
|
|
&new_cf_opt));
|
2015-07-01 23:13:49 +00:00
|
|
|
|
2015-07-13 19:11:05 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 18 * giga);
|
|
|
|
ASSERT_EQ(new_cf_opt.arena_block_size, 19 * giga);
|
2015-01-21 19:09:56 +00:00
|
|
|
ASSERT_TRUE(new_cf_opt.prefix_extractor.get() != nullptr);
|
2021-09-27 14:42:36 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.prefix_extractor->AsString(), "rocksdb.CappedPrefix.8");
|
2015-01-21 19:09:56 +00:00
|
|
|
|
2014-11-17 21:47:51 +00:00
|
|
|
// Units (t)
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt, "write_buffer_size=20t;arena_block_size=21T",
|
|
|
|
&new_cf_opt));
|
2015-07-13 19:11:05 +00:00
|
|
|
ASSERT_EQ(new_cf_opt.write_buffer_size, 20 * tera);
|
|
|
|
ASSERT_EQ(new_cf_opt.arena_block_size, 21 * tera);
|
2014-12-22 21:18:57 +00:00
|
|
|
|
|
|
|
// Nested block based table options
|
2017-06-05 18:23:31 +00:00
|
|
|
// Empty
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={};arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
2014-12-22 21:18:57 +00:00
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
|
|
|
|
// Non-empty
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={block_cache=1M;block_size=4;};"
|
|
|
|
"arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
2014-12-22 21:18:57 +00:00
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
|
|
|
|
// Last one
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={block_cache=1M;block_size=4;}",
|
|
|
|
&new_cf_opt));
|
2014-12-22 21:18:57 +00:00
|
|
|
ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
|
|
|
|
// Mismatched curly braces
|
2023-02-07 22:11:53 +00:00
|
|
|
ASSERT_NOK(GetColumnFamilyOptionsFromString(
|
|
|
|
config_options, base_cf_opt,
|
|
|
|
"write_buffer_size=10;max_write_buffer_number=16;"
|
|
|
|
"block_based_table_factory={{{block_size=4;};"
|
|
|
|
"arena_block_size=1024",
|
|
|
|
&new_cf_opt));
|
2020-04-22 00:35:28 +00:00
|
|
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));

  // Unexpected chars after closing curly brace
  ASSERT_NOK(GetColumnFamilyOptionsFromString(
      config_options, base_cf_opt,
      "write_buffer_size=10;max_write_buffer_number=16;"
      "block_based_table_factory={block_size=4;}};"
      "arena_block_size=1024",
      &new_cf_opt));
  ASSERT_OK(
      RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));

  ASSERT_NOK(GetColumnFamilyOptionsFromString(
      config_options, base_cf_opt,
      "write_buffer_size=10;max_write_buffer_number=16;"
      "block_based_table_factory={block_size=4;}xdfa;"
      "arena_block_size=1024",
      &new_cf_opt));
  ASSERT_OK(
      RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));

  ASSERT_NOK(GetColumnFamilyOptionsFromString(
      config_options, base_cf_opt,
      "write_buffer_size=10;max_write_buffer_number=16;"
      "block_based_table_factory={block_size=4;}xdfa",
      &new_cf_opt));
  ASSERT_OK(
      RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));

  // Invalid block based table option
  ASSERT_NOK(GetColumnFamilyOptionsFromString(
      config_options, base_cf_opt,
      "write_buffer_size=10;max_write_buffer_number=16;"
      "block_based_table_factory={xx_block_size=4;}",
      &new_cf_opt));
  ASSERT_OK(
      RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));

  ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
                                             "optimize_filters_for_hits=true",
                                             &new_cf_opt));
  ASSERT_OK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
                                             "optimize_filters_for_hits=false",
                                             &new_cf_opt));

  ASSERT_NOK(GetColumnFamilyOptionsFromString(config_options, base_cf_opt,
                                              "optimize_filters_for_hits=junk",
                                              &new_cf_opt));
  ASSERT_OK(
      RocksDBOptionsParser::VerifyCFOptions(exact, base_cf_opt, new_cf_opt));

  // Nested plain table options
  // Empty
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      config_options, base_cf_opt,
      "write_buffer_size=10;max_write_buffer_number=16;"
      "plain_table_factory={};arena_block_size=1024",
      &new_cf_opt));
  ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
  ASSERT_EQ(std::string(new_cf_opt.table_factory->Name()), "PlainTable");
  // Non-empty
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      config_options, base_cf_opt,
      "write_buffer_size=10;max_write_buffer_number=16;"
      "plain_table_factory={user_key_len=66;bloom_bits_per_key=20;};"
      "arena_block_size=1024",
      &new_cf_opt));
  ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
  ASSERT_EQ(std::string(new_cf_opt.table_factory->Name()), "PlainTable");

  // memtable factory
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      config_options, base_cf_opt,
      "write_buffer_size=10;max_write_buffer_number=16;"
      "memtable=skip_list:10;arena_block_size=1024",
      &new_cf_opt));
  ASSERT_TRUE(new_cf_opt.memtable_factory != nullptr);
  ASSERT_TRUE(new_cf_opt.memtable_factory->IsInstanceOf("SkipListFactory"));
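  // "skip_list:10" selects SkipListFactory; the trailing number is the
  // factory argument (the skip list's lookahead).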

  // blob cache
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      config_options, base_cf_opt,
      "blob_cache={capacity=1M;num_shard_bits=4;"
      "strict_capacity_limit=true;high_pri_pool_ratio=0.5;};",
      &new_cf_opt));
  ASSERT_NE(new_cf_opt.blob_cache, nullptr);
  ASSERT_EQ(new_cf_opt.blob_cache->GetCapacity(), 1024UL * 1024UL);
  ASSERT_EQ(static_cast<ShardedCacheBase*>(new_cf_opt.blob_cache.get())
                ->GetNumShardBits(),
            4);
  ASSERT_EQ(new_cf_opt.blob_cache->HasStrictCapacityLimit(), true);
  ASSERT_EQ(static_cast<LRUCache*>(new_cf_opt.blob_cache.get())
                ->GetHighPriPoolRatio(),
            0.5);
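  // The nested blob_cache struct (LRUCacheOptions fields such as
  // high_pri_pool_ratio) builds an LRU cache, which is what makes the
  // ShardedCacheBase/LRUCache casts above safe in this test.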
}

TEST_F(OptionsTest, SliceTransformCreateFromString) {
  std::shared_ptr<const SliceTransform> transform = nullptr;
  ConfigOptions config_options;
  config_options.ignore_unsupported_options = false;
  config_options.ignore_unknown_options = false;

  ASSERT_OK(
      SliceTransform::CreateFromString(config_options, "fixed:31", &transform));
  ASSERT_NE(transform, nullptr);
  ASSERT_FALSE(transform->IsInstanceOf("capped"));
  ASSERT_TRUE(transform->IsInstanceOf("fixed"));
  ASSERT_TRUE(transform->IsInstanceOf("rocksdb.FixedPrefix"));
  ASSERT_EQ(transform->GetId(), "rocksdb.FixedPrefix.31");
  ASSERT_OK(SliceTransform::CreateFromString(
      config_options, "rocksdb.FixedPrefix.42", &transform));
  ASSERT_NE(transform, nullptr);
  ASSERT_EQ(transform->GetId(), "rocksdb.FixedPrefix.42");

  ASSERT_OK(SliceTransform::CreateFromString(config_options, "capped:16",
                                             &transform));
  ASSERT_NE(transform, nullptr);
  ASSERT_FALSE(transform->IsInstanceOf("fixed"));
  ASSERT_TRUE(transform->IsInstanceOf("capped"));
  ASSERT_TRUE(transform->IsInstanceOf("rocksdb.CappedPrefix"));
  ASSERT_EQ(transform->GetId(), "rocksdb.CappedPrefix.16");
  ASSERT_OK(SliceTransform::CreateFromString(
      config_options, "rocksdb.CappedPrefix.42", &transform));
  ASSERT_NE(transform, nullptr);
  ASSERT_EQ(transform->GetId(), "rocksdb.CappedPrefix.42");

  ASSERT_OK(SliceTransform::CreateFromString(config_options, "rocksdb.Noop",
                                             &transform));
  ASSERT_NE(transform, nullptr);

  ASSERT_NOK(SliceTransform::CreateFromString(config_options,
                                              "fixed:21:invalid", &transform));
  ASSERT_NOK(SliceTransform::CreateFromString(config_options,
                                              "capped:21:invalid", &transform));
  ASSERT_NOK(
      SliceTransform::CreateFromString(config_options, "fixed", &transform));
  ASSERT_NOK(
      SliceTransform::CreateFromString(config_options, "capped", &transform));
  ASSERT_NOK(
      SliceTransform::CreateFromString(config_options, "fixed:", &transform));
  ASSERT_NOK(
      SliceTransform::CreateFromString(config_options, "capped:", &transform));
  ASSERT_NOK(SliceTransform::CreateFromString(
      config_options, "rocksdb.FixedPrefix:42", &transform));
  ASSERT_NOK(SliceTransform::CreateFromString(
      config_options, "rocksdb.CappedPrefix:42", &transform));
  ASSERT_NOK(SliceTransform::CreateFromString(
      config_options, "rocksdb.FixedPrefix", &transform));
  ASSERT_NOK(SliceTransform::CreateFromString(
      config_options, "rocksdb.CappedPrefix", &transform));
  ASSERT_NOK(SliceTransform::CreateFromString(
      config_options, "rocksdb.FixedPrefix.", &transform));
  ASSERT_NOK(SliceTransform::CreateFromString(
      config_options, "rocksdb.CappedPrefix.", &transform));
  ASSERT_NOK(
      SliceTransform::CreateFromString(config_options, "invalid", &transform));
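
  // IsInstanceOf() accepts any of a transform's registered aliases: the
  // short name ("capped"), the length-qualified short name ("capped:11"),
  // and the canonical id with or without its length suffix, as the two
  // blocks below verify for both prefix extractor types.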
  ASSERT_OK(SliceTransform::CreateFromString(
      config_options, "rocksdb.CappedPrefix.11", &transform));
  ASSERT_NE(transform, nullptr);
  ASSERT_EQ(transform->GetId(), "rocksdb.CappedPrefix.11");
  ASSERT_TRUE(transform->IsInstanceOf("capped"));
  ASSERT_TRUE(transform->IsInstanceOf("capped:11"));
  ASSERT_TRUE(transform->IsInstanceOf("rocksdb.CappedPrefix"));
  ASSERT_TRUE(transform->IsInstanceOf("rocksdb.CappedPrefix.11"));
  ASSERT_FALSE(transform->IsInstanceOf("fixed"));
  ASSERT_FALSE(transform->IsInstanceOf("fixed:11"));
  ASSERT_FALSE(transform->IsInstanceOf("rocksdb.FixedPrefix"));
  ASSERT_FALSE(transform->IsInstanceOf("rocksdb.FixedPrefix.11"));

  ASSERT_OK(SliceTransform::CreateFromString(
      config_options, "rocksdb.FixedPrefix.11", &transform));
  ASSERT_TRUE(transform->IsInstanceOf("fixed"));
  ASSERT_TRUE(transform->IsInstanceOf("fixed:11"));
  ASSERT_TRUE(transform->IsInstanceOf("rocksdb.FixedPrefix"));
  ASSERT_TRUE(transform->IsInstanceOf("rocksdb.FixedPrefix.11"));
  ASSERT_FALSE(transform->IsInstanceOf("capped"));
  ASSERT_FALSE(transform->IsInstanceOf("capped:11"));
  ASSERT_FALSE(transform->IsInstanceOf("rocksdb.CappedPrefix"));
  ASSERT_FALSE(transform->IsInstanceOf("rocksdb.CappedPrefix.11"));
}

TEST_F(OptionsOldApiTest, GetBlockBasedTableOptionsFromString) {
  BlockBasedTableOptions table_opt;
  BlockBasedTableOptions new_opt;
  ConfigOptions config_options;
  config_options.input_strings_escaped = false;
  config_options.ignore_unknown_options = false;
  config_options.invoke_prepare_options = false;
  config_options.ignore_unsupported_options = false;

  // make sure default values are overwritten by something else
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt,
      "cache_index_and_filter_blocks=1;index_type=kHashSearch;"
      "checksum=kxxHash;no_block_cache=1;"
      "block_cache=1M;block_cache_compressed=1k;block_size=1024;"
      "block_size_deviation=8;block_restart_interval=4;"
      "format_version=5;whole_key_filtering=1;"
      "filter_policy=bloomfilter:4.567:false;",
      &new_opt));
  ASSERT_TRUE(new_opt.cache_index_and_filter_blocks);
  ASSERT_EQ(new_opt.index_type, BlockBasedTableOptions::kHashSearch);
  ASSERT_EQ(new_opt.checksum, ChecksumType::kxxHash);
  ASSERT_TRUE(new_opt.no_block_cache);
  ASSERT_TRUE(new_opt.block_cache != nullptr);
  ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL * 1024UL);
  ASSERT_EQ(new_opt.block_size, 1024UL);
  ASSERT_EQ(new_opt.block_size_deviation, 8);
  ASSERT_EQ(new_opt.block_restart_interval, 4);
  ASSERT_EQ(new_opt.format_version, 5U);
  ASSERT_EQ(new_opt.whole_key_filtering, true);
  ASSERT_TRUE(new_opt.filter_policy != nullptr);
  const BloomFilterPolicy* bfp =
      dynamic_cast<const BloomFilterPolicy*>(new_opt.filter_policy.get());
  EXPECT_EQ(bfp->GetMillibitsPerKey(), 4567);
  EXPECT_EQ(bfp->GetWholeBitsPerKey(), 5);
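  // Fractional bits/key are tracked internally as millibits (4.567 -> 4567);
  // the legacy Bloom filter rounds to the nearest whole bits/key, hence
  // GetWholeBitsPerKey() == 5.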

  // unknown option
  ASSERT_NOK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt,
      "cache_index_and_filter_blocks=1;index_type=kBinarySearch;"
      "bad_option=1",
      &new_opt));
  ASSERT_EQ(static_cast<bool>(table_opt.cache_index_and_filter_blocks),
            new_opt.cache_index_and_filter_blocks);
  ASSERT_EQ(table_opt.index_type, new_opt.index_type);

  // unrecognized index type
  ASSERT_NOK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt,
      "cache_index_and_filter_blocks=1;index_type=kBinarySearchXX", &new_opt));
  ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
            new_opt.cache_index_and_filter_blocks);
  ASSERT_EQ(table_opt.index_type, new_opt.index_type);

  // unrecognized checksum type
  ASSERT_NOK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt,
      "cache_index_and_filter_blocks=1;checksum=kxxHashXX", &new_opt));
  ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
            new_opt.cache_index_and_filter_blocks);
  ASSERT_EQ(table_opt.index_type, new_opt.index_type);

  // unrecognized filter policy name
  ASSERT_NOK(
      GetBlockBasedTableOptionsFromString(config_options, table_opt,
                                          "cache_index_and_filter_blocks=1;"
                                          "filter_policy=bloomfilterxx:4:true",
                                          &new_opt));
  ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
            new_opt.cache_index_and_filter_blocks);
  ASSERT_EQ(table_opt.filter_policy, new_opt.filter_policy);

  // Used to be rejected, now accepted
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt, "filter_policy=bloomfilter:4", &new_opt));
  bfp = dynamic_cast<const BloomFilterPolicy*>(new_opt.filter_policy.get());
  EXPECT_EQ(bfp->GetMillibitsPerKey(), 4000);
  EXPECT_EQ(bfp->GetWholeBitsPerKey(), 4);

  // Check block cache options are overwritten when specified
  // in new format as a struct.
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt,
      "block_cache={capacity=1M;num_shard_bits=4;"
      "strict_capacity_limit=true;high_pri_pool_ratio=0.5;};"
      "block_cache_compressed={capacity=1M;num_shard_bits=4;"
      "strict_capacity_limit=true;high_pri_pool_ratio=0.5;}",
      &new_opt));
  ASSERT_TRUE(new_opt.block_cache != nullptr);
  ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL * 1024UL);
  ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
                ->GetNumShardBits(),
            4);
  ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), true);
  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
                ->GetHighPriPoolRatio(),
            0.5);

  // Set only block cache capacity. Check other values are
  // reset to default values.
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt,
      "block_cache={capacity=2M};"
      "block_cache_compressed={capacity=2M}",
      &new_opt));
  ASSERT_TRUE(new_opt.block_cache != nullptr);
  ASSERT_EQ(new_opt.block_cache->GetCapacity(), 2 * 1024UL * 1024UL);
  // Default values
  ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
                ->GetNumShardBits(),
            GetDefaultCacheShardBits(new_opt.block_cache->GetCapacity()));
  ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), false);
  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
                ->GetHighPriPoolRatio(),
            0.5);
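  // Each nested {...} literal constructs a brand-new cache, so any field
  // left out reverts to its default (e.g. num_shard_bits is derived from
  // capacity via GetDefaultCacheShardBits) rather than inheriting the value
  // from the previous cache.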

  // Set couple of block cache options.
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt,
      "block_cache={num_shard_bits=5;high_pri_pool_ratio=0.5;};"
      "block_cache_compressed={num_shard_bits=5;"
      "high_pri_pool_ratio=0.0;}",
      &new_opt));
  ASSERT_EQ(new_opt.block_cache->GetCapacity(), 0);
  ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
                ->GetNumShardBits(),
            5);
  ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), false);
  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
                ->GetHighPriPoolRatio(),
            0.5);

  // Set couple of block cache options.
  ASSERT_OK(GetBlockBasedTableOptionsFromString(
      config_options, table_opt,
      "block_cache={capacity=1M;num_shard_bits=4;"
      "strict_capacity_limit=true;};"
      "block_cache_compressed={capacity=1M;num_shard_bits=4;"
      "strict_capacity_limit=true;}",
      &new_opt));
  ASSERT_TRUE(new_opt.block_cache != nullptr);
  ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL * 1024UL);
  ASSERT_EQ(std::dynamic_pointer_cast<ShardedCacheBase>(new_opt.block_cache)
                ->GetNumShardBits(),
            4);
  ASSERT_EQ(new_opt.block_cache->HasStrictCapacityLimit(), true);
  ASSERT_EQ(std::dynamic_pointer_cast<LRUCache>(new_opt.block_cache)
                ->GetHighPriPoolRatio(),
            0.5);
}

TEST_F(OptionsOldApiTest, GetPlainTableOptionsFromString) {
  PlainTableOptions table_opt;
  PlainTableOptions new_opt;
  // make sure default values are overwritten by something else
  ConfigOptions config_options_from_string;
  config_options_from_string.input_strings_escaped = false;
  config_options_from_string.ignore_unknown_options = false;
  config_options_from_string.invoke_prepare_options = false;
  ASSERT_OK(GetPlainTableOptionsFromString(
      config_options_from_string, table_opt,
      "user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
      "index_sparseness=8;huge_page_tlb_size=4;encoding_type=kPrefix;"
      "full_scan_mode=true;store_index_in_file=true",
      &new_opt));
  ASSERT_EQ(new_opt.user_key_len, 66u);
  ASSERT_EQ(new_opt.bloom_bits_per_key, 20);
  ASSERT_EQ(new_opt.hash_table_ratio, 0.5);
  ASSERT_EQ(new_opt.index_sparseness, 8);
  ASSERT_EQ(new_opt.huge_page_tlb_size, 4);
  ASSERT_EQ(new_opt.encoding_type, EncodingType::kPrefix);
  ASSERT_TRUE(new_opt.full_scan_mode);
  ASSERT_TRUE(new_opt.store_index_in_file);

  std::unordered_map<std::string, std::string> opt_map;
  ASSERT_OK(StringToMap(
      "user_key_len=55;bloom_bits_per_key=10;huge_page_tlb_size=8;", &opt_map));
  ConfigOptions config_options_from_map;
  config_options_from_map.input_strings_escaped = false;
  config_options_from_map.ignore_unknown_options = false;
  ASSERT_OK(GetPlainTableOptionsFromMap(config_options_from_map, table_opt,
                                        opt_map, &new_opt));
  ASSERT_EQ(new_opt.user_key_len, 55u);
  ASSERT_EQ(new_opt.bloom_bits_per_key, 10);
  ASSERT_EQ(new_opt.huge_page_tlb_size, 8);
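  // StringToMap() splits the same "name=value;" syntax into an unordered
  // map, so the map-based and string-based entry points can be exercised on
  // one input.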

  // unknown option
  ASSERT_NOK(GetPlainTableOptionsFromString(
      config_options_from_string, table_opt,
      "user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
      "bad_option=1",
      &new_opt));

  // unrecognized EncodingType
  ASSERT_NOK(GetPlainTableOptionsFromString(
      config_options_from_string, table_opt,
      "user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
      "encoding_type=kPrefixXX",
      &new_opt));
}

TEST_F(OptionsOldApiTest, GetOptionsFromStringTest) {
  Options base_options, new_options;
  base_options.write_buffer_size = 20;
  base_options.min_write_buffer_number_to_merge = 15;
  BlockBasedTableOptions block_based_table_options;
  block_based_table_options.cache_index_and_filter_blocks = true;
  base_options.table_factory.reset(
      NewBlockBasedTableFactory(block_based_table_options));

  // Register an Env with object registry.
  ObjectLibrary::Default()->AddFactory<Env>(
      "CustomEnvDefault",
      [](const std::string& /*name*/, std::unique_ptr<Env>* /*env_guard*/,
         std::string* /* errmsg */) {
        static CustomEnv env(Env::Default());
        return &env;
      });
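  // The factory above is what lets "env=CustomEnvDefault" in the options
  // string below resolve; it always hands out the same static CustomEnv
  // instance.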

  ASSERT_OK(GetOptionsFromString(
      base_options,
      "write_buffer_size=10;max_write_buffer_number=16;"
      "block_based_table_factory={block_cache=1M;block_size=4;};"
      "compression_opts=4:5:6;create_if_missing=true;max_open_files=1;"
      "bottommost_compression_opts=5:6:7;create_if_missing=true;max_open_files="
      "1;"
      "rate_limiter_bytes_per_sec=1024;env=CustomEnvDefault",
      &new_options));

  ASSERT_EQ(new_options.compression_opts.window_bits, 4);
  ASSERT_EQ(new_options.compression_opts.level, 5);
  ASSERT_EQ(new_options.compression_opts.strategy, 6);
  ASSERT_EQ(new_options.compression_opts.max_dict_bytes, 0u);
  ASSERT_EQ(new_options.compression_opts.zstd_max_train_bytes, 0u);
  ASSERT_EQ(new_options.compression_opts.parallel_threads, 1u);
  ASSERT_EQ(new_options.compression_opts.enabled, false);
  ASSERT_EQ(new_options.compression_opts.use_zstd_dict_trainer, true);
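  // compression_opts=4:5:6 is the colon-separated short form
  // (window_bits:level:strategy); fields it does not list keep their
  // defaults, which the max_dict_bytes/zstd/parallel_threads asserts above
  // confirm.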
  ASSERT_EQ(new_options.bottommost_compression, kDisableCompressionOption);
  ASSERT_EQ(new_options.bottommost_compression_opts.window_bits, 5);
  ASSERT_EQ(new_options.bottommost_compression_opts.level, 6);
  ASSERT_EQ(new_options.bottommost_compression_opts.strategy, 7);
  ASSERT_EQ(new_options.bottommost_compression_opts.max_dict_bytes, 0u);
  ASSERT_EQ(new_options.bottommost_compression_opts.zstd_max_train_bytes, 0u);
  ASSERT_EQ(new_options.bottommost_compression_opts.parallel_threads, 1u);
  ASSERT_EQ(new_options.bottommost_compression_opts.enabled, false);
  ASSERT_EQ(new_options.bottommost_compression_opts.use_zstd_dict_trainer,
            true);
  ASSERT_EQ(new_options.write_buffer_size, 10U);
  ASSERT_EQ(new_options.max_write_buffer_number, 16);

  auto new_block_based_table_options =
      new_options.table_factory->GetOptions<BlockBasedTableOptions>();
  ASSERT_NE(new_block_based_table_options, nullptr);
  ASSERT_EQ(new_block_based_table_options->block_cache->GetCapacity(),
            1U << 20);
  ASSERT_EQ(new_block_based_table_options->block_size, 4U);
  // don't overwrite block based table options
  ASSERT_TRUE(new_block_based_table_options->cache_index_and_filter_blocks);

  ASSERT_EQ(new_options.create_if_missing, true);
  ASSERT_EQ(new_options.max_open_files, 1);
  ASSERT_TRUE(new_options.rate_limiter.get() != nullptr);
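  // Resolving "CustomEnvDefault" again should return the same static
  // instance the registered factory produced, so newEnv stays equal to
  // new_options.env.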
  Env* newEnv = new_options.env;
  ASSERT_OK(Env::CreateFromString({}, "CustomEnvDefault", &newEnv));
  ASSERT_EQ(newEnv, new_options.env);
}

TEST_F(OptionsOldApiTest, DBOptionsSerialization) {
  Options base_options, new_options;
  Random rnd(301);

  // Phase 1: Make big change in base_options
  test::RandomInitDBOptions(&base_options, &rnd);

  // Phase 2: obtain a string from base_options
  std::string base_options_file_content;
  ASSERT_OK(GetStringFromDBOptions(&base_options_file_content, base_options));

  // Phase 3: Set new_options from the derived string and expect
  // new_options == base_options
  const DBOptions base_db_options;
  ConfigOptions db_config_options(base_db_options);
  db_config_options.input_strings_escaped = false;
  db_config_options.ignore_unknown_options = false;
  ASSERT_OK(GetDBOptionsFromString(db_config_options, base_db_options,
                                   base_options_file_content, &new_options));
  ConfigOptions verify_db_config_options;
  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(verify_db_config_options,
                                                  base_options, new_options));
}

TEST_F(OptionsOldApiTest, ColumnFamilyOptionsSerialization) {
  Options options;
  ColumnFamilyOptions base_opt, new_opt;
  Random rnd(302);
  // Phase 1: randomly assign base_opt
  // custom type options
  test::RandomInitCFOptions(&base_opt, options, &rnd);

  // Phase 2: obtain a string from base_opt
  std::string base_options_file_content;
  ASSERT_OK(
      GetStringFromColumnFamilyOptions(&base_options_file_content, base_opt));

  // Phase 3: Set new_opt from the derived string and expect
  // new_opt == base_opt
  ConfigOptions cf_config_options;
  cf_config_options.input_strings_escaped = false;
  cf_config_options.ignore_unknown_options = false;
  ASSERT_OK(
      GetColumnFamilyOptionsFromString(cf_config_options, ColumnFamilyOptions(),
                                       base_options_file_content, &new_opt));
  ConfigOptions verify_cf_config_options;
  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(verify_cf_config_options,
                                                  base_opt, new_opt));
2015-10-02 22:35:32 +00:00
|
|
|
if (base_opt.compaction_filter) {
|
|
|
|
delete base_opt.compaction_filter;
|
|
|
|
}
|
2015-08-26 23:13:56 +00:00
|
|
|
}
|
2014-09-17 19:46:32 +00:00
|
|
|
|
RocksDB Options file format and its serialization / deserialization.
Summary:
This patch defines the format of RocksDB options file, which
follows the INI file format, and implements functions for its
serialization and deserialization. An example RocksDB options
file can be found in examples/rocksdb_option_file_example.ini.
A typical RocksDB options file has three sections, which are
Version, DBOptions, and more than one CFOptions. The RocksDB
options file in general follows the basic INI file format
with the following extensions / modifications:
* Escaped characters
We escaped the following characters:
- \n -- line feed - new line
- \r -- carriage return
- \\ -- backslash \
- \: -- colon symbol :
- \# -- hash tag #
* Comments
We support # style comments. Comments can appear at the ending
part of a line.
* Statements
A statement is of the form option_name = value.
Each statement contains a '=', where extra white-spaces
are supported. However, we don't support multi-lined statement.
Furthermore, each line can only contain at most one statement.
* Section
Sections are of the form [SecitonTitle "SectionArgument"],
where section argument is optional.
* List
We use colon-separated string to represent a list.
For instance, n1:n2:n3:n4 is a list containing four values.
Below is an example of a RocksDB options file:
[Version]
rocksdb_version=4.0.0
options_file_version=1.0
[DBOptions]
max_open_files=12345
max_background_flushes=301
[CFOptions "default"]
[CFOptions "the second column family"]
[CFOptions "the third column family"]
Test Plan: Added many tests in options_test.cc
Reviewers: igor, IslamAbdelRahman, sdong, anthony
Reviewed By: anthony
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D46059
2015-09-29 21:42:40 +00:00
|
|
|
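// Fixture for the options-file parser tests below. fs_ is a test::StringFS
// (a string-backed FileSystem wrapper from the test utilities), so each test
// can write an options file and parse it back without touching real files.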
class OptionsParserTest : public testing::Test {
 public:
  OptionsParserTest() { fs_.reset(new test::StringFS(FileSystem::Default())); }

 protected:
  std::shared_ptr<test::StringFS> fs_;
};

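// The parser expects all three section kinds to be present. A minimal
// well-formed options file looks like this sketch (version numbers are
// illustrative):
//
//   [Version]
//     rocksdb_version=3.14.0
//     options_file_version=1
//   [DBOptions]
//   [CFOptions "default"]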
TEST_F(OptionsParserTest, Comment) {
  DBOptions db_opt;
  db_opt.max_open_files = 12345;
  db_opt.max_background_flushes = 301;
  db_opt.max_total_wal_size = 1024;
  ColumnFamilyOptions cf_opt;

  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.14.0\n"
      " options_file_version=1\n"
      "[ DBOptions ]\n"
      " # note that we don't support space around \"=\"\n"
      " max_open_files=12345;\n"
      " max_background_flushes=301 # comment after a statement is fine\n"
      " # max_background_flushes=1000 # this line would be ignored\n"
      " # max_background_compactions=2000 # so does this one\n"
      " max_total_wal_size=1024 # keep_log_file_num=1000\n"
      "[CFOptions \"default\"] # column family must be specified\n"
      " # in the correct order\n"
      " # if a section is blank, we will use the default\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
  RocksDBOptionsParser parser;
  ASSERT_OK(
      parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));

  ConfigOptions exact;
  exact.input_strings_escaped = false;
  exact.sanity_level = ConfigOptions::kSanityLevelExactMatch;
  ASSERT_OK(
      RocksDBOptionsParser::VerifyDBOptions(exact, *parser.db_opt(), db_opt));
  ASSERT_EQ(parser.NumColumnFamilies(), 1U);
  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
      exact, *parser.GetCFOptions("default"), cf_opt));
}

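// Extra whitespace around section brackets, option names, "=", and values
// should all be tolerated.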
TEST_F(OptionsParserTest, ExtraSpace) {
  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[ Version ]\n"
      " rocksdb_version = 3.14.0 \n"
      " options_file_version=1 # some comment\n"
      "[DBOptions ] # some comment\n"
      "max_open_files=12345 \n"
      " max_background_flushes = 301 \n"
      " max_total_wal_size = 1024 # keep_log_file_num=1000\n"
      " [CFOptions \"default\" ]\n"
      " # if a section is blank, we will use the default\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
  RocksDBOptionsParser parser;
  ASSERT_OK(
      parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));
}

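// A file without a [DBOptions] section must fail to parse.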
TEST_F(OptionsParserTest, MissingDBOptions) {
  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.14.0\n"
      " options_file_version=1\n"
      "[CFOptions \"default\"]\n"
      " # if a section is blank, we will use the default\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
  RocksDBOptionsParser parser;
  ASSERT_NOK(
      parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));
}

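// A repeated [DBOptions] section must be rejected.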
TEST_F(OptionsParserTest, DoubleDBOptions) {
  DBOptions db_opt;
  db_opt.max_open_files = 12345;
  db_opt.max_background_flushes = 301;
  db_opt.max_total_wal_size = 1024;
  ColumnFamilyOptions cf_opt;

  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.14.0\n"
      " options_file_version=1\n"
      "[DBOptions]\n"
      " max_open_files=12345\n"
      " max_background_flushes=301\n"
      " max_total_wal_size=1024 # keep_log_file_num=1000\n"
      "[DBOptions]\n"
      "[CFOptions \"default\"]\n"
      " # if a section is blank, we will use the default\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
  RocksDBOptionsParser parser;
  ASSERT_NOK(
      parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));
}

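// A [CFOptions "default"] section is mandatory.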
TEST_F(OptionsParserTest, NoDefaultCFOptions) {
  DBOptions db_opt;
  db_opt.max_open_files = 12345;
  db_opt.max_background_flushes = 301;
  db_opt.max_total_wal_size = 1024;
  ColumnFamilyOptions cf_opt;

  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.14.0\n"
      " options_file_version=1\n"
      "[DBOptions]\n"
      " max_open_files=12345\n"
      " max_background_flushes=301\n"
      " max_total_wal_size=1024 # keep_log_file_num=1000\n"
      "[CFOptions \"something_else\"]\n"
      " # if a section is blank, we will use the default\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
  RocksDBOptionsParser parser;
  ASSERT_NOK(
      parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));
}

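// The default column family section must also come before any other
// CFOptions section.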
TEST_F(OptionsParserTest, DefaultCFOptionsMustBeTheFirst) {
  DBOptions db_opt;
  db_opt.max_open_files = 12345;
  db_opt.max_background_flushes = 301;
  db_opt.max_total_wal_size = 1024;
  ColumnFamilyOptions cf_opt;

  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.14.0\n"
      " options_file_version=1\n"
      "[DBOptions]\n"
      " max_open_files=12345\n"
      " max_background_flushes=301\n"
      " max_total_wal_size=1024 # keep_log_file_num=1000\n"
      "[CFOptions \"something_else\"]\n"
      " # if a section is blank, we will use the default\n"
      "[CFOptions \"default\"]\n"
      " # if a section is blank, we will use the default\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
  RocksDBOptionsParser parser;
  ASSERT_NOK(
      parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));
}

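// The same column family may not be specified more than once.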
TEST_F(OptionsParserTest, DuplicateCFOptions) {
  DBOptions db_opt;
  db_opt.max_open_files = 12345;
  db_opt.max_background_flushes = 301;
  db_opt.max_total_wal_size = 1024;
  ColumnFamilyOptions cf_opt;

  std::string options_file_content =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.14.0\n"
      " options_file_version=1\n"
      "[DBOptions]\n"
      " max_open_files=12345\n"
      " max_background_flushes=301\n"
      " max_total_wal_size=1024 # keep_log_file_num=1000\n"
      "[CFOptions \"default\"]\n"
      "[CFOptions \"something_else\"]\n"
      "[CFOptions \"something_else\"]\n";

  const std::string kTestFileName = "test-rocksdb-options.ini";
  ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
  RocksDBOptionsParser parser;
  ASSERT_NOK(
      parser.Parse(kTestFileName, fs_.get(), false, 4096 /* readahead_size */));
}

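// Unknown options always fail a strict parse. With ignore_unknown_options
// set, they are skipped only when the file claims a newer RocksDB version
// than the binary doing the parsing; same-or-older versions still fail.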
TEST_F(OptionsParserTest, IgnoreUnknownOptions) {
  auto testCase = [&](bool should_ignore, const std::string& version_string) {
    SCOPED_TRACE(std::to_string(should_ignore) + ", " + version_string);

    std::string options_file_content =
        "# This is a testing option string.\n"
        "# Currently we only support \"#\" styled comment.\n"
        "\n"
        "[Version]\n"
        " rocksdb_version=" +
        version_string +
        "\n"
        " options_file_version=1\n"
        "[DBOptions]\n"
        " max_open_files=12345\n"
        " max_background_flushes=301\n"
        " max_total_wal_size=1024 # keep_log_file_num=1000\n"
        " unknown_db_option1=321\n"
        " unknown_db_option2=false\n"
        "[CFOptions \"default\"]\n"
        " unknown_cf_option1=hello\n"
        "[CFOptions \"something_else\"]\n"
        " unknown_cf_option2=world\n"
        " # if a section is blank, we will use the default\n";

    const std::string kTestFileName = "test-rocksdb-options.ini";
    auto s = fs_->FileExists(kTestFileName, IOOptions(), nullptr);
    ASSERT_TRUE(s.ok() || s.IsNotFound());
    if (s.ok()) {
      ASSERT_OK(fs_->DeleteFile(kTestFileName, IOOptions(), nullptr));
    }
    ASSERT_OK(fs_->WriteToNewFile(kTestFileName, options_file_content));
    RocksDBOptionsParser parser;
    ASSERT_NOK(parser.Parse(kTestFileName, fs_.get(), false,
                            4096 /* readahead_size */));
    Status parse_status = parser.Parse(kTestFileName, fs_.get(),
                                       true /* ignore_unknown_options */,
                                       4096 /* readahead_size */);
    if (should_ignore) {
      ASSERT_OK(parse_status);
    } else {
      ASSERT_NOK(parse_status);
    }
  };

  // Same version
  testCase(false, GetRocksVersionAsString());
  // Same except .0 patch
  testCase(false, std::to_string(ROCKSDB_MAJOR) + "." +
                      std::to_string(ROCKSDB_MINOR) + ".0");
  // Higher major version
  testCase(true, std::to_string(ROCKSDB_MAJOR + 1) + "." +
                     std::to_string(ROCKSDB_MINOR) + ".0");
  // Higher minor version
  testCase(true, std::to_string(ROCKSDB_MAJOR) + "." +
                     std::to_string(ROCKSDB_MINOR + 1) + ".0");
  // Higher patch version
  testCase(true, std::to_string(ROCKSDB_MAJOR) + "." +
                     std::to_string(ROCKSDB_MINOR) + "." +
                     std::to_string(ROCKSDB_PATCH + 1));
  // Lower major version
  testCase(false, std::to_string(ROCKSDB_MAJOR - 1) + "." +
                      std::to_string(ROCKSDB_MINOR) + ".0");
#if ROCKSDB_MINOR > 0
  // Lower minor version
  testCase(false, std::to_string(ROCKSDB_MAJOR) + "." +
                      std::to_string(ROCKSDB_MINOR - 1) + ".0");
#endif
#if ROCKSDB_PATCH > 0
  // Lower patch version
  testCase(false, std::to_string(ROCKSDB_MAJOR) + "." +
                      std::to_string(ROCKSDB_MINOR) + "." +
                      std::to_string(ROCKSDB_PATCH - 1));
#endif
}

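// options_file_version must be a positive dotted decimal with at most one
// dot and at least one digit on each side of it; surrounding whitespace is
// trimmed. Anything else should make Parse() fail.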
TEST_F(OptionsParserTest, ParseVersion) {
  DBOptions db_opt;
  db_opt.max_open_files = 12345;
  db_opt.max_background_flushes = 301;
  db_opt.max_total_wal_size = 1024;
  ColumnFamilyOptions cf_opt;

  std::string file_template =
      "# This is a testing option string.\n"
      "# Currently we only support \"#\" styled comment.\n"
      "\n"
      "[Version]\n"
      " rocksdb_version=3.13.1\n"
      " options_file_version=%s\n"
      "[DBOptions]\n"
      "[CFOptions \"default\"]\n";
  const int kLength = 1000;
  char buffer[kLength];
  RocksDBOptionsParser parser;

  const std::vector<std::string> invalid_versions = {
      "a.b.c", "3.2.2b", "3.-12", "3. 1",  // only digits and dots are allowed
      "1.2.3.4",
      "1.2.3",  // can contain at most one dot
      "0",      // options_file_version must be at least one
      "3..2",
      ".", ".1.2",  // must have at least one digit before each dot
      "1.2.", "1.", "2.34."};  // must have at least one digit after each dot
|
2023-12-01 19:10:30 +00:00
|
|
|
for (const auto& iv : invalid_versions) {
|
RocksDB Options file format and its serialization / deserialization.
Summary:
This patch defines the format of RocksDB options file, which
follows the INI file format, and implements functions for its
serialization and deserialization. An example RocksDB options
file can be found in examples/rocksdb_option_file_example.ini.
A typical RocksDB options file has three sections, which are
Version, DBOptions, and more than one CFOptions. The RocksDB
options file in general follows the basic INI file format
with the following extensions / modifications:
* Escaped characters
We escaped the following characters:
- \n -- line feed - new line
- \r -- carriage return
- \\ -- backslash \
- \: -- colon symbol :
- \# -- hash tag #
* Comments
We support # style comments. Comments can appear at the ending
part of a line.
* Statements
A statement is of the form option_name = value.
Each statement contains a '=', where extra white-spaces
are supported. However, we don't support multi-lined statement.
Furthermore, each line can only contain at most one statement.
* Section
Sections are of the form [SecitonTitle "SectionArgument"],
where section argument is optional.
* List
We use colon-separated string to represent a list.
For instance, n1:n2:n3:n4 is a list containing four values.
Below is an example of a RocksDB options file:
[Version]
rocksdb_version=4.0.0
options_file_version=1.0
[DBOptions]
max_open_files=12345
max_background_flushes=301
[CFOptions "default"]
[CFOptions "the second column family"]
[CFOptions "the third column family"]
Test Plan: Added many tests in options_test.cc
Reviewers: igor, IslamAbdelRahman, sdong, anthony
Reviewed By: anthony
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D46059
2015-09-29 21:42:40 +00:00
|
|
|
snprintf(buffer, kLength - 1, file_template.c_str(), iv.c_str());
|
|
|
|
|
|
|
|
parser.Reset();
|
2021-01-04 23:59:52 +00:00
|
|
|
ASSERT_OK(fs_->WriteToNewFile(iv, buffer));
|
2020-02-07 23:16:29 +00:00
|
|
|
ASSERT_NOK(parser.Parse(iv, fs_.get(), false, 0 /* readahead_size */));
|
RocksDB Options file format and its serialization / deserialization.
Summary:
This patch defines the format of RocksDB options file, which
follows the INI file format, and implements functions for its
serialization and deserialization. An example RocksDB options
file can be found in examples/rocksdb_option_file_example.ini.
A typical RocksDB options file has three sections, which are
Version, DBOptions, and more than one CFOptions. The RocksDB
options file in general follows the basic INI file format
with the following extensions / modifications:
* Escaped characters
We escaped the following characters:
- \n -- line feed - new line
- \r -- carriage return
- \\ -- backslash \
- \: -- colon symbol :
- \# -- hash tag #
* Comments
We support # style comments. Comments can appear at the ending
part of a line.
* Statements
A statement is of the form option_name = value.
Each statement contains a '=', where extra white-spaces
are supported. However, we don't support multi-lined statement.
Furthermore, each line can only contain at most one statement.
* Section
Sections are of the form [SecitonTitle "SectionArgument"],
where section argument is optional.
* List
We use colon-separated string to represent a list.
For instance, n1:n2:n3:n4 is a list containing four values.
Below is an example of a RocksDB options file:
[Version]
rocksdb_version=4.0.0
options_file_version=1.0
[DBOptions]
max_open_files=12345
max_background_flushes=301
[CFOptions "default"]
[CFOptions "the second column family"]
[CFOptions "the third column family"]
Test Plan: Added many tests in options_test.cc
Reviewers: igor, IslamAbdelRahman, sdong, anthony
Reviewed By: anthony
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D46059
2015-09-29 21:42:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
  const std::vector<std::string> valid_versions = {
      "1.232", "100", "3.12", "1", "12.3 ", " 1.25 "};
  for (const auto& vv : valid_versions) {
    snprintf(buffer, kLength - 1, file_template.c_str(), vv.c_str());
    parser.Reset();
    ASSERT_OK(fs_->WriteToNewFile(vv, buffer));
    ASSERT_OK(parser.Parse(vv, fs_.get(), false, 0 /* readahead_size */));
  }
}
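
// Helper for the DumpAndParse test below. For each pointer-typed option in
// *base_cf_opt (merge operator, compaction filter (factory), table factory,
// memtable factory), it temporarily changes the option's name or resets the
// pointer to nullptr and checks that VerifyCFOptions fails until the
// original value is restored. The name changes assume the test::Changling*
// wrappers, which allow renaming via SetName().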
void VerifyCFPointerTypedOptions(
    ColumnFamilyOptions* base_cf_opt, const ColumnFamilyOptions* new_cf_opt,
    const std::unordered_map<std::string, std::string>* new_cf_opt_map) {
  std::string name_buffer;
  ConfigOptions config_options;
  config_options.input_strings_escaped = false;
  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(config_options, *base_cf_opt,
                                                  *new_cf_opt, new_cf_opt_map));

  // change the name of merge operator back-and-forth
  {
    auto* merge_operator = base_cf_opt->merge_operator
                               ->CheckedCast<test::ChanglingMergeOperator>();
    if (merge_operator != nullptr) {
      name_buffer = merge_operator->Name();
      // change the name and expect non-ok status
      merge_operator->SetName("some-other-name");
      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
      // change the name back and expect ok status
      merge_operator->SetName(name_buffer);
      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
    }
  }

  // change the name of the compaction filter factory back-and-forth
  {
    auto* compaction_filter_factory =
        base_cf_opt->compaction_filter_factory
            ->CheckedCast<test::ChanglingCompactionFilterFactory>();
    if (compaction_filter_factory != nullptr) {
      name_buffer = compaction_filter_factory->Name();
      // change the name and expect non-ok status
      compaction_filter_factory->SetName("some-other-name");
      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
      // change the name back and expect ok status
      compaction_filter_factory->SetName(name_buffer);
      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
    }
  }

  // test by setting compaction_filter to nullptr
  {
    auto* tmp_compaction_filter = base_cf_opt->compaction_filter;
    if (tmp_compaction_filter != nullptr) {
      base_cf_opt->compaction_filter = nullptr;
      // set compaction_filter to nullptr and expect non-ok status
      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
      // set the value back and expect ok status
      base_cf_opt->compaction_filter = tmp_compaction_filter;
      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
    }
  }

  // test by setting table_factory to nullptr
  {
    auto tmp_table_factory = base_cf_opt->table_factory;
    if (tmp_table_factory != nullptr) {
      base_cf_opt->table_factory.reset();
      // set table_factory to nullptr and expect non-ok status
      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
      // set the value back and expect ok status
      base_cf_opt->table_factory = tmp_table_factory;
      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
    }
  }

  // test by setting memtable_factory to nullptr
  {
    auto tmp_memtable_factory = base_cf_opt->memtable_factory;
    if (tmp_memtable_factory != nullptr) {
      base_cf_opt->memtable_factory.reset();
      // set memtable_factory to nullptr and expect non-ok status
      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
      // set the value back and expect ok status
      base_cf_opt->memtable_factory = tmp_memtable_factory;
      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
          config_options, *base_cf_opt, *new_cf_opt, new_cf_opt_map));
    }
  }
}
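
// Readahead: persist an options file larger than 1 MB (one column family
// name is a 1 MB string), then parse it with several readahead sizes while
// counting the sequential reads issued to the FileSystem.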
TEST_F(OptionsParserTest, Readahead) {
  DBOptions base_db_opt;
  std::vector<ColumnFamilyOptions> base_cf_opts;
  base_cf_opts.emplace_back();
  base_cf_opts.emplace_back();

  std::string one_mb_string = std::string(1024 * 1024, 'x');
  std::vector<std::string> cf_names = {"default", one_mb_string};
  const std::string kOptionsFileName = "test-persisted-options.ini";

  ASSERT_OK(PersistRocksDBOptions(WriteOptions(), base_db_opt, cf_names,
                                  base_cf_opts, kOptionsFileName, fs_.get()));

  uint64_t file_size = 0;
  ASSERT_OK(
      fs_->GetFileSize(kOptionsFileName, IOOptions(), &file_size, nullptr));
  assert(file_size > 0);

  RocksDBOptionsParser parser;

  fs_->num_seq_file_read_ = 0;
  size_t readahead_size = 128 * 1024;

  ASSERT_OK(parser.Parse(kOptionsFileName, fs_.get(), false, readahead_size));
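  // The file is read sequentially in readahead_size chunks, so the expected
  // number of reads is ceil(file_size / readahead_size), written in integer
  // arithmetic as (file_size - 1) / readahead_size + 1.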
  ASSERT_EQ(fs_->num_seq_file_read_.load(),
            (file_size - 1) / readahead_size + 1);

  fs_->num_seq_file_read_.store(0);
  readahead_size = 1024 * 1024;
  ASSERT_OK(parser.Parse(kOptionsFileName, fs_.get(), false, readahead_size));
  ASSERT_EQ(fs_->num_seq_file_read_.load(),
            (file_size - 1) / readahead_size + 1);

  // Tiny readahead. 8 KB is read each time.
  fs_->num_seq_file_read_.store(0);
  ASSERT_OK(
      parser.Parse(kOptionsFileName, fs_.get(), false, 1 /* readahead_size */));
  ASSERT_GE(fs_->num_seq_file_read_.load(), file_size / (8 * 1024));
  ASSERT_LT(fs_->num_seq_file_read_.load(), file_size / (8 * 1024) * 2);

  // Disable readahead means 512KB readahead.
  fs_->num_seq_file_read_.store(0);
  ASSERT_OK(
      parser.Parse(kOptionsFileName, fs_.get(), false, 0 /* readahead_size */));
  ASSERT_GE(fs_->num_seq_file_read_.load(), (file_size - 1) / (512 * 1024) + 1);
}
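
// DumpAndParse: randomize DB and CF options, persist them to an options
// file, parse the file back, and verify that both primitive and
// pointer-typed options round-trip.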
TEST_F(OptionsParserTest, DumpAndParse) {
  DBOptions base_db_opt;
  std::vector<ColumnFamilyOptions> base_cf_opts;
  std::vector<std::string> cf_names = {"default", "cf1", "cf2", "cf3",
                                       "c:f:4:4:4"
                                       "p\\i\\k\\a\\chu\\\\\\",
                                       "###rocksdb#1-testcf#2###"};
  const int num_cf = static_cast<int>(cf_names.size());
  Random rnd(302);
  test::RandomInitDBOptions(&base_db_opt, &rnd);
  base_db_opt.db_log_dir += "/#odd #but #could #happen #path #/\\\\#OMG";
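
  // Non-default sentinel values for one column family's table factory;
  // they are compared against the parsed options further below.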
  BlockBasedTableOptions special_bbto;
  special_bbto.cache_index_and_filter_blocks = true;
  special_bbto.block_size = 999999;

  for (int c = 0; c < num_cf; ++c) {
    ColumnFamilyOptions cf_opt;
    Random cf_rnd(0xFB + c);
    test::RandomInitCFOptions(&cf_opt, base_db_opt, &cf_rnd);
    if (c < 4) {
      cf_opt.prefix_extractor.reset(test::RandomSliceTransform(&rnd, c));
    }
    if (c < 3) {
      cf_opt.table_factory.reset(test::RandomTableFactory(&rnd, c));
    } else if (c == 4) {
      cf_opt.table_factory.reset(NewBlockBasedTableFactory(special_bbto));
    } else if (c == 5) {
      // A table factory that doesn't support deserialization should be
      // supported.
      cf_opt.table_factory.reset(new UnregisteredTableFactory());
    }
    base_cf_opts.emplace_back(cf_opt);
  }

  const std::string kOptionsFileName = "test-persisted-options.ini";
  // Use default for escaped(true), unknown(false) and check (exact)
  ConfigOptions config_options;
  ASSERT_OK(PersistRocksDBOptions(WriteOptions(), base_db_opt, cf_names,
                                  base_cf_opts, kOptionsFileName, fs_.get()));

  RocksDBOptionsParser parser;
  ASSERT_OK(parser.Parse(config_options, kOptionsFileName, fs_.get()));

  // Make sure block-based table factory options were deserialized correctly
  std::shared_ptr<TableFactory> ttf = (*parser.cf_opts())[4].table_factory;
  ASSERT_EQ(TableFactory::kBlockBasedTableName(), std::string(ttf->Name()));
  const auto parsed_bbto = ttf->GetOptions<BlockBasedTableOptions>();
  ASSERT_NE(parsed_bbto, nullptr);
  ASSERT_EQ(special_bbto.block_size, parsed_bbto->block_size);
  ASSERT_EQ(special_bbto.cache_index_and_filter_blocks,
            parsed_bbto->cache_index_and_filter_blocks);

  ASSERT_OK(RocksDBOptionsParser::VerifyRocksDBOptionsFromFile(
      config_options, base_db_opt, cf_names, base_cf_opts, kOptionsFileName,
      fs_.get()));

  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(
      config_options, *parser.db_opt(), base_db_opt));
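  // Check each parsed column family against its original, using the raw
  // option map the parser captured for that CF.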
  for (int c = 0; c < num_cf; ++c) {
    const auto* cf_opt = parser.GetCFOptions(cf_names[c]);
    ASSERT_NE(cf_opt, nullptr);
    ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
        config_options, base_cf_opts[c], *cf_opt,
        &(parser.cf_opt_maps()->at(c))));
  }

  // Further verify pointer-typed options
  for (int c = 0; c < num_cf; ++c) {
    const auto* cf_opt = parser.GetCFOptions(cf_names[c]);
    ASSERT_NE(cf_opt, nullptr);
    VerifyCFPointerTypedOptions(&base_cf_opts[c], cf_opt,
                                &(parser.cf_opt_maps()->at(c)));
  }

  ASSERT_EQ(parser.GetCFOptions("does not exist"), nullptr);
  base_db_opt.max_open_files++;
  ASSERT_NOK(RocksDBOptionsParser::VerifyRocksDBOptionsFromFile(
      config_options, base_db_opt, cf_names, base_cf_opts, kOptionsFileName,
      fs_.get()));

  for (int c = 0; c < num_cf; ++c) {
    if (base_cf_opts[c].compaction_filter) {
      delete base_cf_opts[c].compaction_filter;
    }
  }
}
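
// DifferentDefault: verify current defaults (e.g. compaction_pri ==
// kMinOverlappingRatio) and exercise the level-style and universal-style
// compaction presets.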
TEST_F(OptionsParserTest, DifferentDefault) {
  const std::string kOptionsFileName = "test-persisted-options.ini";

  ColumnFamilyOptions cf_level_opts;
  ASSERT_EQ(CompactionPri::kMinOverlappingRatio, cf_level_opts.compaction_pri);
  cf_level_opts.OptimizeLevelStyleCompaction();

  ColumnFamilyOptions cf_univ_opts;
  cf_univ_opts.OptimizeUniversalStyleCompaction();
|
|
|
|
|
  ASSERT_OK(PersistRocksDBOptions(
      WriteOptions(), DBOptions(), {"default", "universal"},
      {cf_level_opts, cf_univ_opts}, kOptionsFileName, fs_.get()));

  RocksDBOptionsParser parser;
  ASSERT_OK(parser.Parse(kOptionsFileName, fs_.get(), false,
                         4096 /* readahead_size */));

  {
    Options old_default_opts;
    old_default_opts.OldDefaults();
    ASSERT_EQ(10 * 1048576, old_default_opts.max_bytes_for_level_base);
    ASSERT_EQ(5000, old_default_opts.max_open_files);
    ASSERT_EQ(2 * 1024U * 1024U, old_default_opts.delayed_write_rate);
    ASSERT_EQ(WALRecoveryMode::kTolerateCorruptedTailRecords,
              old_default_opts.wal_recovery_mode);
  }
  {
    Options old_default_opts;
    old_default_opts.OldDefaults(4, 6);
    ASSERT_EQ(10 * 1048576, old_default_opts.max_bytes_for_level_base);
    ASSERT_EQ(5000, old_default_opts.max_open_files);
  }
  {
    Options old_default_opts;
    old_default_opts.OldDefaults(4, 7);
    ASSERT_NE(10 * 1048576, old_default_opts.max_bytes_for_level_base);
    ASSERT_NE(4, old_default_opts.table_cache_numshardbits);
    ASSERT_EQ(5000, old_default_opts.max_open_files);
    ASSERT_EQ(2 * 1024U * 1024U, old_default_opts.delayed_write_rate);
  }
  {
    ColumnFamilyOptions old_default_cf_opts;
    old_default_cf_opts.OldDefaults();
    ASSERT_EQ(2 * 1048576, old_default_cf_opts.target_file_size_base);
    ASSERT_EQ(4 << 20, old_default_cf_opts.write_buffer_size);
    ASSERT_EQ(2 * 1048576, old_default_cf_opts.target_file_size_base);
    ASSERT_EQ(0, old_default_cf_opts.soft_pending_compaction_bytes_limit);
    ASSERT_EQ(0, old_default_cf_opts.hard_pending_compaction_bytes_limit);
    ASSERT_EQ(CompactionPri::kByCompensatedSize,
              old_default_cf_opts.compaction_pri);
  }
  {
    ColumnFamilyOptions old_default_cf_opts;
    old_default_cf_opts.OldDefaults(4, 6);
    ASSERT_EQ(2 * 1048576, old_default_cf_opts.target_file_size_base);
    ASSERT_EQ(CompactionPri::kByCompensatedSize,
              old_default_cf_opts.compaction_pri);
  }
  {
    ColumnFamilyOptions old_default_cf_opts;
    old_default_cf_opts.OldDefaults(4, 7);
    ASSERT_NE(2 * 1048576, old_default_cf_opts.target_file_size_base);
    ASSERT_EQ(CompactionPri::kByCompensatedSize,
              old_default_cf_opts.compaction_pri);
  }
  {
    Options old_default_opts;
    old_default_opts.OldDefaults(5, 1);
    ASSERT_EQ(2 * 1024U * 1024U, old_default_opts.delayed_write_rate);
  }
  {
    Options old_default_opts;
    old_default_opts.OldDefaults(5, 2);
    ASSERT_EQ(16 * 1024U * 1024U, old_default_opts.delayed_write_rate);
    ASSERT_TRUE(old_default_opts.compaction_pri ==
                CompactionPri::kByCompensatedSize);
  }
  {
    Options old_default_opts;
    old_default_opts.OldDefaults(5, 18);
    ASSERT_TRUE(old_default_opts.compaction_pri ==
                CompactionPri::kByCompensatedSize);
  }

  Options small_opts;
  small_opts.OptimizeForSmallDb();
  ASSERT_EQ(2 << 20, small_opts.write_buffer_size);
  ASSERT_EQ(5000, small_opts.max_open_files);
}

class OptionsSanityCheckTest : public OptionsParserTest,
                               public ::testing::WithParamInterface<bool> {
 protected:
  ConfigOptions config_options_;

 public:
  OptionsSanityCheckTest() {
    config_options_.ignore_unknown_options = false;
    config_options_.ignore_unsupported_options = GetParam();
    config_options_.input_strings_escaped = true;
  }

 protected:
  Status SanityCheckOptions(const DBOptions& db_opts,
                            const ColumnFamilyOptions& cf_opts,
                            ConfigOptions::SanityLevel level) {
    config_options_.sanity_level = level;
    return RocksDBOptionsParser::VerifyRocksDBOptionsFromFile(
        config_options_, db_opts, {"default"}, {cf_opts}, kOptionsFileName,
        fs_.get());
  }

  Status SanityCheckCFOptions(const ColumnFamilyOptions& cf_opts,
                              ConfigOptions::SanityLevel level) {
    return SanityCheckOptions(DBOptions(), cf_opts, level);
  }

  void SanityCheckCFOptions(const ColumnFamilyOptions& opts, bool exact) {
    ASSERT_OK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));
    if (exact) {
      ASSERT_OK(
          SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    } else {
      ASSERT_NOK(
          SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    }
  }

  Status SanityCheckDBOptions(const DBOptions& db_opts,
                              ConfigOptions::SanityLevel level) {
    return SanityCheckOptions(db_opts, ColumnFamilyOptions(), level);
  }

  void SanityCheckDBOptions(const DBOptions& opts, bool exact) {
    ASSERT_OK(SanityCheckDBOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckDBOptions(opts, ConfigOptions::kSanityLevelNone));
    if (exact) {
      ASSERT_OK(
          SanityCheckDBOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    } else {
      ASSERT_NOK(
          SanityCheckDBOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    }
  }

  Status PersistOptions(const DBOptions& db_opts,
                        const ColumnFamilyOptions& cf_opts) {
    Status s = fs_->DeleteFile(kOptionsFileName, IOOptions(), nullptr);
    if (!s.ok()) {
      return s;
    }
    return PersistRocksDBOptions(WriteOptions(), db_opts, {"default"},
                                 {cf_opts}, kOptionsFileName, fs_.get());
  }

  Status PersistCFOptions(const ColumnFamilyOptions& cf_opts) {
    return PersistOptions(DBOptions(), cf_opts);
  }

  Status PersistDBOptions(const DBOptions& db_opts) {
    return PersistOptions(db_opts, ColumnFamilyOptions());
  }

  const std::string kOptionsFileName = "OPTIONS";
};
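
// The TEST_P cases below persist options to kOptionsFileName and then verify
// them at the three sanity levels (None, LooselyCompatible, ExactMatch); the
// bool parameter toggles config_options_.ignore_unsupported_options.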

TEST_P(OptionsSanityCheckTest, MergeOperatorErrorMessage) {
  ColumnFamilyOptions opts;
  Random rnd(301);
  opts.merge_operator.reset(test::RandomMergeOperator(&rnd));
  std::string merge_op_name = opts.merge_operator->Name();
  ASSERT_OK(PersistCFOptions(opts));

  // Test when going from merge operator -> nullptr
  opts.merge_operator = nullptr;
  Status s =
      SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelLooselyCompatible);
  ASSERT_TRUE(s.IsInvalidArgument());
  std::string err_msg = s.ToString();
  std::string specified = "The specified one is " + kNullptrString;
  std::string persisted = "the persisted one is " + merge_op_name;
  ASSERT_TRUE(err_msg.find(specified) != std::string::npos);
  ASSERT_TRUE(err_msg.find(persisted) != std::string::npos);

  // Test when using a different merge operator
  opts.merge_operator.reset(test::RandomMergeOperator(&rnd));
  s = SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelLooselyCompatible);
  ASSERT_TRUE(s.IsInvalidArgument());
  err_msg = s.ToString();
  specified =
      "The specified one is " + std::string(opts.merge_operator->Name());
  persisted = "the persisted one is " + merge_op_name;
  ASSERT_TRUE(err_msg.find(specified) != std::string::npos);
  ASSERT_TRUE(err_msg.find(persisted) != std::string::npos);
}
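
// CFOptionsSanityCheck walks through the pointer-typed CF options
// (prefix_extractor, table_factory, merge_operator, and the compaction
// filters) and checks which changes each sanity level tolerates.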
TEST_P(OptionsSanityCheckTest, CFOptionsSanityCheck) {
  ColumnFamilyOptions opts;
  Random rnd(301);

  // default ColumnFamilyOptions
  {
    ASSERT_OK(PersistCFOptions(opts));
    ASSERT_OK(
        SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));
  }

  // prefix_extractor
  {
    // Okay to change prefix_extractor from nullptr to non-nullptr
    ASSERT_EQ(opts.prefix_extractor.get(), nullptr);
    opts.prefix_extractor.reset(NewCappedPrefixTransform(10));
    ASSERT_OK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

    // persist the change
    ASSERT_OK(PersistCFOptions(opts));
    ASSERT_OK(
        SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));

    // use same prefix extractor but with different parameter
    opts.prefix_extractor.reset(NewCappedPrefixTransform(15));
    // expect pass only in ConfigOptions::kSanityLevelLooselyCompatible
    ASSERT_NOK(
        SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    ASSERT_OK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

    // repeat the test with FixedPrefixTransform
    opts.prefix_extractor.reset(NewFixedPrefixTransform(10));
    ASSERT_NOK(
        SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    ASSERT_OK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

    // persist the change of prefix_extractor
    ASSERT_OK(PersistCFOptions(opts));
    ASSERT_OK(
        SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));

    // use same prefix extractor but with different parameter
    opts.prefix_extractor.reset(NewFixedPrefixTransform(15));
    // expect pass only in ConfigOptions::kSanityLevelLooselyCompatible
    SanityCheckCFOptions(opts, false);

    // Change prefix extractor from non-nullptr to nullptr
    opts.prefix_extractor.reset();
    // expect pass as it's safe to change prefix_extractor
    // from non-null to null
    ASSERT_OK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));
  }
  // persist the change
  ASSERT_OK(PersistCFOptions(opts));
  ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));

  // table_factory
  {
    for (int tb = 0; tb <= 2; ++tb) {
      // change the table factory
      opts.table_factory.reset(test::RandomTableFactory(&rnd, tb));
      ASSERT_NOK(SanityCheckCFOptions(
          opts, ConfigOptions::kSanityLevelLooselyCompatible));
      ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

      // persist the change
      ASSERT_OK(PersistCFOptions(opts));
      ASSERT_OK(
          SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));
    }
  }

  // merge_operator
  {
    // Test when going from nullptr -> merge operator
    opts.merge_operator.reset(test::RandomMergeOperator(&rnd));
    ASSERT_OK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

    // persist the change
    ASSERT_OK(PersistCFOptions(opts));
    SanityCheckCFOptions(opts, config_options_.ignore_unsupported_options);

    for (int test = 0; test < 5; ++test) {
      // change the merge operator
      opts.merge_operator.reset(test::RandomMergeOperator(&rnd));
      ASSERT_NOK(SanityCheckCFOptions(
          opts, ConfigOptions::kSanityLevelLooselyCompatible));
      ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

      // persist the change
      ASSERT_OK(PersistCFOptions(opts));
      SanityCheckCFOptions(opts, config_options_.ignore_unsupported_options);
    }

    // Test when going from merge operator -> nullptr
    opts.merge_operator = nullptr;
    ASSERT_NOK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_OK(SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelNone));

    // persist the change
    ASSERT_OK(PersistCFOptions(opts));
    SanityCheckCFOptions(opts, true);
  }

  // compaction_filter
  {
    for (int test = 0; test < 5; ++test) {
      // change the compaction filter
      opts.compaction_filter = test::RandomCompactionFilter(&rnd);
      SanityCheckCFOptions(opts, false);

      // persist the change
      ASSERT_OK(PersistCFOptions(opts));
      SanityCheckCFOptions(opts, config_options_.ignore_unsupported_options);
      delete opts.compaction_filter;
      opts.compaction_filter = nullptr;
    }
  }

  // compaction_filter_factory
  {
    for (int test = 0; test < 5; ++test) {
      // change the compaction filter factory
      opts.compaction_filter_factory.reset(
          test::RandomCompactionFilterFactory(&rnd));
      SanityCheckCFOptions(opts, false);

      // persist the change
      ASSERT_OK(PersistCFOptions(opts));
      SanityCheckCFOptions(opts, config_options_.ignore_unsupported_options);
    }
  }

  // persist_user_defined_timestamps
  {
    // Test change from true to false not allowed in loose and exact mode.
    opts.persist_user_defined_timestamps = false;
    ASSERT_NOK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_NOK(
        SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));

    // persist the change
    ASSERT_OK(PersistCFOptions(opts));
    SanityCheckCFOptions(opts, config_options_.ignore_unsupported_options);

    // Test change from false to true not allowed in loose and exact mode.
    opts.persist_user_defined_timestamps = true;
    ASSERT_NOK(SanityCheckCFOptions(
        opts, ConfigOptions::kSanityLevelLooselyCompatible));
    ASSERT_NOK(
        SanityCheckCFOptions(opts, ConfigOptions::kSanityLevelExactMatch));

    // persist the change
    ASSERT_OK(PersistCFOptions(opts));
  }
}
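
// DBOptionsSanityCheck covers the DB-level pointer-typed option
// file_checksum_gen_factory, using a mock factory defined inline below.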
TEST_P(OptionsSanityCheckTest, DBOptionsSanityCheck) {
  DBOptions opts;
  Random rnd(301);

  // default DBOptions
  {
    ASSERT_OK(PersistDBOptions(opts));
    ASSERT_OK(
        SanityCheckDBOptions(opts, ConfigOptions::kSanityLevelExactMatch));
  }

  // File checksum generator
  {
    class MockFileChecksumGenFactory : public FileChecksumGenFactory {
     public:
      static const char* kClassName() { return "Mock"; }
      const char* Name() const override { return kClassName(); }
      std::unique_ptr<FileChecksumGenerator> CreateFileChecksumGenerator(
          const FileChecksumGenContext& /*context*/) override {
        return nullptr;
      }
    };

    // Okay to change file_checksum_gen_factory from nullptr to non-nullptr
    ASSERT_EQ(opts.file_checksum_gen_factory.get(), nullptr);
    opts.file_checksum_gen_factory.reset(new MockFileChecksumGenFactory());

    // persist the change
    ASSERT_OK(PersistDBOptions(opts));
    SanityCheckDBOptions(opts, config_options_.ignore_unsupported_options);

    // Change file_checksum_gen_factory from non-nullptr to nullptr
    opts.file_checksum_gen_factory.reset();
    // expect pass as it's safe to change file_checksum_gen_factory
    // from non-null to null
    SanityCheckDBOptions(opts, false);
  }
  // persist the change
  ASSERT_OK(PersistDBOptions(opts));
  ASSERT_OK(SanityCheckDBOptions(opts, ConfigOptions::kSanityLevelExactMatch));
}

namespace {
bool IsEscapedString(const std::string& str) {
  for (size_t i = 0; i < str.size(); ++i) {
    if (str[i] == '\\') {
      // Since two consecutive '\'s are handled in the next if-then branch,
      // a '\' appearing at the end of an escaped string is not valid.
      if (i == str.size() - 1) {
        return false;
      }
      if (str[i + 1] == '\\') {
        // if there are two consecutive '\'s, skip the second one.
        i++;
        continue;
      }
      switch (str[i + 1]) {
        case ':':
        case '\\':
        case '#':
          continue;
        default:
          // if true, '\' together with str[i + 1] is not a valid escape.
          if (UnescapeChar(str[i + 1]) == str[i + 1]) {
            return false;
          }
      }
    } else if (isSpecialChar(str[i]) && (i == 0 || str[i - 1] != '\\')) {
      return false;
    }
  }
  return true;
}
}  // namespace
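
// IsEscapedString (above) accepts a string only if every special character is
// preceded by a backslash and every backslash starts a valid escape; for
// example, "a\:b\#c" passes while an unescaped ':' or a trailing lone '\'
// fails.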

TEST_F(OptionsParserTest, IntegerParsing) {
  ASSERT_EQ(ParseUint64("18446744073709551615"), 18446744073709551615U);
  ASSERT_EQ(ParseUint32("4294967295"), 4294967295U);
  ASSERT_EQ(ParseSizeT("18446744073709551615"), 18446744073709551615U);
  ASSERT_EQ(ParseInt64("9223372036854775807"), 9223372036854775807);
  ASSERT_EQ(ParseInt64("-9223372036854775808"),
            std::numeric_limits<int64_t>::min());
  ASSERT_EQ(ParseInt32("2147483647"), 2147483647);
  ASSERT_EQ(ParseInt32("-2147483648"), std::numeric_limits<int32_t>::min());
  ASSERT_EQ(ParseInt("-32767"), -32767);
  ASSERT_EQ(ParseDouble("-1.234567"), -1.234567);
}

TEST_F(OptionsParserTest, EscapeOptionString) {
  ASSERT_EQ(UnescapeOptionString(
                "This is a test string with \\# \\: and \\\\ escape chars."),
            "This is a test string with # : and \\ escape chars.");

  ASSERT_EQ(
      EscapeOptionString("This is a test string with # : and \\ escape chars."),
      "This is a test string with \\# \\: and \\\\ escape chars.");

  std::string readable_chars =
      "A String like this \"1234567890-=_)(*&^%$#@!ertyuiop[]{POIU"
      "YTREWQasdfghjkl;':LKJHGFDSAzxcvbnm,.?>"
      "<MNBVCXZ\\\" should be okay to \\#\\\\\\:\\#\\#\\#\\ "
      "be serialized and deserialized";

  std::string escaped_string = EscapeOptionString(readable_chars);
  ASSERT_TRUE(IsEscapedString(escaped_string));
  // These two transformations should cancel out and give back
  // the original input.
  ASSERT_EQ(UnescapeOptionString(escaped_string), readable_chars);

  std::string all_chars;
  for (unsigned char c = 0;; ++c) {
    all_chars += c;
    if (c == 255) {
      break;
    }
  }
  escaped_string = EscapeOptionString(all_chars);
  ASSERT_TRUE(IsEscapedString(escaped_string));
  ASSERT_EQ(UnescapeOptionString(escaped_string), all_chars);

  ASSERT_EQ(RocksDBOptionsParser::TrimAndRemoveComment(
                " A simple statement with a comment. # like this :)"),
            "A simple statement with a comment.");

  ASSERT_EQ(RocksDBOptionsParser::TrimAndRemoveComment(
                "Escape \\# and # comment together ."),
            "Escape \\# and");
}

static void TestAndCompareOption(const ConfigOptions& config_options,
                                 const OptionTypeInfo& opt_info,
                                 const std::string& opt_name, void* base_ptr,
                                 void* comp_ptr, bool strip = false) {
  std::string result, mismatch;
  ASSERT_OK(opt_info.Serialize(config_options, opt_name, base_ptr, &result));
  if (strip) {
    ASSERT_EQ(result.at(0), '{');
    ASSERT_EQ(result.at(result.size() - 1), '}');
    result = result.substr(1, result.size() - 2);
  }
  ASSERT_OK(opt_info.Parse(config_options, opt_name, result, comp_ptr));
  ASSERT_TRUE(opt_info.AreEqual(config_options, opt_name, base_ptr, comp_ptr,
                                &mismatch));
}

static void TestParseAndCompareOption(const ConfigOptions& config_options,
                                      const OptionTypeInfo& opt_info,
                                      const std::string& opt_name,
                                      const std::string& opt_value,
                                      void* base_ptr, void* comp_ptr,
                                      bool strip = false) {
  ASSERT_OK(opt_info.Parse(config_options, opt_name, opt_value, base_ptr));
  TestAndCompareOption(config_options, opt_info, opt_name, base_ptr, comp_ptr,
                       strip);
}
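
// Both helpers drive a full Serialize() -> Parse() round trip through an
// OptionTypeInfo and assert that the original and the copy compare equal;
// `strip` additionally peels one layer of '{...}' braces off the serialized
// form before reparsing.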

template <typename T>
void TestOptInfo(const ConfigOptions& config_options, OptionType opt_type,
                 T* base, T* comp) {
  std::string result;
  OptionTypeInfo opt_info(0, opt_type);
  ASSERT_FALSE(opt_info.AreEqual(config_options, "base", base, comp, &result));
  ASSERT_EQ(result, "base");
  ASSERT_NE(*base, *comp);
  TestAndCompareOption(config_options, opt_info, "base", base, comp);
  ASSERT_EQ(*base, *comp);
}

class OptionTypeInfoTest : public testing::Test {};
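
// BasicTypes runs the TestOptInfo round trip over every scalar OptionType:
// each pair starts out unequal and must compare equal after the value is
// copied through Serialize() and Parse().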
TEST_F(OptionTypeInfoTest, BasicTypes) {
  ConfigOptions config_options;
  {
    bool a = true, b = false;
    TestOptInfo(config_options, OptionType::kBoolean, &a, &b);
  }
  {
    int a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kInt, &a, &b);
  }
  {
    int32_t a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kInt32T, &a, &b);
  }
  {
    int64_t a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kInt64T, &a, &b);
  }
  {
    unsigned int a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kUInt, &a, &b);
  }
  {
    uint32_t a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kUInt32T, &a, &b);
  }
  {
    uint64_t a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kUInt64T, &a, &b);
  }
  {
    size_t a = 100, b = 200;
    TestOptInfo(config_options, OptionType::kSizeT, &a, &b);
  }
  {
    std::string a = "100", b = "200";
    TestOptInfo(config_options, OptionType::kString, &a, &b);
  }
  {
    double a = 1.0, b = 2.0;
    TestOptInfo(config_options, OptionType::kDouble, &a, &b);
  }
}

TEST_F(OptionTypeInfoTest, TestInvalidArgs) {
  ConfigOptions config_options;
  bool b;
  int i;
  int32_t i32;
  int64_t i64;
  unsigned int u;
  uint32_t u32;
  uint64_t u64;
  size_t sz;
  double d;

  ASSERT_NOK(OptionTypeInfo(0, OptionType::kBoolean)
                 .Parse(config_options, "b", "x", &b));
  ASSERT_NOK(
      OptionTypeInfo(0, OptionType::kInt).Parse(config_options, "b", "x", &i));
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kInt32T)
                 .Parse(config_options, "b", "x", &i32));
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kInt64T)
                 .Parse(config_options, "b", "x", &i64));
  ASSERT_NOK(
      OptionTypeInfo(0, OptionType::kUInt).Parse(config_options, "b", "x", &u));
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kUInt32T)
                 .Parse(config_options, "b", "x", &u32));
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kUInt64T)
                 .Parse(config_options, "b", "x", &u64));
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kSizeT)
                 .Parse(config_options, "b", "x", &sz));
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kDouble)
                 .Parse(config_options, "b", "x", &d));

  // Don't know how to convert Unknowns to anything else
  ASSERT_NOK(OptionTypeInfo(0, OptionType::kUnknown)
                 .Parse(config_options, "b", "x", &d));

  // Verify that if the parse function throws an exception, it is also trapped
  OptionTypeInfo func_info(0, OptionType::kUnknown,
                           OptionVerificationType::kNormal,
                           OptionTypeFlags::kNone,
                           [](const ConfigOptions&, const std::string&,
                              const std::string& value, void* addr) {
                             auto ptr = static_cast<int*>(addr);
                             *ptr = ParseInt(value);
                             return Status::OK();
                           });
  ASSERT_OK(func_info.Parse(config_options, "b", "1", &i));
  ASSERT_NOK(func_info.Parse(config_options, "b", "x", &i));
}
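
// Note that ParseInt() reports bad input by throwing; the lambda above relies
// on OptionTypeInfo::Parse() trapping that exception and converting it into a
// non-OK Status.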

TEST_F(OptionTypeInfoTest, TestParseFunc) {
  OptionTypeInfo opt_info(0, OptionType::kUnknown,
                          OptionVerificationType::kNormal,
                          OptionTypeFlags::kNone);
  opt_info.SetParseFunc([](const ConfigOptions& /*opts*/,
                           const std::string& name, const std::string& value,
                           void* addr) {
    auto ptr = static_cast<std::string*>(addr);
    if (name == "Oops") {
      return Status::InvalidArgument(value);
    } else {
      *ptr = value + " " + name;
      return Status::OK();
    }
  });
  ConfigOptions config_options;
  std::string base;
  ASSERT_OK(opt_info.Parse(config_options, "World", "Hello", &base));
  ASSERT_EQ(base, "Hello World");
  ASSERT_NOK(opt_info.Parse(config_options, "Oops", "Hello", &base));
}

TEST_F(OptionTypeInfoTest, TestSerializeFunc) {
  OptionTypeInfo opt_info(0, OptionType::kString,
                          OptionVerificationType::kNormal,
                          OptionTypeFlags::kNone);
  opt_info.SetSerializeFunc([](const ConfigOptions& /*opts*/,
                               const std::string& name, const void* /*addr*/,
                               std::string* value) {
    if (name == "Oops") {
      return Status::InvalidArgument(name);
    } else {
      *value = name;
      return Status::OK();
    }
  });
  ConfigOptions config_options;
  std::string base;
  std::string value;
  ASSERT_OK(opt_info.Serialize(config_options, "Hello", &base, &value));
  ASSERT_EQ(value, "Hello");
  ASSERT_NOK(opt_info.Serialize(config_options, "Oops", &base, &value));
}

TEST_F(OptionTypeInfoTest, TestEqualsFunc) {
  OptionTypeInfo opt_info(0, OptionType::kInt, OptionVerificationType::kNormal,
                          OptionTypeFlags::kNone);
  opt_info.SetEqualsFunc([](const ConfigOptions& /*opts*/,
                            const std::string& name, const void* addr1,
                            const void* addr2, std::string* mismatch) {
    auto i1 = *(static_cast<const int*>(addr1));
    auto i2 = *(static_cast<const int*>(addr2));
    if (name == "LT") {
      return i1 < i2;
    } else if (name == "GT") {
      return i1 > i2;
    } else if (name == "EQ") {
      return i1 == i2;
    } else {
      *mismatch = name + "???";
      return false;
    }
  });

  ConfigOptions config_options;
  int int1 = 100;
  int int2 = 200;
  std::string mismatch;
  ASSERT_TRUE(opt_info.AreEqual(config_options, "LT", &int1, &int2, &mismatch));
  ASSERT_EQ(mismatch, "");
  ASSERT_FALSE(
      opt_info.AreEqual(config_options, "GT", &int1, &int2, &mismatch));
  ASSERT_EQ(mismatch, "GT");
  ASSERT_FALSE(
      opt_info.AreEqual(config_options, "NO", &int1, &int2, &mismatch));
  ASSERT_EQ(mismatch, "NO???");
}

TEST_F(OptionTypeInfoTest, TestPrepareFunc) {
  OptionTypeInfo opt_info(0, OptionType::kInt, OptionVerificationType::kNormal,
                          OptionTypeFlags::kNone);
  opt_info.SetPrepareFunc(
      [](const ConfigOptions& /*opts*/, const std::string& name, void* addr) {
        auto i1 = static_cast<int*>(addr);
        if (name == "x2") {
          *i1 *= 2;
        } else if (name == "/2") {
          *i1 /= 2;
        } else {
          return Status::InvalidArgument("Bad Argument", name);
        }
        return Status::OK();
      });
  ConfigOptions config_options;
  int int1 = 100;
  ASSERT_OK(opt_info.Prepare(config_options, "x2", &int1));
  ASSERT_EQ(int1, 200);
  ASSERT_OK(opt_info.Prepare(config_options, "/2", &int1));
  ASSERT_EQ(int1, 100);
  ASSERT_NOK(opt_info.Prepare(config_options, "??", &int1));
  ASSERT_EQ(int1, 100);
}

TEST_F(OptionTypeInfoTest, TestValidateFunc) {
  OptionTypeInfo opt_info(0, OptionType::kSizeT,
                          OptionVerificationType::kNormal,
                          OptionTypeFlags::kNone);
  opt_info.SetValidateFunc([](const DBOptions& db_opts,
                              const ColumnFamilyOptions& cf_opts,
                              const std::string& name, const void* addr) {
    const auto sz = static_cast<const size_t*>(addr);
    bool is_valid = false;
    if (name == "keep_log_file_num") {
      is_valid = (*sz == db_opts.keep_log_file_num);
    } else if (name == "write_buffer_size") {
      is_valid = (*sz == cf_opts.write_buffer_size);
    }
    if (is_valid) {
      return Status::OK();
    } else {
      return Status::InvalidArgument("Mismatched value", name);
    }
  });
  ConfigOptions config_options;
  DBOptions db_options;
  ColumnFamilyOptions cf_options;

  ASSERT_OK(opt_info.Validate(db_options, cf_options, "keep_log_file_num",
                              &db_options.keep_log_file_num));
  ASSERT_OK(opt_info.Validate(db_options, cf_options, "write_buffer_size",
                              &cf_options.write_buffer_size));
  ASSERT_NOK(opt_info.Validate(db_options, cf_options, "keep_log_file_num",
                               &cf_options.write_buffer_size));
  ASSERT_NOK(opt_info.Validate(db_options, cf_options, "write_buffer_size",
                               &db_options.keep_log_file_num));
}
|
|
|
|
|
2020-04-29 01:02:11 +00:00
|
|
|
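// Checks the behavior of the special flags and verification types
// (kDontSerialize, kCompareNever, kAlias, kDeprecated) on Parse, Serialize,
// and AreEqual.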
TEST_F(OptionTypeInfoTest, TestOptionFlags) {
  OptionTypeInfo opt_none(0, OptionType::kString,
                          OptionVerificationType::kNormal,
                          OptionTypeFlags::kDontSerialize);
  OptionTypeInfo opt_never(0, OptionType::kString,
                           OptionVerificationType::kNormal,
                           OptionTypeFlags::kCompareNever);
  OptionTypeInfo opt_alias(0, OptionType::kString,
                           OptionVerificationType::kAlias,
                           OptionTypeFlags::kNone);
  OptionTypeInfo opt_deprecated(0, OptionType::kString,
                                OptionVerificationType::kDeprecated,
                                OptionTypeFlags::kNone);
  ConfigOptions config_options;
  std::string opts_str;
  std::string base = "base";
  std::string comp = "comp";

  // If marked kDontSerialize, the serialization returns NotSupported
  ASSERT_NOK(opt_none.Serialize(config_options, "None", &base, &opts_str));
  // If marked kCompareNever, the values match even when they do not
  ASSERT_TRUE(opt_never.AreEqual(config_options, "Never", &base, &comp, &base));
  ASSERT_FALSE(opt_none.AreEqual(config_options, "Never", &base, &comp, &base));

  // An alias can change the value via parse, but does nothing on serialize or
  // match
  std::string result;
  ASSERT_OK(opt_alias.Parse(config_options, "Alias", "Alias", &base));
  ASSERT_OK(opt_alias.Serialize(config_options, "Alias", &base, &result));
  ASSERT_TRUE(
      opt_alias.AreEqual(config_options, "Alias", &base, &comp, &result));
  ASSERT_EQ(base, "Alias");
  ASSERT_NE(base, comp);

  // Deprecated options do nothing on any of the commands
  ASSERT_OK(opt_deprecated.Parse(config_options, "Alias", "Deprecated", &base));
  ASSERT_OK(opt_deprecated.Serialize(config_options, "Alias", &base, &result));
  ASSERT_TRUE(
      opt_deprecated.AreEqual(config_options, "Alias", &base, &comp, &result));
  ASSERT_EQ(base, "Alias");
  ASSERT_NE(base, comp);
}

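// Exercises OptionTypeInfo::Enum with a user-defined enum: the string map
// drives both parsing and serialization, and unmapped names fail to parse.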
TEST_F(OptionTypeInfoTest, TestCustomEnum) {
  enum TestEnum { kA, kB, kC };
  std::unordered_map<std::string, TestEnum> enum_map = {
      {"A", TestEnum::kA},
      {"B", TestEnum::kB},
      {"C", TestEnum::kC},
  };
  OptionTypeInfo opt_info = OptionTypeInfo::Enum<TestEnum>(0, &enum_map);
  TestEnum e1, e2;
  ConfigOptions config_options;
  std::string result, mismatch;

  e2 = TestEnum::kA;

  ASSERT_OK(opt_info.Parse(config_options, "", "B", &e1));
  ASSERT_OK(opt_info.Serialize(config_options, "", &e1, &result));
  ASSERT_EQ(e1, TestEnum::kB);
  ASSERT_EQ(result, "B");

  ASSERT_FALSE(opt_info.AreEqual(config_options, "Enum", &e1, &e2, &mismatch));
  ASSERT_EQ(mismatch, "Enum");

  TestParseAndCompareOption(config_options, opt_info, "", "C", &e1, &e2);
  ASSERT_EQ(e2, TestEnum::kC);

  ASSERT_NOK(opt_info.Parse(config_options, "", "D", &e1));
  ASSERT_EQ(e1, TestEnum::kC);
}

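// Round-trips every entry of the built-in enum string maps (compaction style,
// compaction pri, compression, stop style, checksum, encoding) through
// parse-then-serialize.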
TEST_F(OptionTypeInfoTest, TestBuiltinEnum) {
  ConfigOptions config_options;
  for (const auto& iter : OptionsHelper::compaction_style_string_map) {
    CompactionStyle e1, e2;
    TestParseAndCompareOption(config_options,
                              OptionTypeInfo(0, OptionType::kCompactionStyle),
                              "CompactionStyle", iter.first, &e1, &e2);
    ASSERT_EQ(e1, iter.second);
  }
  for (const auto& iter : OptionsHelper::compaction_pri_string_map) {
    CompactionPri e1, e2;
    TestParseAndCompareOption(config_options,
                              OptionTypeInfo(0, OptionType::kCompactionPri),
                              "CompactionPri", iter.first, &e1, &e2);
    ASSERT_EQ(e1, iter.second);
  }
  for (const auto& iter : OptionsHelper::compression_type_string_map) {
    CompressionType e1, e2;
    TestParseAndCompareOption(config_options,
                              OptionTypeInfo(0, OptionType::kCompressionType),
                              "CompressionType", iter.first, &e1, &e2);
    ASSERT_EQ(e1, iter.second);
  }
  for (const auto& iter : OptionsHelper::compaction_stop_style_string_map) {
    CompactionStopStyle e1, e2;
    TestParseAndCompareOption(
        config_options, OptionTypeInfo(0, OptionType::kCompactionStopStyle),
        "CompactionStopStyle", iter.first, &e1, &e2);
    ASSERT_EQ(e1, iter.second);
  }
  for (const auto& iter : OptionsHelper::checksum_type_string_map) {
    ChecksumType e1, e2;
    TestParseAndCompareOption(config_options,
                              OptionTypeInfo(0, OptionType::kChecksumType),
                              "CheckSumType", iter.first, &e1, &e2);
    ASSERT_EQ(e1, iter.second);
  }
  for (const auto& iter : OptionsHelper::encoding_type_string_map) {
    EncodingType e1, e2;
    TestParseAndCompareOption(config_options,
                              OptionTypeInfo(0, OptionType::kEncodingType),
                              "EncodingType", iter.first, &e1, &e2);
    ASSERT_EQ(e1, iter.second);
  }
}

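// Exercises OptionTypeInfo::Struct, including a struct nested inside another:
// fields can be addressed as a whole ("b"), qualified ("b.i"), or bare ("i"),
// and unknown fields fail to parse.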
TEST_F(OptionTypeInfoTest, TestStruct) {
  struct Basic {
    int i = 42;
    std::string s = "Hello";
  };

  struct Extended {
    int j = 11;
    Basic b;
  };

  std::unordered_map<std::string, OptionTypeInfo> basic_type_map = {
      {"i", {offsetof(struct Basic, i), OptionType::kInt}},
      {"s", {offsetof(struct Basic, s), OptionType::kString}},
  };
  OptionTypeInfo basic_info = OptionTypeInfo::Struct(
      "b", &basic_type_map, 0, OptionVerificationType::kNormal,
      OptionTypeFlags::kMutable);

  std::unordered_map<std::string, OptionTypeInfo> extended_type_map = {
      {"j", {offsetof(struct Extended, j), OptionType::kInt}},
      {"b", OptionTypeInfo::Struct(
                "b", &basic_type_map, offsetof(struct Extended, b),
                OptionVerificationType::kNormal, OptionTypeFlags::kNone)},
      {"m", OptionTypeInfo::Struct(
                "m", &basic_type_map, offsetof(struct Extended, b),
                OptionVerificationType::kNormal, OptionTypeFlags::kMutable)},
  };
  OptionTypeInfo extended_info = OptionTypeInfo::Struct(
      "e", &extended_type_map, 0, OptionVerificationType::kNormal,
      OptionTypeFlags::kMutable);
  Extended e1, e2;
  ConfigOptions config_options;
  std::string mismatch;
  TestParseAndCompareOption(config_options, basic_info, "b", "{i=33;s=33}",
                            &e1.b, &e2.b);
  ASSERT_EQ(e1.b.i, 33);
  ASSERT_EQ(e1.b.s, "33");

  TestParseAndCompareOption(config_options, basic_info, "b.i", "44", &e1.b,
                            &e2.b);
  ASSERT_EQ(e1.b.i, 44);

  TestParseAndCompareOption(config_options, basic_info, "i", "55", &e1.b,
                            &e2.b);
  ASSERT_EQ(e1.b.i, 55);

  e1.b.i = 0;

  ASSERT_FALSE(
      basic_info.AreEqual(config_options, "b", &e1.b, &e2.b, &mismatch));
  ASSERT_EQ(mismatch, "b.i");
  mismatch.clear();
  ASSERT_FALSE(
      basic_info.AreEqual(config_options, "b.i", &e1.b, &e2.b, &mismatch));
  ASSERT_EQ(mismatch, "b.i");
  mismatch.clear();
  ASSERT_FALSE(
      basic_info.AreEqual(config_options, "i", &e1.b, &e2.b, &mismatch));
  ASSERT_EQ(mismatch, "b.i");
  mismatch.clear();

  e1 = e2;
  ASSERT_NOK(basic_info.Parse(config_options, "b", "{i=33;s=33;j=44}", &e1.b));
  ASSERT_NOK(basic_info.Parse(config_options, "b.j", "44", &e1.b));
  ASSERT_NOK(basic_info.Parse(config_options, "j", "44", &e1.b));

  TestParseAndCompareOption(config_options, extended_info, "e",
                            "b={i=55;s=55}; j=22;", &e1, &e2);
  ASSERT_EQ(e1.b.i, 55);
  ASSERT_EQ(e1.j, 22);
  ASSERT_EQ(e1.b.s, "55");
  TestParseAndCompareOption(config_options, extended_info, "e.b",
                            "{i=66;s=66;}", &e1, &e2);
  ASSERT_EQ(e1.b.i, 66);
  ASSERT_EQ(e1.j, 22);
  ASSERT_EQ(e1.b.s, "66");
  TestParseAndCompareOption(config_options, extended_info, "e.b.i", "77", &e1,
                            &e2);
  ASSERT_EQ(e1.b.i, 77);
  ASSERT_EQ(e1.j, 22);
  ASSERT_EQ(e1.b.s, "66");
}

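// Exercises OptionTypeInfo::Array: fixed-size arrays parse from a separated
// string, support custom separators and brackets, and reject input with the
// wrong element count.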
TEST_F(OptionTypeInfoTest, TestArrayType) {
  OptionTypeInfo array_info = OptionTypeInfo::Array<std::string, 4>(
      0, OptionVerificationType::kNormal, OptionTypeFlags::kNone,
      {0, OptionType::kString});
  std::array<std::string, 4> array1, array2;
  std::string mismatch;

  ConfigOptions config_options;
  TestParseAndCompareOption(config_options, array_info, "v", "a:b:c:d", &array1,
                            &array2);

  ASSERT_EQ(array1.size(), 4);
  ASSERT_EQ(array1[0], "a");
  ASSERT_EQ(array1[1], "b");
  ASSERT_EQ(array1[2], "c");
  ASSERT_EQ(array1[3], "d");
  array1[3] = "e";
  ASSERT_FALSE(
      array_info.AreEqual(config_options, "v", &array1, &array2, &mismatch));
  ASSERT_EQ(mismatch, "v");

  // Test arrays with inner brackets
  TestParseAndCompareOption(config_options, array_info, "v", "a:{b}:c:d",
                            &array1, &array2);
  ASSERT_EQ(array1.size(), 4);
  ASSERT_EQ(array1[0], "a");
  ASSERT_EQ(array1[1], "b");
  ASSERT_EQ(array1[2], "c");
  ASSERT_EQ(array1[3], "d");

  std::array<std::string, 3> array3, array4;
  OptionTypeInfo bar_info = OptionTypeInfo::Array<std::string, 3>(
      0, OptionVerificationType::kNormal, OptionTypeFlags::kNone,
      {0, OptionType::kString}, '|');
  TestParseAndCompareOption(config_options, bar_info, "v", "x|y|z", &array3,
                            &array4);

  // Test arrays with an inner array
  TestParseAndCompareOption(config_options, bar_info, "v",
                            "a|{b1|b2}|{c1|c2|{d1|d2}}", &array3, &array4,
                            false);
  ASSERT_EQ(array3.size(), 3);
  ASSERT_EQ(array3[0], "a");
  ASSERT_EQ(array3[1], "b1|b2");
  ASSERT_EQ(array3[2], "c1|c2|{d1|d2}");

  TestParseAndCompareOption(config_options, bar_info, "v",
                            "{a1|a2}|{b1|{c1|c2}}|d1", &array3, &array4, true);
  ASSERT_EQ(array3.size(), 3);
  ASSERT_EQ(array3[0], "a1|a2");
  ASSERT_EQ(array3[1], "b1|{c1|c2}");
  ASSERT_EQ(array3[2], "d1");

  // Test invalid input: fewer elements than requested
  auto s = bar_info.Parse(config_options, "opt_name1", "a1|a2", &array3);
  ASSERT_TRUE(s.IsInvalidArgument());

  // Test invalid input: more elements than requested
  s = bar_info.Parse(config_options, "opt_name2", "a1|b|c1|d3", &array3);
  ASSERT_TRUE(s.IsInvalidArgument());
}

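// Exercises OptionTypeInfo::Vector: like the array case, but the element
// count is determined by the input rather than fixed by the type.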
TEST_F(OptionTypeInfoTest, TestVectorType) {
  OptionTypeInfo vec_info = OptionTypeInfo::Vector<std::string>(
      0, OptionVerificationType::kNormal, OptionTypeFlags::kNone,
      {0, OptionType::kString});
  std::vector<std::string> vec1, vec2;
  std::string mismatch;

  ConfigOptions config_options;
  TestParseAndCompareOption(config_options, vec_info, "v", "a:b:c:d", &vec1,
                            &vec2);
  ASSERT_EQ(vec1.size(), 4);
  ASSERT_EQ(vec1[0], "a");
  ASSERT_EQ(vec1[1], "b");
  ASSERT_EQ(vec1[2], "c");
  ASSERT_EQ(vec1[3], "d");
  vec1[3] = "e";
  ASSERT_FALSE(vec_info.AreEqual(config_options, "v", &vec1, &vec2, &mismatch));
  ASSERT_EQ(mismatch, "v");

  // Test vectors with inner brackets
  TestParseAndCompareOption(config_options, vec_info, "v", "a:{b}:c:d", &vec1,
                            &vec2);
  ASSERT_EQ(vec1.size(), 4);
  ASSERT_EQ(vec1[0], "a");
  ASSERT_EQ(vec1[1], "b");
  ASSERT_EQ(vec1[2], "c");
  ASSERT_EQ(vec1[3], "d");

  OptionTypeInfo bar_info = OptionTypeInfo::Vector<std::string>(
      0, OptionVerificationType::kNormal, OptionTypeFlags::kNone,
      {0, OptionType::kString}, '|');
  TestParseAndCompareOption(config_options, vec_info, "v", "x|y|z", &vec1,
                            &vec2);
  // Test vectors with an inner vector
  TestParseAndCompareOption(config_options, bar_info, "v",
                            "a|{b1|b2}|{c1|c2|{d1|d2}}", &vec1, &vec2, false);
  ASSERT_EQ(vec1.size(), 3);
  ASSERT_EQ(vec1[0], "a");
  ASSERT_EQ(vec1[1], "b1|b2");
  ASSERT_EQ(vec1[2], "c1|c2|{d1|d2}");

  TestParseAndCompareOption(config_options, bar_info, "v",
                            "{a1|a2}|{b1|{c1|c2}}|d1", &vec1, &vec2, true);
  ASSERT_EQ(vec1.size(), 3);
  ASSERT_EQ(vec1[0], "a1|a2");
  ASSERT_EQ(vec1[1], "b1|{c1|c2}");
  ASSERT_EQ(vec1[2], "d1");

  TestParseAndCompareOption(config_options, bar_info, "v", "{a1}", &vec1, &vec2,
                            false);
  ASSERT_EQ(vec1.size(), 1);
  ASSERT_EQ(vec1[0], "a1");

  TestParseAndCompareOption(config_options, bar_info, "v", "{a1|a2}|{b1|b2}",
                            &vec1, &vec2, true);
  ASSERT_EQ(vec1.size(), 2);
  ASSERT_EQ(vec1[0], "a1|a2");
  ASSERT_EQ(vec1[1], "b1|b2");
}

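// Exercises the static helpers SerializeType/ParseType/TypesAreEqual, which
// operate over an entire map of OptionTypeInfo entries at once.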
TEST_F(OptionTypeInfoTest, TestStaticType) {
  struct SimpleOptions {
    size_t size = 0;
    bool verify = true;
  };

  static std::unordered_map<std::string, OptionTypeInfo> type_map = {
      {"size", {offsetof(struct SimpleOptions, size), OptionType::kSizeT}},
      {"verify",
       {offsetof(struct SimpleOptions, verify), OptionType::kBoolean}},
  };

  ConfigOptions config_options;
  SimpleOptions opts, copy;
  opts.size = 12345;
  opts.verify = false;
  std::string str, mismatch;

  ASSERT_OK(
      OptionTypeInfo::SerializeType(config_options, type_map, &opts, &str));
  ASSERT_FALSE(OptionTypeInfo::TypesAreEqual(config_options, type_map, &opts,
                                             &copy, &mismatch));
  ASSERT_OK(OptionTypeInfo::ParseType(config_options, str, type_map, &copy));
  ASSERT_TRUE(OptionTypeInfo::TypesAreEqual(config_options, type_map, &opts,
                                            &copy, &mismatch));
}

class ConfigOptionsTest : public testing::Test {};

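// Checks that an Env is resolved through the ConfigOptions registry, and that
// an unknown "env=" value is either ignored or rejected (depending on
// ignore_unsupported_options) without clobbering the existing Env.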
TEST_F(ConfigOptionsTest, EnvFromConfigOptions) {
  ConfigOptions config_options;
  DBOptions db_opts;
  Options opts;
  Env* mem_env = NewMemEnv(Env::Default());
  config_options.registry->AddLibrary("custom-env", RegisterCustomEnv,
                                      kCustomEnvName);

  config_options.env = mem_env;
  // First test that we can get the env as expected
  ASSERT_OK(GetDBOptionsFromString(config_options, DBOptions(), kCustomEnvProp,
                                   &db_opts));
  ASSERT_OK(
      GetOptionsFromString(config_options, Options(), kCustomEnvProp, &opts));
  ASSERT_NE(config_options.env, db_opts.env);
  ASSERT_EQ(opts.env, db_opts.env);
  Env* custom_env = db_opts.env;

  // Now try a "bad" env while ignoring unsupported options and check that
  // nothing changed
  config_options.ignore_unsupported_options = true;
  ASSERT_OK(
      GetDBOptionsFromString(config_options, db_opts, "env=unknown", &db_opts));
  ASSERT_OK(GetOptionsFromString(config_options, opts, "env=unknown", &opts));
  ASSERT_EQ(config_options.env, mem_env);
  ASSERT_EQ(db_opts.env, custom_env);
  ASSERT_EQ(opts.env, db_opts.env);

  // Now try a "bad" env without ignoring unsupported options and check that
  // the parse fails and nothing changed
  config_options.ignore_unsupported_options = false;
  ASSERT_NOK(
      GetDBOptionsFromString(config_options, db_opts, "env=unknown", &db_opts));
  ASSERT_EQ(config_options.env, mem_env);
  ASSERT_EQ(db_opts.env, custom_env);
  ASSERT_EQ(opts.env, db_opts.env);

  delete mem_env;
}

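// Checks MergeOperator::CreateFromString for each built-in operator id and
// alias, for "id=...; delimiter=..." option strings, and for a
// ToString/CreateFromString round trip.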
TEST_F(ConfigOptionsTest, MergeOperatorFromString) {
  ConfigOptions config_options;
  std::shared_ptr<MergeOperator> merge_op;

  ASSERT_OK(MergeOperator::CreateFromString(config_options, "put", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("put"));
  ASSERT_STREQ(merge_op->Name(), "PutOperator");

  ASSERT_OK(
      MergeOperator::CreateFromString(config_options, "put_v1", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("PutOperator"));

  ASSERT_OK(
      MergeOperator::CreateFromString(config_options, "uint64add", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("uint64add"));
  ASSERT_STREQ(merge_op->Name(), "UInt64AddOperator");

  ASSERT_OK(MergeOperator::CreateFromString(config_options, "max", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("max"));
  ASSERT_STREQ(merge_op->Name(), "MaxOperator");

  ASSERT_OK(
      MergeOperator::CreateFromString(config_options, "bytesxor", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("bytesxor"));
  ASSERT_STREQ(merge_op->Name(), BytesXOROperator::kClassName());

  ASSERT_OK(
      MergeOperator::CreateFromString(config_options, "sortlist", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("sortlist"));
  ASSERT_STREQ(merge_op->Name(), SortList::kClassName());

  ASSERT_OK(MergeOperator::CreateFromString(config_options, "stringappend",
                                            &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("stringappend"));
  ASSERT_STREQ(merge_op->Name(), StringAppendOperator::kClassName());
  auto delimiter = merge_op->GetOptions<std::string>("Delimiter");
  ASSERT_NE(delimiter, nullptr);
  ASSERT_EQ(*delimiter, ",");

  ASSERT_OK(MergeOperator::CreateFromString(config_options, "stringappendtest",
                                            &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("stringappendtest"));
  ASSERT_STREQ(merge_op->Name(), StringAppendTESTOperator::kClassName());
  delimiter = merge_op->GetOptions<std::string>("Delimiter");
  ASSERT_NE(delimiter, nullptr);
  ASSERT_EQ(*delimiter, ",");

  ASSERT_OK(MergeOperator::CreateFromString(
      config_options, "id=stringappend; delimiter=||", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("stringappend"));
  ASSERT_STREQ(merge_op->Name(), StringAppendOperator::kClassName());
  delimiter = merge_op->GetOptions<std::string>("Delimiter");
  ASSERT_NE(delimiter, nullptr);
  ASSERT_EQ(*delimiter, "||");

  ASSERT_OK(MergeOperator::CreateFromString(
      config_options, "id=stringappendtest; delimiter=&&", &merge_op));
  ASSERT_NE(merge_op, nullptr);
  ASSERT_TRUE(merge_op->IsInstanceOf("stringappendtest"));
  ASSERT_STREQ(merge_op->Name(), StringAppendTESTOperator::kClassName());
  delimiter = merge_op->GetOptions<std::string>("Delimiter");
  ASSERT_NE(delimiter, nullptr);
  ASSERT_EQ(*delimiter, "&&");

  std::shared_ptr<MergeOperator> copy;
  std::string mismatch;
  std::string opts_str = merge_op->ToString(config_options);

  ASSERT_OK(MergeOperator::CreateFromString(config_options, opts_str, &copy));
  ASSERT_NE(copy, nullptr);
  ASSERT_TRUE(merge_op->AreEquivalent(config_options, copy.get(), &mismatch));
  delimiter = copy->GetOptions<std::string>("Delimiter");
  ASSERT_NE(delimiter, nullptr);
  ASSERT_EQ(*delimiter, "&&");
}

TEST_F(ConfigOptionsTest, ConfiguringOptionsDoesNotRevertRateLimiterBandwidth) {
  // Regression test for bug where rate limiter's dynamically set bandwidth
  // could be silently reverted when configuring an options structure with an
  // existing `rate_limiter`.
  Options base_options;
  base_options.rate_limiter.reset(
      NewGenericRateLimiter(1 << 20 /* rate_bytes_per_sec */));
  Options copy_options(base_options);

  base_options.rate_limiter->SetBytesPerSecond(2 << 20);
  ASSERT_EQ(2 << 20, base_options.rate_limiter->GetBytesPerSecond());

  ASSERT_OK(GetOptionsFromString(base_options, "", &copy_options));
  ASSERT_EQ(2 << 20, base_options.rate_limiter->GetBytesPerSecond());
}

INSTANTIATE_TEST_CASE_P(OptionsSanityCheckTest, OptionsSanityCheckTest,
                        ::testing::Bool());

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
#ifdef GFLAGS
  ParseCommandLineFlags(&argc, &argv, true);
#endif  // GFLAGS
  return RUN_ALL_TESTS();
}