// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
#include "rocksdb/utilities/ldb_cmd.h"

#include <cstddef>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <functional>
#include <iostream>
#include <limits>
#include <sstream>
#include <stdexcept>
#include <string>

#include "db/blob/blob_index.h"
#include "db/db_impl/db_impl.h"
#include "db/dbformat.h"
#include "db/log_reader.h"
#include "db/version_util.h"
#include "db/wide/wide_column_serialization.h"
#include "db/wide/wide_columns_helper.h"
#include "db/write_batch_internal.h"
#include "file/filename.h"
#include "rocksdb/cache.h"
#include "rocksdb/experimental.h"
#include "rocksdb/file_checksum.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/table_properties.h"
#include "rocksdb/utilities/backup_engine.h"
#include "rocksdb/utilities/checkpoint.h"
#include "rocksdb/utilities/debug.h"
#include "rocksdb/utilities/options_util.h"
#include "rocksdb/write_batch.h"
#include "rocksdb/write_buffer_manager.h"
#include "table/scoped_arena_iterator.h"
#include "table/sst_file_dumper.h"
#include "tools/ldb_cmd_impl.h"
#include "util/cast_util.h"
#include "util/coding.h"
#include "util/file_checksum_helper.h"
#include "util/stderr_logger.h"
#include "util/string_util.h"
#include "utilities/blob_db/blob_dump_tool.h"
#include "utilities/merge_operators.h"
#include "utilities/ttl/db_ttl_impl.h"

namespace ROCKSDB_NAMESPACE {

class FileChecksumGenCrc32c;
class FileChecksumGenCrc32cFactory;

const std::string LDBCommand::ARG_ENV_URI = "env_uri";
const std::string LDBCommand::ARG_FS_URI = "fs_uri";
const std::string LDBCommand::ARG_DB = "db";
const std::string LDBCommand::ARG_PATH = "path";
const std::string LDBCommand::ARG_SECONDARY_PATH = "secondary_path";
const std::string LDBCommand::ARG_HEX = "hex";
const std::string LDBCommand::ARG_KEY_HEX = "key_hex";
const std::string LDBCommand::ARG_VALUE_HEX = "value_hex";
const std::string LDBCommand::ARG_CF_NAME = "column_family";
const std::string LDBCommand::ARG_TTL = "ttl";
const std::string LDBCommand::ARG_TTL_START = "start_time";
const std::string LDBCommand::ARG_TTL_END = "end_time";
const std::string LDBCommand::ARG_TIMESTAMP = "timestamp";
const std::string LDBCommand::ARG_TRY_LOAD_OPTIONS = "try_load_options";
const std::string LDBCommand::ARG_DISABLE_CONSISTENCY_CHECKS =
    "disable_consistency_checks";
const std::string LDBCommand::ARG_IGNORE_UNKNOWN_OPTIONS =
    "ignore_unknown_options";
const std::string LDBCommand::ARG_FROM = "from";
const std::string LDBCommand::ARG_TO = "to";
const std::string LDBCommand::ARG_MAX_KEYS = "max_keys";
const std::string LDBCommand::ARG_BLOOM_BITS = "bloom_bits";
const std::string LDBCommand::ARG_FIX_PREFIX_LEN = "fix_prefix_len";
const std::string LDBCommand::ARG_COMPRESSION_TYPE = "compression_type";
const std::string LDBCommand::ARG_COMPRESSION_MAX_DICT_BYTES =
    "compression_max_dict_bytes";
const std::string LDBCommand::ARG_BLOCK_SIZE = "block_size";
const std::string LDBCommand::ARG_AUTO_COMPACTION = "auto_compaction";
const std::string LDBCommand::ARG_DB_WRITE_BUFFER_SIZE = "db_write_buffer_size";
const std::string LDBCommand::ARG_WRITE_BUFFER_SIZE = "write_buffer_size";
const std::string LDBCommand::ARG_FILE_SIZE = "file_size";
const std::string LDBCommand::ARG_CREATE_IF_MISSING = "create_if_missing";
const std::string LDBCommand::ARG_NO_VALUE = "no_value";
const std::string LDBCommand::ARG_ENABLE_BLOB_FILES = "enable_blob_files";
const std::string LDBCommand::ARG_MIN_BLOB_SIZE = "min_blob_size";
const std::string LDBCommand::ARG_BLOB_FILE_SIZE = "blob_file_size";
const std::string LDBCommand::ARG_BLOB_COMPRESSION_TYPE =
    "blob_compression_type";
const std::string LDBCommand::ARG_ENABLE_BLOB_GARBAGE_COLLECTION =
    "enable_blob_garbage_collection";
const std::string LDBCommand::ARG_BLOB_GARBAGE_COLLECTION_AGE_CUTOFF =
    "blob_garbage_collection_age_cutoff";
const std::string LDBCommand::ARG_BLOB_GARBAGE_COLLECTION_FORCE_THRESHOLD =
    "blob_garbage_collection_force_threshold";
const std::string LDBCommand::ARG_BLOB_COMPACTION_READAHEAD_SIZE =
    "blob_compaction_readahead_size";
const std::string LDBCommand::ARG_BLOB_FILE_STARTING_LEVEL =
    "blob_file_starting_level";
const std::string LDBCommand::ARG_PREPOPULATE_BLOB_CACHE =
    "prepopulate_blob_cache";
const std::string LDBCommand::ARG_DECODE_BLOB_INDEX = "decode_blob_index";
const std::string LDBCommand::ARG_DUMP_UNCOMPRESSED_BLOBS =
    "dump_uncompressed_blobs";
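
// Separator printed between keys and values in "dump" style output,
// e.g. "key1 ==> value1".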
const char* LDBCommand::DELIM = " ==> ";
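
// Forward declarations of helpers for dumping WAL, SST, and blob files.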
namespace {

void DumpWalFile(Options options, std::string wal_file, bool print_header,
                 bool print_values, bool is_write_committed,
                 LDBCommandExecuteResult* exec_state);

void DumpSstFile(Options options, std::string filename, bool output_hex,
                 bool show_properties, bool decode_blob_index,
                 std::string from_key = "", std::string to_key = "");

void DumpBlobFile(const std::string& filename, bool is_key_hex,
                  bool is_value_hex, bool dump_uncompressed_blobs);

}  // namespace

LDBCommand* LDBCommand::InitFromCmdLineArgs(
    int argc, char const* const* argv, const Options& options,
    const LDBOptions& ldb_options,
    const std::vector<ColumnFamilyDescriptor>* column_families) {
  std::vector<std::string> args;
  for (int i = 1; i < argc; i++) {
    args.push_back(argv[i]);
  }
  return InitFromCmdLineArgs(args, options, ldb_options, column_families,
                             SelectCommand);
}

/**
 * Parse the command-line arguments and create the appropriate LDBCommand
 * instance.
 * The command line arguments must be in the following format:
 * ./ldb --db=PATH_TO_DB [--commonOpt1=commonOpt1Val] ..
 *        COMMAND <PARAM1> <PARAM2> ... [-cmdSpecificOpt1=cmdSpecificOpt1Val] ..
 * This is similar to the command line format used by HBaseClientTool.
 * Command name is not included in args.
 * Returns nullptr if the command-line cannot be parsed.
 */
LDBCommand* LDBCommand::InitFromCmdLineArgs(
    const std::vector<std::string>& args, const Options& options,
    const LDBOptions& ldb_options,
    const std::vector<ColumnFamilyDescriptor>* /*column_families*/,
    const std::function<LDBCommand*(const ParsedParams&)>& selector) {
  // --x=y command line arguments are added as x->y map entries in
  // parsed_params.option_map.
  //
  // Command-line arguments of the form --hex are added as entries
  // (e.g. "hex") to parsed_params.flags.
  ParsedParams parsed_params;

  // Everything other than option_map and flags. Represents commands
  // and their parameters. For example, "put key1 value1" goes into this
  // vector.
  std::vector<std::string> cmdTokens;

  const std::string OPTION_PREFIX = "--";

  for (const auto& arg : args) {
    if (arg[0] == '-' && arg[1] == '-') {
      std::vector<std::string> splits = StringSplit(arg, '=');
      // --option_name=option_value
      if (splits.size() == 2) {
        std::string optionKey = splits[0].substr(OPTION_PREFIX.size());
        parsed_params.option_map[optionKey] = splits[1];
      } else if (splits.size() == 1) {
        // --flag_name
        std::string optionKey = splits[0].substr(OPTION_PREFIX.size());
        parsed_params.flags.push_back(optionKey);
      } else {
        // --option_name=option_value, option_value contains '='
        std::string optionKey = splits[0].substr(OPTION_PREFIX.size());
        parsed_params.option_map[optionKey] =
            arg.substr(splits[0].length() + 1);
      }
    } else {
      cmdTokens.push_back(arg);
    }
  }

  if (cmdTokens.size() < 1) {
    fprintf(stderr, "Command not specified!\n");
    return nullptr;
  }

  parsed_params.cmd = cmdTokens[0];
  parsed_params.cmd_params.assign(cmdTokens.begin() + 1, cmdTokens.end());

  LDBCommand* command = selector(parsed_params);

  if (command) {
    command->SetDBOptions(options);
    command->SetLDBOptions(ldb_options);
  }
  return command;
}
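
// Maps the command name from the parsed command line to a newly allocated
// command object; returns nullptr if the command is not recognized.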
LDBCommand* LDBCommand::SelectCommand(const ParsedParams& parsed_params) {
  if (parsed_params.cmd == GetCommand::Name()) {
    return new GetCommand(parsed_params.cmd_params, parsed_params.option_map,
                          parsed_params.flags);
  } else if (parsed_params.cmd == GetEntityCommand::Name()) {
    return new GetEntityCommand(parsed_params.cmd_params,
                                parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == PutCommand::Name()) {
    return new PutCommand(parsed_params.cmd_params, parsed_params.option_map,
                          parsed_params.flags);
  } else if (parsed_params.cmd == PutEntityCommand::Name()) {
    return new PutEntityCommand(parsed_params.cmd_params,
                                parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == BatchPutCommand::Name()) {
    return new BatchPutCommand(parsed_params.cmd_params,
                               parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == ScanCommand::Name()) {
    return new ScanCommand(parsed_params.cmd_params, parsed_params.option_map,
                           parsed_params.flags);
  } else if (parsed_params.cmd == DeleteCommand::Name()) {
    return new DeleteCommand(parsed_params.cmd_params,
                             parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == SingleDeleteCommand::Name()) {
    return new SingleDeleteCommand(parsed_params.cmd_params,
                                   parsed_params.option_map,
                                   parsed_params.flags);
  } else if (parsed_params.cmd == DeleteRangeCommand::Name()) {
    return new DeleteRangeCommand(parsed_params.cmd_params,
                                  parsed_params.option_map,
                                  parsed_params.flags);
  } else if (parsed_params.cmd == ApproxSizeCommand::Name()) {
    return new ApproxSizeCommand(parsed_params.cmd_params,
                                 parsed_params.option_map,
                                 parsed_params.flags);
  } else if (parsed_params.cmd == DBQuerierCommand::Name()) {
    return new DBQuerierCommand(parsed_params.cmd_params,
                                parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == CompactorCommand::Name()) {
    return new CompactorCommand(parsed_params.cmd_params,
                                parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == WALDumperCommand::Name()) {
    return new WALDumperCommand(parsed_params.cmd_params,
                                parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == ReduceDBLevelsCommand::Name()) {
    return new ReduceDBLevelsCommand(parsed_params.cmd_params,
                                     parsed_params.option_map,
                                     parsed_params.flags);
  } else if (parsed_params.cmd == ChangeCompactionStyleCommand::Name()) {
    return new ChangeCompactionStyleCommand(parsed_params.cmd_params,
                                            parsed_params.option_map,
                                            parsed_params.flags);
  } else if (parsed_params.cmd == DBDumperCommand::Name()) {
    return new DBDumperCommand(parsed_params.cmd_params,
                               parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == DBLoaderCommand::Name()) {
    return new DBLoaderCommand(parsed_params.cmd_params,
                               parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == ManifestDumpCommand::Name()) {
    return new ManifestDumpCommand(parsed_params.cmd_params,
                                   parsed_params.option_map,
                                   parsed_params.flags);
  } else if (parsed_params.cmd == FileChecksumDumpCommand::Name()) {
    return new FileChecksumDumpCommand(parsed_params.cmd_params,
                                       parsed_params.option_map,
                                       parsed_params.flags);
  } else if (parsed_params.cmd == GetPropertyCommand::Name()) {
    return new GetPropertyCommand(parsed_params.cmd_params,
                                  parsed_params.option_map,
                                  parsed_params.flags);
  } else if (parsed_params.cmd == ListColumnFamiliesCommand::Name()) {
    return new ListColumnFamiliesCommand(parsed_params.cmd_params,
                                         parsed_params.option_map,
                                         parsed_params.flags);
  } else if (parsed_params.cmd == CreateColumnFamilyCommand::Name()) {
    return new CreateColumnFamilyCommand(parsed_params.cmd_params,
                                         parsed_params.option_map,
                                         parsed_params.flags);
  } else if (parsed_params.cmd == DropColumnFamilyCommand::Name()) {
    return new DropColumnFamilyCommand(parsed_params.cmd_params,
                                       parsed_params.option_map,
                                       parsed_params.flags);
  } else if (parsed_params.cmd == DBFileDumperCommand::Name()) {
    return new DBFileDumperCommand(parsed_params.cmd_params,
                                   parsed_params.option_map,
                                   parsed_params.flags);
  } else if (parsed_params.cmd == DBLiveFilesMetadataDumperCommand::Name()) {
    return new DBLiveFilesMetadataDumperCommand(parsed_params.cmd_params,
                                                parsed_params.option_map,
                                                parsed_params.flags);
  } else if (parsed_params.cmd == InternalDumpCommand::Name()) {
    return new InternalDumpCommand(parsed_params.cmd_params,
                                   parsed_params.option_map,
                                   parsed_params.flags);
  } else if (parsed_params.cmd == CheckConsistencyCommand::Name()) {
    return new CheckConsistencyCommand(parsed_params.cmd_params,
                                       parsed_params.option_map,
                                       parsed_params.flags);
  } else if (parsed_params.cmd == CheckPointCommand::Name()) {
    return new CheckPointCommand(parsed_params.cmd_params,
                                 parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == RepairCommand::Name()) {
    return new RepairCommand(parsed_params.cmd_params, parsed_params.option_map,
                             parsed_params.flags);
  } else if (parsed_params.cmd == BackupCommand::Name()) {
    return new BackupCommand(parsed_params.cmd_params, parsed_params.option_map,
                             parsed_params.flags);
  } else if (parsed_params.cmd == RestoreCommand::Name()) {
    return new RestoreCommand(parsed_params.cmd_params,
                              parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == WriteExternalSstFilesCommand::Name()) {
    return new WriteExternalSstFilesCommand(parsed_params.cmd_params,
                                            parsed_params.option_map,
                                            parsed_params.flags);
  } else if (parsed_params.cmd == IngestExternalSstFilesCommand::Name()) {
    return new IngestExternalSstFilesCommand(parsed_params.cmd_params,
                                             parsed_params.option_map,
                                             parsed_params.flags);
  } else if (parsed_params.cmd == ListFileRangeDeletesCommand::Name()) {
    return new ListFileRangeDeletesCommand(parsed_params.option_map,
                                           parsed_params.flags);
  } else if (parsed_params.cmd == UnsafeRemoveSstFileCommand::Name()) {
    return new UnsafeRemoveSstFileCommand(parsed_params.cmd_params,
                                          parsed_params.option_map,
                                          parsed_params.flags);
  } else if (parsed_params.cmd == UpdateManifestCommand::Name()) {
    return new UpdateManifestCommand(parsed_params.cmd_params,
                                     parsed_params.option_map,
                                     parsed_params.flags);
  }
  return nullptr;
}
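
// Typical lifecycle: a command object is created via InitFromCmdLineArgs()
// and executed with Run(). For example (a sketch, assuming a test DB at
// /tmp/test_db):
//   ./ldb --db=/tmp/test_db scan
//   ./ldb dump_wal --walfile=/tmp/test_db/000078.log --header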
/* Run the command, and return the execute result. */
void LDBCommand::Run() {
  if (!exec_state_.IsNotStarted()) {
    return;
  }

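  // Honor --env_uri / --fs_uri by constructing a custom Env before any DB or
  // file access; otherwise the default Env is used.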
  if (!options_.env || options_.env == Env::Default()) {
    Env* env = Env::Default();
    Status s = Env::CreateFromUri(config_options_, env_uri_, fs_uri_, &env,
                                  &env_guard_);
    if (!s.ok()) {
      fprintf(stderr, "%s\n", s.ToString().c_str());
      exec_state_ = LDBCommandExecuteResult::Failed(s.ToString());
      return;
    }
    options_.env = env;
  }

  if (db_ == nullptr && !NoDBOpen()) {
    OpenDB();
    if (exec_state_.IsFailed() && try_load_options_) {
      // We don't always return if there is a failure because a WAL file or
      // manifest file can be given to "dump" command so we should continue.
      // --try_load_options is not valid in those cases.
      return;
    }
  }

  // We'll intentionally proceed even if the DB can't be opened because users
  // can also specify a filename, not just a directory.
  DoCommand();

  if (exec_state_.IsNotStarted()) {
    exec_state_ = LDBCommandExecuteResult::Succeed("");
  }

  if (db_ != nullptr) {
    CloseDB();
  }
}

LDBCommand::LDBCommand(const std::map<std::string, std::string>& options,
                       const std::vector<std::string>& flags, bool is_read_only,
                       const std::vector<std::string>& valid_cmd_line_options)
    : db_(nullptr),
      db_ttl_(nullptr),
      is_read_only_(is_read_only),
      is_key_hex_(false),
      is_value_hex_(false),
      is_db_ttl_(false),
      timestamp_(false),
      try_load_options_(false),
      create_if_missing_(false),
      option_map_(options),
      flags_(flags),
      valid_cmd_line_options_(valid_cmd_line_options) {
  auto itr = options.find(ARG_DB);
  if (itr != options.end()) {
    db_path_ = itr->second;
  }

  itr = options.find(ARG_ENV_URI);
  if (itr != options.end()) {
    env_uri_ = itr->second;
  }

  itr = options.find(ARG_FS_URI);
  if (itr != options.end()) {
    fs_uri_ = itr->second;
  }

  itr = options.find(ARG_CF_NAME);
  if (itr != options.end()) {
    column_family_name_ = itr->second;
  } else {
    column_family_name_ = kDefaultColumnFamilyName;
  }

  itr = options.find(ARG_SECONDARY_PATH);
  secondary_path_ = "";
  if (itr != options.end()) {
    secondary_path_ = itr->second;
  }

  is_key_hex_ = IsKeyHex(options, flags);
  is_value_hex_ = IsValueHex(options, flags);
  is_db_ttl_ = IsFlagPresent(flags, ARG_TTL);
  timestamp_ = IsFlagPresent(flags, ARG_TIMESTAMP);
  try_load_options_ = IsTryLoadOptions(options, flags);
  force_consistency_checks_ =
      !IsFlagPresent(flags, ARG_DISABLE_CONSISTENCY_CHECKS);
  enable_blob_files_ = IsFlagPresent(flags, ARG_ENABLE_BLOB_FILES);
  enable_blob_garbage_collection_ =
      IsFlagPresent(flags, ARG_ENABLE_BLOB_GARBAGE_COLLECTION);
  config_options_.ignore_unknown_options =
      IsFlagPresent(flags, ARG_IGNORE_UNKNOWN_OPTIONS);
}

void LDBCommand::OpenDB() {
  PrepareOptions();
  if (!exec_state_.IsNotStarted()) {
    return;
  }
  if (column_families_.empty() && !options_.merge_operator) {
    // No harm to add a general merge operator if it is not specified.
    options_.merge_operator = MergeOperators::CreateStringAppendOperator(':');
  }
  // Open the DB.
  Status st;
  std::vector<ColumnFamilyHandle*> handles_opened;
  if (is_db_ttl_) {
    // ldb doesn't yet support TTL DB with multiple column families
    if (!column_family_name_.empty() || !column_families_.empty()) {
      exec_state_ = LDBCommandExecuteResult::Failed(
          "ldb doesn't support TTL DB with multiple column families");
    }
    if (!secondary_path_.empty()) {
      exec_state_ = LDBCommandExecuteResult::Failed(
          "Open as secondary is not supported for TTL DB yet.");
    }
    if (is_read_only_) {
      st = DBWithTTL::Open(options_, db_path_, &db_ttl_, 0, true);
    } else {
      st = DBWithTTL::Open(options_, db_path_, &db_ttl_);
    }
    db_ = db_ttl_;
  } else {
    if (is_read_only_ && secondary_path_.empty()) {
      if (column_families_.empty()) {
        st = DB::OpenForReadOnly(options_, db_path_, &db_);
      } else {
        st = DB::OpenForReadOnly(options_, db_path_, column_families_,
                                 &handles_opened, &db_);
      }
    } else {
      if (column_families_.empty()) {
        if (secondary_path_.empty()) {
          st = DB::Open(options_, db_path_, &db_);
        } else {
          st = DB::OpenAsSecondary(options_, db_path_, secondary_path_, &db_);
        }
      } else {
        if (secondary_path_.empty()) {
          st = DB::Open(options_, db_path_, column_families_, &handles_opened,
                        &db_);
        } else {
          st = DB::OpenAsSecondary(options_, db_path_, secondary_path_,
                                   column_families_, &handles_opened, &db_);
        }
      }
    }
  }
  if (!st.ok()) {
    std::string msg = st.ToString();
    exec_state_ = LDBCommandExecuteResult::Failed(msg);
  } else if (!handles_opened.empty()) {
    assert(handles_opened.size() == column_families_.size());
    bool found_cf_name = false;
    for (size_t i = 0; i < handles_opened.size(); i++) {
      cf_handles_[column_families_[i].name] = handles_opened[i];
      if (column_family_name_ == column_families_[i].name) {
        found_cf_name = true;
      }
    }
    if (!found_cf_name) {
      exec_state_ = LDBCommandExecuteResult::Failed(
          "Non-existing column family " + column_family_name_);
      CloseDB();
    }
  } else {
    // We successfully opened DB in single column family mode.
    assert(column_families_.empty());
    if (column_family_name_ != kDefaultColumnFamilyName) {
      exec_state_ = LDBCommandExecuteResult::Failed(
          "Non-existing column family " + column_family_name_);
      CloseDB();
    }
  }
}

void LDBCommand::CloseDB() {
  if (db_ != nullptr) {
    for (auto& pair : cf_handles_) {
      delete pair.second;
    }
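    // Close the DB explicitly so resources such as directory file descriptors
    // are released now rather than when the process exits.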
    Status s = db_->Close();
    s.PermitUncheckedError();
    delete db_;
    db_ = nullptr;
  }
}
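
// Returns the handle for the column family selected with --column_family, or
// the default column family handle if no explicit handles were opened.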
ColumnFamilyHandle* LDBCommand::GetCfHandle() {
  if (!cf_handles_.empty()) {
    auto it = cf_handles_.find(column_family_name_);
    if (it == cf_handles_.end()) {
      exec_state_ = LDBCommandExecuteResult::Failed(
          "Cannot find column family " + column_family_name_);
    } else {
      return it->second;
    }
  }
  return db_->DefaultColumnFamily();
}
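
// Command-line options common to all ldb commands; command-specific options
// passed in by subclasses are appended to this base list.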
std::vector<std::string> LDBCommand::BuildCmdLineOptions(
    std::vector<std::string> options) {
  std::vector<std::string> ret = {ARG_ENV_URI,
                                  ARG_FS_URI,
                                  ARG_DB,
                                  ARG_SECONDARY_PATH,
                                  ARG_BLOOM_BITS,
                                  ARG_BLOCK_SIZE,
                                  ARG_AUTO_COMPACTION,
                                  ARG_COMPRESSION_TYPE,
                                  ARG_COMPRESSION_MAX_DICT_BYTES,
                                  ARG_WRITE_BUFFER_SIZE,
                                  ARG_FILE_SIZE,
                                  ARG_FIX_PREFIX_LEN,
                                  ARG_TRY_LOAD_OPTIONS,
                                  ARG_DISABLE_CONSISTENCY_CHECKS,
                                  ARG_ENABLE_BLOB_FILES,
                                  ARG_MIN_BLOB_SIZE,
                                  ARG_BLOB_FILE_SIZE,
                                  ARG_BLOB_COMPRESSION_TYPE,
                                  ARG_ENABLE_BLOB_GARBAGE_COLLECTION,
                                  ARG_BLOB_GARBAGE_COLLECTION_AGE_CUTOFF,
                                  ARG_BLOB_GARBAGE_COLLECTION_FORCE_THRESHOLD,
                                  ARG_BLOB_COMPACTION_READAHEAD_SIZE,
                                  ARG_BLOB_FILE_STARTING_LEVEL,
                                  ARG_PREPOPULATE_BLOB_CACHE,
                                  ARG_IGNORE_UNKNOWN_OPTIONS,
                                  ARG_CF_NAME};
  ret.insert(ret.end(), options.begin(), options.end());
  return ret;
}

/**
 * Parses the specified double option and fills in the value.
 * Returns true if the option is found.
 * Returns false if the option is not found or if there is an error parsing the
 * value. If there is an error, the specified exec_state is also
 * updated.
 */
bool LDBCommand::ParseDoubleOption(
    const std::map<std::string, std::string>& /*options*/,
    const std::string& option, double& value,
    LDBCommandExecuteResult& exec_state) {
  auto itr = option_map_.find(option);
  if (itr != option_map_.end()) {
#if defined(CYGWIN)
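    // Cygwin builds cannot rely on std::stod here, so parse with strtod and
    // report failures via str_end / errno.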
    char* str_end = nullptr;
    value = std::strtod(itr->second.c_str(), &str_end);
    if (str_end == itr->second.c_str()) {
      exec_state =
          LDBCommandExecuteResult::Failed(option + " has an invalid value.");
    } else if (errno == ERANGE) {
      exec_state = LDBCommandExecuteResult::Failed(
          option + " has a value out-of-range.");
    } else {
      return true;
    }
#else
    try {
      value = std::stod(itr->second);
      return true;
    } catch (const std::invalid_argument&) {
      exec_state =
          LDBCommandExecuteResult::Failed(option + " has an invalid value.");
    } catch (const std::out_of_range&) {
      exec_state = LDBCommandExecuteResult::Failed(
          option + " has a value out-of-range.");
    }
#endif
  }
  return false;
}

/**
 * Parses the specified integer option and fills in the value.
 * Returns true if the option is found.
 * Returns false if the option is not found or if there is an error parsing the
 * value. If there is an error, the specified exec_state is also
 * updated.
 */
bool LDBCommand::ParseIntOption(
    const std::map<std::string, std::string>& /*options*/,
    const std::string& option, int& value,
    LDBCommandExecuteResult& exec_state) {
  auto itr = option_map_.find(option);
  if (itr != option_map_.end()) {
#if defined(CYGWIN)
    char* str_end = nullptr;
    value = strtol(itr->second.c_str(), &str_end, 10);
    if (str_end == itr->second.c_str()) {
      exec_state =
          LDBCommandExecuteResult::Failed(option + " has an invalid value.");
    } else if (errno == ERANGE) {
      exec_state = LDBCommandExecuteResult::Failed(
          option + " has a value out-of-range.");
    } else {
      return true;
    }
#else
    try {
      value = std::stoi(itr->second);
      return true;
    } catch (const std::invalid_argument&) {
      exec_state =
          LDBCommandExecuteResult::Failed(option + " has an invalid value.");
    } catch (const std::out_of_range&) {
      exec_state = LDBCommandExecuteResult::Failed(
          option + " has a value out-of-range.");
    }
#endif
  }
  return false;
}

/**
 * Parses the specified option and fills in the value.
 * Returns true if the option is found.
 * Returns false otherwise.
 */
bool LDBCommand::ParseStringOption(
    const std::map<std::string, std::string>& /*options*/,
    const std::string& option, std::string* value) {
  auto itr = option_map_.find(option);
  if (itr != option_map_.end()) {
    *value = itr->second;
    return true;
  }
  return false;
}

/**
 * Parses the specified compression type and fills in the value.
 * Returns true if the compression type is found.
 * Returns false otherwise.
 */
bool LDBCommand::ParseCompressionTypeOption(
    const std::map<std::string, std::string>& /*options*/,
    const std::string& option, CompressionType& value,
    LDBCommandExecuteResult& exec_state) {
  auto itr = option_map_.find(option);
  if (itr != option_map_.end()) {
    const std::string& comp = itr->second;
    if (comp == "no") {
      value = kNoCompression;
      return true;
    } else if (comp == "snappy") {
      value = kSnappyCompression;
      return true;
    } else if (comp == "zlib") {
      value = kZlibCompression;
      return true;
    } else if (comp == "bzip2") {
      value = kBZip2Compression;
      return true;
    } else if (comp == "lz4") {
      value = kLZ4Compression;
      return true;
    } else if (comp == "lz4hc") {
      value = kLZ4HCCompression;
      return true;
    } else if (comp == "xpress") {
      value = kXpressCompression;
      return true;
    } else if (comp == "zstd") {
      value = kZSTD;
      return true;
    } else {
      // Unknown compression.
      exec_state = LDBCommandExecuteResult::Failed(
          "Unknown compression algorithm: " + comp);
    }
  }
  return false;
}

void LDBCommand::OverrideBaseOptions() {
  options_.create_if_missing = false;

  int db_write_buffer_size;
  if (ParseIntOption(option_map_, ARG_DB_WRITE_BUFFER_SIZE,
                     db_write_buffer_size, exec_state_)) {
    if (db_write_buffer_size >= 0) {
      options_.db_write_buffer_size = db_write_buffer_size;
    } else {
      exec_state_ = LDBCommandExecuteResult::Failed(ARG_DB_WRITE_BUFFER_SIZE +
                                                    " must be >= 0.");
    }
  }

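  // If no db_paths were specified, default to the DB directory itself with no
  // size limit.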
  if (options_.db_paths.size() == 0) {
    options_.db_paths.emplace_back(db_path_,
                                   std::numeric_limits<uint64_t>::max());
  }

  OverrideBaseCFOptions(static_cast<ColumnFamilyOptions*>(&options_));
}

void LDBCommand::OverrideBaseCFOptions(ColumnFamilyOptions* cf_opts) {
  BlockBasedTableOptions table_options;
  bool use_table_options = false;
  int bits;
  if (ParseIntOption(option_map_, ARG_BLOOM_BITS, bits, exec_state_)) {
    if (bits > 0) {
      use_table_options = true;
      table_options.filter_policy.reset(NewBloomFilterPolicy(bits));
    } else {
      exec_state_ =
          LDBCommandExecuteResult::Failed(ARG_BLOOM_BITS + " must be > 0.");
    }
  }

  int block_size;
  if (ParseIntOption(option_map_, ARG_BLOCK_SIZE, block_size, exec_state_)) {
    if (block_size > 0) {
      use_table_options = true;
      table_options.block_size = block_size;
    } else {
      exec_state_ =
          LDBCommandExecuteResult::Failed(ARG_BLOCK_SIZE + " must be > 0.");
    }
  }

  cf_opts->force_consistency_checks = force_consistency_checks_;
  if (use_table_options) {
    cf_opts->table_factory.reset(NewBlockBasedTableFactory(table_options));
  }

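  // Integrated BlobDB options, forwarded from the corresponding
  // --enable_blob_* / --blob_* command-line arguments.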
2022-02-26 07:13:11 +00:00
|
|
|
cf_opts->enable_blob_files = enable_blob_files_;
|
|
|
|
|
|
|
|
int min_blob_size;
|
|
|
|
if (ParseIntOption(option_map_, ARG_MIN_BLOB_SIZE, min_blob_size,
|
|
|
|
exec_state_)) {
|
|
|
|
if (min_blob_size >= 0) {
|
|
|
|
cf_opts->min_blob_size = min_blob_size;
|
|
|
|
} else {
|
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed(ARG_MIN_BLOB_SIZE + " must be >= 0.");
|
|
|
|
}
|
2013-01-11 19:09:23 +00:00
|
|
|
}
|
|
|
|
|
2022-02-26 07:13:11 +00:00
|
|
|
int blob_file_size;
|
|
|
|
if (ParseIntOption(option_map_, ARG_BLOB_FILE_SIZE, blob_file_size,
|
|
|
|
exec_state_)) {
|
|
|
|
if (blob_file_size > 0) {
|
|
|
|
cf_opts->blob_file_size = blob_file_size;
|
2013-01-11 19:09:23 +00:00
|
|
|
} else {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 03:52:32 +00:00
      exec_state_ =
          LDBCommandExecuteResult::Failed(ARG_BLOB_FILE_SIZE + " must be > 0.");
    }
  }

  cf_opts->enable_blob_garbage_collection = enable_blob_garbage_collection_;

  double blob_garbage_collection_age_cutoff;
  if (ParseDoubleOption(option_map_, ARG_BLOB_GARBAGE_COLLECTION_AGE_CUTOFF,
                        blob_garbage_collection_age_cutoff, exec_state_)) {
    if (blob_garbage_collection_age_cutoff >= 0 &&
        blob_garbage_collection_age_cutoff <= 1) {
      cf_opts->blob_garbage_collection_age_cutoff =
          blob_garbage_collection_age_cutoff;
    } else {
      exec_state_ = LDBCommandExecuteResult::Failed(
          ARG_BLOB_GARBAGE_COLLECTION_AGE_CUTOFF + " must be >= 0 and <= 1.");
    }
  }

  double blob_garbage_collection_force_threshold;
  if (ParseDoubleOption(option_map_,
                        ARG_BLOB_GARBAGE_COLLECTION_FORCE_THRESHOLD,
                        blob_garbage_collection_force_threshold, exec_state_)) {
    if (blob_garbage_collection_force_threshold >= 0 &&
        blob_garbage_collection_force_threshold <= 1) {
      cf_opts->blob_garbage_collection_force_threshold =
          blob_garbage_collection_force_threshold;
    } else {
      exec_state_ = LDBCommandExecuteResult::Failed(
          ARG_BLOB_GARBAGE_COLLECTION_FORCE_THRESHOLD +
          " must be >= 0 and <= 1.");
    }
  }

  int blob_compaction_readahead_size;
  if (ParseIntOption(option_map_, ARG_BLOB_COMPACTION_READAHEAD_SIZE,
                     blob_compaction_readahead_size, exec_state_)) {
    if (blob_compaction_readahead_size > 0) {
      cf_opts->blob_compaction_readahead_size = blob_compaction_readahead_size;
    } else {
      exec_state_ = LDBCommandExecuteResult::Failed(
          ARG_BLOB_COMPACTION_READAHEAD_SIZE + " must be > 0.");
    }
  }

  int blob_file_starting_level;
  if (ParseIntOption(option_map_, ARG_BLOB_FILE_STARTING_LEVEL,
                     blob_file_starting_level, exec_state_)) {
    if (blob_file_starting_level >= 0) {
      cf_opts->blob_file_starting_level = blob_file_starting_level;
    } else {
      exec_state_ = LDBCommandExecuteResult::Failed(
          ARG_BLOB_FILE_STARTING_LEVEL + " must be >= 0.");
    }
  }

  int prepopulate_blob_cache;
  if (ParseIntOption(option_map_, ARG_PREPOPULATE_BLOB_CACHE,
                     prepopulate_blob_cache, exec_state_)) {
    switch (prepopulate_blob_cache) {
      case 0:
        cf_opts->prepopulate_blob_cache = PrepopulateBlobCache::kDisable;
        break;
      case 1:
        cf_opts->prepopulate_blob_cache = PrepopulateBlobCache::kFlushOnly;
        break;
      default:
        exec_state_ = LDBCommandExecuteResult::Failed(
            ARG_PREPOPULATE_BLOB_CACHE +
            " must be 0 (disable) or 1 (flush only).");
    }
  }

  auto itr = option_map_.find(ARG_AUTO_COMPACTION);
  if (itr != option_map_.end()) {
    cf_opts->disable_auto_compactions = !StringToBool(itr->second);
  }

  CompressionType compression_type;
  if (ParseCompressionTypeOption(option_map_, ARG_COMPRESSION_TYPE,
                                 compression_type, exec_state_)) {
    cf_opts->compression = compression_type;
  }

  CompressionType blob_compression_type;
  if (ParseCompressionTypeOption(option_map_, ARG_BLOB_COMPRESSION_TYPE,
                                 blob_compression_type, exec_state_)) {
    cf_opts->blob_compression_type = blob_compression_type;
  }

  int compression_max_dict_bytes;
  if (ParseIntOption(option_map_, ARG_COMPRESSION_MAX_DICT_BYTES,
                     compression_max_dict_bytes, exec_state_)) {
    if (compression_max_dict_bytes >= 0) {
      cf_opts->compression_opts.max_dict_bytes = compression_max_dict_bytes;
    } else {
      exec_state_ = LDBCommandExecuteResult::Failed(
          ARG_COMPRESSION_MAX_DICT_BYTES + " must be >= 0.");
    }
  }

  int write_buffer_size;
  if (ParseIntOption(option_map_, ARG_WRITE_BUFFER_SIZE, write_buffer_size,
                     exec_state_)) {
    if (write_buffer_size > 0) {
      cf_opts->write_buffer_size = write_buffer_size;
    } else {
      exec_state_ = LDBCommandExecuteResult::Failed(ARG_WRITE_BUFFER_SIZE +
                                                    " must be > 0.");
    }
  }

  int file_size;
  if (ParseIntOption(option_map_, ARG_FILE_SIZE, file_size, exec_state_)) {
    if (file_size > 0) {
      cf_opts->target_file_size_base = file_size;
    } else {
      exec_state_ =
          LDBCommandExecuteResult::Failed(ARG_FILE_SIZE + " must be > 0.");
    }
  }

  int fix_prefix_len;
  if (ParseIntOption(option_map_, ARG_FIX_PREFIX_LEN, fix_prefix_len,
                     exec_state_)) {
    if (fix_prefix_len > 0) {
      cf_opts->prefix_extractor.reset(
          NewFixedPrefixTransform(static_cast<size_t>(fix_prefix_len)));
    } else {
      exec_state_ =
          LDBCommandExecuteResult::Failed(ARG_FIX_PREFIX_LEN + " must be > 0.");
    }
  }
}
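
// Illustrative sketch (not part of the tool): the validations above surface
// on the command line as per-column-family overrides, named after the ARG_*
// strings parsed here. Assuming a built ldb binary and an existing database,
// an invocation exercising a few of them might look like:
//
//   ldb --db=/path/to/db --column_family_name=default \
//       --write_buffer_size=33554432 --fix_prefix_len=8 \
//       --min_blob_size=4096 scan
//
// The path and numeric values are placeholders; out-of-range values (e.g. a
// negative --min_blob_size) fail fast through exec_state_ as coded above.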

// First, initializes the options state using the OPTIONS file when enabled.
// Second, overrides the options according to the CLI arguments and the
// specific subcommand being run.
void LDBCommand::PrepareOptions() {
  if (!create_if_missing_ && try_load_options_) {
    config_options_.env = options_.env;
    Status s = LoadLatestOptions(config_options_, db_path_, &options_,
                                 &column_families_);
    if (!s.ok() && !s.IsNotFound()) {
      // Option file exists but load option file error.
      std::string current_version = std::to_string(ROCKSDB_MAJOR) + "." +
                                    std::to_string(ROCKSDB_MINOR) + "." +
                                    std::to_string(ROCKSDB_PATCH);
      std::string msg =
          s.ToString() + "\nThis tool was built with version " +
          current_version +
          ". If your db is in a different version, please try again "
          "with option --" +
          LDBCommand::ARG_IGNORE_UNKNOWN_OPTIONS + ".";
      exec_state_ = LDBCommandExecuteResult::Failed(msg);
      db_ = nullptr;
      return;
    }
    if (!options_.wal_dir.empty()) {
      if (options_.env->FileExists(options_.wal_dir).IsNotFound()) {
        options_.wal_dir = db_path_;
        fprintf(
            stderr,
            "wal_dir loaded from the option file doesn't exist. Ignore it.\n");
      }
    }

    // If merge operator is not set, set a string append operator.
    for (auto& cf_entry : column_families_) {
      if (!cf_entry.options.merge_operator) {
        cf_entry.options.merge_operator =
            MergeOperators::CreateStringAppendOperator(':');
      }
    }
  }

  if (options_.env == Env::Default()) {
    options_.env = config_options_.env;
  }

  OverrideBaseOptions();
  if (exec_state_.IsFailed()) {
    return;
  }

  if (column_families_.empty()) {
    // Reads the MANIFEST to figure out what column families exist. In this
    // case, the option overrides from the CLI argument/specific subcommand
    // apply to all column families.
    std::vector<std::string> cf_list;
    Status st = DB::ListColumnFamilies(options_, db_path_, &cf_list);
    // It is possible the DB doesn't exist yet, for "create if not
    // existing" case. The failure is ignored here. We rely on DB::Open()
    // to give us the correct error message for problem with opening
    // existing DB.
    if (st.ok() && cf_list.size() > 1) {
      // Ignore single column family DB.
      for (auto cf_name : cf_list) {
        column_families_.emplace_back(cf_name, options_);
      }
    }
  } else {
    // We got column families from the OPTIONS file. In this case, the option
    // overrides from the CLI argument/specific subcommand only apply to the
    // column family specified by `--column_family_name`.
    auto column_families_iter =
        std::find_if(column_families_.begin(), column_families_.end(),
                     [this](const ColumnFamilyDescriptor& cf_desc) {
                       return cf_desc.name == column_family_name_;
                     });
    if (column_families_iter == column_families_.end()) {
      exec_state_ = LDBCommandExecuteResult::Failed(
          "Non-existing column family " + column_family_name_);
      return;
    }
    OverrideBaseCFOptions(&column_families_iter->options);
  }
}
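
// A minimal standalone sketch of the same two-phase pattern, assuming
// rocksdb/utilities/options_util.h is available (LoadLatestOptions is
// declared there); the path and variable names are illustrative:
//
//   DBOptions db_opts;
//   std::vector<ColumnFamilyDescriptor> cf_descs;
//   ConfigOptions cfg;
//   Status s = LoadLatestOptions(cfg, "/path/to/db", &db_opts, &cf_descs);
//   if (s.IsNotFound()) {
//     // No OPTIONS file: fall back to defaults, as PrepareOptions() does.
//   } else if (!s.ok()) {
//     // Version mismatch etc.; ldb suggests --ignore_unknown_options here.
//   }
//   // Phase 2: CLI-style overrides are applied only after the load.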

bool LDBCommand::ParseKeyValue(const std::string& line, std::string* key,
                               std::string* value, bool is_key_hex,
                               bool is_value_hex) {
  size_t pos = line.find(DELIM);
  if (pos != std::string::npos) {
    *key = line.substr(0, pos);
    *value = line.substr(pos + strlen(DELIM));
    if (is_key_hex) {
      *key = HexToString(*key);
    }
    if (is_value_hex) {
      *value = HexToString(*value);
    }
    return true;
  } else {
    return false;
  }
}
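
// Example of the line format ParseKeyValue() consumes, using the " ==> "
// DELIM separator shown in the sample output further below; keys and values
// may be hex-encoded when the corresponding flags are set:
//
//   plain:  mykey ==> myvalue
//   hex:    0x6669727374 ==> 0x68656C6C6F
//
// A line without the separator makes the function return false.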

/**
 * Make sure that ONLY the command-line options and flags expected by this
 * command are specified on the command-line. Extraneous options are usually
 * the result of user error.
 * Returns true if all checks pass. Else returns false, and prints an
 * appropriate error msg to stderr.
 */
bool LDBCommand::ValidateCmdLineOptions() {
  for (auto itr = option_map_.begin(); itr != option_map_.end(); ++itr) {
    if (std::find(valid_cmd_line_options_.begin(),
                  valid_cmd_line_options_.end(),
                  itr->first) == valid_cmd_line_options_.end()) {
      fprintf(stderr, "Invalid command-line option %s\n", itr->first.c_str());
      return false;
    }
  }

  for (std::vector<std::string>::const_iterator itr = flags_.begin();
       itr != flags_.end(); ++itr) {
    if (std::find(valid_cmd_line_options_.begin(),
                  valid_cmd_line_options_.end(),
                  *itr) == valid_cmd_line_options_.end()) {
      fprintf(stderr, "Invalid command-line flag %s\n", itr->c_str());
      return false;
    }
  }

  if (!NoDBOpen() && option_map_.find(ARG_DB) == option_map_.end() &&
      option_map_.find(ARG_PATH) == option_map_.end()) {
    fprintf(stderr, "Either %s or %s must be specified.\n", ARG_DB.c_str(),
            ARG_PATH.c_str());
    return false;
  }

  return true;
}

std::string LDBCommand::HexToString(const std::string& str) {
  std::string result;
  std::string::size_type len = str.length();
  if (len < 2 || str[0] != '0' || str[1] != 'x') {
    fprintf(stderr, "Invalid hex input %s. Must start with 0x\n", str.c_str());
    throw "Invalid hex input";
  }
  if (!Slice(str.data() + 2, len - 2).DecodeHex(&result)) {
    throw "Invalid hex input";
  }
  return result;
}

std::string LDBCommand::StringToHex(const std::string& str) {
  std::string result("0x");
  result.append(Slice(str).ToString(true));
  return result;
}
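
// Round-trip sketch: StringToHex("first") yields "0x6669727374", and
// HexToString("0x6669727374") recovers "first". Note that HexToString()
// throws a const char* on malformed input (no "0x" prefix or bad digits),
// so callers in this file treat hex parsing errors as fatal to the command.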

std::string LDBCommand::PrintKeyValue(const std::string& key,
                                      const std::string& value, bool is_key_hex,
                                      bool is_value_hex) {
  std::string result;
  result.append(is_key_hex ? StringToHex(key) : key);
  result.append(DELIM);
  result.append(is_value_hex ? StringToHex(value) : value);
  return result;
}

std::string LDBCommand::PrintKeyValue(const std::string& key,
                                      const std::string& value, bool is_hex) {
  return PrintKeyValue(key, value, is_hex, is_hex);
}

std::string LDBCommand::PrintKeyValueOrWideColumns(
    const Slice& key, const Slice& value, const WideColumns& wide_columns,
    bool is_key_hex, bool is_value_hex) {
  if (wide_columns.empty() ||
      WideColumnsHelper::HasDefaultColumnOnly(wide_columns)) {
    return PrintKeyValue(key.ToString(), value.ToString(), is_key_hex,
                         is_value_hex);
  }
  /*
  // Sample plaintext output (first column is kDefaultWideColumnName)
  key_1 ==> :foo attr_name1:bar attr_name2:baz

  // Sample hex output (first column is kDefaultWideColumnName)
  0x6669727374 ==> :0x68656C6C6F 0x617474725F6E616D6531:0x666F6F
  */
  std::ostringstream oss;
  WideColumnsHelper::DumpWideColumns(wide_columns, oss, is_value_hex);
  return PrintKeyValue(key.ToString(), oss.str().c_str(), is_key_hex,
                       false);  // is_value_hex is already honored in oss;
                                // avoid double-hexing it.
}

std::string LDBCommand::HelpRangeCmdArgs() {
  std::ostringstream str_stream;
  str_stream << " ";
  str_stream << "[--" << ARG_FROM << "] ";
  str_stream << "[--" << ARG_TO << "] ";
  return str_stream.str();
}

bool LDBCommand::IsKeyHex(const std::map<std::string, std::string>& options,
                          const std::vector<std::string>& flags) {
  return (IsFlagPresent(flags, ARG_HEX) || IsFlagPresent(flags, ARG_KEY_HEX) ||
          ParseBooleanOption(options, ARG_HEX, false) ||
          ParseBooleanOption(options, ARG_KEY_HEX, false));
}

bool LDBCommand::IsValueHex(const std::map<std::string, std::string>& options,
                            const std::vector<std::string>& flags) {
  return (IsFlagPresent(flags, ARG_HEX) ||
          IsFlagPresent(flags, ARG_VALUE_HEX) ||
          ParseBooleanOption(options, ARG_HEX, false) ||
          ParseBooleanOption(options, ARG_VALUE_HEX, false));
}

bool LDBCommand::IsTryLoadOptions(
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags) {
  if (IsFlagPresent(flags, ARG_TRY_LOAD_OPTIONS)) {
    return true;
  }
  // If `DB` is specified and we are not explicitly creating a new DB, default
  // `try_load_options` to true. The user can still disable that by setting
  // `try_load_options=false`.
  // Note: Opening as a TTL DB doesn't support `try_load_options`, so it
  // defaults to false. TODO: TTL_DB may need to fix that; otherwise it is
  // unable to open a DB whose settings are incompatible with the defaults.
  bool default_val = (options.find(ARG_DB) != options.end()) &&
                     !IsFlagPresent(flags, ARG_CREATE_IF_MISSING) &&
                     !IsFlagPresent(flags, ARG_TTL);
  return ParseBooleanOption(options, ARG_TRY_LOAD_OPTIONS, default_val);
}
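
// Concretely (paths illustrative): "ldb --db=/d dump" defaults
// try_load_options to true, while "ldb --db=/d --create_if_missing put k v"
// and any --ttl invocation default it to false; an explicit
// --try_load_options flag or try_load_options=<bool> option always wins.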

bool LDBCommand::ParseBooleanOption(
    const std::map<std::string, std::string>& options,
    const std::string& option, bool default_val) {
  auto itr = options.find(option);
  if (itr != options.end()) {
    return StringToBool(itr->second);
  }
  return default_val;
}

bool LDBCommand::StringToBool(std::string val) {
  std::transform(val.begin(), val.end(), val.begin(),
                 [](char ch) -> char { return (char)::tolower(ch); });

  if (val == "true") {
    return true;
  } else if (val == "false") {
    return false;
  } else {
    throw "Invalid value for boolean argument";
  }
}

CompactorCommand::CompactorCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false,
                 BuildCmdLineOptions({ARG_FROM, ARG_TO, ARG_HEX, ARG_KEY_HEX,
                                      ARG_VALUE_HEX, ARG_TTL})),
      null_from_(true),
      null_to_(true) {
  auto itr = options.find(ARG_FROM);
  if (itr != options.end()) {
    null_from_ = false;
    from_ = itr->second;
  }

  itr = options.find(ARG_TO);
  if (itr != options.end()) {
    null_to_ = false;
    to_ = itr->second;
  }

  if (is_key_hex_) {
    if (!null_from_) {
      from_ = HexToString(from_);
    }
    if (!null_to_) {
      to_ = HexToString(to_);
    }
  }
}

void CompactorCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(CompactorCommand::Name());
  ret.append(HelpRangeCmdArgs());
  ret.append("\n");
}

void CompactorCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }

  Slice* begin = nullptr;
  Slice* end = nullptr;
  if (!null_from_) {
    begin = new Slice(from_);
  }
  if (!null_to_) {
    end = new Slice(to_);
  }

  CompactRangeOptions cro;
  cro.bottommost_level_compaction = BottommostLevelCompaction::kForceOptimized;

  Status s = db_->CompactRange(cro, GetCfHandle(), begin, end);
  if (!s.ok()) {
    std::stringstream oss;
    oss << "Compaction failed: " << s.ToString();
    exec_state_ = LDBCommandExecuteResult::Failed(oss.str());
  } else {
    exec_state_ = LDBCommandExecuteResult::Succeed("");
  }

  delete begin;
  delete end;
}
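
// Usage sketch (assuming this command is registered under the name returned
// by CompactorCommand::Name()): a manual ranged compaction from the shell
// might look like
//
//   ldb --db=/path/to/db compact --from=a --to=z
//
// with --from/--to optional; omitting both compacts the whole key space.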

// ---------------------------------------------------------------------------

const std::string DBLoaderCommand::ARG_DISABLE_WAL = "disable_wal";
const std::string DBLoaderCommand::ARG_BULK_LOAD = "bulk_load";
const std::string DBLoaderCommand::ARG_COMPACT = "compact";

DBLoaderCommand::DBLoaderCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, false,
          BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM,
                               ARG_TO, ARG_CREATE_IF_MISSING, ARG_DISABLE_WAL,
                               ARG_BULK_LOAD, ARG_COMPACT})),
      disable_wal_(false),
      bulk_load_(false),
      compact_(false) {
  create_if_missing_ = IsFlagPresent(flags, ARG_CREATE_IF_MISSING);
  disable_wal_ = IsFlagPresent(flags, ARG_DISABLE_WAL);
  bulk_load_ = IsFlagPresent(flags, ARG_BULK_LOAD);
  compact_ = IsFlagPresent(flags, ARG_COMPACT);
}

void DBLoaderCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(DBLoaderCommand::Name());
  ret.append(" [--" + ARG_CREATE_IF_MISSING + "]");
  ret.append(" [--" + ARG_DISABLE_WAL + "]");
  ret.append(" [--" + ARG_BULK_LOAD + "]");
  ret.append(" [--" + ARG_COMPACT + "]");
  ret.append("\n");
}

void DBLoaderCommand::OverrideBaseOptions() {
  LDBCommand::OverrideBaseOptions();
  options_.create_if_missing = create_if_missing_;
  if (bulk_load_) {
    options_.PrepareForBulkLoad();
  }
}

void DBLoaderCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }

  WriteOptions write_options;
  if (disable_wal_) {
    write_options.disableWAL = true;
  }

  int bad_lines = 0;
  std::string line;
  // prefer ifstream getline performance vs that from std::cin istream
  std::ifstream ifs_stdin("/dev/stdin");
  std::istream* istream_p = ifs_stdin.is_open() ? &ifs_stdin : &std::cin;
  Status s;
  while (s.ok() && getline(*istream_p, line, '\n')) {
    std::string key;
    std::string value;
    if (ParseKeyValue(line, &key, &value, is_key_hex_, is_value_hex_)) {
      s = db_->Put(write_options, GetCfHandle(), Slice(key), Slice(value));
    } else if (0 == line.find("Keys in range:")) {
      // ignore this line
    } else if (0 == line.find("Created bg thread 0x")) {
      // ignore this line
    } else {
      bad_lines++;
    }
  }

  if (bad_lines > 0) {
    std::cout << "Warning: " << bad_lines << " bad lines ignored." << std::endl;
  }
  if (!s.ok()) {
    std::stringstream oss;
    oss << "Load failed: " << s.ToString();
    exec_state_ = LDBCommandExecuteResult::Failed(oss.str());
  }
  if (compact_ && s.ok()) {
    s = db_->CompactRange(CompactRangeOptions(), GetCfHandle(), nullptr,
                          nullptr);
  }
  if (!s.ok()) {
    std::stringstream oss;
    oss << "Compaction failed: " << s.ToString();
    exec_state_ = LDBCommandExecuteResult::Failed(oss.str());
  }
}
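
// Usage sketch: the loader reads "key ==> value" lines from stdin, which is
// why it tolerates the "Keys in range:" banner lines above. A plausible
// pipeline (paths illustrative) is
//
//   ldb --db=/src/db dump | ldb --db=/dst/db --create_if_missing load
//
// with --disable_wal, --bulk_load and --compact available to speed up or
// finalize large loads.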

// ----------------------------------------------------------------------------

namespace {

void DumpManifestFile(Options options, std::string file, bool verbose, bool hex,
                      bool json,
                      const std::vector<ColumnFamilyDescriptor>& cf_descs) {
  EnvOptions sopt;
  std::string dbname("dummy");
  std::shared_ptr<Cache> tc(NewLRUCache(options.max_open_files - 10,
                                        options.table_cache_numshardbits));
  // Notice we are using the default options not through SanitizeOptions(),
  // if VersionSet::DumpManifest() depends on any option done by
  // SanitizeOptions(), we need to initialize it manually.
  options.db_paths.emplace_back("dummy", 0);
  options.num_levels = 64;
  WriteController wc(options.delayed_write_rate);
  WriteBufferManager wb(options.db_write_buffer_size);
  ImmutableDBOptions immutable_db_options(options);
  VersionSet versions(dbname, &immutable_db_options, sopt, tc.get(), &wb, &wc,
                      /*block_cache_tracer=*/nullptr, /*io_tracer=*/nullptr,
                      /*db_id*/ "", /*db_session_id*/ "");
  Status s = versions.DumpManifest(options, file, verbose, hex, json, cf_descs);
  if (!s.ok()) {
    fprintf(stderr, "Error in processing file %s %s\n", file.c_str(),
            s.ToString().c_str());
  }
}

}  // namespace

const std::string ManifestDumpCommand::ARG_VERBOSE = "verbose";
const std::string ManifestDumpCommand::ARG_JSON = "json";
const std::string ManifestDumpCommand::ARG_PATH = "path";

void ManifestDumpCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(ManifestDumpCommand::Name());
  ret.append(" [--" + ARG_VERBOSE + "]");
  ret.append(" [--" + ARG_JSON + "]");
  ret.append(" [--" + ARG_PATH + "=<path_to_manifest_file>]");
  ret.append("\n");
}

ManifestDumpCommand::ManifestDumpCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, false,
          BuildCmdLineOptions({ARG_VERBOSE, ARG_PATH, ARG_HEX, ARG_JSON})),
      verbose_(false),
      json_(false),
      path_("") {
  verbose_ = IsFlagPresent(flags, ARG_VERBOSE);
  json_ = IsFlagPresent(flags, ARG_JSON);

  auto itr = options.find(ARG_PATH);
  if (itr != options.end()) {
    path_ = itr->second;
    if (path_.empty()) {
      exec_state_ = LDBCommandExecuteResult::Failed("--path: missing pathname");
    }
  }
}

void ManifestDumpCommand::DoCommand() {
  PrepareOptions();
  std::string manifestfile;

  if (!path_.empty()) {
    manifestfile = path_;
  } else {
    // We need to find the manifest file by searching the directory
    // containing the db for files of the form MANIFEST_[0-9]+

    std::vector<std::string> files;
    Status s = options_.env->GetChildren(db_path_, &files);
    if (!s.ok()) {
      std::string err_msg = s.ToString();
      err_msg.append(": Failed to list the content of ");
      err_msg.append(db_path_);
      exec_state_ = LDBCommandExecuteResult::Failed(err_msg);
      return;
    }
    const std::string kManifestNamePrefix = "MANIFEST-";
    std::string matched_file;
#ifdef OS_WIN
    const char kPathDelim = '\\';
#else
    const char kPathDelim = '/';
#endif
    for (const auto& file_path : files) {
      // Some Env::GetChildren() return absolute paths. Some directories' path
      // end with path delim, e.g. '/' or '\\'.
      size_t pos = file_path.find_last_of(kPathDelim);
      if (pos == file_path.size() - 1) {
        continue;
      }
      std::string fname;
      if (pos != std::string::npos) {
        // Absolute path.
        fname.assign(file_path, pos + 1, file_path.size() - pos - 1);
      } else {
        fname = file_path;
      }
      uint64_t file_num = 0;
      FileType file_type = kWalFile;  // Just for initialization
      if (ParseFileName(fname, &file_num, &file_type) &&
          file_type == kDescriptorFile) {
        if (!matched_file.empty()) {
          exec_state_ = LDBCommandExecuteResult::Failed(
              "Multiple MANIFEST files found; use --path to select one");
          return;
        } else {
          matched_file.swap(fname);
        }
      }
    }
    if (matched_file.empty()) {
      std::string err_msg("No MANIFEST found in ");
      err_msg.append(db_path_);
      exec_state_ = LDBCommandExecuteResult::Failed(err_msg);
      return;
    }
    if (db_path_.back() != '/') {
      db_path_.append("/");
    }
    manifestfile = db_path_ + matched_file;
  }

  if (verbose_) {
    fprintf(stdout, "Processing Manifest file %s\n", manifestfile.c_str());
  }

  DumpManifestFile(options_, manifestfile, verbose_, is_key_hex_, json_,
                   column_families_);

  if (verbose_) {
    fprintf(stdout, "Processing Manifest file %s done\n", manifestfile.c_str());
  }
}
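
// Usage sketch, taken from the feature's original description:
//
//   ldb manifest_dump --json --path=path/to/manifest/file
//
// emits one JSON object per version edit, e.g.
//   {"EditNumber": 1, "LogNumber": 0, "ColumnFamily": 0}
// Without --path, the db directory given by --db is searched for a single
// MANIFEST-<number> file, and multiple matches are an error as coded above.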

// ----------------------------------------------------------------------------
namespace {

Status GetLiveFilesChecksumInfoFromVersionSet(Options options,
                                              const std::string& db_path,
                                              FileChecksumList* checksum_list) {
  EnvOptions sopt;
  Status s;
  std::string dbname(db_path);
  std::shared_ptr<Cache> tc(NewLRUCache(options.max_open_files - 10,
                                        options.table_cache_numshardbits));
  // Notice we are using the default options not through SanitizeOptions(),
  // if VersionSet::GetLiveFilesChecksumInfo depends on any option done by
  // SanitizeOptions(), we need to initialize it manually.
  options.db_paths.emplace_back(db_path, 0);
  options.num_levels = 64;
  WriteController wc(options.delayed_write_rate);
  WriteBufferManager wb(options.db_write_buffer_size);
  ImmutableDBOptions immutable_db_options(options);
  VersionSet versions(dbname, &immutable_db_options, sopt, tc.get(), &wb, &wc,
                      /*block_cache_tracer=*/nullptr, /*io_tracer=*/nullptr,
                      /*db_id*/ "", /*db_session_id*/ "");
  std::vector<std::string> cf_name_list;
  s = versions.ListColumnFamilies(&cf_name_list, db_path,
                                  immutable_db_options.fs.get());
  if (s.ok()) {
    std::vector<ColumnFamilyDescriptor> cf_list;
    for (const auto& name : cf_name_list) {
      cf_list.emplace_back(name, ColumnFamilyOptions(options));
    }
    s = versions.Recover(cf_list, true);
  }
  if (s.ok()) {
    s = versions.GetLiveFilesChecksumInfo(checksum_list);
  }
  return s;
}

}  // namespace

const std::string FileChecksumDumpCommand::ARG_PATH = "path";

void FileChecksumDumpCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(FileChecksumDumpCommand::Name());
  ret.append(" [--" + ARG_PATH + "=<path_to_manifest_file>]");
  ret.append("\n");
}

FileChecksumDumpCommand::FileChecksumDumpCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false,
                 BuildCmdLineOptions({ARG_PATH, ARG_HEX})),
      path_("") {
  auto itr = options.find(ARG_PATH);
  if (itr != options.end()) {
    path_ = itr->second;
    if (path_.empty()) {
      exec_state_ = LDBCommandExecuteResult::Failed("--path: missing pathname");
    }
  }
  is_checksum_hex_ = IsFlagPresent(flags, ARG_HEX);
}

void FileChecksumDumpCommand::DoCommand() {
  PrepareOptions();
  // print out the checksum information in the following format:
  //  sst file number, checksum function name, checksum value
  //  sst file number, checksum function name, checksum value
  //  ......

  std::unique_ptr<FileChecksumList> checksum_list(NewFileChecksumList());
  Status s = GetLiveFilesChecksumInfoFromVersionSet(options_, db_path_,
                                                    checksum_list.get());
  if (s.ok() && checksum_list != nullptr) {
    std::vector<uint64_t> file_numbers;
    std::vector<std::string> checksums;
    std::vector<std::string> checksum_func_names;
    s = checksum_list->GetAllFileChecksums(&file_numbers, &checksums,
                                           &checksum_func_names);
    if (s.ok()) {
      for (size_t i = 0; i < file_numbers.size(); i++) {
        assert(i < file_numbers.size());
        assert(i < checksums.size());
        assert(i < checksum_func_names.size());
        std::string checksum;
        if (is_checksum_hex_) {
          checksum = StringToHex(checksums[i]);
        } else {
          checksum = std::move(checksums[i]);
        }
        fprintf(stdout, "%" PRId64 ", %s, %s\n", file_numbers[i],
                checksum_func_names[i].c_str(), checksum.c_str());
      }
      fprintf(stdout, "Print SST file checksum information finished \n");
    }
  }

  if (!s.ok()) {
    exec_state_ = LDBCommandExecuteResult::Failed(s.ToString());
  }
}

// ----------------------------------------------------------------------------

void GetPropertyCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(GetPropertyCommand::Name());
  ret.append(" <property_name>");
  ret.append("\n");
}

GetPropertyCommand::GetPropertyCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, true, BuildCmdLineOptions({})) {
  if (params.size() != 1) {
    exec_state_ =
        LDBCommandExecuteResult::Failed("property name must be specified");
  } else {
    property_ = params[0];
  }
}

void GetPropertyCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }

  std::map<std::string, std::string> value_map;
  std::string value;

  // Rather than having different ldb command for map properties vs. string
  // properties, we simply try Map property first. (This order only chosen
  // because I prefer the map-style output for
  // "rocksdb.aggregated-table-properties".)
  if (db_->GetMapProperty(GetCfHandle(), property_, &value_map)) {
    if (value_map.empty()) {
      fprintf(stdout, "%s: <empty map>\n", property_.c_str());
    } else {
      for (auto& e : value_map) {
        fprintf(stdout, "%s.%s: %s\n", property_.c_str(), e.first.c_str(),
                e.second.c_str());
      }
    }
  } else if (db_->GetProperty(GetCfHandle(), property_, &value)) {
    fprintf(stdout, "%s: %s\n", property_.c_str(), value.c_str());
  } else {
    exec_state_ =
        LDBCommandExecuteResult::Failed("failed to get property: " + property_);
  }
}
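
// Usage sketch (assuming the command name returned by Name(); the db path is
// illustrative, the property names are standard DB properties):
//
//   ldb --db=/path/to/db get_property rocksdb.aggregated-table-properties
//   ldb --db=/path/to/db get_property rocksdb.estimate-num-keys
//
// Map-valued properties print one "name.key: value" line per entry, matching
// the fprintf above; scalar properties print a single "name: value" line.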

// ----------------------------------------------------------------------------

void ListColumnFamiliesCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(ListColumnFamiliesCommand::Name());
  ret.append("\n");
}

ListColumnFamiliesCommand::ListColumnFamiliesCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false, BuildCmdLineOptions({})) {}

void ListColumnFamiliesCommand::DoCommand() {
  PrepareOptions();
  std::vector<std::string> column_families;
  Status s = DB::ListColumnFamilies(options_, db_path_, &column_families);
  if (!s.ok()) {
    fprintf(stderr, "Error in processing db %s %s\n", db_path_.c_str(),
            s.ToString().c_str());
  } else {
    fprintf(stdout, "Column families in %s: \n{", db_path_.c_str());
    bool first = true;
    for (auto cf : column_families) {
      if (!first) {
        fprintf(stdout, ", ");
      }
      first = false;
      fprintf(stdout, "%s", cf.c_str());
    }
    fprintf(stdout, "}\n");
  }
}

void CreateColumnFamilyCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(CreateColumnFamilyCommand::Name());
  ret.append(" --db=<db_path> <new_column_family_name>");
  ret.append("\n");
}

CreateColumnFamilyCommand::CreateColumnFamilyCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, true, {ARG_DB}) {
  if (params.size() != 1) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "new column family name must be specified");
  } else {
    new_cf_name_ = params[0];
  }
}

void CreateColumnFamilyCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  ColumnFamilyHandle* new_cf_handle = nullptr;
  Status st = db_->CreateColumnFamily(options_, new_cf_name_, &new_cf_handle);
  if (st.ok()) {
    fprintf(stdout, "OK\n");
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "Fail to create new column family: " + st.ToString());
  }
  delete new_cf_handle;
  CloseDB();
}
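
// Usage sketch (db path illustrative; command names assumed to match the
// Help() strings above and below):
//
//   ldb --db=/path/to/db create_column_family cf1
//   ldb --db=/path/to/db drop_column_family cf1
//
// Both print "OK" on success; failures are reported through exec_state_,
// and the DB is closed either way.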

void DropColumnFamilyCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(DropColumnFamilyCommand::Name());
  ret.append(" --db=<db_path> <column_family_name_to_drop>");
  ret.append("\n");
}

DropColumnFamilyCommand::DropColumnFamilyCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, true, {ARG_DB}) {
  if (params.size() != 1) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "The name of column family to drop must be specified");
  } else {
    cf_name_to_drop_ = params[0];
  }
}

void DropColumnFamilyCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  auto iter = cf_handles_.find(cf_name_to_drop_);
  if (iter == cf_handles_.end()) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "Column family: " + cf_name_to_drop_ + " doesn't exist in db.");
    return;
  }
  ColumnFamilyHandle* cf_handle_to_drop = iter->second;
  Status st = db_->DropColumnFamily(cf_handle_to_drop);
  if (st.ok()) {
    fprintf(stdout, "OK\n");
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "Fail to drop column family: " + st.ToString());
  }
  CloseDB();
}

// ----------------------------------------------------------------------------
namespace {

// This function is only called in the sane case of >1 buckets in the time
// range. It is also called only when timekv falls between the provided
// ttl_start and ttl_end.
void IncBucketCounts(std::vector<uint64_t>& bucket_counts, int ttl_start,
                     int time_range, int bucket_size, int timekv,
                     int num_buckets) {
#ifdef NDEBUG
  (void)time_range;
  (void)num_buckets;
#endif
  assert(time_range > 0 && timekv >= ttl_start && bucket_size > 0 &&
         timekv < (ttl_start + time_range) && num_buckets > 1);
  int bucket = (timekv - ttl_start) / bucket_size;
  bucket_counts[bucket]++;
}
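
// Worked example of the bucketing arithmetic: with ttl_start = 1000,
// bucket_size = 60 and timekv = 1130, (1130 - 1000) / 60 = 2, so the key is
// counted in bucket_counts[2], i.e. the third bucket starting at ttl_start.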

void PrintBucketCounts(const std::vector<uint64_t>& bucket_counts,
                       int ttl_start, int ttl_end, int bucket_size,
                       int num_buckets) {
  int time_point = ttl_start;
  for (int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
    fprintf(stdout, "Keys in range %s to %s : %lu\n",
            TimeToHumanString(time_point).c_str(),
            TimeToHumanString(time_point + bucket_size).c_str(),
            (unsigned long)bucket_counts[i]);
  }
  fprintf(stdout, "Keys in range %s to %s : %lu\n",
          TimeToHumanString(time_point).c_str(),
          TimeToHumanString(ttl_end).c_str(),
          (unsigned long)bucket_counts[num_buckets - 1]);
}

}  // namespace

const std::string InternalDumpCommand::ARG_COUNT_ONLY = "count_only";
const std::string InternalDumpCommand::ARG_COUNT_DELIM = "count_delim";
const std::string InternalDumpCommand::ARG_STATS = "stats";
const std::string InternalDumpCommand::ARG_INPUT_KEY_HEX = "input_key_hex";

InternalDumpCommand::InternalDumpCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, true,
                 BuildCmdLineOptions(
                     {ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM, ARG_TO,
                      ARG_MAX_KEYS, ARG_COUNT_ONLY, ARG_COUNT_DELIM, ARG_STATS,
                      ARG_INPUT_KEY_HEX, ARG_DECODE_BLOB_INDEX})),
      has_from_(false),
      has_to_(false),
      max_keys_(-1),
      delim_("."),
      count_only_(false),
      count_delim_(false),
      print_stats_(false),
      is_input_key_hex_(false),
      decode_blob_index_(false) {
  has_from_ = ParseStringOption(options, ARG_FROM, &from_);
  has_to_ = ParseStringOption(options, ARG_TO, &to_);

  ParseIntOption(options, ARG_MAX_KEYS, max_keys_, exec_state_);
  auto itr = options.find(ARG_COUNT_DELIM);
  if (itr != options.end()) {
    delim_ = itr->second;
    count_delim_ = true;
    // fprintf(stdout, "delim = %c\n", delim_[0]);
  } else {
    count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
    delim_ = ".";
  }

  print_stats_ = IsFlagPresent(flags, ARG_STATS);
  count_only_ = IsFlagPresent(flags, ARG_COUNT_ONLY);
  is_input_key_hex_ = IsFlagPresent(flags, ARG_INPUT_KEY_HEX);
  decode_blob_index_ = IsFlagPresent(flags, ARG_DECODE_BLOB_INDEX);

  if (is_input_key_hex_) {
    if (has_from_) {
      from_ = HexToString(from_);
    }
    if (has_to_) {
      to_ = HexToString(to_);
    }
  }
}

void InternalDumpCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(InternalDumpCommand::Name());
  ret.append(HelpRangeCmdArgs());
  ret.append(" [--" + ARG_INPUT_KEY_HEX + "]");
  ret.append(" [--" + ARG_MAX_KEYS + "=<N>]");
  ret.append(" [--" + ARG_COUNT_ONLY + "]");
  ret.append(" [--" + ARG_COUNT_DELIM + "=<char>]");
  ret.append(" [--" + ARG_STATS + "]");
  ret.append(" [--" + ARG_DECODE_BLOB_INDEX + "]");
  ret.append("\n");
}
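
// Illustrative invocations (database path and key bounds are hypothetical):
//   ldb --db=/tmp/testdb idump --max_keys=100
//   ldb --db=/tmp/testdb idump --input_key_hex --from=0x41 --to=0x5A
// With --input_key_hex, the range bounds are hex-decoded before use.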

void InternalDumpCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }

  if (print_stats_) {
    std::string stats;
    if (db_->GetProperty(GetCfHandle(), "rocksdb.stats", &stats)) {
      fprintf(stdout, "%s\n", stats.c_str());
    }
  }

  // Collect all internal key versions in the requested range via
  // GetAllKeyVersions().
  std::vector<KeyVersion> key_versions;
  Status st = GetAllKeyVersions(db_, GetCfHandle(), from_, to_, max_keys_,
                                &key_versions);
  if (!st.ok()) {
    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
    return;
  }
  std::string rtype1, rtype2, row, val;
  rtype2 = "";
  uint64_t c = 0;
  uint64_t s1 = 0, s2 = 0;

  long long count = 0;
  for (auto& key_version : key_versions) {
    ValueType value_type = static_cast<ValueType>(key_version.type);
    InternalKey ikey(key_version.user_key, key_version.sequence, value_type);
    if (has_to_ && ikey.user_key() == to_) {
      // GetAllKeyVersions() includes keys with user key `to_`, but idump has
      // traditionally excluded such keys.
      break;
    }
    ++count;
    int k;
    if (count_delim_) {
      rtype1 = "";
      s1 = 0;
      row = ikey.Encode().ToString();
      val = key_version.value;
      for (k = 0; row[k] != '\x01' && row[k] != '\0'; k++) s1++;
      for (k = 0; val[k] != '\x01' && val[k] != '\0'; k++) s1++;
      for (int j = 0; row[j] != delim_[0] && row[j] != '\0' && row[j] != '\x01';
           j++)
        rtype1 += row[j];
      if (rtype2.compare("") && rtype2.compare(rtype1) != 0) {
        fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
                rtype2.c_str(), c, s2);
        c = 1;
        s2 = s1;
        rtype2 = rtype1;
      } else {
        c++;
        s2 += s1;
        rtype2 = rtype1;
      }
    }

    if (!count_only_ && !count_delim_) {
      std::string key = ikey.DebugString(is_key_hex_);
      Slice value(key_version.value);
      if (!decode_blob_index_ || value_type != kTypeBlobIndex) {
        if (value_type == kTypeWideColumnEntity) {
          std::ostringstream oss;
          const Status s = WideColumnsHelper::DumpSliceAsWideColumns(
              value, oss, is_value_hex_);
          if (!s.ok()) {
            fprintf(stderr, "%s => error deserializing wide columns\n",
                    key.c_str());
          } else {
            fprintf(stdout, "%s => %s\n", key.c_str(), oss.str().c_str());
          }
        } else {
          fprintf(stdout, "%s => %s\n", key.c_str(),
                  value.ToString(is_value_hex_).c_str());
        }
      } else {
        BlobIndex blob_index;

        const Status s = blob_index.DecodeFrom(value);
        if (!s.ok()) {
          fprintf(stderr, "%s => error decoding blob index =>\n", key.c_str());
        } else {
          fprintf(stdout, "%s => %s\n", key.c_str(),
                  blob_index.DebugString(is_value_hex_).c_str());
        }
      }
    }

    // Terminate if the maximum number of keys has been dumped
    if (max_keys_ > 0 && count >= max_keys_) break;
  }
  if (count_delim_) {
    fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
            rtype2.c_str(), c, s2);
  } else {
    fprintf(stdout, "Internal keys in range: %lld\n", count);
  }
}

const std::string DBDumperCommand::ARG_COUNT_ONLY = "count_only";
const std::string DBDumperCommand::ARG_COUNT_DELIM = "count_delim";
const std::string DBDumperCommand::ARG_STATS = "stats";
const std::string DBDumperCommand::ARG_TTL_BUCKET = "bucket";

DBDumperCommand::DBDumperCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, true,
          BuildCmdLineOptions(
              {ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM, ARG_TO,
               ARG_MAX_KEYS, ARG_COUNT_ONLY, ARG_COUNT_DELIM, ARG_STATS,
               ARG_TTL_START, ARG_TTL_END, ARG_TTL_BUCKET, ARG_TIMESTAMP,
               ARG_PATH, ARG_DECODE_BLOB_INDEX, ARG_DUMP_UNCOMPRESSED_BLOBS})),
      null_from_(true),
      null_to_(true),
      max_keys_(-1),
      count_only_(false),
      count_delim_(false),
      print_stats_(false),
      decode_blob_index_(false) {
  auto itr = options.find(ARG_FROM);
  if (itr != options.end()) {
    null_from_ = false;
    from_ = itr->second;
  }

  itr = options.find(ARG_TO);
  if (itr != options.end()) {
    null_to_ = false;
    to_ = itr->second;
  }

  itr = options.find(ARG_MAX_KEYS);
  if (itr != options.end()) {
    try {
#if defined(CYGWIN)
      max_keys_ = strtol(itr->second.c_str(), 0, 10);
#else
      max_keys_ = std::stoi(itr->second);
#endif
    } catch (const std::invalid_argument&) {
      exec_state_ = LDBCommandExecuteResult::Failed(ARG_MAX_KEYS +
                                                    " has an invalid value");
    } catch (const std::out_of_range&) {
      exec_state_ = LDBCommandExecuteResult::Failed(
          ARG_MAX_KEYS + " has a value out-of-range");
    }
  }
  itr = options.find(ARG_COUNT_DELIM);
  if (itr != options.end()) {
    delim_ = itr->second;
    count_delim_ = true;
  } else {
    count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
    delim_ = ".";
  }

  print_stats_ = IsFlagPresent(flags, ARG_STATS);
  count_only_ = IsFlagPresent(flags, ARG_COUNT_ONLY);
  decode_blob_index_ = IsFlagPresent(flags, ARG_DECODE_BLOB_INDEX);
  dump_uncompressed_blobs_ = IsFlagPresent(flags, ARG_DUMP_UNCOMPRESSED_BLOBS);

  if (is_key_hex_) {
    if (!null_from_) {
      from_ = HexToString(from_);
    }
    if (!null_to_) {
      to_ = HexToString(to_);
    }
  }

  itr = options.find(ARG_PATH);
  if (itr != options.end()) {
    path_ = itr->second;
    if (db_path_.empty()) {
      db_path_ = path_;
    }
  }
}

void DBDumperCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(DBDumperCommand::Name());
  ret.append(HelpRangeCmdArgs());
  ret.append(" [--" + ARG_TTL + "]");
  ret.append(" [--" + ARG_MAX_KEYS + "=<N>]");
  ret.append(" [--" + ARG_TIMESTAMP + "]");
  ret.append(" [--" + ARG_COUNT_ONLY + "]");
  ret.append(" [--" + ARG_COUNT_DELIM + "=<char>]");
  ret.append(" [--" + ARG_STATS + "]");
  ret.append(" [--" + ARG_TTL_BUCKET + "=<N>]");
  ret.append(" [--" + ARG_TTL_START + "=<N>:- is inclusive]");
  ret.append(" [--" + ARG_TTL_END + "=<N>:- is exclusive]");
  ret.append(" [--" + ARG_PATH + "=<path_to_a_file>]");
  ret.append(" [--" + ARG_DECODE_BLOB_INDEX + "]");
  ret.append(" [--" + ARG_DUMP_UNCOMPRESSED_BLOBS + "]");
  ret.append("\n");
}
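
// Illustrative invocations (paths and timestamps are hypothetical):
//   ldb --db=/tmp/testdb dump --max_keys=1000 --count_delim="."
//   ldb --db=/tmp/ttldb dump --ttl --ttl_start=1600000000 \
//       --ttl_end=1600086400 --bucket=3600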

/**
 * Handles two separate cases:
 *
 * 1) --db is specified: just dump the database.
 *
 * 2) --path is specified: determine which dumping function to call based on
 *    the file extension. Note that we intentionally use the extension and
 *    avoid probing the file contents, under the assumption that renaming the
 *    files is not a supported scenario.
 */
void DBDumperCommand::DoCommand() {
  if (!db_) {
    assert(!path_.empty());
    std::string fileName = GetFileNameFromPath(path_);
    uint64_t number;
    FileType type;

    exec_state_ = LDBCommandExecuteResult::Succeed("");

    if (!ParseFileName(fileName, &number, &type)) {
      exec_state_ =
          LDBCommandExecuteResult::Failed("Can't parse file type: " + path_);
      return;
    }

    switch (type) {
      case kWalFile:
        // TODO(myabandeh): allow configuring is_write_commited
        DumpWalFile(options_, path_, /* print_header_ */ true,
                    /* print_values_ */ true, true /* is_write_commited */,
                    &exec_state_);
        break;
      case kTableFile:
        DumpSstFile(options_, path_, is_key_hex_, /* show_properties */ true,
                    decode_blob_index_, from_, to_);
        break;
      case kDescriptorFile:
        DumpManifestFile(options_, path_, /* verbose_ */ false, is_key_hex_,
                         /* json_ */ false, column_families_);
        break;
      case kBlobFile:
        DumpBlobFile(path_, is_key_hex_, is_value_hex_,
                     dump_uncompressed_blobs_);
        break;
      default:
        exec_state_ = LDBCommandExecuteResult::Failed(
            "File type not supported: " + path_);
        break;
    }
  } else {
    DoDumpCommand();
  }
}
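
// Illustrative --path invocations, one per supported file type; the paths
// are hypothetical, and the dumper is picked from the file name alone:
//   ldb dump --path=/tmp/testdb/000005.log        (WAL file)
//   ldb dump --path=/tmp/testdb/000007.sst        (table file)
//   ldb dump --path=/tmp/testdb/MANIFEST-000001   (manifest file)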

void DBDumperCommand::DoDumpCommand() {
  assert(nullptr != db_);
  assert(path_.empty());

  // Parse command line args
  uint64_t count = 0;
  if (print_stats_) {
    std::string stats;
    if (db_->GetProperty("rocksdb.stats", &stats)) {
      fprintf(stdout, "%s\n", stats.c_str());
    }
  }

  // Setup key iterator
  ReadOptions scan_read_opts;
  scan_read_opts.total_order_seek = true;
  Iterator* iter = db_->NewIterator(scan_read_opts, GetCfHandle());
  Status st = iter->status();
  if (!st.ok()) {
    exec_state_ =
        LDBCommandExecuteResult::Failed("Iterator error." + st.ToString());
  }

  if (!null_from_) {
    iter->Seek(from_);
  } else {
    iter->SeekToFirst();
  }

  int max_keys = max_keys_;
  int ttl_start;
  if (!ParseIntOption(option_map_, ARG_TTL_START, ttl_start, exec_state_)) {
    ttl_start = DBWithTTLImpl::kMinTimestamp;  // TTL introduction time
  }
  int ttl_end;
  if (!ParseIntOption(option_map_, ARG_TTL_END, ttl_end, exec_state_)) {
    ttl_end = DBWithTTLImpl::kMaxTimestamp;  // Max time allowed by TTL feature
  }
  if (ttl_end < ttl_start) {
    fprintf(stderr, "Error: End time can't be less than start time\n");
    delete iter;
    return;
  }
  int time_range = ttl_end - ttl_start;
  int bucket_size;
  if (!ParseIntOption(option_map_, ARG_TTL_BUCKET, bucket_size, exec_state_) ||
      bucket_size <= 0) {
    bucket_size = time_range;  // Will have just 1 bucket by default
  }
  // creating variables for row count of each type
  std::string rtype1, rtype2, row, val;
  rtype2 = "";
  uint64_t c = 0;
  uint64_t s1 = 0, s2 = 0;

  // At this point, bucket_size=0 => time_range=0
  int num_buckets = (bucket_size >= time_range)
                        ? 1
                        : ((time_range + bucket_size - 1) / bucket_size);
  std::vector<uint64_t> bucket_counts(num_buckets, 0);
  if (is_db_ttl_ && !count_only_ && timestamp_ && !count_delim_) {
    fprintf(stdout, "Dumping key-values from %s to %s\n",
            TimeToHumanString(ttl_start).c_str(),
            TimeToHumanString(ttl_end).c_str());
  }

  HistogramImpl vsize_hist;

  for (; iter->Valid(); iter->Next()) {
    int rawtime = 0;
    // If an end marker was specified, we stop before it
    if (!null_to_ && (iter->key().ToString() >= to_)) break;
    // Terminate if the maximum number of keys has been dumped
    if (max_keys == 0) break;
    if (is_db_ttl_) {
      TtlIterator* it_ttl = static_cast_with_check<TtlIterator>(iter);
      rawtime = it_ttl->ttl_timestamp();
      if (rawtime < ttl_start || rawtime >= ttl_end) {
        continue;
      }
    }
    if (max_keys > 0) {
      --max_keys;
    }
    if (is_db_ttl_ && num_buckets > 1) {
      IncBucketCounts(bucket_counts, ttl_start, time_range, bucket_size,
                      rawtime, num_buckets);
    }
    ++count;
    if (count_delim_) {
      rtype1 = "";
      row = iter->key().ToString();
      val = iter->value().ToString();
      s1 = row.size() + val.size();
      for (int j = 0; row[j] != delim_[0] && row[j] != '\0'; j++)
        rtype1 += row[j];
      if (rtype2.compare("") && rtype2.compare(rtype1) != 0) {
        fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
                rtype2.c_str(), c, s2);
        c = 1;
        s2 = s1;
        rtype2 = rtype1;
      } else {
        c++;
        s2 += s1;
        rtype2 = rtype1;
      }
    }

    if (count_only_) {
      vsize_hist.Add(iter->value().size());
    }

    if (!count_only_ && !count_delim_) {
      if (is_db_ttl_ && timestamp_) {
        fprintf(stdout, "%s ", TimeToHumanString(rawtime).c_str());
      }
      // (TODO) TTL Iterator does not support wide columns yet.
      std::string str =
          is_db_ttl_
              ? PrintKeyValue(iter->key().ToString(), iter->value().ToString(),
                              is_key_hex_, is_value_hex_)
              : PrintKeyValueOrWideColumns(iter->key(), iter->value(),
                                           iter->columns(), is_key_hex_,
                                           is_value_hex_);
      fprintf(stdout, "%s\n", str.c_str());
    }
  }

  if (num_buckets > 1 && is_db_ttl_) {
    PrintBucketCounts(bucket_counts, ttl_start, ttl_end, bucket_size,
                      num_buckets);
  } else if (count_delim_) {
    fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
            rtype2.c_str(), c, s2);
  } else {
    fprintf(stdout, "Keys in range: %" PRIu64 "\n", count);
  }

  if (count_only_) {
    fprintf(stdout, "Value size distribution: \n");
    fprintf(stdout, "%s\n", vsize_hist.ToString().c_str());
  }
  // Clean up
  delete iter;
}

const std::string ReduceDBLevelsCommand::ARG_NEW_LEVELS = "new_levels";
const std::string ReduceDBLevelsCommand::ARG_PRINT_OLD_LEVELS =
    "print_old_levels";

ReduceDBLevelsCommand::ReduceDBLevelsCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false,
                 BuildCmdLineOptions({ARG_NEW_LEVELS, ARG_PRINT_OLD_LEVELS})),
      old_levels_(1 << 7),
      new_levels_(-1),
      print_old_levels_(false) {
  ParseIntOption(option_map_, ARG_NEW_LEVELS, new_levels_, exec_state_);
  print_old_levels_ = IsFlagPresent(flags, ARG_PRINT_OLD_LEVELS);

  if (new_levels_ <= 0) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        " Use --" + ARG_NEW_LEVELS + " to specify a new level number\n");
  }
}

std::vector<std::string> ReduceDBLevelsCommand::PrepareArgs(
    const std::string& db_path, int new_levels, bool print_old_level) {
  std::vector<std::string> ret;
  ret.push_back("reduce_levels");
  ret.push_back("--" + ARG_DB + "=" + db_path);
  ret.push_back("--" + ARG_NEW_LEVELS + "=" + std::to_string(new_levels));
  if (print_old_level) {
    ret.push_back("--" + ARG_PRINT_OLD_LEVELS);
  }
  return ret;
}
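
// For example, PrepareArgs("/tmp/testdb", 4, true) returns (the path is
// hypothetical):
//   {"reduce_levels", "--db=/tmp/testdb", "--new_levels=4",
//    "--print_old_levels"}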

void ReduceDBLevelsCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(ReduceDBLevelsCommand::Name());
  ret.append(" --" + ARG_NEW_LEVELS + "=<New number of levels>");
  ret.append(" [--" + ARG_PRINT_OLD_LEVELS + "]");
  ret.append("\n");
}
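
// Illustrative usage (hypothetical path):
//   ldb --db=/tmp/testdb reduce_levels --new_levels=3 --print_old_levels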

void ReduceDBLevelsCommand::OverrideBaseCFOptions(
    ColumnFamilyOptions* cf_opts) {
  LDBCommand::OverrideBaseCFOptions(cf_opts);
  cf_opts->num_levels = old_levels_;
  cf_opts->max_bytes_for_level_multiplier_additional.resize(cf_opts->num_levels,
                                                            1);
  // Disable size compaction
  cf_opts->max_bytes_for_level_base = 1ULL << 50;
  cf_opts->max_bytes_for_level_multiplier = 1;
}

Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt, int* levels) {
  ImmutableDBOptions db_options(opt);
  EnvOptions soptions;
  std::shared_ptr<Cache> tc(
      NewLRUCache(opt.max_open_files - 10, opt.table_cache_numshardbits));
  const InternalKeyComparator cmp(opt.comparator);
  WriteController wc(opt.delayed_write_rate);
  WriteBufferManager wb(opt.db_write_buffer_size);
  VersionSet versions(db_path_, &db_options, soptions, tc.get(), &wb, &wc,
                      /*block_cache_tracer=*/nullptr, /*io_tracer=*/nullptr,
                      /*db_id*/ "", /*db_session_id*/ "");
  std::vector<ColumnFamilyDescriptor> dummy;
  ColumnFamilyDescriptor dummy_descriptor(kDefaultColumnFamilyName,
                                          ColumnFamilyOptions(opt));
  dummy.push_back(dummy_descriptor);
  // We rely on VersionSet::Recover to tell us the internal data structures
  // in the db, and Recover() should never make any change
  // (like LogAndApply) to the manifest file.
  Status st = versions.Recover(dummy);
  if (!st.ok()) {
    return st;
  }
  int max = -1;
  auto default_cfd = versions.GetColumnFamilySet()->GetDefault();
  for (int i = 0; i < default_cfd->NumberLevels(); i++) {
    if (default_cfd->current()->storage_info()->NumLevelFiles(i)) {
      max = i;
    }
  }

  *levels = max + 1;
  return st;
}
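
// Illustrative outcome: if the recovered default column family has files at
// levels 0, 1 and 3 only, the loop leaves max == 3 and *levels is set to 4.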

void ReduceDBLevelsCommand::DoCommand() {
  if (new_levels_ <= 1) {
    exec_state_ =
        LDBCommandExecuteResult::Failed("Invalid number of levels.\n");
    return;
  }

  Status st;
  PrepareOptions();
  int old_level_num = -1;
  st = GetOldNumOfLevels(options_, &old_level_num);
  if (!st.ok()) {
    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
    return;
  }

  if (print_old_levels_) {
    fprintf(stdout, "The old number of levels in use is %d\n", old_level_num);
  }

  if (old_level_num <= new_levels_) {
    return;
  }

  old_levels_ = old_level_num;

  OpenDB();
  if (exec_state_.IsFailed()) {
    return;
  }
  assert(db_ != nullptr);
  // Compact the whole DB to put all files to the highest level.
  fprintf(stdout, "Compacting the db...\n");
  st =
      db_->CompactRange(CompactRangeOptions(), GetCfHandle(), nullptr, nullptr);

  CloseDB();

  if (st.ok()) {
    EnvOptions soptions;
    st = VersionSet::ReduceNumberOfLevels(db_path_, &options_, soptions,
                                          new_levels_);
  }
  if (!st.ok()) {
    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
    return;
  }
}

const std::string ChangeCompactionStyleCommand::ARG_OLD_COMPACTION_STYLE =
    "old_compaction_style";
const std::string ChangeCompactionStyleCommand::ARG_NEW_COMPACTION_STYLE =
    "new_compaction_style";

ChangeCompactionStyleCommand::ChangeCompactionStyleCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false,
                 BuildCmdLineOptions(
                     {ARG_OLD_COMPACTION_STYLE, ARG_NEW_COMPACTION_STYLE})),
      old_compaction_style_(-1),
      new_compaction_style_(-1) {
  ParseIntOption(option_map_, ARG_OLD_COMPACTION_STYLE, old_compaction_style_,
                 exec_state_);
  if (old_compaction_style_ != kCompactionStyleLevel &&
      old_compaction_style_ != kCompactionStyleUniversal) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "Use --" + ARG_OLD_COMPACTION_STYLE + " to specify old compaction " +
        "style. Check ldb help for proper compaction style value.\n");
    return;
  }

  ParseIntOption(option_map_, ARG_NEW_COMPACTION_STYLE, new_compaction_style_,
                 exec_state_);
  if (new_compaction_style_ != kCompactionStyleLevel &&
      new_compaction_style_ != kCompactionStyleUniversal) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "Use --" + ARG_NEW_COMPACTION_STYLE + " to specify new compaction " +
        "style. Check ldb help for proper compaction style value.\n");
    return;
  }

  if (new_compaction_style_ == old_compaction_style_) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "Old compaction style is the same as new compaction style. "
        "Nothing to do.\n");
    return;
  }

  if (old_compaction_style_ == kCompactionStyleUniversal &&
      new_compaction_style_ == kCompactionStyleLevel) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "Convert from universal compaction to level compaction. "
        "Nothing to do.\n");
    return;
  }
}

void ChangeCompactionStyleCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(ChangeCompactionStyleCommand::Name());
  ret.append(" --" + ARG_OLD_COMPACTION_STYLE + "=<Old compaction style: 0 " +
             "for level compaction, 1 for universal compaction>");
  ret.append(" --" + ARG_NEW_COMPACTION_STYLE + "=<New compaction style: 0 " +
             "for level compaction, 1 for universal compaction>");
  ret.append("\n");
}
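
// Illustrative usage converting level compaction (0) to universal
// compaction (1); the database path is hypothetical:
//   ldb --db=/tmp/testdb change_compaction_style --old_compaction_style=0 \
//       --new_compaction_style=1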

void ChangeCompactionStyleCommand::OverrideBaseCFOptions(
    ColumnFamilyOptions* cf_opts) {
  LDBCommand::OverrideBaseCFOptions(cf_opts);
  if (old_compaction_style_ == kCompactionStyleLevel &&
      new_compaction_style_ == kCompactionStyleUniversal) {
    // In order to convert from level compaction to universal compaction, we
    // need to compact all data into a single file and move it to level 0.
    cf_opts->disable_auto_compactions = true;
    cf_opts->target_file_size_base = INT_MAX;
    cf_opts->target_file_size_multiplier = 1;
    cf_opts->max_bytes_for_level_base = INT_MAX;
    cf_opts->max_bytes_for_level_multiplier = 1;
  }
}

void ChangeCompactionStyleCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  // print db stats before we have made any change
  std::string property;
  std::string files_per_level;
  for (int i = 0; i < db_->NumberLevels(GetCfHandle()); i++) {
    db_->GetProperty(GetCfHandle(),
                     "rocksdb.num-files-at-level" + std::to_string(i),
                     &property);

    // format print string
    char buf[100];
    snprintf(buf, sizeof(buf), "%s%s", (i ? "," : ""), property.c_str());
    files_per_level += buf;
  }
  fprintf(stdout, "files per level before compaction: %s\n",
          files_per_level.c_str());

  // manually compact into a single file and move the file to level 0
  CompactRangeOptions compact_options;
  compact_options.change_level = true;
  compact_options.target_level = 0;
  Status s =
      db_->CompactRange(compact_options, GetCfHandle(), nullptr, nullptr);
  if (!s.ok()) {
    std::stringstream oss;
    oss << "Compaction failed: " << s.ToString();
    exec_state_ = LDBCommandExecuteResult::Failed(oss.str());
    return;
  }

  // verify compaction result
  files_per_level = "";
  int num_files = 0;
  for (int i = 0; i < db_->NumberLevels(GetCfHandle()); i++) {
    db_->GetProperty(GetCfHandle(),
                     "rocksdb.num-files-at-level" + std::to_string(i),
                     &property);

    // format print string
    char buf[100];
    snprintf(buf, sizeof(buf), "%s%s", (i ? "," : ""), property.c_str());
    files_per_level += buf;

    num_files = atoi(property.c_str());

    // level 0 should have only 1 file
    if (i == 0 && num_files != 1) {
      exec_state_ = LDBCommandExecuteResult::Failed(
          "Number of db files at "
          "level 0 after compaction is " +
          std::to_string(num_files) + ", not 1.\n");
|
2013-09-04 20:13:08 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
// other levels should have no file
|
|
|
|
if (i > 0 && num_files != 0) {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 03:52:32 +00:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"Number of db files at "
|
|
|
|
"level " +
|
2022-05-06 20:03:58 +00:00
|
|
|
std::to_string(i) + " after compaction is " +
|
|
|
|
std::to_string(num_files) + ", not 0.\n");
|
2013-09-04 20:13:08 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fprintf(stdout, "files per level after compaction: %s\n",
|
|
|
|
files_per_level.c_str());
|
|
|
|
}
|
|
|
|
|
// ----------------------------------------------------------------------------

namespace {

struct StdErrReporter : public log::Reader::Reporter {
  void Corruption(size_t /*bytes*/, const Status& s) override {
    std::cerr << "Corruption detected in log file " << s.ToString() << "\n";
  }
};

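// InMemoryHandler pretty-prints the contents of a WriteBatch into row_, one
// callback per operation type (PUT, PUT_ENTITY, MERGE, DELETE, two-phase
// commit markers, and so on), hex-encoding keys and values.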
class InMemoryHandler : public WriteBatch::Handler {
 public:
  InMemoryHandler(std::stringstream& row, bool print_values,
                  bool write_after_commit = false)
      : Handler(),
        row_(row),
        print_values_(print_values),
        write_after_commit_(write_after_commit) {}

  void commonPutMerge(const Slice& key, const Slice& value) {
    std::string k = LDBCommand::StringToHex(key.ToString());
    if (print_values_) {
      std::string v = LDBCommand::StringToHex(value.ToString());
      row_ << k << " : ";
      row_ << v << " ";
    } else {
      row_ << k << " ";
    }
  }

  Status PutCF(uint32_t cf, const Slice& key, const Slice& value) override {
    row_ << "PUT(" << cf << ") : ";
    commonPutMerge(key, value);
    return Status::OK();
  }

  Status PutEntityCF(uint32_t cf, const Slice& key,
                     const Slice& value) override {
    row_ << "PUT_ENTITY(" << cf << ") : ";
    std::string k = LDBCommand::StringToHex(key.ToString());
    if (print_values_) {
      return WideColumnsHelper::DumpSliceAsWideColumns(value, row_, true);
    }
    return Status::OK();
  }

  Status MergeCF(uint32_t cf, const Slice& key, const Slice& value) override {
    row_ << "MERGE(" << cf << ") : ";
    commonPutMerge(key, value);
    return Status::OK();
  }

  Status MarkNoop(bool) override {
    row_ << "NOOP ";
    return Status::OK();
  }

  Status DeleteCF(uint32_t cf, const Slice& key) override {
    row_ << "DELETE(" << cf << ") : ";
    row_ << LDBCommand::StringToHex(key.ToString()) << " ";
    return Status::OK();
  }

  Status SingleDeleteCF(uint32_t cf, const Slice& key) override {
    row_ << "SINGLE_DELETE(" << cf << ") : ";
    row_ << LDBCommand::StringToHex(key.ToString()) << " ";
    return Status::OK();
  }

  Status DeleteRangeCF(uint32_t cf, const Slice& begin_key,
                       const Slice& end_key) override {
    row_ << "DELETE_RANGE(" << cf << ") : ";
    row_ << LDBCommand::StringToHex(begin_key.ToString()) << " ";
    row_ << LDBCommand::StringToHex(end_key.ToString()) << " ";
    return Status::OK();
  }

  Status MarkBeginPrepare(bool unprepare) override {
    row_ << "BEGIN_PREPARE(";
    row_ << (unprepare ? "true" : "false") << ") ";
    return Status::OK();
  }

  Status MarkEndPrepare(const Slice& xid) override {
    row_ << "END_PREPARE(";
    row_ << LDBCommand::StringToHex(xid.ToString()) << ") ";
    return Status::OK();
  }

  Status MarkRollback(const Slice& xid) override {
    row_ << "ROLLBACK(";
    row_ << LDBCommand::StringToHex(xid.ToString()) << ") ";
    return Status::OK();
  }

  Status MarkCommit(const Slice& xid) override {
    row_ << "COMMIT(";
    row_ << LDBCommand::StringToHex(xid.ToString()) << ") ";
    return Status::OK();
  }

  Status MarkCommitWithTimestamp(const Slice& xid,
                                 const Slice& commit_ts) override {
    row_ << "COMMIT_WITH_TIMESTAMP(";
    row_ << LDBCommand::StringToHex(xid.ToString()) << ", ";
    row_ << LDBCommand::StringToHex(commit_ts.ToString()) << ") ";
    return Status::OK();
  }

  ~InMemoryHandler() override {}

 protected:
  Handler::OptionState WriteAfterCommit() const override {
    return write_after_commit_ ? Handler::OptionState::kEnabled
                               : Handler::OptionState::kDisabled;
  }

 private:
  std::stringstream& row_;
  bool print_values_;
  bool write_after_commit_;
};

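// DumpWalFile reads a WAL file record by record and prints one CSV-style row
// per write batch: sequence, count, byte size, physical offset, then the
// operations decoded by InMemoryHandler. Errors are reported through
// *exec_state when it is provided, otherwise on stderr.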
void DumpWalFile(Options options, std::string wal_file, bool print_header,
                 bool print_values, bool is_write_committed,
                 LDBCommandExecuteResult* exec_state) {
  const auto& fs = options.env->GetFileSystem();
  FileOptions soptions(options);
  std::unique_ptr<SequentialFileReader> wal_file_reader;
  Status status = SequentialFileReader::Create(
      fs, wal_file, soptions, &wal_file_reader, nullptr /* dbg */,
      nullptr /* rate_limiter */);
  if (!status.ok()) {
    if (exec_state) {
      *exec_state = LDBCommandExecuteResult::Failed("Failed to open WAL file " +
                                                    status.ToString());
    } else {
      std::cerr << "Error: Failed to open WAL file " << status.ToString()
                << std::endl;
    }
  } else {
    StdErrReporter reporter;
    uint64_t log_number;
    FileType type;

    // we need the log number, but ParseFileName expects dbname/NNN.log.
    std::string sanitized = wal_file;
    size_t lastslash = sanitized.rfind('/');
    if (lastslash != std::string::npos)
      sanitized = sanitized.substr(lastslash + 1);
    if (!ParseFileName(sanitized, &log_number, &type)) {
      // bogus input, carry on as best we can
      log_number = 0;
    }
    log::Reader reader(options.info_log, std::move(wal_file_reader), &reporter,
                       true /* checksum */, log_number);
    std::string scratch;
    WriteBatch batch;
    Slice record;
    std::stringstream row;
    if (print_header) {
      std::cout << "Sequence,Count,ByteSize,Physical Offset,Key(s)";
      if (print_values) {
        std::cout << " : value ";
      }
      std::cout << "\n";
    }
    while (status.ok() && reader.ReadRecord(&record, &scratch)) {
      row.str("");
      if (record.size() < WriteBatchInternal::kHeader) {
        reporter.Corruption(record.size(),
                            Status::Corruption("log record too small"));
      } else {
        status = WriteBatchInternal::SetContents(&batch, record);
        if (!status.ok()) {
          std::stringstream oss;
          oss << "Parsing write batch failed: " << status.ToString();
          if (exec_state) {
            *exec_state = LDBCommandExecuteResult::Failed(oss.str());
          } else {
            std::cerr << oss.str() << std::endl;
          }
          break;
        }
        row << WriteBatchInternal::Sequence(&batch) << ",";
        row << WriteBatchInternal::Count(&batch) << ",";
        row << WriteBatchInternal::ByteSize(&batch) << ",";
        row << reader.LastRecordOffset() << ",";
        InMemoryHandler handler(row, print_values, is_write_committed);
        status = batch.Iterate(&handler);
        if (!status.ok()) {
          if (exec_state) {
            std::stringstream oss;
            oss << "Print write batch error: " << status.ToString();
            *exec_state = LDBCommandExecuteResult::Failed(oss.str());
          }
          row << "error: " << status.ToString();
          break;
        }
        row << "\n";
      }
      std::cout << row.str();
    }
  }
}

}  // namespace

const std::string WALDumperCommand::ARG_WAL_FILE = "walfile";
const std::string WALDumperCommand::ARG_WRITE_COMMITTED = "write_committed";
const std::string WALDumperCommand::ARG_PRINT_VALUE = "print_value";
const std::string WALDumperCommand::ARG_PRINT_HEADER = "header";

WALDumperCommand::WALDumperCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, true,
                 BuildCmdLineOptions({ARG_WAL_FILE, ARG_WRITE_COMMITTED,
                                      ARG_PRINT_HEADER, ARG_PRINT_VALUE})),
      print_header_(false),
      print_values_(false),
      is_write_committed_(false) {
  wal_file_.clear();

  auto itr = options.find(ARG_WAL_FILE);
  if (itr != options.end()) {
    wal_file_ = itr->second;
  }

  print_header_ = IsFlagPresent(flags, ARG_PRINT_HEADER);
  print_values_ = IsFlagPresent(flags, ARG_PRINT_VALUE);
  is_write_committed_ = ParseBooleanOption(options, ARG_WRITE_COMMITTED, true);

  if (wal_file_.empty()) {
    exec_state_ = LDBCommandExecuteResult::Failed("Argument " + ARG_WAL_FILE +
                                                  " must be specified.");
  }
}

void WALDumperCommand::Help(std::string& ret) {
  ret.append(" ");
  ret.append(WALDumperCommand::Name());
  ret.append(" --" + ARG_WAL_FILE + "=<write_ahead_log_file_path>");
  ret.append(" [--" + ARG_PRINT_HEADER + "] ");
  ret.append(" [--" + ARG_PRINT_VALUE + "] ");
  ret.append(" [--" + ARG_WRITE_COMMITTED + "=true|false] ");
  ret.append("\n");
}

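// Example invocation (from the original change description):
//   ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
// which prints a "Sequence,Count,ByteSize" style row for every WAL record.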
void WALDumperCommand::DoCommand() {
  PrepareOptions();
  DumpWalFile(options_, wal_file_, print_header_, print_values_,
              is_write_committed_, &exec_state_);
}

// ----------------------------------------------------------------------------

GetCommand::GetCommand(const std::vector<std::string>& params,
                       const std::map<std::string, std::string>& options,
                       const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, true,
          BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
  if (params.size() != 1) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "<key> must be specified for the get command");
  } else {
    key_ = params.at(0);
  }

  if (is_key_hex_) {
    key_ = HexToString(key_);
  }
}

void GetCommand::Help(std::string& ret) {
  ret.append(" ");
  ret.append(GetCommand::Name());
  ret.append(" <key>");
  ret.append(" [--" + ARG_TTL + "]");
  ret.append("\n");
}

void GetCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  std::string value;
  Status st = db_->Get(ReadOptions(), GetCfHandle(), key_, &value);
  if (st.ok()) {
    fprintf(stdout, "%s\n",
            (is_value_hex_ ? StringToHex(value) : value).c_str());
  } else {
    std::stringstream oss;
    oss << "Get failed: " << st.ToString();
    exec_state_ = LDBCommandExecuteResult::Failed(oss.str());
  }
}

// ----------------------------------------------------------------------------

GetEntityCommand::GetEntityCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, true,
          BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
  if (params.size() != 1) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "<key> must be specified for the get_entity command");
  } else {
    key_ = params.at(0);
  }

  if (is_key_hex_) {
    key_ = HexToString(key_);
  }
}

void GetEntityCommand::Help(std::string& ret) {
  ret.append(" ");
  ret.append(GetEntityCommand::Name());
  ret.append(" <key>");
  ret.append(" [--" + ARG_TTL + "]");
  ret.append("\n");
}

void GetEntityCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  PinnableWideColumns pinnable_wide_columns;
  Status st = db_->GetEntity(ReadOptions(), GetCfHandle(), key_,
                             &pinnable_wide_columns);
  if (st.ok()) {
    std::ostringstream oss;
    WideColumnsHelper::DumpWideColumns(pinnable_wide_columns.columns(), oss,
                                       is_value_hex_);
    fprintf(stdout, "%s\n", oss.str().c_str());
  } else {
    std::stringstream oss;
    oss << "GetEntity failed: " << st.ToString();
    exec_state_ = LDBCommandExecuteResult::Failed(oss.str());
  }
}

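// Judging by the sample output in the wide-column change description, the
// columns print as space-separated name:value pairs, with the default
// (anonymous) column rendered as ":<value>".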
// ----------------------------------------------------------------------------

ApproxSizeCommand::ApproxSizeCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, true,
                 BuildCmdLineOptions(
                     {ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM, ARG_TO})) {
  if (options.find(ARG_FROM) != options.end()) {
    start_key_ = options.find(ARG_FROM)->second;
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(
        ARG_FROM + " must be specified for approxsize command");
    return;
  }

  if (options.find(ARG_TO) != options.end()) {
    end_key_ = options.find(ARG_TO)->second;
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(
        ARG_TO + " must be specified for approxsize command");
    return;
  }

  if (is_key_hex_) {
    start_key_ = HexToString(start_key_);
    end_key_ = HexToString(end_key_);
  }
}

void ApproxSizeCommand::Help(std::string& ret) {
  ret.append(" ");
  ret.append(ApproxSizeCommand::Name());
  ret.append(HelpRangeCmdArgs());
  ret.append("\n");
}

void ApproxSizeCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  Range ranges[1];
  ranges[0] = Range(start_key_, end_key_);
  uint64_t sizes[1];
  Status s = db_->GetApproximateSizes(GetCfHandle(), ranges, 1, sizes);
  if (!s.ok()) {
    std::stringstream oss;
    oss << "ApproximateSize failed: " << s.ToString();
    exec_state_ = LDBCommandExecuteResult::Failed(oss.str());
  } else {
    fprintf(stdout, "%lu\n", (unsigned long)sizes[0]);
  }
}

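// Note: GetApproximateSizes() produces an estimate based on file metadata,
// not an exact byte count, so the printed size is approximate by design.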
// ----------------------------------------------------------------------------

BatchPutCommand::BatchPutCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false,
                 BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX,
                                      ARG_VALUE_HEX, ARG_CREATE_IF_MISSING})) {
  if (params.size() < 2) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "At least one <key> <value> pair must be specified for batchput.");
  } else if (params.size() % 2 != 0) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "Equal number of <key>s and <value>s must be specified for batchput.");
  } else {
    for (size_t i = 0; i < params.size(); i += 2) {
      std::string key = params.at(i);
      std::string value = params.at(i + 1);
      key_values_.push_back(std::pair<std::string, std::string>(
          is_key_hex_ ? HexToString(key) : key,
          is_value_hex_ ? HexToString(value) : value));
    }
  }
  create_if_missing_ = IsFlagPresent(flags_, ARG_CREATE_IF_MISSING);
}

void BatchPutCommand::Help(std::string& ret) {
  ret.append(" ");
  ret.append(BatchPutCommand::Name());
  ret.append(" <key> <value> [<key> <value>] [..]");
  ret.append(" [--" + ARG_CREATE_IF_MISSING + "]");
  ret.append(" [--" + ARG_TTL + "]");
  ret.append("\n");
}

void BatchPutCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  WriteBatch batch;

  Status st;
  std::stringstream oss;
  for (std::vector<std::pair<std::string, std::string>>::const_iterator itr =
           key_values_.begin();
       itr != key_values_.end(); ++itr) {
    st = batch.Put(GetCfHandle(), itr->first, itr->second);
    if (!st.ok()) {
      oss << "Put to write batch failed: " << itr->first << "=>" << itr->second
          << " error: " << st.ToString();
      break;
    }
  }
  if (st.ok()) {
    st = db_->Write(WriteOptions(), &batch);
    if (!st.ok()) {
      oss << "Write failed: " << st.ToString();
    }
  }
  if (st.ok()) {
    fprintf(stdout, "OK\n");
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(oss.str());
  }
}

void BatchPutCommand::OverrideBaseOptions() {
  LDBCommand::OverrideBaseOptions();
  options_.create_if_missing = create_if_missing_;
}

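// Example invocation (hypothetical keys and values):
//   ldb batchput k1 v1 k2 v2
// stages all pairs in one WriteBatch and commits them with a single atomic
// Write() call.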
// ----------------------------------------------------------------------------

ScanCommand::ScanCommand(const std::vector<std::string>& /*params*/,
                         const std::map<std::string, std::string>& options,
                         const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, true,
          BuildCmdLineOptions({ARG_TTL, ARG_NO_VALUE, ARG_HEX, ARG_KEY_HEX,
                               ARG_TO, ARG_VALUE_HEX, ARG_FROM, ARG_TIMESTAMP,
                               ARG_MAX_KEYS, ARG_TTL_START, ARG_TTL_END})),
      start_key_specified_(false),
      end_key_specified_(false),
      max_keys_scanned_(-1),
      no_value_(false) {
  auto itr = options.find(ARG_FROM);
  if (itr != options.end()) {
    start_key_ = itr->second;
    if (is_key_hex_) {
      start_key_ = HexToString(start_key_);
    }
    start_key_specified_ = true;
  }
  itr = options.find(ARG_TO);
  if (itr != options.end()) {
    end_key_ = itr->second;
    if (is_key_hex_) {
      end_key_ = HexToString(end_key_);
    }
    end_key_specified_ = true;
  }

  std::vector<std::string>::const_iterator vitr =
      std::find(flags.begin(), flags.end(), ARG_NO_VALUE);
  if (vitr != flags.end()) {
    no_value_ = true;
  }

  itr = options.find(ARG_MAX_KEYS);
  if (itr != options.end()) {
    try {
#if defined(CYGWIN)
      max_keys_scanned_ = strtol(itr->second.c_str(), 0, 10);
#else
      max_keys_scanned_ = std::stoi(itr->second);
#endif
    } catch (const std::invalid_argument&) {
      exec_state_ = LDBCommandExecuteResult::Failed(ARG_MAX_KEYS +
                                                    " has an invalid value");
    } catch (const std::out_of_range&) {
      exec_state_ = LDBCommandExecuteResult::Failed(
          ARG_MAX_KEYS + " has a value out-of-range");
    }
  }
}

void ScanCommand::Help(std::string& ret) {
  ret.append(" ");
  ret.append(ScanCommand::Name());
  ret.append(HelpRangeCmdArgs());
  ret.append(" [--" + ARG_TTL + "]");
  ret.append(" [--" + ARG_TIMESTAMP + "]");
  ret.append(" [--" + ARG_MAX_KEYS + "=<N>] ");
  ret.append(" [--" + ARG_TTL_START + "=<N>:- is inclusive]");
  ret.append(" [--" + ARG_TTL_END + "=<N>:- is exclusive]");
  ret.append(" [--" + ARG_NO_VALUE + "]");
  ret.append("\n");
}

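// The TTL window below treats --ttl_start as inclusive and --ttl_end as
// exclusive, which matches the rawtime < ttl_start || rawtime >= ttl_end
// filter in the scan loop.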
void ScanCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }

  int num_keys_scanned = 0;
  ReadOptions scan_read_opts;
  scan_read_opts.total_order_seek = true;
  Iterator* it = db_->NewIterator(scan_read_opts, GetCfHandle());
  if (start_key_specified_) {
    it->Seek(start_key_);
  } else {
    it->SeekToFirst();
  }
  int ttl_start;
  if (!ParseIntOption(option_map_, ARG_TTL_START, ttl_start, exec_state_)) {
    ttl_start = DBWithTTLImpl::kMinTimestamp;  // TTL introduction time
  }
  int ttl_end;
  if (!ParseIntOption(option_map_, ARG_TTL_END, ttl_end, exec_state_)) {
    ttl_end = DBWithTTLImpl::kMaxTimestamp;  // Max time allowed by TTL feature
  }
  if (ttl_end < ttl_start) {
    fprintf(stderr, "Error: End time can't be less than start time\n");
    delete it;
    return;
  }
  if (is_db_ttl_ && timestamp_) {
    fprintf(stdout, "Scanning key-values from %s to %s\n",
            TimeToHumanString(ttl_start).c_str(),
            TimeToHumanString(ttl_end).c_str());
  }
  for (;
       it->Valid() && (!end_key_specified_ || it->key().ToString() < end_key_);
       it->Next()) {
    if (is_db_ttl_) {
      TtlIterator* it_ttl = static_cast_with_check<TtlIterator>(it);
      int rawtime = it_ttl->ttl_timestamp();
      if (rawtime < ttl_start || rawtime >= ttl_end) {
        continue;
      }
      if (timestamp_) {
        fprintf(stdout, "%s ", TimeToHumanString(rawtime).c_str());
      }
    }

    if (no_value_) {
      std::string key_str = it->key().ToString();
      if (is_key_hex_) {
        key_str = StringToHex(key_str);
      } else if (ldb_options_.key_formatter) {
        key_str = ldb_options_.key_formatter->Format(key_str);
      }
      fprintf(stdout, "%s\n", key_str.c_str());
    } else {
      std::string str = is_db_ttl_ ? PrintKeyValue(it->key().ToString(),
                                                   it->value().ToString(),
                                                   is_key_hex_, is_value_hex_)
                                   : PrintKeyValueOrWideColumns(
                                         it->key(), it->value(), it->columns(),
                                         is_key_hex_, is_value_hex_);
      fprintf(stdout, "%s\n", str.c_str());
    }

    num_keys_scanned++;
    if (max_keys_scanned_ >= 0 && num_keys_scanned >= max_keys_scanned_) {
      break;
    }
  }
  if (!it->status().ok()) {  // Check for any errors found during the scan
    exec_state_ = LDBCommandExecuteResult::Failed(it->status().ToString());
  }
  delete it;
}

// ----------------------------------------------------------------------------

2016-05-20 14:42:18 +00:00
|
|
|
DeleteCommand::DeleteCommand(const std::vector<std::string>& params,
|
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags)
|
|
|
|
: LDBCommand(options, flags, false,
|
|
|
|
BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
|
2013-01-11 19:09:23 +00:00
|
|
|
if (params.size() != 1) {
|
2015-03-17 01:08:59 +00:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 03:52:32 +00:00
|
|
|
"KEY must be specified for the delete command");
|
2013-01-11 19:09:23 +00:00
|
|
|
} else {
|
|
|
|
key_ = params.at(0);
|
|
|
|
if (is_key_hex_) {
|
|
|
|
key_ = HexToString(key_);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-20 14:42:18 +00:00
|
|
|
void DeleteCommand::Help(std::string& ret) {
|
2014-11-01 02:22:49 +00:00
|
|
|
ret.append(" ");
|
|
|
|
ret.append(DeleteCommand::Name() + " <key>");
|
|
|
|
ret.append("\n");
|
2013-01-11 19:09:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void DeleteCommand::DoCommand() {
|
2016-01-22 23:46:32 +00:00
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
Status st = db_->Delete(WriteOptions(), GetCfHandle(), key_);
|
2013-01-11 19:09:23 +00:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
2015-03-17 01:08:59 +00:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2013-01-11 19:09:23 +00:00
|
|
|
}
|
|
|
|
}
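
// Note on semantics (background, not from the surrounding code): unlike
// Delete, SingleDelete assumes the key was Put at most once since any prior
// delete and has not been overwritten or merged; violating that assumption
// gives undefined results. It lets the tombstone collapse earlier during
// compaction.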

SingleDeleteCommand::SingleDeleteCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false,
                 BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
  if (params.size() != 1) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "KEY must be specified for the single delete command");
  } else {
    key_ = params.at(0);
    if (is_key_hex_) {
      key_ = HexToString(key_);
    }
  }
}

void SingleDeleteCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(SingleDeleteCommand::Name() + " <key>");
  ret.append("\n");
}

void SingleDeleteCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  Status st = db_->SingleDelete(WriteOptions(), GetCfHandle(), key_);
  if (st.ok()) {
    fprintf(stdout, "OK\n");
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
  }
}
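
// Note: DeleteRange removes keys in the half-open interval
// [begin_key, end_key); the end key itself is not deleted.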

DeleteRangeCommand::DeleteRangeCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false,
                 BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
  if (params.size() != 2) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "begin and end keys must be specified for the delete_range command");
  } else {
    begin_key_ = params.at(0);
    end_key_ = params.at(1);
    if (is_key_hex_) {
      begin_key_ = HexToString(begin_key_);
      end_key_ = HexToString(end_key_);
    }
  }
}

void DeleteRangeCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(DeleteRangeCommand::Name() + " <begin key> <end key>");
  ret.append("\n");
}

void DeleteRangeCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  Status st =
      db_->DeleteRange(WriteOptions(), GetCfHandle(), begin_key_, end_key_);
  if (st.ok()) {
    fprintf(stdout, "OK\n");
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
  }
}

PutCommand::PutCommand(const std::vector<std::string>& params,
                       const std::map<std::string, std::string>& options,
                       const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false,
                 BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX,
                                      ARG_VALUE_HEX, ARG_CREATE_IF_MISSING})) {
  if (params.size() != 2) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "<key> and <value> must be specified for the put command");
  } else {
    key_ = params.at(0);
    value_ = params.at(1);
  }

  if (is_key_hex_) {
    key_ = HexToString(key_);
  }

  if (is_value_hex_) {
    value_ = HexToString(value_);
  }
  create_if_missing_ = IsFlagPresent(flags_, ARG_CREATE_IF_MISSING);
}

void PutCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(PutCommand::Name());
  ret.append(" <key> <value>");
  ret.append(" [--" + ARG_CREATE_IF_MISSING + "]");
  ret.append(" [--" + ARG_TTL + "]");
  ret.append("\n");
}

void PutCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  Status st = db_->Put(WriteOptions(), GetCfHandle(), key_, value_);
  if (st.ok()) {
    fprintf(stdout, "OK\n");
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
  }
}

void PutCommand::OverrideBaseOptions() {
  LDBCommand::OverrideBaseOptions();
  options_.create_if_missing = create_if_missing_;
}

// ----------------------------------------------------------------------------

PutEntityCommand::PutEntityCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false,
                 BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX,
                                      ARG_VALUE_HEX, ARG_CREATE_IF_MISSING})) {
  if (params.size() < 2) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "<key> and at least one column <column_name>:<column_value> must be "
        "specified for the put_entity command");
  } else {
    auto iter = params.begin();
    key_ = *iter;
    if (is_key_hex_) {
      key_ = HexToString(key_);
    }
    for (++iter; iter != params.end(); ++iter) {
      auto split = StringSplit(*iter, ':');
      if (split.size() != 2) {
        exec_state_ = LDBCommandExecuteResult::Failed(
            "wide column format needs to be <column_name>:<column_value> (did "
            "you mean put <key> <value>?)");
        return;
      }
      std::string name(split[0]);
      std::string value(split[1]);
      if (is_value_hex_) {
        name = HexToString(name);
        value = HexToString(value);
      }
      column_names_.push_back(name);
      column_values_.push_back(value);
    }
  }
  create_if_missing_ = IsFlagPresent(flags_, ARG_CREATE_IF_MISSING);
}

void PutEntityCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(PutEntityCommand::Name());
  ret.append(
      " <key> <column1_name>:<column1_value> <column2_name>:<column2_value> "
      "<...>");
  ret.append(" [--" + ARG_CREATE_IF_MISSING + "]");
  ret.append(" [--" + ARG_TTL + "]");
  ret.append("\n");
}

void PutEntityCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  assert(column_names_.size() == column_values_.size());
  WideColumns columns;
  for (size_t i = 0; i < column_names_.size(); i++) {
    WideColumn column(column_names_[i], column_values_[i]);
    columns.emplace_back(column);
  }
  Status st = db_->PutEntity(WriteOptions(), GetCfHandle(), key_, columns);
  if (st.ok()) {
    fprintf(stdout, "OK\n");
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
  }
}

void PutEntityCommand::OverrideBaseOptions() {
  LDBCommand::OverrideBaseOptions();
  options_.create_if_missing = create_if_missing_;
}
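
// Illustrative invocation (path, key, and column values are placeholders):
//   ldb --db=/path/to/db put_entity mykey col1:val1 col2:val2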

// ----------------------------------------------------------------------------

const char* DBQuerierCommand::HELP_CMD = "help";
const char* DBQuerierCommand::GET_CMD = "get";
const char* DBQuerierCommand::PUT_CMD = "put";
const char* DBQuerierCommand::DELETE_CMD = "delete";

DBQuerierCommand::DBQuerierCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, false,
          BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
}

void DBQuerierCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(DBQuerierCommand::Name());
  ret.append(" [--" + ARG_TTL + "]");
  ret.append("\n");
  ret.append(
      "    Starts a REPL shell. Type help for list of available "
      "commands.");
  ret.append("\n");
}

void DBQuerierCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }

  ReadOptions read_options;
  WriteOptions write_options;

  std::string line;
  std::string key;
  std::string value;
  Status s;
  std::stringstream oss;
  while (s.ok() && getline(std::cin, line, '\n')) {
    // Parse line into std::vector<std::string>
    std::vector<std::string> tokens;
    size_t pos = 0;
    while (true) {
      size_t pos2 = line.find(' ', pos);
      if (pos2 == std::string::npos) {
        break;
      }
      tokens.push_back(line.substr(pos, pos2 - pos));
      pos = pos2 + 1;
    }
    tokens.push_back(line.substr(pos));

    const std::string& cmd = tokens[0];

    if (cmd == HELP_CMD) {
      fprintf(stdout,
              "get <key>\n"
              "put <key> <value>\n"
              "delete <key>\n");
    } else if (cmd == DELETE_CMD && tokens.size() == 2) {
      key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
      s = db_->Delete(write_options, GetCfHandle(), Slice(key));
      if (s.ok()) {
        fprintf(stdout, "Successfully deleted %s\n", tokens[1].c_str());
      } else {
        oss << "delete " << key << " failed: " << s.ToString();
      }
    } else if (cmd == PUT_CMD && tokens.size() == 3) {
      key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
      value = (is_value_hex_ ? HexToString(tokens[2]) : tokens[2]);
      s = db_->Put(write_options, GetCfHandle(), Slice(key), Slice(value));
      if (s.ok()) {
        fprintf(stdout, "Successfully put %s %s\n", tokens[1].c_str(),
                tokens[2].c_str());
      } else {
        oss << "put " << key << "=>" << value << " failed: " << s.ToString();
      }
    } else if (cmd == GET_CMD && tokens.size() == 2) {
      key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
      s = db_->Get(read_options, GetCfHandle(), Slice(key), &value);
      if (s.ok()) {
        fprintf(stdout, "%s\n",
                PrintKeyValue(key, value, is_key_hex_, is_value_hex_).c_str());
      } else {
        if (s.IsNotFound()) {
          fprintf(stdout, "Not found %s\n", tokens[1].c_str());
          // A missing key is not a REPL failure; clear the status so the
          // loop keeps reading commands.
          s = Status::OK();
        } else {
          oss << "get " << key << " error: " << s.ToString();
        }
      }
    } else {
      fprintf(stdout, "Unknown command %s\n", line.c_str());
    }
  }
  if (!s.ok()) {
    exec_state_ = LDBCommandExecuteResult::Failed(oss.str());
  }
}
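
// Illustrative session (path and values are placeholders):
//   $ ldb --db=/path/to/db query
//   put k1 v1
//   Successfully put k1 v1
//   get k1
//   k1 ==> v1
//   delete k1
//   Successfully deleted k1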

// ----------------------------------------------------------------------------

CheckConsistencyCommand::CheckConsistencyCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, true, BuildCmdLineOptions({})) {}

void CheckConsistencyCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(CheckConsistencyCommand::Name());
  ret.append("\n");
}

void CheckConsistencyCommand::DoCommand() {
  options_.paranoid_checks = true;
  options_.num_levels = 64;
  OpenDB();
  if (exec_state_.IsSucceed() || exec_state_.IsNotStarted()) {
    fprintf(stdout, "OK\n");
  }
  CloseDB();
}
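
// The check works by re-opening the DB with paranoid_checks enabled: if the
// manifest disagrees with the files on disk, OpenDB() fails and exec_state_
// records the error instead of printing OK. num_levels is set high enough to
// accommodate any LSM shape.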

// ----------------------------------------------------------------------------

const std::string CheckPointCommand::ARG_CHECKPOINT_DIR = "checkpoint_dir";

CheckPointCommand::CheckPointCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false /* is_read_only */,
                 BuildCmdLineOptions({ARG_CHECKPOINT_DIR})) {
  auto itr = options.find(ARG_CHECKPOINT_DIR);
  if (itr != options.end()) {
    checkpoint_dir_ = itr->second;
  }
}

void CheckPointCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(CheckPointCommand::Name());
  ret.append(" [--" + ARG_CHECKPOINT_DIR + "] ");
  ret.append("\n");
}

void CheckPointCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  Checkpoint* checkpoint;
  Status status = Checkpoint::Create(db_, &checkpoint);
  if (!status.ok()) {
    exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
    return;
  }
  status = checkpoint->CreateCheckpoint(checkpoint_dir_);
  delete checkpoint;  // Checkpoint::Create hands ownership to the caller.
  if (status.ok()) {
    fprintf(stdout, "OK\n");
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
  }
}

// ----------------------------------------------------------------------------

const std::string RepairCommand::ARG_VERBOSE = "verbose";

RepairCommand::RepairCommand(const std::vector<std::string>& /*params*/,
                             const std::map<std::string, std::string>& options,
                             const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false, BuildCmdLineOptions({ARG_VERBOSE})) {
  verbose_ = IsFlagPresent(flags, ARG_VERBOSE);
}

void RepairCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(RepairCommand::Name());
  ret.append(" [--" + ARG_VERBOSE + "]");
  ret.append("\n");
}

void RepairCommand::OverrideBaseOptions() {
  LDBCommand::OverrideBaseOptions();
  auto level = verbose_ ? InfoLogLevel::INFO_LEVEL : InfoLogLevel::WARN_LEVEL;
  options_.info_log.reset(new StderrLogger(level));
}

void RepairCommand::DoCommand() {
  PrepareOptions();
  Status status = RepairDB(db_path_, options_);
  if (status.ok()) {
    fprintf(stdout, "OK\n");
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
  }
}
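
// Background note: RepairDB scans the data directory, salvages whatever SST
// and WAL contents it can parse, and writes a fresh manifest. Records deleted
// under the old manifest may reappear, so repair is a last resort.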

// ----------------------------------------------------------------------------

const std::string BackupEngineCommand::ARG_NUM_THREADS = "num_threads";
const std::string BackupEngineCommand::ARG_BACKUP_ENV_URI = "backup_env_uri";
const std::string BackupEngineCommand::ARG_BACKUP_FS_URI = "backup_fs_uri";
const std::string BackupEngineCommand::ARG_BACKUP_DIR = "backup_dir";
const std::string BackupEngineCommand::ARG_STDERR_LOG_LEVEL =
    "stderr_log_level";

BackupEngineCommand::BackupEngineCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false /* is_read_only */,
                 BuildCmdLineOptions({ARG_BACKUP_ENV_URI, ARG_BACKUP_FS_URI,
                                      ARG_BACKUP_DIR, ARG_NUM_THREADS,
                                      ARG_STDERR_LOG_LEVEL})),
      num_threads_(1) {
  auto itr = options.find(ARG_NUM_THREADS);
  if (itr != options.end()) {
    num_threads_ = std::stoi(itr->second);
  }
  itr = options.find(ARG_BACKUP_ENV_URI);
  if (itr != options.end()) {
    backup_env_uri_ = itr->second;
  }
  itr = options.find(ARG_BACKUP_FS_URI);
  if (itr != options.end()) {
    backup_fs_uri_ = itr->second;
  }
  if (!backup_env_uri_.empty() && !backup_fs_uri_.empty()) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "you may not specify both --" + ARG_BACKUP_ENV_URI + " and --" +
        ARG_BACKUP_FS_URI);
  }
  itr = options.find(ARG_BACKUP_DIR);
  if (itr == options.end()) {
    exec_state_ = LDBCommandExecuteResult::Failed("--" + ARG_BACKUP_DIR +
                                                  ": missing backup directory");
  } else {
    backup_dir_ = itr->second;
  }

  itr = options.find(ARG_STDERR_LOG_LEVEL);
  if (itr != options.end()) {
    int stderr_log_level = std::stoi(itr->second);
    if (stderr_log_level < 0 ||
        stderr_log_level >= InfoLogLevel::NUM_INFO_LOG_LEVELS) {
      exec_state_ = LDBCommandExecuteResult::Failed(
          ARG_STDERR_LOG_LEVEL + " must be >= 0 and < " +
          std::to_string(InfoLogLevel::NUM_INFO_LOG_LEVELS) + ".");
    } else {
      logger_.reset(
          new StderrLogger(static_cast<InfoLogLevel>(stderr_log_level)));
    }
  }
}

void BackupEngineCommand::Help(const std::string& name, std::string& ret) {
  ret.append("  ");
  ret.append(name);
  ret.append(" [--" + ARG_BACKUP_ENV_URI + " | --" + ARG_BACKUP_FS_URI + "] ");
  ret.append(" [--" + ARG_BACKUP_DIR + "] ");
  ret.append(" [--" + ARG_NUM_THREADS + "] ");
  ret.append(" [--" + ARG_STDERR_LOG_LEVEL + "=<int (InfoLogLevel)>] ");
  ret.append("\n");
}

// ----------------------------------------------------------------------------

BackupCommand::BackupCommand(const std::vector<std::string>& params,
                             const std::map<std::string, std::string>& options,
                             const std::vector<std::string>& flags)
    : BackupEngineCommand(params, options, flags) {}

void BackupCommand::Help(std::string& ret) {
  BackupEngineCommand::Help(Name(), ret);
}

void BackupCommand::DoCommand() {
  BackupEngine* backup_engine;
  Status status;
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  fprintf(stdout, "open db OK\n");

  Env* custom_env = backup_env_guard_.get();
  if (custom_env == nullptr) {
    Status s =
        Env::CreateFromUri(config_options_, backup_env_uri_, backup_fs_uri_,
                           &custom_env, &backup_env_guard_);
    if (!s.ok()) {
      exec_state_ = LDBCommandExecuteResult::Failed(s.ToString());
      return;
    }
  }
  assert(custom_env != nullptr);

  BackupEngineOptions backup_options =
      BackupEngineOptions(backup_dir_, custom_env);
  backup_options.info_log = logger_.get();
  backup_options.max_background_operations = num_threads_;
  status = BackupEngine::Open(options_.env, backup_options, &backup_engine);
  if (status.ok()) {
    fprintf(stdout, "open backup engine OK\n");
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
    return;
  }
  status = backup_engine->CreateNewBackup(db_);
  if (status.ok()) {
    fprintf(stdout, "create new backup OK\n");
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
    return;
  }
}
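
// Illustrative invocations (paths are placeholders):
//   ldb --db=/path/to/db backup --backup_dir=/path/to/backups
//   ldb --db=/path/to/restored_db restore --backup_dir=/path/to/backups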

// ----------------------------------------------------------------------------

RestoreCommand::RestoreCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : BackupEngineCommand(params, options, flags) {}

void RestoreCommand::Help(std::string& ret) {
  BackupEngineCommand::Help(Name(), ret);
}

void RestoreCommand::DoCommand() {
  Env* custom_env = backup_env_guard_.get();
  if (custom_env == nullptr) {
    Status s =
        Env::CreateFromUri(config_options_, backup_env_uri_, backup_fs_uri_,
                           &custom_env, &backup_env_guard_);
    if (!s.ok()) {
      exec_state_ = LDBCommandExecuteResult::Failed(s.ToString());
      return;
    }
  }
  assert(custom_env != nullptr);

  std::unique_ptr<BackupEngineReadOnly> restore_engine;
  Status status;
  {
    BackupEngineOptions opts(backup_dir_, custom_env);
    opts.info_log = logger_.get();
    opts.max_background_operations = num_threads_;
    BackupEngineReadOnly* raw_restore_engine_ptr;
    status =
        BackupEngineReadOnly::Open(options_.env, opts, &raw_restore_engine_ptr);
    if (status.ok()) {
      restore_engine.reset(raw_restore_engine_ptr);
    }
  }
  if (status.ok()) {
    fprintf(stdout, "open restore engine OK\n");
    status = restore_engine->RestoreDBFromLatestBackup(db_path_, db_path_);
  }
  if (status.ok()) {
    fprintf(stdout, "restore from backup OK\n");
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
  }
}

// ----------------------------------------------------------------------------

namespace {

void DumpSstFile(Options options, std::string filename, bool output_hex,
                 bool show_properties, bool decode_blob_index,
                 std::string from_key, std::string to_key) {
  if (filename.length() <= 4 ||
      filename.rfind(".sst") != filename.length() - 4) {
    std::cout << "Invalid sst file name." << std::endl;
    return;
  }
  // no verification
  ROCKSDB_NAMESPACE::SstFileDumper dumper(
      options, filename, Temperature::kUnknown,
      2 * 1024 * 1024 /* readahead_size */,
      /* verify_checksum */ false, output_hex, decode_blob_index);
  Status st = dumper.ReadSequential(true, std::numeric_limits<uint64_t>::max(),
                                    !from_key.empty(), from_key,
                                    !to_key.empty(), to_key);
  if (!st.ok()) {
    std::cerr << "Error in reading SST file " << filename << ": "
              << st.ToString() << std::endl;
    return;
  }

  if (show_properties) {
    const ROCKSDB_NAMESPACE::TableProperties* table_properties;

    std::shared_ptr<const ROCKSDB_NAMESPACE::TableProperties>
        table_properties_from_reader;
    st = dumper.ReadTableProperties(&table_properties_from_reader);
    if (!st.ok()) {
      std::cerr << filename << ": " << st.ToString()
                << ". Falling back to initial table properties" << std::endl;
      table_properties = dumper.GetInitTableProperties();
    } else {
      table_properties = table_properties_from_reader.get();
    }
    if (table_properties != nullptr) {
      std::cout << std::endl << "Table Properties:" << std::endl;
      std::cout << table_properties->ToString("\n") << std::endl;
    }
  }
}
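
// DumpBlobFile below shows each blob either in its on-disk (possibly
// compressed) form or, when dump_uncompressed_blobs is set, in uncompressed
// form, never both: the display type not in use is set to kNone.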

void DumpBlobFile(const std::string& filename, bool is_key_hex,
                  bool is_value_hex, bool dump_uncompressed_blobs) {
  using ROCKSDB_NAMESPACE::blob_db::BlobDumpTool;
  BlobDumpTool tool;
  BlobDumpTool::DisplayType blob_type = is_value_hex
                                            ? BlobDumpTool::DisplayType::kHex
                                            : BlobDumpTool::DisplayType::kRaw;
  BlobDumpTool::DisplayType show_uncompressed_blob =
      dump_uncompressed_blobs ? blob_type : BlobDumpTool::DisplayType::kNone;
  BlobDumpTool::DisplayType show_blob =
      dump_uncompressed_blobs ? BlobDumpTool::DisplayType::kNone : blob_type;

  BlobDumpTool::DisplayType show_key = is_key_hex
                                           ? BlobDumpTool::DisplayType::kHex
                                           : BlobDumpTool::DisplayType::kRaw;
  Status s = tool.Run(filename, show_key, show_blob, show_uncompressed_blob,
                      /* show_summary */ true);
  if (!s.ok()) {
    fprintf(stderr, "Failed: %s\n", s.ToString().c_str());
  }
}
}  // namespace
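
// DBFileDumperCommand ("dump_live_files") walks the open DB: the manifest
// first, then every live SST and blob file per column family, then the WALs.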

DBFileDumperCommand::DBFileDumperCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, true,
                 BuildCmdLineOptions(
                     {ARG_DECODE_BLOB_INDEX, ARG_DUMP_UNCOMPRESSED_BLOBS})),
      decode_blob_index_(IsFlagPresent(flags, ARG_DECODE_BLOB_INDEX)),
      dump_uncompressed_blobs_(
          IsFlagPresent(flags, ARG_DUMP_UNCOMPRESSED_BLOBS)) {}

void DBFileDumperCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(DBFileDumperCommand::Name());
  ret.append(" [--" + ARG_DECODE_BLOB_INDEX + "] ");
  ret.append(" [--" + ARG_DUMP_UNCOMPRESSED_BLOBS + "] ");
  ret.append("\n");
}

void DBFileDumperCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  Status s;

  // TODO: Use --hex, --key_hex, --value_hex flags consistently for
  // dumping manifest file, sst files and blob files.
  std::cout << "Manifest File" << std::endl;
  std::cout << "==============================" << std::endl;
  std::string manifest_filename;
  s = ReadFileToString(db_->GetEnv(), CurrentFileName(db_->GetName()),
                       &manifest_filename);
  if (!s.ok() || manifest_filename.empty() ||
      manifest_filename.back() != '\n') {
    std::cerr << "Error when reading CURRENT file "
              << CurrentFileName(db_->GetName()) << std::endl;
    // Without a valid manifest name the resize below would underflow;
    // bail out instead of continuing with a bogus path.
    exec_state_ = LDBCommandExecuteResult::Failed(
        "could not read a valid manifest name from the CURRENT file");
    return;
  }
  // remove the trailing '\n'
  manifest_filename.resize(manifest_filename.size() - 1);
  std::string manifest_filepath = db_->GetName() + "/" + manifest_filename;
  // Correct concatenation of filepath and filename:
  // Check that there are no double slashes (or more!) when concatenation
  // happens.
  manifest_filepath = NormalizePath(manifest_filepath);

  std::cout << manifest_filepath << std::endl;
  DumpManifestFile(options_, manifest_filepath, false, false, false,
                   column_families_);
  std::cout << std::endl;

  std::vector<ColumnFamilyMetaData> column_families;
  db_->GetAllColumnFamilyMetaData(&column_families);
  for (const auto& column_family : column_families) {
    std::cout << "Column family name: " << column_family.name << std::endl;
    std::cout << "==============================" << std::endl;
    std::cout << std::endl;
    std::cout << "SST Files" << std::endl;
    std::cout << "==============================" << std::endl;
    for (const LevelMetaData& level : column_family.levels) {
      for (const SstFileMetaData& sst_file : level.files) {
        std::string filename = sst_file.db_path + "/" + sst_file.name;
        // Correct concatenation of filepath and filename:
        // Check that there are no double slashes (or more!) when
        // concatenation happens.
        filename = NormalizePath(filename);
        std::cout << filename << " level:" << level.level << std::endl;
        std::cout << "------------------------------" << std::endl;
        DumpSstFile(options_, filename, false, true, decode_blob_index_,
                    /* from_key */ "", /* to_key */ "");
        std::cout << std::endl;
      }
    }
    std::cout << "Blob Files" << std::endl;
    std::cout << "==============================" << std::endl;
    for (const BlobMetaData& blob_file : column_family.blob_files) {
      std::string filename =
          blob_file.blob_file_path + "/" + blob_file.blob_file_name;
      // Correct concatenation of filepath and filename:
      // Check that there are no double slashes (or more!) when concatenation
      // happens.
      filename = NormalizePath(filename);
      std::cout << filename << std::endl;
      std::cout << "------------------------------" << std::endl;
      DumpBlobFile(filename, /* is_key_hex */ false, /* is_value_hex */ false,
                   dump_uncompressed_blobs_);
      std::cout << std::endl;
    }
  }
  std::cout << std::endl;

  std::cout << "Write Ahead Log Files" << std::endl;
  std::cout << "==============================" << std::endl;
  ROCKSDB_NAMESPACE::VectorLogPtr wal_files;
  s = db_->GetSortedWalFiles(wal_files);
  if (!s.ok()) {
    std::cerr << "Error when getting WAL files" << std::endl;
  } else {
    std::string wal_dir;
    if (options_.wal_dir.empty()) {
      wal_dir = db_->GetName();
    } else {
      wal_dir = NormalizePath(options_.wal_dir + "/");
    }
    for (auto& wal : wal_files) {
      // TODO(qyang): option.wal_dir should be passed into ldb command
      std::string filename = wal_dir + wal->PathName();
      std::cout << filename << std::endl;
      // TODO(myabandeh): allow configuring is_write_committed
      DumpWalFile(options_, filename, true, true,
                  true /* is_write_committed */, &exec_state_);
    }
  }
}
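
// DBLiveFilesMetadataDumperCommand ("list_live_files_metadata") prints every
// live SST and blob file with its level and column family, grouped per column
// family by default or as one path-sorted list with --sort_by_filename.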

const std::string DBLiveFilesMetadataDumperCommand::ARG_SORT_BY_FILENAME =
    "sort_by_filename";

DBLiveFilesMetadataDumperCommand::DBLiveFilesMetadataDumperCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, true,
                 BuildCmdLineOptions({ARG_SORT_BY_FILENAME})) {
  sort_by_filename_ = IsFlagPresent(flags, ARG_SORT_BY_FILENAME);
}

void DBLiveFilesMetadataDumperCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(DBLiveFilesMetadataDumperCommand::Name());
  ret.append(" [--" + ARG_SORT_BY_FILENAME + "] ");
  ret.append("\n");
}

void DBLiveFilesMetadataDumperCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }

  std::vector<ColumnFamilyMetaData> metadata;
  db_->GetAllColumnFamilyMetaData(&metadata);
  if (sort_by_filename_) {
    std::cout << "Live SST and Blob Files:" << std::endl;
    // tuple of <file path, level, column family name>
    std::vector<std::tuple<std::string, int, std::string>> all_files;

    for (const auto& column_metadata : metadata) {
      // Iterate Levels
      const auto& levels = column_metadata.levels;
      const std::string& cf = column_metadata.name;
      for (const auto& level_metadata : levels) {
        // Iterate SST files
        const auto& sst_files = level_metadata.files;
        int level = level_metadata.level;
        for (const auto& sst_metadata : sst_files) {
          // The SstFileMetaData.name always starts with "/",
          // however SstFileMetaData.db_path is the string provided by
          // the user as an input. Therefore we check if we can
          // concatenate the two strings directly or if we need to
          // drop a possible extra "/" at the end of SstFileMetaData.db_path.
          std::string filename =
              NormalizePath(sst_metadata.db_path + "/" + sst_metadata.name);
          all_files.emplace_back(filename, level, cf);
        }  // End of for-loop over sst files
      }    // End of for-loop over levels

      const auto& blob_files = column_metadata.blob_files;
      for (const auto& blob_metadata : blob_files) {
        // The BlobMetaData.blob_file_name always starts with "/",
        // however BlobMetaData.blob_file_path is the string provided by
        // the user as an input. Therefore we check if we can
        // concatenate the two strings directly or if we need to
        // drop a possible extra "/" at the end of BlobMetaData.blob_file_path.
        std::string filename = NormalizePath(
            blob_metadata.blob_file_path + "/" + blob_metadata.blob_file_name);
        // Level for blob files is encoded as -1
        all_files.emplace_back(filename, -1, cf);
      }  // End of for-loop over blob files
    }    // End of for-loop over column metadata

    // Sort by filename (i.e. first entry in tuple)
    std::sort(all_files.begin(), all_files.end());

    for (const auto& item : all_files) {
      const std::string& filename = std::get<0>(item);
      int level = std::get<1>(item);
      const std::string& cf = std::get<2>(item);
      if (level == -1) {  // Blob File
        std::cout << filename << ", column family '" << cf << "'" << std::endl;
      } else {  // SST file
        std::cout << filename << " : level " << level << ", column family '"
                  << cf << "'" << std::endl;
      }
    }
  } else {
    for (const auto& column_metadata : metadata) {
      std::cout << "===== Column Family: " << column_metadata.name
                << " =====" << std::endl;

      std::cout << "Live SST Files:" << std::endl;
      // Iterate levels
      const auto& levels = column_metadata.levels;
      for (const auto& level_metadata : levels) {
        std::cout << "---------- level " << level_metadata.level
                  << " ----------" << std::endl;
        // Iterate SST files
        const auto& sst_files = level_metadata.files;
        for (const auto& sst_metadata : sst_files) {
          // The SstFileMetaData.name always starts with "/",
          // however SstFileMetaData.db_path is the string provided by
          // the user as an input. Therefore we check if we can
          // concatenate the two strings directly or if we need to
          // drop a possible extra "/" at the end of SstFileMetaData.db_path.
          std::string filename =
              NormalizePath(sst_metadata.db_path + "/" + sst_metadata.name);
          std::cout << filename << std::endl;
        }  // End of for-loop over sst files
      }    // End of for-loop over levels

      std::cout << "Live Blob Files:" << std::endl;
      const auto& blob_files = column_metadata.blob_files;
      for (const auto& blob_metadata : blob_files) {
        // The BlobMetaData.blob_file_name always starts with "/",
        // however BlobMetaData.blob_file_path is the string provided by
        // the user as an input. Therefore we check if we can
        // concatenate the two strings directly or if we need to
        // drop a possible extra "/" at the end of BlobMetaData.blob_file_path.
        std::string filename = NormalizePath(
            blob_metadata.blob_file_path + "/" + blob_metadata.blob_file_name);
        std::cout << filename << std::endl;
      }  // End of for-loop over blob files
    }    // End of for-loop over column metadata
  }      // End of else ("not sort_by_filename")
  std::cout << "------------------------------" << std::endl;
}
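
// Example output with --sort_by_filename (from the PR #8446 description):
//   /tmp/test_db/000064.sst : level 1, column family 'default'
//   /tmp/test_db/000069.sst : level 0, column family 'default'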

void WriteExternalSstFilesCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(WriteExternalSstFilesCommand::Name());
  ret.append(" <output_sst_path>");
  ret.append("\n");
}

WriteExternalSstFilesCommand::WriteExternalSstFilesCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, false /* is_read_only */,
          BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM,
                               ARG_TO, ARG_CREATE_IF_MISSING})) {
  create_if_missing_ =
      IsFlagPresent(flags, ARG_CREATE_IF_MISSING) ||
      ParseBooleanOption(options, ARG_CREATE_IF_MISSING, false);
  if (params.size() != 1) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "output SST file path must be specified");
  } else {
    output_sst_path_ = params.at(0);
  }
}
|
|
|
|
|
|
|
|
void WriteExternalSstFilesCommand::DoCommand() {
|
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
ColumnFamilyHandle* cfh = GetCfHandle();
|
|
|
|
SstFileWriter sst_file_writer(EnvOptions(), db_->GetOptions(), cfh);
|
|
|
|
Status status = sst_file_writer.Open(output_sst_path_);
|
|
|
|
if (!status.ok()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed("failed to open SST file: " +
|
|
|
|
status.ToString());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
int bad_lines = 0;
|
|
|
|
std::string line;
|
|
|
|
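  // Read from /dev/stdin when it can be opened (POSIX); otherwise fall
  // back to std::cin (e.g. on platforms where /dev/stdin does not exist).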
std::ifstream ifs_stdin("/dev/stdin");
|
|
|
|
std::istream* istream_p = ifs_stdin.is_open() ? &ifs_stdin : &std::cin;
|
|
|
|
while (getline(*istream_p, line, '\n')) {
|
|
|
|
std::string key;
|
|
|
|
std::string value;
|
|
|
|
if (ParseKeyValue(line, &key, &value, is_key_hex_, is_value_hex_)) {
|
|
|
|
status = sst_file_writer.Put(key, value);
|
|
|
|
if (!status.ok()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"failed to write record to file: " + status.ToString());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
} else if (0 == line.find("Keys in range:")) {
|
|
|
|
// Ignore the trailing "Keys in range: N" summary line that ldb dump/scan
// emits.
|
|
|
|
} else if (0 == line.find("Created bg thread 0x")) {
|
|
|
|
// Ignore "Created bg thread" chatter that the Env may print to stdout.
|
|
|
|
} else {
|
|
|
|
bad_lines++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
status = sst_file_writer.Finish();
|
|
|
|
if (!status.ok()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"Failed to finish writing to file: " + status.ToString());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bad_lines > 0) {
|
|
|
|
fprintf(stderr, "Warning: %d bad lines ignored.\n", bad_lines);
|
|
|
|
}
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Succeed(
|
|
|
|
"external SST file written to " + output_sst_path_);
|
|
|
|
}
|
|
|
|
|
2020-09-03 23:53:14 +00:00
|
|
|
void WriteExternalSstFilesCommand::OverrideBaseOptions() {
|
|
|
|
LDBCommand::OverrideBaseOptions();
|
|
|
|
options_.create_if_missing = create_if_missing_;
|
2018-08-09 21:18:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
const std::string IngestExternalSstFilesCommand::ARG_MOVE_FILES = "move_files";
|
|
|
|
const std::string IngestExternalSstFilesCommand::ARG_SNAPSHOT_CONSISTENCY =
|
|
|
|
"snapshot_consistency";
|
|
|
|
const std::string IngestExternalSstFilesCommand::ARG_ALLOW_GLOBAL_SEQNO =
|
|
|
|
"allow_global_seqno";
|
|
|
|
const std::string IngestExternalSstFilesCommand::ARG_ALLOW_BLOCKING_FLUSH =
|
|
|
|
"allow_blocking_flush";
|
|
|
|
const std::string IngestExternalSstFilesCommand::ARG_INGEST_BEHIND =
|
|
|
|
"ingest_behind";
|
|
|
|
const std::string IngestExternalSstFilesCommand::ARG_WRITE_GLOBAL_SEQNO =
|
|
|
|
"write_global_seqno";
|
|
|
|
|
|
|
|
void IngestExternalSstFilesCommand::Help(std::string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(IngestExternalSstFilesCommand::Name());
|
|
|
|
ret.append(" <input_sst_path>");
|
|
|
|
ret.append(" [--" + ARG_MOVE_FILES + "] ");
|
|
|
|
ret.append(" [--" + ARG_SNAPSHOT_CONSISTENCY + "] ");
|
|
|
|
ret.append(" [--" + ARG_ALLOW_GLOBAL_SEQNO + "] ");
|
|
|
|
ret.append(" [--" + ARG_ALLOW_BLOCKING_FLUSH + "] ");
|
|
|
|
ret.append(" [--" + ARG_INGEST_BEHIND + "] ");
|
|
|
|
ret.append(" [--" + ARG_WRITE_GLOBAL_SEQNO + "] ");
|
|
|
|
ret.append("\n");
|
|
|
|
}
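// A sketch of a typical offline ingestion, assuming the registered command
// name is "ingest_extern_sst" and with illustrative paths:
//
//   ldb --db=/tmp/test_db ingest_extern_sst /tmp/extern.sst \
//       --move_files --create_if_missing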
|
|
|
|
|
|
|
|
IngestExternalSstFilesCommand::IngestExternalSstFilesCommand(
|
|
|
|
const std::vector<std::string>& params,
|
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags)
|
|
|
|
: LDBCommand(
|
|
|
|
options, flags, false /* is_read_only */,
|
|
|
|
BuildCmdLineOptions({ARG_MOVE_FILES, ARG_SNAPSHOT_CONSISTENCY,
|
|
|
|
ARG_ALLOW_GLOBAL_SEQNO, ARG_CREATE_IF_MISSING,
|
|
|
|
ARG_ALLOW_BLOCKING_FLUSH, ARG_INGEST_BEHIND,
|
|
|
|
ARG_WRITE_GLOBAL_SEQNO})),
|
|
|
|
move_files_(false),
|
|
|
|
snapshot_consistency_(true),
|
|
|
|
allow_global_seqno_(true),
|
|
|
|
allow_blocking_flush_(true),
|
|
|
|
ingest_behind_(false),
|
|
|
|
write_global_seqno_(true) {
|
|
|
|
create_if_missing_ =
|
|
|
|
IsFlagPresent(flags, ARG_CREATE_IF_MISSING) ||
|
|
|
|
ParseBooleanOption(options, ARG_CREATE_IF_MISSING, false);
|
|
|
|
move_files_ = IsFlagPresent(flags, ARG_MOVE_FILES) ||
|
|
|
|
ParseBooleanOption(options, ARG_MOVE_FILES, false);
|
|
|
|
snapshot_consistency_ =
|
|
|
|
IsFlagPresent(flags, ARG_SNAPSHOT_CONSISTENCY) ||
|
|
|
|
ParseBooleanOption(options, ARG_SNAPSHOT_CONSISTENCY, true);
|
|
|
|
allow_global_seqno_ =
|
|
|
|
IsFlagPresent(flags, ARG_ALLOW_GLOBAL_SEQNO) ||
|
|
|
|
ParseBooleanOption(options, ARG_ALLOW_GLOBAL_SEQNO, true);
|
|
|
|
allow_blocking_flush_ =
|
|
|
|
IsFlagPresent(flags, ARG_ALLOW_BLOCKING_FLUSH) ||
|
|
|
|
ParseBooleanOption(options, ARG_ALLOW_BLOCKING_FLUSH, true);
|
|
|
|
ingest_behind_ = IsFlagPresent(flags, ARG_INGEST_BEHIND) ||
|
|
|
|
ParseBooleanOption(options, ARG_INGEST_BEHIND, false);
|
|
|
|
write_global_seqno_ =
|
|
|
|
IsFlagPresent(flags, ARG_WRITE_GLOBAL_SEQNO) ||
|
|
|
|
ParseBooleanOption(options, ARG_WRITE_GLOBAL_SEQNO, true);
|
|
|
|
|
|
|
|
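  // Sanity-check the seqno-related flags: if a global sequence number is
  // allowed, merely warn when it will not be written into the file (older
  // RocksDB versions cannot open such files); if it is not allowed, it is
  // an error to ask for one to be written.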
if (allow_global_seqno_) {
|
|
|
|
if (!write_global_seqno_) {
|
|
|
|
fprintf(stderr,
|
|
|
|
"Warning: not writing global_seqno to the ingested SST can\n"
|
|
|
|
"prevent older versions of RocksDB from being able to open it\n");
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (write_global_seqno_) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"ldb cannot write global_seqno to the ingested SST when global_seqno "
|
|
|
|
"is not allowed");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (params.size() != 1) {
|
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed("input SST path must be specified");
|
|
|
|
} else {
|
|
|
|
input_sst_path_ = params.at(0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void IngestExternalSstFilesCommand::DoCommand() {
|
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (GetExecuteState().IsFailed()) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
ColumnFamilyHandle* cfh = GetCfHandle();
|
|
|
|
IngestExternalFileOptions ifo;
|
|
|
|
ifo.move_files = move_files_;
|
|
|
|
ifo.snapshot_consistency = snapshot_consistency_;
|
|
|
|
ifo.allow_global_seqno = allow_global_seqno_;
|
|
|
|
ifo.allow_blocking_flush = allow_blocking_flush_;
|
|
|
|
ifo.ingest_behind = ingest_behind_;
|
|
|
|
ifo.write_global_seqno = write_global_seqno_;
|
|
|
|
Status status = db_->IngestExternalFile(cfh, {input_sst_path_}, ifo);
|
|
|
|
if (!status.ok()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"failed to ingest external SST: " + status.ToString());
|
|
|
|
} else {
|
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Succeed("external SST files ingested");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-03 23:53:14 +00:00
|
|
|
void IngestExternalSstFilesCommand::OverrideBaseOptions() {
|
|
|
|
LDBCommand::OverrideBaseOptions();
|
|
|
|
options_.create_if_missing = create_if_missing_;
|
2018-08-09 21:18:59 +00:00
|
|
|
}
|
|
|
|
|
2019-08-15 23:59:42 +00:00
|
|
|
ListFileRangeDeletesCommand::ListFileRangeDeletesCommand(
|
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags)
|
|
|
|
: LDBCommand(options, flags, true, BuildCmdLineOptions({ARG_MAX_KEYS})) {
|
2022-02-26 07:13:11 +00:00
|
|
|
auto itr = options.find(ARG_MAX_KEYS);
|
2019-08-15 23:59:42 +00:00
|
|
|
if (itr != options.end()) {
|
|
|
|
try {
|
|
|
|
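      // Some Cygwin toolchains lack std::stoi, so fall back to strtol
      // there.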
#if defined(CYGWIN)
|
|
|
|
max_keys_ = strtol(itr->second.c_str(), nullptr, 10);
|
|
|
|
#else
|
|
|
|
max_keys_ = std::stoi(itr->second);
|
|
|
|
#endif
|
|
|
|
} catch (const std::invalid_argument&) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(ARG_MAX_KEYS +
|
|
|
|
" has an invalid value");
|
|
|
|
} catch (const std::out_of_range&) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
ARG_MAX_KEYS + " has a value out-of-range");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void ListFileRangeDeletesCommand::Help(std::string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(ListFileRangeDeletesCommand::Name());
|
|
|
|
ret.append(" [--" + ARG_MAX_KEYS + "=<N>]");
|
|
|
|
ret.append(" : print tombstones in SST files.\n");
|
|
|
|
}
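// Example invocation, assuming the registered command name is
// "list_file_range_deletes" and with an illustrative path:
//
//   ldb --db=/tmp/test_db list_file_range_deletes --max_keys=100
//
// This prints a per-SST-file summary of the range tombstones the files
// contain, limited by --max_keys.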
|
|
|
|
|
|
|
|
void ListFileRangeDeletesCommand::DoCommand() {
|
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2020-04-29 20:06:27 +00:00
|
|
|
DBImpl* db_impl = static_cast_with_check<DBImpl>(db_->GetRootDB());
|
2019-08-15 23:59:42 +00:00
|
|
|
|
|
|
|
std::string out_str;
|
|
|
|
|
|
|
|
Status st =
|
|
|
|
db_impl->TablesRangeTombstoneSummary(GetCfHandle(), max_keys_, &out_str);
|
|
|
|
if (st.ok()) {
|
|
|
|
TEST_SYNC_POINT_CALLBACK(
|
|
|
|
"ListFileRangeDeletesCommand::DoCommand:BeforePrint", &out_str);
|
|
|
|
fprintf(stdout, "%s\n", out_str.c_str());
|
2020-09-03 23:53:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void UnsafeRemoveSstFileCommand::Help(std::string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(UnsafeRemoveSstFileCommand::Name());
|
|
|
|
ret.append(" <SST file number>");
|
2021-03-18 21:42:00 +00:00
|
|
|
ret.append(" ");
|
2020-09-03 23:53:14 +00:00
|
|
|
ret.append(" MUST NOT be used on a live DB.");
|
|
|
|
ret.append("\n");
|
|
|
|
}
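// UnsafeRemoveSstFileCommand removes the file from the DB's metadata only:
// it replays the MANIFEST offline and appends a VersionEdit that deletes
// the file from the LSM tree, without touching the file's data. Intended
// as a last-resort recovery tool, hence the warning above.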
|
|
|
|
|
|
|
|
UnsafeRemoveSstFileCommand::UnsafeRemoveSstFileCommand(
|
|
|
|
const std::vector<std::string>& params,
|
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags)
|
|
|
|
: LDBCommand(options, flags, false /* is_read_only */,
|
|
|
|
BuildCmdLineOptions({})) {
|
|
|
|
if (params.size() != 1) {
|
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed("SST file number must be specified");
|
2019-08-15 23:59:42 +00:00
|
|
|
} else {
|
2020-09-03 23:53:14 +00:00
|
|
|
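      // strtoull leaves endptr pointing at the first unparsed character;
      // requiring *endptr == '\0' rejects inputs with trailing garbage
      // such as "123abc".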
char* endptr = nullptr;
|
|
|
|
sst_file_number_ = strtoull(params.at(0).c_str(), &endptr, 10 /* base */);
|
|
|
|
if (endptr == nullptr || *endptr != '\0') {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"Failed to parse SST file number " + params.at(0));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void UnsafeRemoveSstFileCommand::DoCommand() {
|
2023-04-21 16:07:18 +00:00
|
|
|
// TODO: plumb Env::IOActivity
|
|
|
|
const ReadOptions read_options;
|
2020-09-03 23:53:14 +00:00
|
|
|
PrepareOptions();
|
|
|
|
|
2022-03-18 23:35:51 +00:00
|
|
|
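  // Replay the existing MANIFEST offline (no live DB is opened) so that a
  // VersionEdit removing the file can be appended to it.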
OfflineManifestWriter w(options_, db_path_);
|
|
|
|
if (column_families_.empty()) {
|
|
|
|
column_families_.emplace_back(kDefaultColumnFamilyName, options_);
|
2020-09-03 23:53:14 +00:00
|
|
|
}
|
2022-03-18 23:35:51 +00:00
|
|
|
Status s = w.Recover(column_families_);
|
2020-09-03 23:53:14 +00:00
|
|
|
|
|
|
|
ColumnFamilyData* cfd = nullptr;
|
|
|
|
int level = -1;
|
|
|
|
if (s.ok()) {
|
|
|
|
FileMetaData* metadata = nullptr;
|
2022-03-18 23:35:51 +00:00
|
|
|
s = w.Versions().GetMetadataForFile(sst_file_number_, &level, &metadata,
|
|
|
|
&cfd);
|
2020-09-03 23:53:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (s.ok()) {
|
|
|
|
VersionEdit edit;
|
|
|
|
edit.SetColumnFamily(cfd->GetID());
|
|
|
|
edit.DeleteFile(level, sst_file_number_);
|
Sync dir containing CURRENT after RenameFile on CURRENT as much as possible (#10573)
Summary:
**Context:**
The crash test below revealed a bug where the directory containing the CURRENT file (`dir_contains_current_file` for short below) was not always synced after a new CURRENT was created via `RenameFile` as part of the creation.
This bug exposes the risk that such an un-synced directory containing the updated CURRENT cannot survive a host crash (e.g., power loss) and hence gets corrupted, which would then be followed by an unwanted recovery from a corrupted CURRENT.
The root cause is that a nullptr `FSDirectory* dir_contains_current_file` sometimes gets passed down to `SetCurrentFile()`, in which case `dir_contains_current_file->FSDirectory::FsyncWithDirOptions()` is skipped (which would otherwise internally call `Env/FS::SyncDir()`).
```
./db_stress --acquire_snapshot_one_in=10000 --adaptive_readahead=1 --allow_data_in_errors=True --avoid_unnecessary_blocking_io=0 --backup_max_size=104857600 --backup_one_in=100000 --batch_protection_bytes_per_key=8 --block_size=16384 --bloom_bits=134.8015470676662 --bottommost_compression_type=disable --cache_size=8388608 --checkpoint_one_in=1000000 --checksum_type=kCRC32c --clear_column_family_one_in=0 --compact_files_one_in=1000000 --compact_range_one_in=1000000 --compaction_pri=2 --compaction_ttl=100 --compression_max_dict_buffer_bytes=511 --compression_max_dict_bytes=16384 --compression_type=zstd --compression_use_zstd_dict_trainer=1 --compression_zstd_max_train_bytes=65536 --continuous_verification_interval=0 --data_block_index_type=0 --db=$db --db_write_buffer_size=1048576 --delpercent=5 --delrangepercent=0 --destroy_db_initially=0 --disable_wal=0 --enable_compaction_filter=0 --enable_pipelined_write=1 --expected_values_dir=$exp --fail_if_options_file_error=1 --file_checksum_impl=none --flush_one_in=1000000 --get_current_wal_file_one_in=0 --get_live_files_one_in=1000000 --get_property_one_in=1000000 --get_sorted_wal_files_one_in=0 --index_block_restart_interval=4 --ingest_external_file_one_in=0 --iterpercent=10 --key_len_percent_dist=1,30,69 --level_compaction_dynamic_level_bytes=True --mark_for_compaction_one_file_in=10 --max_background_compactions=20 --max_bytes_for_level_base=10485760 --max_key=10000 --max_key_len=3 --max_manifest_file_size=16384 --max_write_batch_group_size_bytes=64 --max_write_buffer_number=3 --max_write_buffer_size_to_maintain=0 --memtable_prefix_bloom_size_ratio=0.001 --memtable_protection_bytes_per_key=1 --memtable_whole_key_filtering=1 --mmap_read=1 --nooverwritepercent=1 --open_metadata_write_fault_one_in=0 --open_read_fault_one_in=0 --open_write_fault_one_in=0 --ops_per_thread=100000000 --optimize_filters_for_memory=1 --paranoid_file_checks=1 --partition_pinning=2 --pause_background_one_in=1000000 --periodic_compaction_seconds=0 --prefix_size=5 --prefixpercent=5 --prepopulate_block_cache=1 --progress_reports=0 --read_fault_one_in=1000 --readpercent=45 --recycle_log_file_num=0 --reopen=0 --ribbon_starting_level=999 --secondary_cache_fault_one_in=32 --secondary_cache_uri=compressed_secondary_cache://capacity=8388608 --set_options_one_in=10000 --snapshot_hold_ops=100000 --sst_file_manager_bytes_per_sec=0 --sst_file_manager_bytes_per_truncate=0 --subcompactions=3 --sync_fault_injection=1 --target_file_size_base=2097 --target_file_size_multiplier=2 --test_batches_snapshots=1 --top_level_index_pinning=1 --use_full_merge_v1=1 --use_merge=1 --value_size_mult=32 --verify_checksum=1 --verify_checksum_one_in=1000000 --verify_db_one_in=100000 --verify_sst_unique_id_in_manifest=1 --wal_bytes_per_sync=524288 --write_buffer_size=4194 --writepercent=35
```
```
stderr:
WARNING: prefix_size is non-zero but memtablerep != prefix_hash
db_stress: utilities/fault_injection_fs.cc:748: virtual rocksdb::IOStatus rocksdb::FaultInjectionTestFS::RenameFile(const std::string &, const std::string &, const rocksdb::IOOptions &, rocksdb::IODebugContext *): Assertion `tlist.find(tdn.second) == tlist.end()' failed.`
```
**Summary:**
The PR ensures that non-test paths pass down a non-null dir containing CURRENT (which, by current RocksDB assumptions, is just db_dir) by doing the following:
- Renamed `directory_to_fsync` as `dir_contains_current_file` in `SetCurrentFile()` to tighten the association between this directory and the CURRENT file
- Changed the `SetCurrentFile()` API to require `dir_contains_current_file` to be passed in, instead of defaulting it to nullptr.
- Because `SetCurrentFile()`'s `dir_contains_current_file` is passed down from `VersionSet::LogAndApply()` and then `VersionSet::ProcessManifestWrites()` (i.e., think of this as a chain of three functions related to MANIFEST updates), these two functions were also refactored to require `dir_contains_current_file`
- Updated the non-test-path callers of these three functions to obtain and pass in a non-null `dir_contains_current_file`, which, by current RocksDB assumptions, is the `FSDirectory* db_dir`.
- The `db_impl` path obtains `DBImpl::directories_.getDbDir()`, while callers with no access to such `directories_` create the object on the fly via `FileSystem::NewDirectory(..)` and manage it with unique pointers to ensure a short lifetime.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10573
Test Plan:
- `make check`
- Passed the repro db_stress command
- For future improvement: since we currently don't assert that the dir containing CURRENT is non-null due to https://github.com/facebook/rocksdb/pull/10573#pullrequestreview-1087698899, there is still a chance that future developers mistakenly pass down a nullptr dir containing CURRENT, resulting in a skipped dir sync and causing the bug again. Therefore a smarter test (e.g., as quoted from ajkr, "(make) unsynced data loss to be dropping files corresponding to unsynced directory entries") is still needed.
Reviewed By: ajkr
Differential Revision: D39005886
Pulled By: hx235
fbshipit-source-id: 336fb9090d0cfa6ca3dd580db86268007dde7f5a
2022-08-30 00:35:21 +00:00
|
|
|
std::unique_ptr<FSDirectory> db_dir;
|
|
|
|
s = options_.env->GetFileSystem()->NewDirectory(db_path_, IOOptions(),
|
|
|
|
&db_dir, nullptr);
|
|
|
|
if (s.ok()) {
|
2023-04-21 16:07:18 +00:00
|
|
|
s = w.LogAndApply(read_options, cfd, &edit, db_dir.get());
|
|
|
|
}
|
2020-09-03 23:53:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!s.ok()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"failed to unsafely remove SST file: " + s.ToString());
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Succeed("unsafely removed SST file");
|
2019-08-15 23:59:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-03-18 23:35:51 +00:00
|
|
|
const std::string UpdateManifestCommand::ARG_VERBOSE = "verbose";
|
|
|
|
const std::string UpdateManifestCommand::ARG_UPDATE_TEMPERATURES =
|
|
|
|
"update_temperatures";
|
|
|
|
|
|
|
|
void UpdateManifestCommand::Help(std::string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(UpdateManifestCommand::Name());
|
|
|
|
ret.append(" [--update_temperatures]");
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(" MUST NOT be used on a live DB.");
|
|
|
|
ret.append("\n");
|
|
|
|
}
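// Example invocation, assuming the registered command name is
// "update_manifest" and with an illustrative path:
//
//   ldb --db=/tmp/test_db update_manifest --update_temperatures
//
// With --update_temperatures, the temperature recorded in the MANIFEST for
// each SST file is updated to match what the FileSystem currently reports
// for that file.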
|
|
|
|
|
|
|
|
UpdateManifestCommand::UpdateManifestCommand(
|
|
|
|
const std::vector<std::string>& /*params*/,
|
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags)
|
|
|
|
: LDBCommand(options, flags, false /* is_read_only */,
|
|
|
|
BuildCmdLineOptions({ARG_VERBOSE, ARG_UPDATE_TEMPERATURES})) {
|
|
|
|
verbose_ = IsFlagPresent(flags, ARG_VERBOSE) ||
|
|
|
|
ParseBooleanOption(options, ARG_VERBOSE, false);
|
|
|
|
update_temperatures_ =
|
|
|
|
IsFlagPresent(flags, ARG_UPDATE_TEMPERATURES) ||
|
|
|
|
ParseBooleanOption(options, ARG_UPDATE_TEMPERATURES, false);
|
|
|
|
|
|
|
|
if (!update_temperatures_) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"No action like --update_temperatures specified for update_manifest");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void UpdateManifestCommand::DoCommand() {
|
|
|
|
PrepareOptions();
|
|
|
|
|
|
|
|
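  // Send log output to stderr so stdout stays reserved for command output;
  // --verbose lowers the logging threshold from WARN to INFO.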
auto level = verbose_ ? InfoLogLevel::INFO_LEVEL : InfoLogLevel::WARN_LEVEL;
|
|
|
|
options_.info_log.reset(new StderrLogger(level));
|
|
|
|
|
|
|
|
experimental::UpdateManifestForFilesStateOptions opts;
|
|
|
|
opts.update_temperatures = update_temperatures_;
|
|
|
|
if (column_families_.empty()) {
|
|
|
|
column_families_.emplace_back(kDefaultColumnFamilyName, options_);
|
|
|
|
}
|
|
|
|
Status s = experimental::UpdateManifestForFilesState(options_, db_path_,
|
|
|
|
column_families_);
|
|
|
|
|
|
|
|
if (!s.ok()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"failed to update manifest: " + s.ToString());
|
|
|
|
} else {
|
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Succeed("Manifest updates successful");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|