rocksdb/tools/ldb_tool.cc

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
#include "rocksdb/ldb_tool.h"
#include "rocksdb/utilities/ldb_cmd.h"
#include "tools/ldb_cmd_impl.h"
namespace ROCKSDB_NAMESPACE {
LDBOptions::LDBOptions() = default;
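
// Assembles the full ldb help text -- the global option flags followed by
// each command's own Help() output -- and prints it to stderr or stdout.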
void LDBCommandRunner::PrintHelp(const LDBOptions& ldb_options,
                                 const char* /*exec_name*/, bool to_stderr) {
  std::string ret;
  ret.append(ldb_options.print_help_header);
  ret.append("\n\n");
  ret.append("commands MUST specify --" + LDBCommand::ARG_DB +
             "=<full_path_to_db_directory> when necessary\n");
  ret.append("\n");
  ret.append("commands can optionally specify\n");
  ret.append(" --" + LDBCommand::ARG_ENV_URI + "=<uri_of_environment> or --" +
             LDBCommand::ARG_FS_URI + "=<uri_of_filesystem> if necessary");
  ret.append("\n");
  ret.append(" --" + LDBCommand::ARG_SECONDARY_PATH +
             "=<secondary_path> to open DB as secondary instance. Operations "
             "not supported in secondary instance will fail.\n\n");
  ret.append(" --" + LDBCommand::ARG_LEADER_PATH +
             "=<leader_path> to open DB as a follower instance. Operations "
             "not supported in follower instance will fail.\n\n");
  ret.append(
      "The following optional parameters control if keys/values are "
      "input/output as hex or as plain strings:\n");
  ret.append(" --" + LDBCommand::ARG_KEY_HEX +
             " : Keys are input/output as hex\n");
  ret.append(" --" + LDBCommand::ARG_VALUE_HEX +
             " : Values are input/output as hex\n");
  ret.append(" --" + LDBCommand::ARG_HEX +
             " : Both keys and values are input/output as hex\n");
  ret.append("\n");
  ret.append(
      "The following optional parameters control the database "
      "internals:\n");
  ret.append(
      " --" + LDBCommand::ARG_CF_NAME +
      "=<string> : name of the column family to operate on. default: default "
      "column family\n");
  ret.append(" --" + LDBCommand::ARG_TTL +
             " with 'put','get','scan','dump','query','batchput'"
             " : DB supports ttl and value is internally timestamp-suffixed\n");
  ret.append(" --" + LDBCommand::ARG_TRY_LOAD_OPTIONS +
             " : Try to load option file from DB. Default to true if " +
             LDBCommand::ARG_DB +
             " is specified and not creating a new DB and not open as TTL DB. "
             "Can be set to false explicitly.\n");
  ret.append(" --" + LDBCommand::ARG_DISABLE_CONSISTENCY_CHECKS +
             " : Set options.force_consistency_checks = false.\n");
  ret.append(" --" + LDBCommand::ARG_IGNORE_UNKNOWN_OPTIONS +
             " : Ignore unknown options when loading option file.\n");
  ret.append(" --" + LDBCommand::ARG_BLOOM_BITS + "=<int,e.g.:14>\n");
  ret.append(" --" + LDBCommand::ARG_FIX_PREFIX_LEN + "=<int,e.g.:14>\n");
  ret.append(" --" + LDBCommand::ARG_COMPRESSION_TYPE +
             "=<no|snappy|zlib|bzip2|lz4|lz4hc|xpress|zstd>\n");
  ret.append(" --" + LDBCommand::ARG_COMPRESSION_MAX_DICT_BYTES +
             "=<int,e.g.:16384>\n");
  ret.append(" --" + LDBCommand::ARG_BLOCK_SIZE + "=<block_size_in_bytes>\n");
  ret.append(" --" + LDBCommand::ARG_AUTO_COMPACTION + "=<true|false>\n");
  ret.append(" --" + LDBCommand::ARG_DB_WRITE_BUFFER_SIZE +
             "=<int,e.g.:16777216>\n");
  ret.append(" --" + LDBCommand::ARG_WRITE_BUFFER_SIZE +
             "=<int,e.g.:4194304>\n");
  ret.append(" --" + LDBCommand::ARG_FILE_SIZE + "=<int,e.g.:2097152>\n");
  ret.append(" --" + LDBCommand::ARG_ENABLE_BLOB_FILES +
             " : Enable key-value separation using BlobDB\n");
  ret.append(" --" + LDBCommand::ARG_MIN_BLOB_SIZE + "=<int,e.g.:2097152>\n");
  ret.append(" --" + LDBCommand::ARG_BLOB_FILE_SIZE + "=<int,e.g.:2097152>\n");
  ret.append(" --" + LDBCommand::ARG_BLOB_COMPRESSION_TYPE +
             "=<no|snappy|zlib|bzip2|lz4|lz4hc|xpress|zstd>\n");
  ret.append(" --" + LDBCommand::ARG_ENABLE_BLOB_GARBAGE_COLLECTION +
             " : Enable blob garbage collection\n");
  ret.append(" --" + LDBCommand::ARG_BLOB_GARBAGE_COLLECTION_AGE_CUTOFF +
             "=<double,e.g.:0.25>\n");
  ret.append(" --" + LDBCommand::ARG_BLOB_GARBAGE_COLLECTION_FORCE_THRESHOLD +
             "=<double,e.g.:0.25>\n");
  ret.append(" --" + LDBCommand::ARG_BLOB_COMPACTION_READAHEAD_SIZE +
             "=<int,e.g.:2097152>\n");
  ret.append(" --" + LDBCommand::ARG_READ_TIMESTAMP +
             "=<uint64_ts, e.g.:323> : read timestamp, required if column "
             "family enables timestamp, otherwise invalid if provided.");
  ret.append("\n\n");
  ret.append("Data Access Commands:\n");
  PutCommand::Help(ret);
  PutEntityCommand::Help(ret);
  GetCommand::Help(ret);
  GetEntityCommand::Help(ret);
  MultiGetCommand::Help(ret);
  MultiGetEntityCommand::Help(ret);
  BatchPutCommand::Help(ret);
  ScanCommand::Help(ret);
  DeleteCommand::Help(ret);
  SingleDeleteCommand::Help(ret);
  DeleteRangeCommand::Help(ret);
  DBQuerierCommand::Help(ret);
  ApproxSizeCommand::Help(ret);
  CheckConsistencyCommand::Help(ret);
  ListFileRangeDeletesCommand::Help(ret);
  ret.append("\n\n");
  ret.append("Admin Commands:\n");
  WALDumperCommand::Help(ret);
  CompactorCommand::Help(ret);
  ReduceDBLevelsCommand::Help(ret);
  ChangeCompactionStyleCommand::Help(ret);
  DBDumperCommand::Help(ret);
  DBLoaderCommand::Help(ret);
  ManifestDumpCommand::Help(ret);
  UpdateManifestCommand::Help(ret);
  FileChecksumDumpCommand::Help(ret);
  GetPropertyCommand::Help(ret);
  ListColumnFamiliesCommand::Help(ret);
  CreateColumnFamilyCommand::Help(ret);
  DropColumnFamilyCommand::Help(ret);
  DBFileDumperCommand::Help(ret);
  InternalDumpCommand::Help(ret);
  DBLiveFilesMetadataDumperCommand::Help(ret);
  RepairCommand::Help(ret);
  BackupCommand::Help(ret);
  RestoreCommand::Help(ret);
  CheckPointCommand::Help(ret);
  WriteExternalSstFilesCommand::Help(ret);
  IngestExternalSstFilesCommand::Help(ret);
  UnsafeRemoveSstFileCommand::Help(ret);

  fprintf(to_stderr ? stderr : stdout, "%s\n", ret.c_str());
}
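
// Parses the command line, handles the trivial --help / --version cases,
// constructs the matching LDBCommand and runs it. Returns 0 on success and
// 1 on any failure, suitable for use as a process exit code.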
int LDBCommandRunner::RunCommand(
    int argc, char const* const* argv, Options options,
    const LDBOptions& ldb_options,
    const std::vector<ColumnFamilyDescriptor>* column_families) {
  if (argc <= 2) {
    if (argc <= 1) {
      PrintHelp(ldb_options, argv[0], /*to_stderr*/ true);
      return 1;
    } else if (std::string(argv[1]) == "--version") {
      printf("ldb from RocksDB %d.%d.%d\n", ROCKSDB_MAJOR, ROCKSDB_MINOR,
             ROCKSDB_PATCH);
      return 0;
    } else if (std::string(argv[1]) == "--help") {
      PrintHelp(ldb_options, argv[0], /*to_stderr*/ false);
      return 0;
    } else {
      PrintHelp(ldb_options, argv[0], /*to_stderr*/ true);
      return 1;
    }
  }

  LDBCommand* cmdObj = LDBCommand::InitFromCmdLineArgs(
      argc, argv, options, ldb_options, column_families);
  if (cmdObj == nullptr) {
    fprintf(stderr, "Unknown command\n");
    PrintHelp(ldb_options, argv[0], /*to_stderr*/ true);
    return 1;
  }

  if (!cmdObj->ValidateCmdLineOptions()) {
    return 1;
  }

  cmdObj->Run();
  LDBCommandExecuteResult ret = cmdObj->GetExecuteState();
  if (!ret.ToString().empty()) {
    fprintf(stderr, "%s\n", ret.ToString().c_str());
  }

  delete cmdObj;

  return ret.IsFailed() ? 1 : 0;
}
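
// Thin wrapper used by the ldb binary: runs the requested command and exits
// the process with its status code, so this call never returns.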
void LDBTool::Run(int argc, char** argv, Options options,
                  const LDBOptions& ldb_options,
                  const std::vector<ColumnFamilyDescriptor>* column_families) {
  int error_code = LDBCommandRunner::RunCommand(argc, argv, options,
                                                ldb_options, column_families);
  exit(error_code);
}
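
// For reference, a minimal sketch of how this class is typically driven from
// a command-line entry point (the shipped entry point is tools/ldb.cc; the
// sketch below is illustrative and assumes the default Options/LDBOptions
// arguments declared in rocksdb/ldb_tool.h, not the exact shipped main()):
//
//   #include "rocksdb/ldb_tool.h"
//
//   int main(int argc, char** argv) {
//     ROCKSDB_NAMESPACE::LDBTool tool;
//     tool.Run(argc, argv);  // exits the process with the command's status
//     return 0;              // not reached; Run() calls exit()
//   }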
} // namespace ROCKSDB_NAMESPACE