// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include <functional>

#include "db/db_test_util.h"
#include "db/version_edit.h"
#include "port/port.h"
#include "port/stack_trace.h"
#include "rocksdb/advanced_options.h"
#include "rocksdb/options.h"
#include "rocksdb/sst_file_writer.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/defer.h"
#include "util/random.h"
#include "utilities/fault_injection_env.h"

namespace ROCKSDB_NAMESPACE {

class ExternalSSTFileBasicTest
    : public DBTestBase,
      public ::testing::WithParamInterface<std::tuple<bool, bool>> {
 public:
  ExternalSSTFileBasicTest()
      : DBTestBase("external_sst_file_basic_test", /*env_do_fsync=*/true) {
    sst_files_dir_ = dbname_ + "_sst_files/";
    fault_injection_test_env_.reset(new FaultInjectionTestEnv(env_));
    DestroyAndRecreateExternalSSTFilesDir();

    // Check if the Env supports RandomRWFile
    std::string file_path = sst_files_dir_ + "test_random_rw_file";
    std::unique_ptr<WritableFile> wfile;
    assert(env_->NewWritableFile(file_path, &wfile, EnvOptions()).ok());
    wfile.reset();
    std::unique_ptr<RandomRWFile> rwfile;
    Status s = env_->NewRandomRWFile(file_path, &rwfile, EnvOptions());
    if (s.IsNotSupported()) {
      random_rwfile_supported_ = false;
    } else {
      EXPECT_OK(s);
      random_rwfile_supported_ = true;
    }
    rwfile.reset();
    EXPECT_OK(env_->DeleteFile(file_path));
  }

  void DestroyAndRecreateExternalSSTFilesDir() {
    ASSERT_OK(DestroyDir(env_, sst_files_dir_));
    ASSERT_OK(env_->CreateDir(sst_files_dir_));
  }

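  // Ingests `files` through the legacy single-file IngestExternalFile() path,
  // with global seqnos and blocking flushes disallowed, mirroring the
  // deprecated AddFile() behavior.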
  Status DeprecatedAddFile(const std::vector<std::string>& files,
                           bool move_files = false,
                           bool skip_snapshot_check = false) {
    IngestExternalFileOptions opts;
    opts.move_files = move_files;
    opts.snapshot_consistency = !skip_snapshot_check;
    opts.allow_global_seqno = false;
    opts.allow_blocking_flush = false;
    return db_->IngestExternalFile(files, opts);
  }

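  // Ingests `files` via IngestExternalFiles(), passing along caller-provided
  // checksums and checksum function names and optionally re-verifying them
  // during ingestion.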
  Status AddFileWithFileChecksum(
      const std::vector<std::string>& files,
      const std::vector<std::string>& files_checksums,
      const std::vector<std::string>& files_checksum_func_names,
      bool verify_file_checksum = true, bool move_files = false,
      bool skip_snapshot_check = false, bool write_global_seqno = true) {
    IngestExternalFileOptions opts;
    opts.move_files = move_files;
    opts.snapshot_consistency = !skip_snapshot_check;
    opts.allow_global_seqno = false;
    opts.allow_blocking_flush = false;
    opts.write_global_seqno = write_global_seqno;
    opts.verify_file_checksum = verify_file_checksum;

    IngestExternalFileArg arg;
    arg.column_family = db_->DefaultColumnFamily();
    arg.external_files = files;
    arg.options = opts;
    arg.files_checksums = files_checksums;
    arg.files_checksum_func_names = files_checksum_func_names;
    return db_->IngestExternalFiles({arg});
  }

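  // Writes `keys` (and optional range deletions) into a new external SST via
  // SstFileWriter, ingests it, and updates *true_data to the expected
  // post-ingestion contents of the DB.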
  Status GenerateAndAddExternalFile(
      const Options options, std::vector<int> keys,
      const std::vector<ValueType>& value_types,
      std::vector<std::pair<int, int>> range_deletions, int file_id,
      bool write_global_seqno, bool verify_checksums_before_ingest,
      std::map<std::string, std::string>* true_data) {
    assert(value_types.size() == 1 || keys.size() == value_types.size());
    std::string file_path = sst_files_dir_ + std::to_string(file_id);
    SstFileWriter sst_file_writer(EnvOptions(), options);

    Status s = sst_file_writer.Open(file_path);
    if (!s.ok()) {
      return s;
    }
    for (size_t i = 0; i < range_deletions.size(); i++) {
      // Account for the effect of range deletions on true_data before
      // all point operations, even though sst_file_writer.DeleteRange
      // must be called before other sst_file_writer methods. This is
      // because point writes take precedence over range deletions
      // in the same ingested sst. This precedence is part of
      // `SstFileWriter::DeleteRange()`'s API contract.
      std::string start_key = Key(range_deletions[i].first);
      std::string end_key = Key(range_deletions[i].second);
      s = sst_file_writer.DeleteRange(start_key, end_key);
      if (!s.ok()) {
        sst_file_writer.Finish();
        return s;
      }
      auto start_key_it = true_data->find(start_key);
      if (start_key_it == true_data->end()) {
        start_key_it = true_data->upper_bound(start_key);
      }
      auto end_key_it = true_data->find(end_key);
      if (end_key_it == true_data->end()) {
        end_key_it = true_data->upper_bound(end_key);
      }
      true_data->erase(start_key_it, end_key_it);
    }
    for (size_t i = 0; i < keys.size(); i++) {
      std::string key = Key(keys[i]);
      std::string value = Key(keys[i]) + std::to_string(file_id);
      ValueType value_type =
          (value_types.size() == 1 ? value_types[0] : value_types[i]);
      switch (value_type) {
        case ValueType::kTypeValue:
          s = sst_file_writer.Put(key, value);
          (*true_data)[key] = value;
          break;
        case ValueType::kTypeMerge:
          s = sst_file_writer.Merge(key, value);
          // we only use TestPutOperator in this test
          (*true_data)[key] = value;
          break;
        case ValueType::kTypeDeletion:
          s = sst_file_writer.Delete(key);
          true_data->erase(key);
          break;
        default:
          return Status::InvalidArgument("Value type is not supported");
      }
      if (!s.ok()) {
        sst_file_writer.Finish();
        return s;
      }
    }
    s = sst_file_writer.Finish();

    if (s.ok()) {
      IngestExternalFileOptions ifo;
      ifo.allow_global_seqno = true;
      ifo.write_global_seqno = write_global_seqno;
      ifo.verify_checksums_before_ingest = verify_checksums_before_ingest;
      s = db_->IngestExternalFile({file_path}, ifo);
    }
    return s;
  }

  Status GenerateAndAddExternalFile(
      const Options options, std::vector<int> keys,
      const std::vector<ValueType>& value_types, int file_id,
      bool write_global_seqno, bool verify_checksums_before_ingest,
      std::map<std::string, std::string>* true_data) {
    return GenerateAndAddExternalFile(
        options, keys, value_types, {}, file_id, write_global_seqno,
        verify_checksums_before_ingest, true_data);
  }

  Status GenerateAndAddExternalFile(
      const Options options, std::vector<int> keys, const ValueType value_type,
      int file_id, bool write_global_seqno, bool verify_checksums_before_ingest,
      std::map<std::string, std::string>* true_data) {
    return GenerateAndAddExternalFile(
        options, keys, std::vector<ValueType>(1, value_type), file_id,
        write_global_seqno, verify_checksums_before_ingest, true_data);
  }

  ~ExternalSSTFileBasicTest() override {
    DestroyDir(env_, sst_files_dir_).PermitUncheckedError();
  }

 protected:
  std::string sst_files_dir_;
  std::unique_ptr<FaultInjectionTestEnv> fault_injection_test_env_;
  bool random_rwfile_supported_;
};

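// Writes an external SST with SstFileWriter, checks the returned
// ExternalSstFileInfo, then ingests the file into a fresh DB and verifies the
// keys are readable while the latest sequence number stays at 0.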
TEST_F(ExternalSSTFileBasicTest, Basic) {
  Options options = CurrentOptions();

  SstFileWriter sst_file_writer(EnvOptions(), options);

  // Current file size should be 0 after sst_file_writer init and before
  // opening a file.
  ASSERT_EQ(sst_file_writer.FileSize(), 0);

  // file1.sst (0 => 99)
  std::string file1 = sst_files_dir_ + "file1.sst";
  ASSERT_OK(sst_file_writer.Open(file1));
  for (int k = 0; k < 100; k++) {
    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
  }
  ExternalSstFileInfo file1_info;
  Status s = sst_file_writer.Finish(&file1_info);
  ASSERT_OK(s) << s.ToString();

  // Current file size should be non-zero after a successful write.
  ASSERT_GT(sst_file_writer.FileSize(), 0);

  ASSERT_EQ(file1_info.file_path, file1);
  ASSERT_EQ(file1_info.num_entries, 100);
  ASSERT_EQ(file1_info.smallest_key, Key(0));
  ASSERT_EQ(file1_info.largest_key, Key(99));
  ASSERT_EQ(file1_info.num_range_del_entries, 0);
  ASSERT_EQ(file1_info.smallest_range_del_key, "");
  ASSERT_EQ(file1_info.largest_range_del_key, "");
  ASSERT_EQ(file1_info.file_checksum, kUnknownFileChecksum);
  ASSERT_EQ(file1_info.file_checksum_func_name, kUnknownFileChecksumFuncName);
  // sst_file_writer already finished, cannot add this value
  s = sst_file_writer.Put(Key(100), "bad_val");
  ASSERT_NOK(s) << s.ToString();
  s = sst_file_writer.DeleteRange(Key(100), Key(200));
  ASSERT_NOK(s) << s.ToString();

  DestroyAndReopen(options);
  // Add file using file path
  s = DeprecatedAddFile({file1});
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U);
  for (int k = 0; k < 100; k++) {
    ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
  }

  DestroyAndRecreateExternalSSTFilesDir();
}

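// Reads a file back sequentially and computes its checksum with the
// FileChecksumGenFactory from the given Options, so tests can cross-check the
// checksum recorded by SstFileWriter and by the DB.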
class ChecksumVerifyHelper {
 private:
  Options options_;

 public:
  ChecksumVerifyHelper(Options& options) : options_(options) {}
  ~ChecksumVerifyHelper() = default;

  Status GetSingleFileChecksumAndFuncName(
      const std::string& file_path, std::string* file_checksum,
      std::string* file_checksum_func_name) {
    Status s;
    EnvOptions soptions;
    std::unique_ptr<SequentialFile> file_reader;
    s = options_.env->NewSequentialFile(file_path, &file_reader, soptions);
    if (!s.ok()) {
      return s;
    }
    std::unique_ptr<char[]> scratch(new char[2048]);
    Slice result;
    FileChecksumGenFactory* file_checksum_gen_factory =
        options_.file_checksum_gen_factory.get();
    if (file_checksum_gen_factory == nullptr) {
      *file_checksum = kUnknownFileChecksum;
      *file_checksum_func_name = kUnknownFileChecksumFuncName;
      return Status::OK();
    } else {
      FileChecksumGenContext gen_context;
      std::unique_ptr<FileChecksumGenerator> file_checksum_gen =
          file_checksum_gen_factory->CreateFileChecksumGenerator(gen_context);
      *file_checksum_func_name = file_checksum_gen->Name();
      s = file_reader->Read(2048, &result, scratch.get());
      if (!s.ok()) {
        return s;
      }
      while (result.size() != 0) {
        file_checksum_gen->Update(scratch.get(), result.size());
        s = file_reader->Read(2048, &result, scratch.get());
        if (!s.ok()) {
          return s;
        }
      }
      file_checksum_gen->Finalize();
      *file_checksum = file_checksum_gen->GetChecksum();
    }
    return Status::OK();
  }
};

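// Same flow as the Basic test, but with the crc32c file checksum factory
// enabled, so the checksum reported by SstFileWriter can be cross-checked
// against ChecksumVerifyHelper.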
TEST_F(ExternalSSTFileBasicTest, BasicWithFileChecksumCrc32c) {
  Options options = CurrentOptions();
  options.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
  ChecksumVerifyHelper checksum_helper(options);

  SstFileWriter sst_file_writer(EnvOptions(), options);

  // Current file size should be 0 after sst_file_writer init and before
  // opening a file.
  ASSERT_EQ(sst_file_writer.FileSize(), 0);

  // file1.sst (0 => 99)
  std::string file1 = sst_files_dir_ + "file1.sst";
  ASSERT_OK(sst_file_writer.Open(file1));
  for (int k = 0; k < 100; k++) {
    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
  }
  ExternalSstFileInfo file1_info;
  Status s = sst_file_writer.Finish(&file1_info);
  ASSERT_OK(s) << s.ToString();
  std::string file_checksum, file_checksum_func_name;
  ASSERT_OK(checksum_helper.GetSingleFileChecksumAndFuncName(
      file1, &file_checksum, &file_checksum_func_name));

  // Current file size should be non-zero after a successful write.
  ASSERT_GT(sst_file_writer.FileSize(), 0);

  ASSERT_EQ(file1_info.file_path, file1);
  ASSERT_EQ(file1_info.num_entries, 100);
  ASSERT_EQ(file1_info.smallest_key, Key(0));
  ASSERT_EQ(file1_info.largest_key, Key(99));
  ASSERT_EQ(file1_info.num_range_del_entries, 0);
  ASSERT_EQ(file1_info.smallest_range_del_key, "");
  ASSERT_EQ(file1_info.largest_range_del_key, "");
  ASSERT_EQ(file1_info.file_checksum, file_checksum);
  ASSERT_EQ(file1_info.file_checksum_func_name, file_checksum_func_name);
  // sst_file_writer already finished, cannot add this value
  s = sst_file_writer.Put(Key(100), "bad_val");
  ASSERT_NOK(s) << s.ToString();
  s = sst_file_writer.DeleteRange(Key(100), Key(200));
  ASSERT_NOK(s) << s.ToString();

  DestroyAndReopen(options);
  // Add file using file path
  s = DeprecatedAddFile({file1});
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U);
  for (int k = 0; k < 100; k++) {
    ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
  }

  DestroyAndRecreateExternalSSTFilesDir();
}

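// Exercises checksum handling during ingestion: without the DB checksum
// factory the provided checksums are ignored; with it enabled, mismatched
// checksum lists, names, or values fail the ingestion, matching ones succeed,
// and missing checksums are generated at ingestion time.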
TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) {
  Options old_options = CurrentOptions();
  Options options = CurrentOptions();
  options.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();
  const ImmutableCFOptions ioptions(options);
  ChecksumVerifyHelper checksum_helper(options);

  SstFileWriter sst_file_writer(EnvOptions(), options);

  // file01.sst (1000 => 1099)
  std::string file1 = sst_files_dir_ + "file01.sst";
  ASSERT_OK(sst_file_writer.Open(file1));
  for (int k = 1000; k < 1100; k++) {
    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
  }
  ExternalSstFileInfo file1_info;
  Status s = sst_file_writer.Finish(&file1_info);
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(file1_info.file_path, file1);
  ASSERT_EQ(file1_info.num_entries, 100);
  ASSERT_EQ(file1_info.smallest_key, Key(1000));
  ASSERT_EQ(file1_info.largest_key, Key(1099));
  std::string file_checksum1, file_checksum_func_name1;
  ASSERT_OK(checksum_helper.GetSingleFileChecksumAndFuncName(
      file1, &file_checksum1, &file_checksum_func_name1));
  ASSERT_EQ(file1_info.file_checksum, file_checksum1);
  ASSERT_EQ(file1_info.file_checksum_func_name, file_checksum_func_name1);

  // file02.sst (1100 => 1299)
  std::string file2 = sst_files_dir_ + "file02.sst";
  ASSERT_OK(sst_file_writer.Open(file2));
  for (int k = 1100; k < 1300; k++) {
    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
  }
  ExternalSstFileInfo file2_info;
  s = sst_file_writer.Finish(&file2_info);
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(file2_info.file_path, file2);
  ASSERT_EQ(file2_info.num_entries, 200);
  ASSERT_EQ(file2_info.smallest_key, Key(1100));
  ASSERT_EQ(file2_info.largest_key, Key(1299));
  std::string file_checksum2, file_checksum_func_name2;
  ASSERT_OK(checksum_helper.GetSingleFileChecksumAndFuncName(
      file2, &file_checksum2, &file_checksum_func_name2));
  ASSERT_EQ(file2_info.file_checksum, file_checksum2);
  ASSERT_EQ(file2_info.file_checksum_func_name, file_checksum_func_name2);

  // file03.sst (1300 => 1499)
  std::string file3 = sst_files_dir_ + "file03.sst";
  ASSERT_OK(sst_file_writer.Open(file3));
  for (int k = 1300; k < 1500; k++) {
    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val_overlap"));
  }
  ExternalSstFileInfo file3_info;
  s = sst_file_writer.Finish(&file3_info);
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(file3_info.file_path, file3);
  ASSERT_EQ(file3_info.num_entries, 200);
  ASSERT_EQ(file3_info.smallest_key, Key(1300));
  ASSERT_EQ(file3_info.largest_key, Key(1499));
  std::string file_checksum3, file_checksum_func_name3;
  ASSERT_OK(checksum_helper.GetSingleFileChecksumAndFuncName(
      file3, &file_checksum3, &file_checksum_func_name3));
  ASSERT_EQ(file3_info.file_checksum, file_checksum3);
  ASSERT_EQ(file3_info.file_checksum_func_name, file_checksum_func_name3);

  // file04.sst (1500 => 1799)
  std::string file4 = sst_files_dir_ + "file04.sst";
  ASSERT_OK(sst_file_writer.Open(file4));
  for (int k = 1500; k < 1800; k++) {
    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val_overlap"));
  }
  ExternalSstFileInfo file4_info;
  s = sst_file_writer.Finish(&file4_info);
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(file4_info.file_path, file4);
  ASSERT_EQ(file4_info.num_entries, 300);
  ASSERT_EQ(file4_info.smallest_key, Key(1500));
  ASSERT_EQ(file4_info.largest_key, Key(1799));
  std::string file_checksum4, file_checksum_func_name4;
  ASSERT_OK(checksum_helper.GetSingleFileChecksumAndFuncName(
      file4, &file_checksum4, &file_checksum_func_name4));
  ASSERT_EQ(file4_info.file_checksum, file_checksum4);
  ASSERT_EQ(file4_info.file_checksum_func_name, file_checksum_func_name4);

  // file05.sst (1800 => 1999)
  std::string file5 = sst_files_dir_ + "file05.sst";
  ASSERT_OK(sst_file_writer.Open(file5));
  for (int k = 1800; k < 2000; k++) {
    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val_overlap"));
  }
  ExternalSstFileInfo file5_info;
  s = sst_file_writer.Finish(&file5_info);
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(file5_info.file_path, file5);
  ASSERT_EQ(file5_info.num_entries, 200);
  ASSERT_EQ(file5_info.smallest_key, Key(1800));
  ASSERT_EQ(file5_info.largest_key, Key(1999));
  std::string file_checksum5, file_checksum_func_name5;
  ASSERT_OK(checksum_helper.GetSingleFileChecksumAndFuncName(
      file5, &file_checksum5, &file_checksum_func_name5));
  ASSERT_EQ(file5_info.file_checksum, file_checksum5);
  ASSERT_EQ(file5_info.file_checksum_func_name, file_checksum_func_name5);

  // file06.sst (2000 => 2199)
  std::string file6 = sst_files_dir_ + "file06.sst";
  ASSERT_OK(sst_file_writer.Open(file6));
  for (int k = 2000; k < 2200; k++) {
    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val_overlap"));
  }
  ExternalSstFileInfo file6_info;
  s = sst_file_writer.Finish(&file6_info);
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(file6_info.file_path, file6);
  ASSERT_EQ(file6_info.num_entries, 200);
  ASSERT_EQ(file6_info.smallest_key, Key(2000));
  ASSERT_EQ(file6_info.largest_key, Key(2199));
  std::string file_checksum6, file_checksum_func_name6;
  ASSERT_OK(checksum_helper.GetSingleFileChecksumAndFuncName(
      file6, &file_checksum6, &file_checksum_func_name6));
  ASSERT_EQ(file6_info.file_checksum, file_checksum6);
  ASSERT_EQ(file6_info.file_checksum_func_name, file_checksum_func_name6);

  s = AddFileWithFileChecksum({file1}, {file_checksum1, "xyz"},
                              {file_checksum1}, true, false, false, false);
  // The checksum input is ignored since the DB does not enable file checksum.
  ASSERT_OK(s) << s.ToString();
  ASSERT_OK(env_->FileExists(file1));
  std::vector<LiveFileMetaData> live_files;
  dbfull()->GetLiveFilesMetaData(&live_files);
  std::set<std::string> set1;
  for (const auto& f : live_files) {
    set1.insert(f.name);
    ASSERT_EQ(f.file_checksum, kUnknownFileChecksum);
    ASSERT_EQ(f.file_checksum_func_name, kUnknownFileChecksumFuncName);
  }

  // check the temperature of the file being ingested
  ColumnFamilyMetaData metadata;
  db_->GetColumnFamilyMetaData(&metadata);
  ASSERT_EQ(1, metadata.file_count);
  ASSERT_EQ(Temperature::kUnknown, metadata.levels[6].files[0].temperature);
  auto size = GetSstSizeHelper(Temperature::kUnknown);
  ASSERT_GT(size, 0);
  size = GetSstSizeHelper(Temperature::kWarm);
  ASSERT_EQ(size, 0);
  size = GetSstSizeHelper(Temperature::kHot);
  ASSERT_EQ(size, 0);
  size = GetSstSizeHelper(Temperature::kCold);
  ASSERT_EQ(size, 0);
|
|
|
|
|
Ingest SST files with checksum information (#6891)
Summary:
Application can ingest SST files with file checksum information, such that during ingestion, DB is able to check data integrity and identify of the SST file. The PR introduces generate_and_verify_file_checksum to IngestExternalFileOption to control if the ingested checksum information should be verified with the generated checksum.
1. If generate_and_verify_file_checksum options is *FALSE*: *1)* if DB does not enable SST file checksum, the checksum information ingested will be ignored; *2)* if DB enables the SST file checksum and the checksum function name matches the checksum function name in DB, we trust the ingested checksum, store it in Manifest. If the checksum function name does not match, we treat that as an error and fail the IngestExternalFile() call.
2. If generate_and_verify_file_checksum options is *TRUE*: *1)* if DB does not enable SST file checksum, the checksum information ingested will be ignored; *2)* if DB enable the SST file checksum, we will use the checksum generator from DB to calculate the checksum for each ingested SST files after they are copied or moved. Then, compare the checksum results with the ingested checksum information: _A)_ if the checksum function name does not match, _verification always report true_ and we store the DB generated checksum information in Manifest. _B)_ if the checksum function name mach, and checksum match, ingestion continues and stores the checksum information in the Manifest. Otherwise, terminate file ingestion and report file corruption.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6891
Test Plan: added unit test, pass make asan_check
Reviewed By: pdillinger
Differential Revision: D21935988
Pulled By: zhichao-cao
fbshipit-source-id: 7b55f486632db467e76d72602218d0658aa7f6ed
2020-06-11 21:25:01 +00:00
|
|
|
// Reopen Db with checksum enabled
|
|
|
|
Reopen(options);
|
|
|
|
// Enable verify_file_checksum option
|
|
|
|
// The checksum vector does not match, fail the ingestion
|
|
|
|
s = AddFileWithFileChecksum({file2}, {file_checksum2, "xyz"},
|
|
|
|
{file_checksum_func_name2}, true, false, false,
|
|
|
|
false);
|
2020-12-10 05:19:55 +00:00
|
|
|
ASSERT_NOK(s) << s.ToString();
|
Ingest SST files with checksum information (#6891)
Summary:
Application can ingest SST files with file checksum information, such that during ingestion, DB is able to check data integrity and identify of the SST file. The PR introduces generate_and_verify_file_checksum to IngestExternalFileOption to control if the ingested checksum information should be verified with the generated checksum.
1. If generate_and_verify_file_checksum options is *FALSE*: *1)* if DB does not enable SST file checksum, the checksum information ingested will be ignored; *2)* if DB enables the SST file checksum and the checksum function name matches the checksum function name in DB, we trust the ingested checksum, store it in Manifest. If the checksum function name does not match, we treat that as an error and fail the IngestExternalFile() call.
2. If generate_and_verify_file_checksum options is *TRUE*: *1)* if DB does not enable SST file checksum, the checksum information ingested will be ignored; *2)* if DB enable the SST file checksum, we will use the checksum generator from DB to calculate the checksum for each ingested SST files after they are copied or moved. Then, compare the checksum results with the ingested checksum information: _A)_ if the checksum function name does not match, _verification always report true_ and we store the DB generated checksum information in Manifest. _B)_ if the checksum function name mach, and checksum match, ingestion continues and stores the checksum information in the Manifest. Otherwise, terminate file ingestion and report file corruption.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6891
Test Plan: added unit test, pass make asan_check
Reviewed By: pdillinger
Differential Revision: D21935988
Pulled By: zhichao-cao
fbshipit-source-id: 7b55f486632db467e76d72602218d0658aa7f6ed
2020-06-11 21:25:01 +00:00
|
|
|
|
|
|
|
// Enable verify_file_checksum option
|
|
|
|
// The checksum name does not match, fail the ingestion
|
|
|
|
s = AddFileWithFileChecksum({file2}, {file_checksum2}, {"xyz"}, true, false,
|
|
|
|
false, false);
|
2020-12-10 05:19:55 +00:00
|
|
|
ASSERT_NOK(s) << s.ToString();
|
Ingest SST files with checksum information (#6891)
Summary:
Application can ingest SST files with file checksum information, such that during ingestion, DB is able to check data integrity and identify of the SST file. The PR introduces generate_and_verify_file_checksum to IngestExternalFileOption to control if the ingested checksum information should be verified with the generated checksum.
1. If generate_and_verify_file_checksum options is *FALSE*: *1)* if DB does not enable SST file checksum, the checksum information ingested will be ignored; *2)* if DB enables the SST file checksum and the checksum function name matches the checksum function name in DB, we trust the ingested checksum, store it in Manifest. If the checksum function name does not match, we treat that as an error and fail the IngestExternalFile() call.
2. If generate_and_verify_file_checksum options is *TRUE*: *1)* if DB does not enable SST file checksum, the checksum information ingested will be ignored; *2)* if DB enable the SST file checksum, we will use the checksum generator from DB to calculate the checksum for each ingested SST files after they are copied or moved. Then, compare the checksum results with the ingested checksum information: _A)_ if the checksum function name does not match, _verification always report true_ and we store the DB generated checksum information in Manifest. _B)_ if the checksum function name mach, and checksum match, ingestion continues and stores the checksum information in the Manifest. Otherwise, terminate file ingestion and report file corruption.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6891
Test Plan: added unit test, pass make asan_check
Reviewed By: pdillinger
Differential Revision: D21935988
Pulled By: zhichao-cao
fbshipit-source-id: 7b55f486632db467e76d72602218d0658aa7f6ed
2020-06-11 21:25:01 +00:00
|
|
|
|
|
|
|
// Enable verify_file_checksum option
|
|
|
|
// The checksum itself does not match, fail the ingestion
|
|
|
|
s = AddFileWithFileChecksum({file2}, {"xyz"}, {file_checksum_func_name2},
|
|
|
|
true, false, false, false);
|
2020-12-10 05:19:55 +00:00
|
|
|
ASSERT_NOK(s) << s.ToString();
|
Ingest SST files with checksum information (#6891)
Summary:
Application can ingest SST files with file checksum information, such that during ingestion, DB is able to check data integrity and identify of the SST file. The PR introduces generate_and_verify_file_checksum to IngestExternalFileOption to control if the ingested checksum information should be verified with the generated checksum.
1. If generate_and_verify_file_checksum options is *FALSE*: *1)* if DB does not enable SST file checksum, the checksum information ingested will be ignored; *2)* if DB enables the SST file checksum and the checksum function name matches the checksum function name in DB, we trust the ingested checksum, store it in Manifest. If the checksum function name does not match, we treat that as an error and fail the IngestExternalFile() call.
2. If generate_and_verify_file_checksum options is *TRUE*: *1)* if DB does not enable SST file checksum, the checksum information ingested will be ignored; *2)* if DB enable the SST file checksum, we will use the checksum generator from DB to calculate the checksum for each ingested SST files after they are copied or moved. Then, compare the checksum results with the ingested checksum information: _A)_ if the checksum function name does not match, _verification always report true_ and we store the DB generated checksum information in Manifest. _B)_ if the checksum function name mach, and checksum match, ingestion continues and stores the checksum information in the Manifest. Otherwise, terminate file ingestion and report file corruption.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6891
Test Plan: added unit test, pass make asan_check
Reviewed By: pdillinger
Differential Revision: D21935988
Pulled By: zhichao-cao
fbshipit-source-id: 7b55f486632db467e76d72602218d0658aa7f6ed
2020-06-11 21:25:01 +00:00
|
|
|
|
|
|
|

  // Enable verify_file_checksum option
  // All matches, ingestion is successful
  s = AddFileWithFileChecksum({file2}, {file_checksum2},
                              {file_checksum_func_name2}, true, false, false,
                              false);
  ASSERT_OK(s) << s.ToString();
  std::vector<LiveFileMetaData> live_files1;
  dbfull()->GetLiveFilesMetaData(&live_files1);
  for (const auto& f : live_files1) {
    if (set1.find(f.name) == set1.end()) {
      ASSERT_EQ(f.file_checksum, file_checksum2);
      ASSERT_EQ(f.file_checksum_func_name, file_checksum_func_name2);
      set1.insert(f.name);
    }
  }
  ASSERT_OK(env_->FileExists(file2));

  // Enable verify_file_checksum option
  // No checksum information is provided, generate it when ingesting
  std::vector<std::string> checksum, checksum_func;
  s = AddFileWithFileChecksum({file3}, checksum, checksum_func, true, false,
                              false, false);
  ASSERT_OK(s) << s.ToString();
  std::vector<LiveFileMetaData> live_files2;
  dbfull()->GetLiveFilesMetaData(&live_files2);
  for (const auto& f : live_files2) {
    if (set1.find(f.name) == set1.end()) {
      ASSERT_EQ(f.file_checksum, file_checksum3);
      ASSERT_EQ(f.file_checksum_func_name, file_checksum_func_name3);
      set1.insert(f.name);
    }
  }
  ASSERT_OK(s) << s.ToString();
  ASSERT_OK(env_->FileExists(file3));

  // Does not enable verify_file_checksum option
  // The checksum name does not match, fail the ingestion
  s = AddFileWithFileChecksum({file4}, {file_checksum4}, {"xyz"}, false, false,
                              false, false);
  ASSERT_NOK(s) << s.ToString();

  // Does not enable verify_file_checksum option
  // Checksum function name matches, store the checksum being ingested.
  s = AddFileWithFileChecksum({file4}, {"asd"}, {file_checksum_func_name4},
                              false, false, false, false);
  ASSERT_OK(s) << s.ToString();
  std::vector<LiveFileMetaData> live_files3;
  dbfull()->GetLiveFilesMetaData(&live_files3);
  for (const auto& f : live_files3) {
    if (set1.find(f.name) == set1.end()) {
      ASSERT_FALSE(f.file_checksum == file_checksum4);
      ASSERT_EQ(f.file_checksum, "asd");
      ASSERT_EQ(f.file_checksum_func_name, file_checksum_func_name4);
      set1.insert(f.name);
    }
  }
  ASSERT_OK(s) << s.ToString();
  ASSERT_OK(env_->FileExists(file4));

  // Enable verify_file_checksum option; the DB enables file checksums and
  // write_global_seqno is set, so the stored checksum differs from the one
  // ingested because the global sequence number changes.
  s = AddFileWithFileChecksum({file5}, {file_checksum5},
                              {file_checksum_func_name5}, true, false, false,
                              true);
  ASSERT_OK(s) << s.ToString();
  std::vector<LiveFileMetaData> live_files4;
  dbfull()->GetLiveFilesMetaData(&live_files4);
  for (const auto& f : live_files4) {
    if (set1.find(f.name) == set1.end()) {
      std::string cur_checksum5, cur_checksum_func_name5;
      ASSERT_OK(checksum_helper.GetSingleFileChecksumAndFuncName(
          dbname_ + f.name, &cur_checksum5, &cur_checksum_func_name5));
      ASSERT_EQ(f.file_checksum, cur_checksum5);
      ASSERT_EQ(f.file_checksum_func_name, file_checksum_func_name5);
      set1.insert(f.name);
    }
  }
  ASSERT_OK(s) << s.ToString();
  ASSERT_OK(env_->FileExists(file5));

  // Does not enable the verify_file_checksum option and the ingested file's
  // checksum information is empty. The DB will generate the checksum and
  // store it in the Manifest.
  std::vector<std::string> files_c6, files_name6;
  s = AddFileWithFileChecksum({file6}, files_c6, files_name6, false, false,
                              false, false);
  ASSERT_OK(s) << s.ToString();
  std::vector<LiveFileMetaData> live_files6;
  dbfull()->GetLiveFilesMetaData(&live_files6);
  for (const auto& f : live_files6) {
    if (set1.find(f.name) == set1.end()) {
      ASSERT_EQ(f.file_checksum, file_checksum6);
      ASSERT_EQ(f.file_checksum_func_name, file_checksum_func_name6);
      set1.insert(f.name);
    }
  }
  ASSERT_OK(s) << s.ToString();
  ASSERT_OK(env_->FileExists(file6));
  db_->GetColumnFamilyMetaData(&metadata);
  size = GetSstSizeHelper(Temperature::kUnknown);
  ASSERT_GT(size, 0);
  size = GetSstSizeHelper(Temperature::kWarm);
  ASSERT_EQ(size, 0);
  size = GetSstSizeHelper(Temperature::kHot);
  ASSERT_EQ(size, 0);
  size = GetSstSizeHelper(Temperature::kCold);
  ASSERT_EQ(size, 0);
}

TEST_F(ExternalSSTFileBasicTest, NoCopy) {
  Options options = CurrentOptions();
  const ImmutableCFOptions ioptions(options);

  SstFileWriter sst_file_writer(EnvOptions(), options);

  // file1.sst (0 => 99)
  std::string file1 = sst_files_dir_ + "file1.sst";
  ASSERT_OK(sst_file_writer.Open(file1));
  for (int k = 0; k < 100; k++) {
    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
  }
  ExternalSstFileInfo file1_info;
  Status s = sst_file_writer.Finish(&file1_info);
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(file1_info.file_path, file1);
  ASSERT_EQ(file1_info.num_entries, 100);
  ASSERT_EQ(file1_info.smallest_key, Key(0));
  ASSERT_EQ(file1_info.largest_key, Key(99));

  // file2.sst (100 => 299)
  std::string file2 = sst_files_dir_ + "file2.sst";
  ASSERT_OK(sst_file_writer.Open(file2));
  for (int k = 100; k < 300; k++) {
    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
  }
  ExternalSstFileInfo file2_info;
  s = sst_file_writer.Finish(&file2_info);
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(file2_info.file_path, file2);
  ASSERT_EQ(file2_info.num_entries, 200);
  ASSERT_EQ(file2_info.smallest_key, Key(100));
  ASSERT_EQ(file2_info.largest_key, Key(299));

  // file3.sst (110 => 124) .. overlap with file2.sst
  std::string file3 = sst_files_dir_ + "file3.sst";
  ASSERT_OK(sst_file_writer.Open(file3));
  for (int k = 110; k < 125; k++) {
    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val_overlap"));
  }
  ExternalSstFileInfo file3_info;
  s = sst_file_writer.Finish(&file3_info);
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(file3_info.file_path, file3);
  ASSERT_EQ(file3_info.num_entries, 15);
  ASSERT_EQ(file3_info.smallest_key, Key(110));
  ASSERT_EQ(file3_info.largest_key, Key(124));

  s = DeprecatedAddFile({file1}, true /* move file */);
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(Status::NotFound(), env_->FileExists(file1));

  s = DeprecatedAddFile({file2}, false /* copy file */);
  ASSERT_OK(s) << s.ToString();
  ASSERT_OK(env_->FileExists(file2));

  // This file has overlapping values with the existing data
  s = DeprecatedAddFile({file3}, true /* move file */);
  ASSERT_NOK(s) << s.ToString();
  ASSERT_OK(env_->FileExists(file3));

  for (int k = 0; k < 300; k++) {
    ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
  }
}
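// Aside: a minimal sketch (not used by this test) of the non-deprecated API
// behind the move-vs-copy behavior exercised through the DeprecatedAddFile()
// helper above. It assumes only the public IngestExternalFileOptions fields
// shown here; the helper name is hypothetical.
#include <string>
#include <vector>

#include "rocksdb/db.h"

ROCKSDB_NAMESPACE::Status IngestByMove(ROCKSDB_NAMESPACE::DB* db,
                                       const std::string& sst_path) {
  ROCKSDB_NAMESPACE::IngestExternalFileOptions ifo;
  // Link or move the file into the DB instead of copying it; the source path
  // disappears on success, as the move case above checks with FileExists().
  ifo.move_files = true;
  return db->IngestExternalFile({sst_path}, ifo);
}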

TEST_P(ExternalSSTFileBasicTest, IngestFileWithGlobalSeqnoPickedSeqno) {
  bool write_global_seqno = std::get<0>(GetParam());
  bool verify_checksums_before_ingest = std::get<1>(GetParam());
  do {
    Options options = CurrentOptions();
    options.disable_auto_compactions = true;
    DestroyAndReopen(options);
    std::map<std::string, std::string> true_data;

    int file_id = 1;

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1, 2, 3, 4, 5, 6}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 0);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {10, 11, 12, 13}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 0);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1, 4, 6}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 1);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {11, 15, 19}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 2);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {120, 130}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 2);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1, 130}, ValueType::kTypeValue, file_id++, write_global_seqno,
        verify_checksums_before_ingest, &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3);

    // Write some keys through normal write path
    for (int i = 0; i < 50; i++) {
      ASSERT_OK(Put(Key(i), "memtable"));
      true_data[Key(i)] = "memtable";
    }
    SequenceNumber last_seqno = dbfull()->GetLatestSequenceNumber();

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {60, 61, 62}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {40, 41, 42}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 1);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {20, 30, 40}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 2);

    const Snapshot* snapshot = db_->GetSnapshot();

    // We will need a seqno for the file regardless of whether the file
    // overwrites keys in the DB or not, because we have a snapshot
    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1000, 1002}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // A global seqno will be assigned anyway because of the snapshot
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 3);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {2000, 3002}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // A global seqno will be assigned anyway because of the snapshot
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 4);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1, 20, 40, 100, 150}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // A global seqno will be assigned anyway because of the snapshot
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 5);

    db_->ReleaseSnapshot(snapshot);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {5000, 5001}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // No snapshot anymore, no need to assign a seqno
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 5);

    size_t kcnt = 0;
    VerifyDBFromMap(true_data, &kcnt, false);
  } while (ChangeOptionsForFileIngestionTest());
}
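// For reference, a minimal sketch of the two public ingestion options that the
// booleans parameterizing this test correspond to; the helper is hypothetical
// and defaults may vary between RocksDB releases.
#include "rocksdb/options.h"

ROCKSDB_NAMESPACE::IngestExternalFileOptions MakeIngestOptions(
    bool write_global_seqno, bool verify_checksums_before_ingest) {
  ROCKSDB_NAMESPACE::IngestExternalFileOptions ifo;
  // Physically rewrite the global sequence number field of the ingested file
  // rather than relying only on the value recorded in the MANIFEST.
  ifo.write_global_seqno = write_global_seqno;
  // Verify the block checksums of the external file before ingesting it.
  ifo.verify_checksums_before_ingest = verify_checksums_before_ingest;
  return ifo;
}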

TEST_P(ExternalSSTFileBasicTest, IngestFileWithMultipleValueType) {
  bool write_global_seqno = std::get<0>(GetParam());
  bool verify_checksums_before_ingest = std::get<1>(GetParam());
  do {
    Options options = CurrentOptions();
    options.disable_auto_compactions = true;
    options.merge_operator.reset(new TestPutOperator());
    DestroyAndReopen(options);
    std::map<std::string, std::string> true_data;

    int file_id = 1;

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1, 2, 3, 4, 5, 6}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 0);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {10, 11, 12, 13}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 0);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1, 4, 6}, ValueType::kTypeMerge, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 1);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {11, 15, 19}, ValueType::kTypeDeletion, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 2);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {120, 130}, ValueType::kTypeMerge, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 2);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1, 130}, ValueType::kTypeDeletion, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {120}, {ValueType::kTypeValue}, {{120, 135}}, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 4);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {}, {}, {{110, 120}}, file_id++, write_global_seqno,
        verify_checksums_before_ingest, &true_data));
    // The range deletion ends on a key, but it doesn't actually delete
    // this key because the largest key in the range is exclusive. Still,
    // it counts as an overlap so a new seqno will be assigned.
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 5);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {}, {}, {{100, 109}}, file_id++, write_global_seqno,
        verify_checksums_before_ingest, &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 5);

    // Write some keys through normal write path
    for (int i = 0; i < 50; i++) {
      ASSERT_OK(Put(Key(i), "memtable"));
      true_data[Key(i)] = "memtable";
    }
    SequenceNumber last_seqno = dbfull()->GetLatestSequenceNumber();

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {60, 61, 62}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {40, 41, 42}, ValueType::kTypeMerge, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 1);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {20, 30, 40}, ValueType::kTypeDeletion, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 2);

    const Snapshot* snapshot = db_->GetSnapshot();

    // We will need a seqno for the file regardless of whether the file
    // overwrites keys in the DB or not, because we have a snapshot
    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1000, 1002}, ValueType::kTypeMerge, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // A global seqno will be assigned anyway because of the snapshot
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 3);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {2000, 3002}, ValueType::kTypeMerge, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // A global seqno will be assigned anyway because of the snapshot
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 4);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1, 20, 40, 100, 150}, ValueType::kTypeMerge, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // A global seqno will be assigned anyway because of the snapshot
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 5);

    db_->ReleaseSnapshot(snapshot);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {5000, 5001}, ValueType::kTypeValue, file_id++,
        write_global_seqno, verify_checksums_before_ingest, &true_data));
    // No snapshot anymore, no need to assign a seqno
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 5);

    size_t kcnt = 0;
    VerifyDBFromMap(true_data, &kcnt, false);
  } while (ChangeOptionsForFileIngestionTest());
}
|
|
|
|
|
2018-11-01 23:21:30 +00:00
|
|
|
TEST_P(ExternalSSTFileBasicTest, IngestFileWithMixedValueType) {
  bool write_global_seqno = std::get<0>(GetParam());
  bool verify_checksums_before_ingest = std::get<1>(GetParam());
  do {
    Options options = CurrentOptions();
    options.disable_auto_compactions = true;
    options.merge_operator.reset(new TestPutOperator());
    DestroyAndReopen(options);
    std::map<std::string, std::string> true_data;

    int file_id = 1;

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1, 2, 3, 4, 5, 6},
        {ValueType::kTypeValue, ValueType::kTypeMerge, ValueType::kTypeValue,
         ValueType::kTypeMerge, ValueType::kTypeValue, ValueType::kTypeMerge},
        file_id++, write_global_seqno, verify_checksums_before_ingest,
        &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 0);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {10, 11, 12, 13},
        {ValueType::kTypeValue, ValueType::kTypeMerge, ValueType::kTypeValue,
         ValueType::kTypeMerge},
        file_id++, write_global_seqno, verify_checksums_before_ingest,
        &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 0);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1, 4, 6},
        {ValueType::kTypeDeletion, ValueType::kTypeValue,
         ValueType::kTypeMerge},
        file_id++, write_global_seqno, verify_checksums_before_ingest,
        &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 1);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {11, 15, 19},
        {ValueType::kTypeDeletion, ValueType::kTypeMerge,
         ValueType::kTypeValue},
        file_id++, write_global_seqno, verify_checksums_before_ingest,
        &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 2);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {120, 130}, {ValueType::kTypeValue, ValueType::kTypeMerge},
        file_id++, write_global_seqno, verify_checksums_before_ingest,
        &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 2);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1, 130}, {ValueType::kTypeMerge, ValueType::kTypeDeletion},
        file_id++, write_global_seqno, verify_checksums_before_ingest,
        &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {150, 151, 152},
        {ValueType::kTypeValue, ValueType::kTypeMerge,
         ValueType::kTypeDeletion},
        {{150, 160}, {180, 190}}, file_id++, write_global_seqno,
        verify_checksums_before_ingest, &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {150, 151, 152},
        {ValueType::kTypeValue, ValueType::kTypeMerge, ValueType::kTypeValue},
        {{200, 250}}, file_id++, write_global_seqno,
        verify_checksums_before_ingest, &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 4);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {300, 301, 302},
        {ValueType::kTypeValue, ValueType::kTypeMerge,
         ValueType::kTypeDeletion},
        {{1, 2}, {152, 154}}, file_id++, write_global_seqno,
        verify_checksums_before_ingest, &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 5);

    // Write some keys through normal write path
    for (int i = 0; i < 50; i++) {
      ASSERT_OK(Put(Key(i), "memtable"));
      true_data[Key(i)] = "memtable";
    }
    SequenceNumber last_seqno = dbfull()->GetLatestSequenceNumber();

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {60, 61, 62},
        {ValueType::kTypeValue, ValueType::kTypeMerge, ValueType::kTypeValue},
        file_id++, write_global_seqno, verify_checksums_before_ingest,
        &true_data));
    // File doesn't overwrite any keys, no seqno needed
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {40, 41, 42},
        {ValueType::kTypeValue, ValueType::kTypeDeletion,
         ValueType::kTypeDeletion},
        file_id++, write_global_seqno, verify_checksums_before_ingest,
        &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 1);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {20, 30, 40},
        {ValueType::kTypeDeletion, ValueType::kTypeDeletion,
         ValueType::kTypeDeletion},
        file_id++, write_global_seqno, verify_checksums_before_ingest,
        &true_data));
    // File overwrites some keys, a seqno will be assigned
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 2);

    const Snapshot* snapshot = db_->GetSnapshot();

    // We will need a seqno for the file regardless of whether the file
    // overwrites keys in the DB or not, because we have a snapshot
    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1000, 1002}, {ValueType::kTypeValue, ValueType::kTypeMerge},
        file_id++, write_global_seqno, verify_checksums_before_ingest,
        &true_data));
    // A global seqno will be assigned anyway because of the snapshot
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 3);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {2000, 3002}, {ValueType::kTypeValue, ValueType::kTypeMerge},
        file_id++, write_global_seqno, verify_checksums_before_ingest,
        &true_data));
    // A global seqno will be assigned anyway because of the snapshot
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 4);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {1, 20, 40, 100, 150},
        {ValueType::kTypeDeletion, ValueType::kTypeDeletion,
         ValueType::kTypeValue, ValueType::kTypeMerge, ValueType::kTypeMerge},
        file_id++, write_global_seqno, verify_checksums_before_ingest,
        &true_data));
    // A global seqno will be assigned anyway because of the snapshot
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 5);

    db_->ReleaseSnapshot(snapshot);

    ASSERT_OK(GenerateAndAddExternalFile(
        options, {5000, 5001}, {ValueType::kTypeValue, ValueType::kTypeMerge},
        file_id++, write_global_seqno, verify_checksums_before_ingest,
        &true_data));
    // No snapshot anymore, no need to assign a seqno
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 5);

    size_t kcnt = 0;
    VerifyDBFromMap(true_data, &kcnt, false);
  } while (ChangeOptionsForFileIngestionTest());
}

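// FadviseTrigger checks that SstFileWriter invalidates the OS page cache for
// the bytes it wrote only when constructed with invalidation enabled (the
// false vs. true constructor argument below); the sync point callback tallies
// the byte counts reported by SstFileWriter::Rep::InvalidatePageCache.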
TEST_F(ExternalSSTFileBasicTest, FadviseTrigger) {
  Options options = CurrentOptions();
  const int kNumKeys = 10000;

  size_t total_fadvised_bytes = 0;
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "SstFileWriter::Rep::InvalidatePageCache", [&](void* arg) {
        size_t fadvise_size = *(static_cast<size_t*>(arg));
        total_fadvised_bytes += fadvise_size;
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

  std::unique_ptr<SstFileWriter> sst_file_writer;

  std::string sst_file_path = sst_files_dir_ + "file_fadvise_disable.sst";
  sst_file_writer.reset(
      new SstFileWriter(EnvOptions(), options, nullptr, false));
  ASSERT_OK(sst_file_writer->Open(sst_file_path));
  for (int i = 0; i < kNumKeys; i++) {
    ASSERT_OK(sst_file_writer->Put(Key(i), Key(i)));
  }
  ASSERT_OK(sst_file_writer->Finish());
  // fadvise disabled
  ASSERT_EQ(total_fadvised_bytes, 0);

  sst_file_path = sst_files_dir_ + "file_fadvise_enable.sst";
  sst_file_writer.reset(
      new SstFileWriter(EnvOptions(), options, nullptr, true));
  ASSERT_OK(sst_file_writer->Open(sst_file_path));
  for (int i = 0; i < kNumKeys; i++) {
    ASSERT_OK(sst_file_writer->Put(Key(i), Key(i)));
  }
  ASSERT_OK(sst_file_writer->Finish());
  // fadvise enabled
  ASSERT_EQ(total_fadvised_bytes, sst_file_writer->FileSize());
  ASSERT_GT(total_fadvised_bytes, 0);

  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}

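// SyncFailure makes the fault injection env fail the filesystem between each
// Before/After sync point pair hit during ingestion (ingested file sync,
// directory sync, global seqno rewrite). Ingestion must fail unless the
// callbacks observe NotSupported, in which case no sync is attempted and the
// ingestion is expected to succeed.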
TEST_F(ExternalSSTFileBasicTest, SyncFailure) {
  Options options;
  options.create_if_missing = true;
  options.env = fault_injection_test_env_.get();

  std::vector<std::pair<std::string, std::string>> test_cases = {
      {"ExternalSstFileIngestionJob::BeforeSyncIngestedFile",
       "ExternalSstFileIngestionJob::AfterSyncIngestedFile"},
      {"ExternalSstFileIngestionJob::BeforeSyncDir",
       "ExternalSstFileIngestionJob::AfterSyncDir"},
      {"ExternalSstFileIngestionJob::BeforeSyncGlobalSeqno",
       "ExternalSstFileIngestionJob::AfterSyncGlobalSeqno"}};

  for (size_t i = 0; i < test_cases.size(); i++) {
    bool no_sync = false;
    SyncPoint::GetInstance()->SetCallBack(test_cases[i].first, [&](void*) {
      fault_injection_test_env_->SetFilesystemActive(false);
    });
    SyncPoint::GetInstance()->SetCallBack(test_cases[i].second, [&](void*) {
      fault_injection_test_env_->SetFilesystemActive(true);
    });
    if (i == 0) {
      SyncPoint::GetInstance()->SetCallBack(
          "ExternalSstFileIngestionJob::Prepare:Reopen", [&](void* s) {
            Status* status = static_cast<Status*>(s);
            if (status->IsNotSupported()) {
              no_sync = true;
            }
          });
    }
    if (i == 2) {
      SyncPoint::GetInstance()->SetCallBack(
          "ExternalSstFileIngestionJob::NewRandomRWFile", [&](void* s) {
            Status* status = static_cast<Status*>(s);
            if (status->IsNotSupported()) {
              no_sync = true;
            }
          });
    }
    SyncPoint::GetInstance()->EnableProcessing();

    DestroyAndReopen(options);
    if (i == 2) {
      ASSERT_OK(Put("foo", "v1"));
    }

    Options sst_file_writer_options;
    sst_file_writer_options.env = fault_injection_test_env_.get();
    std::unique_ptr<SstFileWriter> sst_file_writer(
        new SstFileWriter(EnvOptions(), sst_file_writer_options));
    std::string file_name =
        sst_files_dir_ + "sync_failure_test_" + std::to_string(i) + ".sst";
    ASSERT_OK(sst_file_writer->Open(file_name));
    ASSERT_OK(sst_file_writer->Put("bar", "v2"));
    ASSERT_OK(sst_file_writer->Finish());

    IngestExternalFileOptions ingest_opt;
    ASSERT_FALSE(ingest_opt.write_global_seqno);  // new default
    if (i == 0) {
      ingest_opt.move_files = true;
    }
    const Snapshot* snapshot = db_->GetSnapshot();
    if (i == 2) {
      ingest_opt.write_global_seqno = true;
    }
    Status s = db_->IngestExternalFile({file_name}, ingest_opt);
    if (no_sync) {
      ASSERT_OK(s);
    } else {
      ASSERT_NOK(s);
    }
    db_->ReleaseSnapshot(snapshot);

    SyncPoint::GetInstance()->DisableProcessing();
    SyncPoint::GetInstance()->ClearAllCallBacks();
    Destroy(options);
  }
}

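// ReopenNotSupported forces the "ExternalSstFileIngestionJob::Prepare:Reopen"
// sync point to report NotSupported and verifies that ingestion with
// move_files still succeeds on such a filesystem.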
TEST_F(ExternalSSTFileBasicTest, ReopenNotSupported) {
  Options options;
  options.create_if_missing = true;
  options.env = env_;

  SyncPoint::GetInstance()->SetCallBack(
      "ExternalSstFileIngestionJob::Prepare:Reopen", [&](void* arg) {
        Status* s = static_cast<Status*>(arg);
        *s = Status::NotSupported();
      });
  SyncPoint::GetInstance()->EnableProcessing();

  DestroyAndReopen(options);

  Options sst_file_writer_options;
  sst_file_writer_options.env = env_;
  std::unique_ptr<SstFileWriter> sst_file_writer(
      new SstFileWriter(EnvOptions(), sst_file_writer_options));
  std::string file_name =
      sst_files_dir_ + "reopen_not_supported_test_" + ".sst";
  ASSERT_OK(sst_file_writer->Open(file_name));
  ASSERT_OK(sst_file_writer->Put("bar", "v2"));
  ASSERT_OK(sst_file_writer->Finish());

  IngestExternalFileOptions ingest_opt;
  ingest_opt.move_files = true;
  const Snapshot* snapshot = db_->GetSnapshot();
  ASSERT_OK(db_->IngestExternalFile({file_name}, ingest_opt));
  db_->ReleaseSnapshot(snapshot);

  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();
  Destroy(options);
}

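// VerifyChecksumReadahead checks that verify_checksums_before_ingest honors
// verify_checksums_readahead_size: with 2MB readahead over a roughly 20MB
// file, the number of extra random reads stays within a small bound.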
TEST_F(ExternalSSTFileBasicTest, VerifyChecksumReadahead) {
  Options options;
  options.create_if_missing = true;
  SpecialEnv senv(env_);
  options.env = &senv;
  DestroyAndReopen(options);

  Options sst_file_writer_options;
  sst_file_writer_options.env = env_;
  std::unique_ptr<SstFileWriter> sst_file_writer(
      new SstFileWriter(EnvOptions(), sst_file_writer_options));
  std::string file_name = sst_files_dir_ + "verify_checksum_readahead_test.sst";
  ASSERT_OK(sst_file_writer->Open(file_name));
  Random rnd(301);
  std::string value = rnd.RandomString(4000);
  for (int i = 0; i < 5000; i++) {
    ASSERT_OK(sst_file_writer->Put(DBTestBase::Key(i), value));
  }
  ASSERT_OK(sst_file_writer->Finish());

  // Ingest it once without verifying checksums to see the baseline
  // preads.
  IngestExternalFileOptions ingest_opt;
  ingest_opt.move_files = false;
  senv.count_random_reads_ = true;
  senv.random_read_bytes_counter_ = 0;
  ASSERT_OK(db_->IngestExternalFile({file_name}, ingest_opt));

  auto base_num_reads = senv.random_read_counter_.Read();
  // Make sure the counter is enabled.
  ASSERT_GT(base_num_reads, 0);

  // Ingest again and observe the reads made for readahead.
  ingest_opt.move_files = false;
  ingest_opt.verify_checksums_before_ingest = true;
  ingest_opt.verify_checksums_readahead_size = size_t{2 * 1024 * 1024};

  senv.count_random_reads_ = true;
  senv.random_read_bytes_counter_ = 0;
  ASSERT_OK(db_->IngestExternalFile({file_name}, ingest_opt));

  // Make sure the counter is enabled.
  ASSERT_GT(senv.random_read_counter_.Read() - base_num_reads, 0);

  // The SST file is about 20MB. Readahead size is 2MB. Giving a conservative
  // 15 reads for metadata blocks, the number of random reads should be around
  // 20 MB / 2 MB + 15 = 25; the assertion below allows up to 40.
  ASSERT_LE(senv.random_read_counter_.Read() - base_num_reads, 40);

  Destroy(options);
}

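// Regression test: after ingesting a file that overwrites "k2overlap" (which
// already exists in L2) and running a full compaction, reads must return the
// ingested value rather than the older value from the LSM tree.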
TEST_F(ExternalSSTFileBasicTest, ReadOldValueOfIngestedKeyBug) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.disable_auto_compactions = true;
  options.num_levels = 3;
  options.preserve_internal_time_seconds = 36000;
  DestroyAndReopen(options);

  // To create the following LSM tree to trigger the bug:
  // L0
  // L1 with seqno [1, 2]
  // L2 with seqno [3, 4]

  // To create the L1 shape
  ASSERT_OK(
      db_->Put(WriteOptions(), db_->DefaultColumnFamily(), "k1", "seqno1"));
  ASSERT_OK(db_->Flush(FlushOptions()));
  ASSERT_OK(
      db_->Put(WriteOptions(), db_->DefaultColumnFamily(), "k1", "seqno2"));
  ASSERT_OK(db_->Flush(FlushOptions()));
  ColumnFamilyMetaData meta_1;
  db_->GetColumnFamilyMetaData(&meta_1);
  auto& files_1 = meta_1.levels[0].files;
  ASSERT_EQ(files_1.size(), 2);
  std::string file1 = files_1[0].db_path + files_1[0].name;
  std::string file2 = files_1[1].db_path + files_1[1].name;
  ASSERT_OK(db_->CompactFiles(CompactionOptions(), {file1, file2}, 1));
  // To confirm the L1 shape
  ColumnFamilyMetaData meta_2;
  db_->GetColumnFamilyMetaData(&meta_2);
  ASSERT_EQ(meta_2.levels[0].files.size(), 0);
  ASSERT_EQ(meta_2.levels[1].files.size(), 1);
  // Seqno starts from non-zero due to seqno reservation for
  // preserve_internal_time_seconds greater than 0.
  ASSERT_EQ(meta_2.levels[1].files[0].largest_seqno, 102);
  ASSERT_EQ(meta_2.levels[2].files.size(), 0);
  // To create the L2 shape
  ASSERT_OK(db_->Put(WriteOptions(), db_->DefaultColumnFamily(), "k2overlap",
                     "old_value"));
  ASSERT_OK(db_->Flush(FlushOptions()));
  ASSERT_OK(db_->Put(WriteOptions(), db_->DefaultColumnFamily(), "k2overlap",
                     "old_value"));
  ASSERT_OK(db_->Flush(FlushOptions()));
  ColumnFamilyMetaData meta_3;
  db_->GetColumnFamilyMetaData(&meta_3);
  auto& files_3 = meta_3.levels[0].files;
  std::string file3 = files_3[0].db_path + files_3[0].name;
  std::string file4 = files_3[1].db_path + files_3[1].name;
  ASSERT_OK(db_->CompactFiles(CompactionOptions(), {file3, file4}, 2));
  // To confirm the L2 shape
  ColumnFamilyMetaData meta_4;
  db_->GetColumnFamilyMetaData(&meta_4);
  ASSERT_EQ(meta_4.levels[0].files.size(), 0);
  ASSERT_EQ(meta_4.levels[1].files.size(), 1);
  ASSERT_EQ(meta_4.levels[2].files.size(), 1);
  ASSERT_EQ(meta_4.levels[2].files[0].largest_seqno, 104);

  // Ingest a file with a new value of the key "k2overlap"
  SstFileWriter sst_file_writer(EnvOptions(), options);
  std::string f = sst_files_dir_ + "f.sst";
  ASSERT_OK(sst_file_writer.Open(f));
  ASSERT_OK(sst_file_writer.Put("k2overlap", "new_value"));
  ExternalSstFileInfo f_info;
  ASSERT_OK(sst_file_writer.Finish(&f_info));
  ASSERT_OK(db_->IngestExternalFile({f}, IngestExternalFileOptions()));

  // Verify that the new value of the key "k2overlap" is correctly returned
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  std::string value;
  ASSERT_OK(db_->Get(ReadOptions(), "k2overlap", &value));
  // Before the fix, the value would be "old_value" and this assertion failed
  ASSERT_EQ(value, "new_value");
}

TEST_F(ExternalSSTFileBasicTest, IngestRangeDeletionTombstoneWithGlobalSeqno) {
  for (int i = 5; i < 25; i++) {
    ASSERT_OK(db_->Put(WriteOptions(), db_->DefaultColumnFamily(), Key(i),
                       Key(i) + "_val"));
  }

  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  Reopen(options);
  SstFileWriter sst_file_writer(EnvOptions(), options);

  // file.sst (delete 0 => 30)
  std::string file = sst_files_dir_ + "file.sst";
  ASSERT_OK(sst_file_writer.Open(file));
  ASSERT_OK(sst_file_writer.DeleteRange(Key(0), Key(30)));
  ExternalSstFileInfo file_info;
  ASSERT_OK(sst_file_writer.Finish(&file_info));
  ASSERT_EQ(file_info.file_path, file);
  ASSERT_EQ(file_info.num_entries, 0);
  ASSERT_EQ(file_info.smallest_key, "");
  ASSERT_EQ(file_info.largest_key, "");
  ASSERT_EQ(file_info.num_range_del_entries, 1);
  ASSERT_EQ(file_info.smallest_range_del_key, Key(0));
  ASSERT_EQ(file_info.largest_range_del_key, Key(30));

  IngestExternalFileOptions ifo;
  ifo.move_files = true;
  ifo.snapshot_consistency = true;
  ifo.allow_global_seqno = true;
  ifo.write_global_seqno = true;
  ifo.verify_checksums_before_ingest = false;
  ASSERT_OK(db_->IngestExternalFile({file}, ifo));

  for (int i = 5; i < 25; i++) {
    std::string res;
    ASSERT_TRUE(db_->Get(ReadOptions(), Key(i), &res).IsNotFound());
  }
}

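// IngestionWithRangeDeletions checks target level selection: the ingested
// file is placed as low in the tree as possible without overlapping existing
// files or range tombstones, and a flush is forced only when it overlaps the
// memtable.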
TEST_P(ExternalSSTFileBasicTest, IngestionWithRangeDeletions) {
  int kNumLevels = 7;
  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.num_levels = kNumLevels;
  Reopen(options);

  std::map<std::string, std::string> true_data;
  int file_id = 1;
  // prevent range deletions from being dropped due to becoming obsolete.
  const Snapshot* snapshot = db_->GetSnapshot();

  // range del [0, 50) in L6 file, [50, 100) in L0 file, [100, 150) in memtable
  for (int i = 0; i < 3; i++) {
    if (i != 0) {
      ASSERT_OK(db_->Flush(FlushOptions()));
      if (i == 1) {
        MoveFilesToLevel(kNumLevels - 1);
      }
    }
    ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
                               Key(50 * i), Key(50 * (i + 1))));
  }
  ASSERT_EQ(1, NumTableFilesAtLevel(0));
  ASSERT_EQ(0, NumTableFilesAtLevel(kNumLevels - 2));
  ASSERT_EQ(1, NumTableFilesAtLevel(kNumLevels - 1));

  bool write_global_seqno = std::get<0>(GetParam());
  bool verify_checksums_before_ingest = std::get<1>(GetParam());
  // overlaps with L0 file but not memtable, so flush is skipped and file is
  // ingested into L0
  SequenceNumber last_seqno = dbfull()->GetLatestSequenceNumber();
  ASSERT_OK(GenerateAndAddExternalFile(
      options, {60, 90}, {ValueType::kTypeValue, ValueType::kTypeValue},
      {{65, 70}, {70, 85}}, file_id++, write_global_seqno,
      verify_checksums_before_ingest, &true_data));
  ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), ++last_seqno);
  ASSERT_EQ(2, NumTableFilesAtLevel(0));
  ASSERT_EQ(0, NumTableFilesAtLevel(kNumLevels - 2));
  ASSERT_EQ(1, NumTableFilesAtLevel(options.num_levels - 1));

  // overlaps with L6 file but not memtable or L0 file, so flush is skipped and
  // file is ingested into L5
  ASSERT_OK(GenerateAndAddExternalFile(
      options, {10, 40}, {ValueType::kTypeValue, ValueType::kTypeValue},
      file_id++, write_global_seqno, verify_checksums_before_ingest,
      &true_data));
  ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), ++last_seqno);
  ASSERT_EQ(2, NumTableFilesAtLevel(0));
  ASSERT_EQ(1, NumTableFilesAtLevel(kNumLevels - 2));
  ASSERT_EQ(1, NumTableFilesAtLevel(options.num_levels - 1));

  // overlaps with L5 file but not memtable or L0 file, so flush is skipped and
  // file is ingested into L4
  ASSERT_OK(GenerateAndAddExternalFile(
      options, {}, {}, {{5, 15}}, file_id++, write_global_seqno,
      verify_checksums_before_ingest, &true_data));
  ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), ++last_seqno);
  ASSERT_EQ(2, NumTableFilesAtLevel(0));
  ASSERT_EQ(1, NumTableFilesAtLevel(kNumLevels - 2));
  ASSERT_EQ(1, NumTableFilesAtLevel(options.num_levels - 2));
  ASSERT_EQ(1, NumTableFilesAtLevel(options.num_levels - 1));

  // ingested file overlaps with memtable, so flush is triggered before the
  // file is ingested such that the ingested data is considered newest. So L0
  // file count increases by two.
  ASSERT_OK(GenerateAndAddExternalFile(
      options, {100, 140}, {ValueType::kTypeValue, ValueType::kTypeValue},
      file_id++, write_global_seqno, verify_checksums_before_ingest,
      &true_data));
  ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), ++last_seqno);
  ASSERT_EQ(4, NumTableFilesAtLevel(0));
  ASSERT_EQ(1, NumTableFilesAtLevel(kNumLevels - 2));
  ASSERT_EQ(1, NumTableFilesAtLevel(options.num_levels - 1));

  // snapshot unneeded now that all range deletions are persisted
  db_->ReleaseSnapshot(snapshot);

  // overlaps with nothing, so it is placed at the bottom level and the seqno
  // is not incremented.
  ASSERT_OK(GenerateAndAddExternalFile(
      options, {151, 175}, {ValueType::kTypeValue, ValueType::kTypeValue},
      {{160, 200}}, file_id++, write_global_seqno,
      verify_checksums_before_ingest, &true_data));
  ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno);
  ASSERT_EQ(4, NumTableFilesAtLevel(0));
  ASSERT_EQ(1, NumTableFilesAtLevel(kNumLevels - 2));
  ASSERT_EQ(2, NumTableFilesAtLevel(options.num_levels - 1));
  VerifyDBFromMap(true_data);
}

TEST_F(ExternalSSTFileBasicTest, AdjacentRangeDeletionTombstones) {
  Options options = CurrentOptions();
  SstFileWriter sst_file_writer(EnvOptions(), options);

  // file8.sst (delete 300 => 400)
  std::string file8 = sst_files_dir_ + "file8.sst";
  ASSERT_OK(sst_file_writer.Open(file8));
  ASSERT_OK(sst_file_writer.DeleteRange(Key(300), Key(400)));
  ExternalSstFileInfo file8_info;
  Status s = sst_file_writer.Finish(&file8_info);
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(file8_info.file_path, file8);
  ASSERT_EQ(file8_info.num_entries, 0);
  ASSERT_EQ(file8_info.smallest_key, "");
  ASSERT_EQ(file8_info.largest_key, "");
  ASSERT_EQ(file8_info.num_range_del_entries, 1);
  ASSERT_EQ(file8_info.smallest_range_del_key, Key(300));
  ASSERT_EQ(file8_info.largest_range_del_key, Key(400));

  // file9.sst (delete 400 => 500)
  std::string file9 = sst_files_dir_ + "file9.sst";
  ASSERT_OK(sst_file_writer.Open(file9));
  ASSERT_OK(sst_file_writer.DeleteRange(Key(400), Key(500)));
  ExternalSstFileInfo file9_info;
  s = sst_file_writer.Finish(&file9_info);
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(file9_info.file_path, file9);
  ASSERT_EQ(file9_info.num_entries, 0);
  ASSERT_EQ(file9_info.smallest_key, "");
  ASSERT_EQ(file9_info.largest_key, "");
  ASSERT_EQ(file9_info.num_range_del_entries, 1);
  ASSERT_EQ(file9_info.smallest_range_del_key, Key(400));
  ASSERT_EQ(file9_info.largest_range_del_key, Key(500));

  // Range deletion tombstones are exclusive on their end key, so these SSTs
  // should not be considered as overlapping.
  s = DeprecatedAddFile({file8, file9});
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U);
  DestroyAndRecreateExternalSSTFilesDir();
}

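// UnorderedRangeDeletions hands range tombstones to the ingested files out of
// key order and verifies they are still applied correctly, whether they cover
// nothing, cover keys in lower levels, or overlap the memtable.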
TEST_F(ExternalSSTFileBasicTest, UnorderedRangeDeletions) {
  int kNumLevels = 7;
  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.num_levels = kNumLevels;
  Reopen(options);

  std::map<std::string, std::string> true_data;
  int file_id = 1;

  // prevent range deletions from being dropped due to becoming obsolete.
  const Snapshot* snapshot = db_->GetSnapshot();

  // Range del [0, 50) in memtable
  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), Key(0),
                             Key(50)));

  // Out of order range del overlaps memtable, so flush is required before file
  // is ingested into L0
  ASSERT_OK(GenerateAndAddExternalFile(
      options, {60, 90}, {ValueType::kTypeValue, ValueType::kTypeValue},
      {{65, 70}, {45, 50}}, file_id++, true /* write_global_seqno */,
      true /* verify_checksums_before_ingest */, &true_data));
  ASSERT_EQ(2, true_data.size());
  ASSERT_EQ(2, NumTableFilesAtLevel(0));
  ASSERT_EQ(0, NumTableFilesAtLevel(kNumLevels - 1));
  VerifyDBFromMap(true_data);

  // Compact to L6
  MoveFilesToLevel(kNumLevels - 1);
  ASSERT_EQ(0, NumTableFilesAtLevel(0));
  ASSERT_EQ(1, NumTableFilesAtLevel(kNumLevels - 1));
  VerifyDBFromMap(true_data);

  // Ingest a file containing out of order range dels that cover nothing
  ASSERT_OK(GenerateAndAddExternalFile(
      options, {151, 175}, {ValueType::kTypeValue, ValueType::kTypeValue},
      {{160, 200}, {120, 180}}, file_id++, true /* write_global_seqno */,
      true /* verify_checksums_before_ingest */, &true_data));
  ASSERT_EQ(4, true_data.size());
  ASSERT_EQ(0, NumTableFilesAtLevel(0));
  ASSERT_EQ(2, NumTableFilesAtLevel(kNumLevels - 1));
  VerifyDBFromMap(true_data);

  // Ingest a file containing out of order range dels that cover keys in L6
  ASSERT_OK(GenerateAndAddExternalFile(
      options, {}, {}, {{190, 200}, {170, 180}, {55, 65}}, file_id++,
      true /* write_global_seqno */, true /* verify_checksums_before_ingest */,
      &true_data));
  ASSERT_EQ(2, true_data.size());
  ASSERT_EQ(1, NumTableFilesAtLevel(kNumLevels - 2));
  ASSERT_EQ(2, NumTableFilesAtLevel(kNumLevels - 1));
  VerifyDBFromMap(true_data);

  db_->ReleaseSnapshot(snapshot);
}

TEST_F(ExternalSSTFileBasicTest, RangeDeletionEndComesBeforeStart) {
  Options options = CurrentOptions();
  SstFileWriter sst_file_writer(EnvOptions(), options);

  // "file.sst"
  // Verify attempt to delete 300 => 200 fails.
  // Then, verify attempt to delete 300 => 300 succeeds but writes nothing.
  // Afterwards, verify attempt to delete 300 => 400 works normally.
  std::string file = sst_files_dir_ + "file.sst";
  ASSERT_OK(sst_file_writer.Open(file));
  ASSERT_TRUE(
      sst_file_writer.DeleteRange(Key(300), Key(200)).IsInvalidArgument());
  ASSERT_OK(sst_file_writer.DeleteRange(Key(300), Key(300)));
  ASSERT_OK(sst_file_writer.DeleteRange(Key(300), Key(400)));
  ExternalSstFileInfo file_info;
  Status s = sst_file_writer.Finish(&file_info);
  ASSERT_OK(s) << s.ToString();
  ASSERT_EQ(file_info.file_path, file);
  ASSERT_EQ(file_info.num_entries, 0);
  ASSERT_EQ(file_info.smallest_key, "");
  ASSERT_EQ(file_info.largest_key, "");
  ASSERT_EQ(file_info.num_range_del_entries, 1);
  ASSERT_EQ(file_info.smallest_range_del_key, Key(300));
  ASSERT_EQ(file_info.largest_range_del_key, Key(400));
}

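// IngestFileWithBadBlockChecksum tampers with one block checksum via a sync
// point while the external file is being built; ingestion must then fail.
// The test only runs when verify_checksums_before_ingest is enabled.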
TEST_P(ExternalSSTFileBasicTest, IngestFileWithBadBlockChecksum) {
  bool verify_checksums_before_ingest = std::get<1>(GetParam());
  if (!verify_checksums_before_ingest) {
    ROCKSDB_GTEST_BYPASS("Bypassing test when !verify_checksums_before_ingest");
    return;
  }
  bool change_checksum_called = false;
  const auto& change_checksum = [&](void* arg) {
    if (!change_checksum_called) {
      char* buf = static_cast<char*>(arg);
      assert(nullptr != buf);
      buf[0] ^= 0x1;
      change_checksum_called = true;
    }
  };
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();
  SyncPoint::GetInstance()->SetCallBack(
      "BlockBasedTableBuilder::WriteMaybeCompressedBlock:TamperWithChecksum",
      change_checksum);
  SyncPoint::GetInstance()->EnableProcessing();
  int file_id = 0;
  bool write_global_seqno = std::get<0>(GetParam());
  do {
    Options options = CurrentOptions();
    DestroyAndReopen(options);
    std::map<std::string, std::string> true_data;
    Status s = GenerateAndAddExternalFile(
        options, {1, 2, 3, 4, 5, 6}, ValueType::kTypeValue, file_id++,
        write_global_seqno, /*verify_checksums_before_ingest=*/true,
        &true_data);
    ASSERT_NOK(s);
    change_checksum_called = false;
  } while (ChangeOptionsForFileIngestionTest());
}

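// IngestFileWithCorruptedDataBlock flips bits in the middle of a data block
// of the finished file. Ingestion must fail when
// verify_checksums_before_ingest is set and succeed otherwise, since only the
// first and last data blocks are read to determine the file's key range.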
TEST_P(ExternalSSTFileBasicTest, IngestFileWithCorruptedDataBlock) {
  if (!random_rwfile_supported_) {
    ROCKSDB_GTEST_SKIP("Test requires NewRandomRWFile support");
    return;
  }
  SyncPoint::GetInstance()->DisableProcessing();
  int file_id = 0;
  EnvOptions env_options;
  Random rnd(301);
  do {
    Options options = CurrentOptions();
    options.compression = kNoCompression;
    BlockBasedTableOptions table_options;
    table_options.block_size = 4 * 1024;
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
    std::string file_path = sst_files_dir_ + std::to_string(file_id++);
    SstFileWriter sst_file_writer(env_options, options);
    Status s = sst_file_writer.Open(file_path);
    ASSERT_OK(s);
    // This should write more than 2 data blocks.
    for (int i = 0; i != 100; ++i) {
      std::string key = Key(i);
      std::string value = rnd.RandomString(200);
      ASSERT_OK(sst_file_writer.Put(key, value));
    }
    ASSERT_OK(sst_file_writer.Finish());
    {
      // Get file size
      uint64_t file_size = 0;
      ASSERT_OK(env_->GetFileSize(file_path, &file_size));
      ASSERT_GT(file_size, 8);
      std::unique_ptr<RandomRWFile> rwfile;
      ASSERT_OK(env_->NewRandomRWFile(file_path, &rwfile, EnvOptions()));
      // Corrupt the second data block.
      // We need to corrupt a non-first and non-last data block
      // since we access them to get smallest and largest internal
      // key in the file in GetIngestedFileInfo().
      const uint64_t offset = 5000;
      char scratch[8] = {0};
      Slice buf;
      ASSERT_OK(rwfile->Read(offset, sizeof(scratch), &buf, scratch));
      scratch[0] ^= 0xff;  // flip bits in one byte
      ASSERT_OK(rwfile->Write(offset, buf));
    }
    // Ingest file.
    IngestExternalFileOptions ifo;
    ifo.write_global_seqno = std::get<0>(GetParam());
    ifo.verify_checksums_before_ingest = std::get<1>(GetParam());
    s = db_->IngestExternalFile({file_path}, ifo);
    if (ifo.verify_checksums_before_ingest) {
      ASSERT_NOK(s);
    } else {
      ASSERT_OK(s);
    }
  } while (ChangeOptionsForFileIngestionTest());
}

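// IngestExternalFileWithCorruptedPropsBlock flips bits at a random offset
// inside the properties block; with verify_checksums_before_ingest enabled
// the ingestion must fail. The properties block offset and size are captured
// through sync points in BlockBasedTableBuilder.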
TEST_P(ExternalSSTFileBasicTest, IngestExternalFileWithCorruptedPropsBlock) {
|
|
|
|
bool verify_checksums_before_ingest = std::get<1>(GetParam());
|
2021-09-13 19:16:51 +00:00
|
|
|
if (!verify_checksums_before_ingest) {
|
|
|
|
ROCKSDB_GTEST_BYPASS("Bypassing test when !verify_checksums_before_ingest");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (!random_rwfile_supported_) {
|
2021-09-09 04:20:38 +00:00
|
|
|
ROCKSDB_GTEST_SKIP("Test requires NewRandomRWFile support");
|
2019-02-11 19:37:07 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
uint64_t props_block_offset = 0;
|
|
|
|
size_t props_block_size = 0;
|
|
|
|
const auto& get_props_block_offset = [&](void* arg) {
|
Prefer static_cast in place of most reinterpret_cast (#12308)
Summary:
The following are risks associated with pointer-to-pointer reinterpret_cast:
* Can produce the "wrong result" (crash or memory corruption). IIRC, in theory this can happen for any up-cast or down-cast for a non-standard-layout type, though in practice would only happen for multiple inheritance cases (where the base class pointer might be "inside" the derived object). We don't use multiple inheritance a lot, but we do.
* Can mask useful compiler errors upon code change, including converting between unrelated pointer types that you are expecting to be related, and converting between pointer and scalar types unintentionally.
I can only think of some obscure cases where static_cast could be troublesome when it compiles as a replacement:
* Going through `void*` could plausibly cause unnecessary or broken pointer arithmetic. Suppose we have
`struct Derived: public Base1, public Base2`. If we have `Derived*` -> `void*` -> `Base2*` -> `Derived*` through reinterpret casts, this could plausibly work (though technical UB) assuming the `Base2*` is not dereferenced. Changing to static cast could introduce breaking pointer arithmetic.
* Unnecessary (but safe) pointer arithmetic could arise in a case like `Derived*` -> `Base2*` -> `Derived*` where before the Base2 pointer might not have been dereferenced. This could potentially affect performance.
With some light scripting, I tried replacing pointer-to-pointer reinterpret_casts with static_cast and kept the cases that still compile. Most occurrences of reinterpret_cast have successfully been changed (except for java/ and third-party/). 294 changed, 257 remain.
A couple of related interventions included here:
* Previously Cache::Handle was not actually derived from in the implementations and just used as a `void*` stand-in with reinterpret_cast. Now there is a relationship to allow static_cast. In theory, this could introduce pointer arithmetic (as described above) but is unlikely without multiple inheritance AND non-empty Cache::Handle.
* Remove some unnecessary casts to void* as this is allowed to be implicit (for better or worse).
Most of the remaining reinterpret_casts are for converting to/from raw bytes of objects. We could consider better idioms for these patterns in follow-up work.
I wish there were a way to implement a template variant of static_cast that would only compile if no pointer arithmetic is generated, but best I can tell, this is not possible. AFAIK the best you could do is a dynamic check that the void* conversion after the static cast is unchanged.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/12308
Test Plan: existing tests, CI
Reviewed By: ltamasi
Differential Revision: D53204947
Pulled By: pdillinger
fbshipit-source-id: 9de23e618263b0d5b9820f4e15966876888a16e2
2024-02-07 18:44:11 +00:00
|
|
|
props_block_offset = *static_cast<uint64_t*>(arg);
|
2019-02-11 19:37:07 +00:00
|
|
|
};
|
|
|
|
const auto& get_props_block_size = [&](void* arg) {
|
Prefer static_cast in place of most reinterpret_cast (#12308)
Summary:
The following are risks associated with pointer-to-pointer reinterpret_cast:
* Can produce the "wrong result" (crash or memory corruption). IIRC, in theory this can happen for any up-cast or down-cast for a non-standard-layout type, though in practice would only happen for multiple inheritance cases (where the base class pointer might be "inside" the derived object). We don't use multiple inheritance a lot, but we do.
* Can mask useful compiler errors upon code change, including converting between unrelated pointer types that you are expecting to be related, and converting between pointer and scalar types unintentionally.
I can only think of some obscure cases where static_cast could be troublesome when it compiles as a replacement:
* Going through `void*` could plausibly cause unnecessary or broken pointer arithmetic. Suppose we have
`struct Derived: public Base1, public Base2`. If we have `Derived*` -> `void*` -> `Base2*` -> `Derived*` through reinterpret casts, this could plausibly work (though technical UB) assuming the `Base2*` is not dereferenced. Changing to static cast could introduce breaking pointer arithmetic.
* Unnecessary (but safe) pointer arithmetic could arise in a case like `Derived*` -> `Base2*` -> `Derived*` where before the Base2 pointer might not have been dereferenced. This could potentially affect performance.
With some light scripting, I tried replacing pointer-to-pointer reinterpret_casts with static_cast and kept the cases that still compile. Most occurrences of reinterpret_cast have successfully been changed (except for java/ and third-party/). 294 changed, 257 remain.
A couple of related interventions included here:
* Previously Cache::Handle was not actually derived from in the implementations and just used as a `void*` stand-in with reinterpret_cast. Now there is a relationship to allow static_cast. In theory, this could introduce pointer arithmetic (as described above) but is unlikely without multiple inheritance AND non-empty Cache::Handle.
* Remove some unnecessary casts to void* as this is allowed to be implicit (for better or worse).
Most of the remaining reinterpret_casts are for converting to/from raw bytes of objects. We could consider better idioms for these patterns in follow-up work.
I wish there were a way to implement a template variant of static_cast that would only compile if no pointer arithmetic is generated, but best I can tell, this is not possible. AFAIK the best you could do is a dynamic check that the void* conversion after the static cast is unchanged.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/12308
Test Plan: existing tests, CI
Reviewed By: ltamasi
Differential Revision: D53204947
Pulled By: pdillinger
fbshipit-source-id: 9de23e618263b0d5b9820f4e15966876888a16e2
2024-02-07 18:44:11 +00:00
|
|
|
props_block_size = *static_cast<uint64_t*>(arg);
|
2019-02-11 19:37:07 +00:00
|
|
|
};
|
|
|
|
SyncPoint::GetInstance()->DisableProcessing();
|
|
|
|
SyncPoint::GetInstance()->ClearAllCallBacks();
|
|
|
|
SyncPoint::GetInstance()->SetCallBack(
|
|
|
|
"BlockBasedTableBuilder::WritePropertiesBlock:GetPropsBlockOffset",
|
|
|
|
get_props_block_offset);
|
|
|
|
SyncPoint::GetInstance()->SetCallBack(
|
|
|
|
"BlockBasedTableBuilder::WritePropertiesBlock:GetPropsBlockSize",
|
|
|
|
get_props_block_size);
|
|
|
|
SyncPoint::GetInstance()->EnableProcessing();
|
|
|
|
int file_id = 0;
|
|
|
|
Random64 rand(time(nullptr));
|
|
|
|
do {
|
2022-05-06 20:03:58 +00:00
|
|
|
std::string file_path = sst_files_dir_ + std::to_string(file_id++);
|
2019-02-11 19:37:07 +00:00
|
|
|
Options options = CurrentOptions();
|
|
|
|
SstFileWriter sst_file_writer(EnvOptions(), options);
|
|
|
|
Status s = sst_file_writer.Open(file_path);
|
|
|
|
ASSERT_OK(s);
|
|
|
|
for (int i = 0; i != 100; ++i) {
|
|
|
|
std::string key = Key(i);
|
2022-05-06 20:03:58 +00:00
|
|
|
std::string value = Key(i) + std::to_string(0);
|
2019-02-11 19:37:07 +00:00
|
|
|
ASSERT_OK(sst_file_writer.Put(key, value));
|
|
|
|
}
|
|
|
|
ASSERT_OK(sst_file_writer.Finish());
|
|
|
|
|
|
|
|
{
|
|
|
|
std::unique_ptr<RandomRWFile> rwfile;
|
|
|
|
ASSERT_OK(env_->NewRandomRWFile(file_path, &rwfile, EnvOptions()));
|
|
|
|
// Manually corrupt the file
|
|
|
|
ASSERT_GT(props_block_size, 8);
|
|
|
|
uint64_t offset =
|
|
|
|
props_block_offset + rand.Next() % (props_block_size - 8);
|
|
|
|
char scratch[8] = {0};
|
|
|
|
Slice buf;
|
|
|
|
ASSERT_OK(rwfile->Read(offset, sizeof(scratch), &buf, scratch));
|
|
|
|
scratch[0] ^= 0xff;  // flip all bits of one byte
|
|
|
|
ASSERT_OK(rwfile->Write(offset, buf));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ingest file.
|
|
|
|
IngestExternalFileOptions ifo;
|
|
|
|
ifo.write_global_seqno = std::get<0>(GetParam());
|
|
|
|
ifo.verify_checksums_before_ingest = true;
|
|
|
|
s = db_->IngestExternalFile({file_path}, ifo);
|
|
|
|
ASSERT_NOK(s);
|
|
|
|
} while (ChangeOptionsForFileIngestionTest());
|
|
|
|
}
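A side note on the #12308 summary above: the risk it describes is that pointer-to-pointer reinterpret_cast skips the base-subobject adjustment that static_cast performs under multiple inheritance. A minimal standalone sketch of that difference (hypothetical types, not part of this test file, and relying on typical object layout rather than anything the standard guarantees):

namespace cast_sketch {
// Two non-empty bases so Derived contains two distinct base subobjects.
struct BaseA { int a = 0; };
struct BaseB { int b = 0; };
struct Derived : public BaseA, public BaseB {};

inline bool StaticAndReinterpretCanDiffer() {
  Derived d;
  // static_cast applies the offset of the BaseB subobject inside Derived.
  BaseB* adjusted = static_cast<BaseB*>(&d);
  // reinterpret_cast reuses the raw address of d with no adjustment, so
  // dereferencing it here would be undefined behavior.
  BaseB* unadjusted = reinterpret_cast<BaseB*>(&d);
  // Typically true on common ABIs: the two casts yield different addresses.
  return static_cast<void*>(adjusted) != static_cast<void*>(unadjusted);
}
}  // namespace cast_sketch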
|
|
|
|
|
2019-09-13 21:48:18 +00:00
|
|
|
TEST_F(ExternalSSTFileBasicTest, OverlappingFiles) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
|
|
|
|
std::vector<std::string> files;
|
|
|
|
{
|
|
|
|
SstFileWriter sst_file_writer(EnvOptions(), options);
|
|
|
|
std::string file1 = sst_files_dir_ + "file1.sst";
|
|
|
|
ASSERT_OK(sst_file_writer.Open(file1));
|
2024-10-16 00:22:01 +00:00
|
|
|
ASSERT_OK(sst_file_writer.Put("a", "a1"));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("i", "i1"));
|
2019-09-13 21:48:18 +00:00
|
|
|
ExternalSstFileInfo file1_info;
|
|
|
|
ASSERT_OK(sst_file_writer.Finish(&file1_info));
|
|
|
|
files.push_back(std::move(file1));
|
|
|
|
}
|
|
|
|
{
|
|
|
|
SstFileWriter sst_file_writer(EnvOptions(), options);
|
|
|
|
std::string file2 = sst_files_dir_ + "file2.sst";
|
|
|
|
ASSERT_OK(sst_file_writer.Open(file2));
|
2024-10-16 00:22:01 +00:00
|
|
|
ASSERT_OK(sst_file_writer.Put("i", "i2"));
|
2019-09-13 21:48:18 +00:00
|
|
|
ExternalSstFileInfo file2_info;
|
|
|
|
ASSERT_OK(sst_file_writer.Finish(&file2_info));
|
|
|
|
files.push_back(std::move(file2));
|
|
|
|
}
|
|
|
|
|
2024-10-16 00:22:01 +00:00
|
|
|
{
|
|
|
|
SstFileWriter sst_file_writer(EnvOptions(), options);
|
|
|
|
std::string file3 = sst_files_dir_ + "file3.sst";
|
|
|
|
ASSERT_OK(sst_file_writer.Open(file3));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("j", "j1"));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("m", "m1"));
|
|
|
|
ExternalSstFileInfo file3_info;
|
|
|
|
ASSERT_OK(sst_file_writer.Finish(&file3_info));
|
|
|
|
files.push_back(std::move(file3));
|
|
|
|
}
|
|
|
|
|
2019-09-13 21:48:18 +00:00
|
|
|
IngestExternalFileOptions ifo;
|
|
|
|
ASSERT_OK(db_->IngestExternalFile(files, ifo));
|
2024-10-16 00:22:01 +00:00
|
|
|
ASSERT_EQ(Get("a"), "a1");
|
|
|
|
ASSERT_EQ(Get("i"), "i2");
|
|
|
|
ASSERT_EQ(Get("j"), "j1");
|
|
|
|
ASSERT_EQ(Get("m"), "m1");
|
2019-09-13 21:48:18 +00:00
|
|
|
|
|
|
|
int total_keys = 0;
|
|
|
|
Iterator* iter = db_->NewIterator(ReadOptions());
|
|
|
|
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
|
|
|
|
ASSERT_OK(iter->status());
|
|
|
|
total_keys++;
|
|
|
|
}
|
2024-10-16 00:22:01 +00:00
|
|
|
ASSERT_OK(iter->status());
|
2019-09-13 21:48:18 +00:00
|
|
|
delete iter;
|
2024-10-16 00:22:01 +00:00
|
|
|
ASSERT_EQ(total_keys, 4);
|
2019-09-13 21:48:18 +00:00
|
|
|
|
2024-10-16 00:22:01 +00:00
|
|
|
ASSERT_EQ(1, NumTableFilesAtLevel(6));
|
|
|
|
ASSERT_EQ(2, NumTableFilesAtLevel(5));
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ExternalSSTFileBasicTest, AtomicReplaceData) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.compaction_style = CompactionStyle::kCompactionStyleUniversal;
|
|
|
|
DestroyAndReopen(options);
|
|
|
|
|
|
|
|
std::vector<std::string> files;
|
|
|
|
{
|
|
|
|
// Write the first version of the data in range-partitioned files.
|
|
|
|
SstFileWriter sst_file_writer(EnvOptions(), options);
|
|
|
|
std::string file1 = sst_files_dir_ + "file1.sst";
|
|
|
|
ASSERT_OK(sst_file_writer.Open(file1));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("a", "a1"));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("b", "b1"));
|
|
|
|
ExternalSstFileInfo file1_info;
|
|
|
|
ASSERT_OK(sst_file_writer.Finish(&file1_info));
|
|
|
|
files.push_back(std::move(file1));
|
|
|
|
|
|
|
|
std::string file2 = sst_files_dir_ + "file2.sst";
|
|
|
|
ASSERT_OK(sst_file_writer.Open(file2));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("x", "x1"));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("y", "y1"));
|
|
|
|
ExternalSstFileInfo file2_info;
|
|
|
|
ASSERT_OK(sst_file_writer.Finish(&file2_info));
|
|
|
|
files.push_back(std::move(file2));
|
|
|
|
}
|
|
|
|
|
|
|
|
IngestExternalFileOptions ifo;
|
|
|
|
ASSERT_OK(db_->IngestExternalFile(files, ifo));
|
|
|
|
ASSERT_EQ(Get("a"), "a1");
|
|
|
|
ASSERT_EQ(Get("b"), "b1");
|
|
|
|
ASSERT_EQ(Get("x"), "x1");
|
|
|
|
ASSERT_EQ(Get("y"), "y1");
|
|
|
|
ASSERT_EQ(2, NumTableFilesAtLevel(6));
|
|
|
|
|
|
|
|
{
|
|
|
|
// Atomically delete the old version of the data with one range-delete file,
|
|
|
|
// and ingest a new batch of range-partitioned files with the new version.
|
|
|
|
files.clear();
|
|
|
|
SstFileWriter sst_file_writer(EnvOptions(), options);
|
|
|
|
std::string file2 = sst_files_dir_ + "file2.sst";
|
|
|
|
ASSERT_OK(sst_file_writer.Open(file2));
|
|
|
|
ASSERT_OK(sst_file_writer.DeleteRange("a", "z"));
|
|
|
|
ExternalSstFileInfo file2_info;
|
|
|
|
ASSERT_OK(sst_file_writer.Finish(&file2_info));
|
|
|
|
files.push_back(std::move(file2));
|
|
|
|
|
|
|
|
std::string file3 = sst_files_dir_ + "file3.sst";
|
|
|
|
ASSERT_OK(sst_file_writer.Open(file3));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("a", "a2"));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("b", "b2"));
|
|
|
|
ExternalSstFileInfo file3_info;
|
|
|
|
ASSERT_OK(sst_file_writer.Finish(&file3_info));
|
|
|
|
files.push_back(std::move(file3));
|
|
|
|
|
|
|
|
std::string file4 = sst_files_dir_ + "file4.sst";
|
|
|
|
ASSERT_OK(sst_file_writer.Open(file4));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("x", "x2"));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("y", "y2"));
|
|
|
|
ExternalSstFileInfo file4_info;
|
|
|
|
ASSERT_OK(sst_file_writer.Finish(&file4_info));
|
|
|
|
files.push_back(std::move(file4));
|
|
|
|
}
|
|
|
|
|
|
|
|
auto seqno_before_ingestion = db_->GetLatestSequenceNumber();
|
|
|
|
ASSERT_OK(db_->IngestExternalFile(files, ifo));
|
|
|
|
// Overlapping files each occupy one new sequence number.
|
|
|
|
ASSERT_EQ(db_->GetLatestSequenceNumber(), seqno_before_ingestion + 3);
|
|
|
|
ASSERT_EQ(Get("a"), "a2");
|
|
|
|
ASSERT_EQ(Get("b"), "b2");
|
|
|
|
ASSERT_EQ(Get("x"), "x2");
|
|
|
|
ASSERT_EQ(Get("y"), "y2");
|
|
|
|
|
|
|
|
// Check that the old version of data, the big range deletion, and the new
|
|
|
|
// version of data are on separate levels.
|
|
|
|
ASSERT_EQ(2, NumTableFilesAtLevel(4));
|
|
|
|
ASSERT_EQ(1, NumTableFilesAtLevel(5));
|
|
|
|
ASSERT_EQ(2, NumTableFilesAtLevel(6));
|
|
|
|
|
|
|
|
CompactRangeOptions cro;
|
|
|
|
cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
|
|
|
|
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
|
|
ASSERT_EQ(0, NumTableFilesAtLevel(4));
|
|
|
|
ASSERT_EQ(0, NumTableFilesAtLevel(5));
|
|
|
|
ASSERT_EQ(1, NumTableFilesAtLevel(6));
|
2019-09-13 21:48:18 +00:00
|
|
|
}
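The AtomicReplaceData test above exercises the atomic-replace pattern: one ingested file carries a range deletion covering the old data, the remaining files carry the new version, and a single IngestExternalFile call installs all of them together. A condensed sketch of that pattern (hypothetical db/dir parameters, error handling reduced to early returns; reusing the writer after Finish() mirrors the test above):

Status AtomicReplaceSketch(DB* db, const Options& options,
                           const std::string& dir) {
  std::vector<std::string> files;
  SstFileWriter writer(EnvOptions(), options);

  // File 1: a range deletion that wipes the old version of the key range.
  std::string del_file = dir + "/replace_delete.sst";
  Status s = writer.Open(del_file);
  if (!s.ok()) return s;
  s = writer.DeleteRange("a", "z");
  if (!s.ok()) return s;
  s = writer.Finish();
  if (!s.ok()) return s;
  files.push_back(del_file);

  // File 2: the new version of the data for the same key range.
  std::string new_file = dir + "/replace_new.sst";
  s = writer.Open(new_file);
  if (!s.ok()) return s;
  s = writer.Put("a", "a2");
  if (!s.ok()) return s;
  s = writer.Finish();
  if (!s.ok()) return s;
  files.push_back(new_file);

  // One ingestion installs the deletion and the new data atomically; later
  // files in the list take precedence for overlapping keys.
  return db->IngestExternalFile(files, IngestExternalFileOptions());
}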
|
|
|
|
|
2021-04-20 20:59:24 +00:00
|
|
|
TEST_F(ExternalSSTFileBasicTest, IngestFileAfterDBPut) {
|
|
|
|
// Repro https://github.com/facebook/rocksdb/issues/6245.
|
|
|
|
// Flush three files to L0. Ingest one more file to trigger L0->L1 compaction
|
|
|
|
// via trivial move. The bug happened when L1 files were incorrectly sorted,
|
|
|
|
// resulting in an old value for "k" returned by `Get()`.
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
|
|
|
|
ASSERT_OK(Put("k", "a"));
|
2023-08-09 22:46:44 +00:00
|
|
|
ASSERT_OK(Flush());
|
2021-04-20 20:59:24 +00:00
|
|
|
ASSERT_OK(Put("k", "a"));
|
2023-08-09 22:46:44 +00:00
|
|
|
ASSERT_OK(Flush());
|
2021-04-20 20:59:24 +00:00
|
|
|
ASSERT_OK(Put("k", "a"));
|
2023-08-09 22:46:44 +00:00
|
|
|
ASSERT_OK(Flush());
|
2021-04-20 20:59:24 +00:00
|
|
|
SstFileWriter sst_file_writer(EnvOptions(), options);
|
|
|
|
|
|
|
|
// Current file size should be 0 after sst_file_writer init and before
|
|
|
|
// opening a file.
|
|
|
|
ASSERT_EQ(sst_file_writer.FileSize(), 0);
|
|
|
|
|
|
|
|
std::string file1 = sst_files_dir_ + "file1.sst";
|
|
|
|
ASSERT_OK(sst_file_writer.Open(file1));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("k", "b"));
|
|
|
|
|
|
|
|
ExternalSstFileInfo file1_info;
|
|
|
|
Status s = sst_file_writer.Finish(&file1_info);
|
|
|
|
ASSERT_OK(s) << s.ToString();
|
|
|
|
|
|
|
|
// Current file size should be non-zero after a successful write.
|
|
|
|
ASSERT_GT(sst_file_writer.FileSize(), 0);
|
|
|
|
|
|
|
|
IngestExternalFileOptions ifo;
|
|
|
|
s = db_->IngestExternalFile({file1}, ifo);
|
|
|
|
ASSERT_OK(s);
|
|
|
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
|
|
|
|
|
|
|
ASSERT_EQ(Get("k"), "b");
|
|
|
|
}
|
|
|
|
|
2021-10-08 17:29:06 +00:00
|
|
|
TEST_F(ExternalSSTFileBasicTest, IngestWithTemperature) {
|
Fix/improve temperature handling for file ingestion (#12402)
Summary:
Partly following up on leftovers from https://github.com/facebook/rocksdb/issues/12388
In terms of public API:
* Make it clear that IngestExternalFileArg::file_temperature is just a hint for opening the existing file, though it was previously used for both copy-from temp hint and copy-to temp, which was bizarre.
* Specify how IngestExternalFile assigns temperature to file ingested into DB. (See details in comments.) This approach is not perfect in terms of matching how the DB assigns temperatures, but was the simplest way to get close. The key complication for matching DB temperature assignments is that ingestion files are copied (to a destination temp) before their target level is determined (in general).
* Add a temperature option to SstFileWriter::Open so that files intended for ingestion can be initially written to a chosen temperature.
* Note that "fail_if_not_bottommost_level" is obsolete/confusing use of "bottommost"
In terms of the implementation, there was a similar bit of oddness with the internal CopyFile API, which only took one temperature, ambiguously applicable to the source, destination, or both. This is also fixed.
Eventual suggested follow-up:
* Before copying files for ingestion, determine a tentative level assignment to use for destination temperature, and keep that even if final level assignment happens to be different at commit time (rare).
* More temperature handling for CreateColumnFamilyWithImport and Checkpoints.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/12402
Test Plan:
Deeply revamped
ExternalSSTFileBasicTest.IngestWithTemperature to test the new changes. Previously this test was insufficient because it was only looking at temperatures according to the DB manifest. Incorporating FileTemperatureTestFS allows us to also test the temperatures in the storage layer.
Used macros instead of functions for better tracing to critical source location on test failures.
Some enhancements to FileTemperatureTestFS in the process of developing the revamped test.
Reviewed By: jowlyzhang
Differential Revision: D54442794
Pulled By: pdillinger
fbshipit-source-id: 41d9d0afdc073e6a983304c10bbc07c70cc7e995
2024-03-06 00:56:08 +00:00
|
|
|
// Rather than doubling the running time of this test, this boolean
|
|
|
|
// variable gets a random starting value and then alternates between
|
|
|
|
// true and false.
|
|
|
|
bool alternate_hint = Random::GetTLSInstance()->OneIn(2);
|
|
|
|
Destroy(CurrentOptions());
|
2021-10-08 17:29:06 +00:00
|
|
|
|
2024-03-06 00:56:08 +00:00
|
|
|
for (std::string mode : {"ingest_behind", "fail_if_not", "neither"}) {
|
|
|
|
SCOPED_TRACE("Mode: " + mode);
|
2021-10-08 17:29:06 +00:00
|
|
|
|
2024-03-06 00:56:08 +00:00
|
|
|
Options options = CurrentOptions();
|
|
|
|
|
|
|
|
auto test_fs =
|
|
|
|
std::make_shared<FileTemperatureTestFS>(options.env->GetFileSystem());
|
|
|
|
std::unique_ptr<Env> env(new CompositeEnvWrapper(options.env, test_fs));
|
|
|
|
options.env = env.get();
|
|
|
|
|
|
|
|
const ImmutableCFOptions ioptions(options);
|
|
|
|
options.last_level_temperature = Temperature::kCold;
|
|
|
|
options.default_write_temperature = Temperature::kHot;
|
|
|
|
SstFileWriter sst_file_writer(EnvOptions(), options);
|
|
|
|
options.level0_file_num_compaction_trigger = 2;
|
|
|
|
options.allow_ingest_behind = (mode == "ingest_behind");
|
|
|
|
Reopen(options);
|
|
|
|
Defer destroyer([&]() { Destroy(options); });
|
|
|
|
|
|
|
|
#define VERIFY_SST_COUNT(temp, expected_count_in_db, \
|
|
|
|
expected_count_outside_db) \
|
|
|
|
{ \
|
|
|
|
/* Partially verify against FileSystem */ \
|
|
|
|
ASSERT_EQ( \
|
|
|
|
test_fs->CountCurrentSstFilesWithTemperature(temp), \
|
|
|
|
size_t{expected_count_in_db} + size_t{expected_count_outside_db}); \
|
|
|
|
/* Partially verify against DB manifest */ \
|
|
|
|
if (expected_count_in_db == 0) { \
|
|
|
|
ASSERT_EQ(GetSstSizeHelper(temp), 0); \
|
|
|
|
} else { \
|
|
|
|
ASSERT_GE(GetSstSizeHelper(temp), 1); \
|
|
|
|
} \
|
2021-10-08 17:29:06 +00:00
|
|
|
}
|
|
|
|
|
2024-03-06 00:56:08 +00:00
|
|
|
size_t ex_unknown_in_db = 0;
|
|
|
|
size_t ex_hot_in_db = 0;
|
|
|
|
size_t ex_warm_in_db = 0;
|
|
|
|
size_t ex_cold_in_db = 0;
|
|
|
|
size_t ex_unknown_outside_db = 0;
|
|
|
|
size_t ex_hot_outside_db = 0;
|
|
|
|
size_t ex_warm_outside_db = 0;
|
|
|
|
size_t ex_cold_outside_db = 0;
|
|
|
|
#define VERIFY_SST_COUNTS() \
|
|
|
|
{ \
|
|
|
|
VERIFY_SST_COUNT(Temperature::kUnknown, ex_unknown_in_db, \
|
|
|
|
ex_unknown_outside_db); \
|
|
|
|
VERIFY_SST_COUNT(Temperature::kHot, ex_hot_in_db, ex_hot_outside_db); \
|
|
|
|
VERIFY_SST_COUNT(Temperature::kWarm, ex_warm_in_db, ex_warm_outside_db); \
|
|
|
|
VERIFY_SST_COUNT(Temperature::kCold, ex_cold_in_db, ex_cold_outside_db); \
|
|
|
|
}
|
2021-10-08 17:29:06 +00:00
|
|
|
|
2024-03-06 00:56:08 +00:00
|
|
|
// Create sst file, using a name recognized by FileTemperatureTestFS and
|
|
|
|
// a specified temperature
|
|
|
|
std::string file1 = sst_files_dir_ + "9000000.sst";
|
|
|
|
ASSERT_OK(sst_file_writer.Open(file1, Temperature::kWarm));
|
|
|
|
for (int k = 1000; k < 1100; k++) {
|
|
|
|
ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
|
|
|
|
}
|
|
|
|
ExternalSstFileInfo file1_info;
|
|
|
|
Status s = sst_file_writer.Finish(&file1_info);
|
|
|
|
ASSERT_OK(s);
|
2021-10-08 17:29:06 +00:00
|
|
|
|
2024-03-06 00:56:08 +00:00
|
|
|
ex_warm_outside_db++;
|
|
|
|
VERIFY_SST_COUNTS();
|
|
|
|
|
|
|
|
ASSERT_EQ(file1_info.file_path, file1);
|
|
|
|
ASSERT_EQ(file1_info.num_entries, 100);
|
|
|
|
ASSERT_EQ(file1_info.smallest_key, Key(1000));
|
|
|
|
ASSERT_EQ(file1_info.largest_key, Key(1099));
|
|
|
|
|
|
|
|
std::vector<std::string> files;
|
|
|
|
std::vector<std::string> files_checksums;
|
|
|
|
std::vector<std::string> files_checksum_func_names;
|
|
|
|
|
|
|
|
files.push_back(file1);
|
|
|
|
IngestExternalFileOptions in_opts;
|
|
|
|
in_opts.move_files = false;
|
|
|
|
in_opts.snapshot_consistency = true;
|
|
|
|
in_opts.allow_global_seqno = false;
|
|
|
|
in_opts.allow_blocking_flush = false;
|
|
|
|
in_opts.write_global_seqno = true;
|
|
|
|
in_opts.verify_file_checksum = false;
|
|
|
|
in_opts.ingest_behind = (mode == "ingest_behind");
|
|
|
|
in_opts.fail_if_not_bottommost_level = (mode == "fail_if_not");
|
|
|
|
IngestExternalFileArg arg;
|
|
|
|
arg.column_family = db_->DefaultColumnFamily();
|
|
|
|
arg.external_files = files;
|
|
|
|
arg.options = in_opts;
|
|
|
|
arg.files_checksums = files_checksums;
|
|
|
|
arg.files_checksum_func_names = files_checksum_func_names;
|
Fix windows build and CI (#12426)
Summary:
Issue https://github.com/facebook/rocksdb/issues/12421 describes a regression in the migration from CircleCI to GitHub Actions in which failing build steps no longer fail Windows CI jobs. In GHA with pwsh (new preferred powershell command), only the last non-builtin command (or something like that) affects the overall success/failure result, and failures in external commands do not exit the script, even with `$ErrorActionPreference = 'Stop'` and `$PSNativeCommandErrorActionPreference = $true`. Switching to `powershell` causes some obscure failure (not seen in CircleCI) about the `-Lo` option to `curl`.
Here we work around this using the only reasonable-but-ugly way known: explicitly check the result after every non-trivial build step. This leaves us highly susceptible to future regressions with unchecked build steps in the future, but a clean solution is not known.
This change also fixes the build errors that were allowed to creep in because of the CI regression. Also decreased the unnecessarily long running time of DBWriteTest.WriteThreadWaitNanosCounter.
For background, this problem explicitly contradicts GitHub's documentation, and GitHub has known about the problem for more than a year, with no evidence of caring or intending to fix. https://github.com/actions/runner-images/issues/6668 Somehow CircleCI doesn't have this problem. And even though cmd.exe and powershell have been perpetuating DOS-isms for decades, they still seem to be a somewhat active "hot mess" when it comes to sensible, consistent, and documented behavior.
Fixes https://github.com/facebook/rocksdb/issues/12421
A history of some things I tried in development is here: https://github.com/facebook/rocksdb/compare/main...pdillinger:rocksdb:debug_windows_ci_orig
Pull Request resolved: https://github.com/facebook/rocksdb/pull/12426
Test Plan: CI, including https://github.com/facebook/rocksdb/issues/12434 where I have temporarily enabled other Windows builds on PR with this change
Reviewed By: cbi42
Differential Revision: D54903698
Pulled By: pdillinger
fbshipit-source-id: 116bcbebbbf98f347c7cf7dfdeebeaaed7f76827
2024-03-14 19:04:41 +00:00
|
|
|
alternate_hint = !alternate_hint;
|
|
|
|
if (alternate_hint) {
|
2024-03-06 00:56:08 +00:00
|
|
|
// Provide correct hint (for optimal file open performance)
|
|
|
|
arg.file_temperature = Temperature::kWarm;
|
|
|
|
} else {
|
|
|
|
// No hint (also works because ingestion will read the temperature
|
|
|
|
// according to storage)
|
|
|
|
arg.file_temperature = Temperature::kUnknown;
|
|
|
|
}
|
|
|
|
s = db_->IngestExternalFiles({arg});
|
|
|
|
ASSERT_OK(s);
|
2021-10-08 17:29:06 +00:00
|
|
|
|
2024-03-06 00:56:08 +00:00
|
|
|
// check the temperature of the file ingested (copied)
|
|
|
|
ColumnFamilyMetaData metadata;
|
|
|
|
db_->GetColumnFamilyMetaData(&metadata);
|
|
|
|
ASSERT_EQ(1, metadata.file_count);
|
2021-10-08 17:29:06 +00:00
|
|
|
|
2024-03-06 00:56:08 +00:00
|
|
|
if (mode != "neither") {
|
|
|
|
ASSERT_EQ(Temperature::kCold, metadata.levels[6].files[0].temperature);
|
|
|
|
ex_cold_in_db++;
|
|
|
|
} else {
|
|
|
|
// Currently, we are only able to use last_level_temperature for ingestion
|
|
|
|
// when using an ingestion option that guarantees ingestion to the last level.
|
|
|
|
ASSERT_EQ(Temperature::kHot, metadata.levels[6].files[0].temperature);
|
|
|
|
ex_hot_in_db++;
|
|
|
|
}
|
|
|
|
VERIFY_SST_COUNTS();
|
|
|
|
|
|
|
|
// non-bottommost file still has kHot temperature
|
|
|
|
ASSERT_OK(Put("foo", "bar"));
|
|
|
|
ASSERT_OK(Put("bar", "bar"));
|
|
|
|
ASSERT_OK(Flush());
|
|
|
|
db_->GetColumnFamilyMetaData(&metadata);
|
|
|
|
ASSERT_EQ(2, metadata.file_count);
|
|
|
|
ASSERT_EQ(Temperature::kHot, metadata.levels[0].files[0].temperature);
|
|
|
|
|
|
|
|
ex_hot_in_db++;
|
|
|
|
VERIFY_SST_COUNTS();
|
|
|
|
|
|
|
|
// reopen and check the information is persisted
|
|
|
|
Reopen(options);
|
|
|
|
db_->GetColumnFamilyMetaData(&metadata);
|
|
|
|
ASSERT_EQ(2, metadata.file_count);
|
|
|
|
ASSERT_EQ(Temperature::kHot, metadata.levels[0].files[0].temperature);
|
|
|
|
if (mode != "neither") {
|
|
|
|
ASSERT_EQ(Temperature::kCold, metadata.levels[6].files[0].temperature);
|
|
|
|
} else {
|
|
|
|
ASSERT_EQ(Temperature::kHot, metadata.levels[6].files[0].temperature);
|
|
|
|
}
|
|
|
|
|
|
|
|
// (no change)
|
|
|
|
VERIFY_SST_COUNTS();
|
|
|
|
|
|
|
|
// Check an invalid temperature with the DB property. Not sure why the original
|
|
|
|
// author is testing this case, but perhaps so that a DB downgraded from
|
|
|
|
// newer GetProperty code that uses a new Temperature will report something
|
|
|
|
// reasonable and not an error.
|
|
|
|
std::string prop;
|
|
|
|
ASSERT_TRUE(dbfull()->GetProperty(
|
|
|
|
DB::Properties::kLiveSstFilesSizeAtTemperature + std::to_string(22),
|
|
|
|
&prop));
|
|
|
|
ASSERT_EQ(std::atoi(prop.c_str()), 0);
|
|
|
|
#undef VERIFY_SST_COUNT
|
|
|
|
}
|
2021-10-08 17:29:06 +00:00
|
|
|
}
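Per the #12402 summary above, SstFileWriter::Open accepts a temperature for the file being written, while IngestExternalFileArg::file_temperature is only a hint for opening the existing file; the DB chooses the temperature of the ingested copy. A minimal sketch of that flow (hypothetical db/options/dir parameters, error handling reduced to early returns):

Status IngestWarmFileSketch(DB* db, const Options& options,
                            const std::string& dir) {
  // Write the to-be-ingested file at a chosen temperature.
  SstFileWriter writer(EnvOptions(), options);
  std::string file = dir + "/warm_ingest.sst";
  Status s = writer.Open(file, Temperature::kWarm);
  if (!s.ok()) return s;
  s = writer.Put("k", "v");
  if (!s.ok()) return s;
  s = writer.Finish();
  if (!s.ok()) return s;

  IngestExternalFileArg arg;
  arg.column_family = db->DefaultColumnFamily();
  arg.external_files = {file};
  // Hint describing the existing file's temperature (used when opening it);
  // the copied/ingested file's temperature is assigned by the DB, e.g.
  // last_level_temperature when ingestion is guaranteed to hit the last level.
  arg.file_temperature = Temperature::kWarm;
  return db->IngestExternalFiles({arg});
}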
|
|
|
|
|
2022-04-16 01:12:06 +00:00
|
|
|
TEST_F(ExternalSSTFileBasicTest, FailIfNotBottommostLevel) {
|
|
|
|
Options options = GetDefaultOptions();
|
|
|
|
|
2022-05-06 20:03:58 +00:00
|
|
|
std::string file_path = sst_files_dir_ + std::to_string(1);
|
2022-04-16 01:12:06 +00:00
|
|
|
SstFileWriter sfw(EnvOptions(), options);
|
|
|
|
|
|
|
|
ASSERT_OK(sfw.Open(file_path));
|
|
|
|
ASSERT_OK(sfw.Put("b", "dontcare"));
|
|
|
|
ASSERT_OK(sfw.Finish());
|
|
|
|
|
|
|
|
// Test universal compaction + ingest with snapshot consistency
|
|
|
|
options.create_if_missing = true;
|
|
|
|
options.compaction_style = CompactionStyle::kCompactionStyleUniversal;
|
|
|
|
DestroyAndReopen(options);
|
|
|
|
{
|
|
|
|
const Snapshot* snapshot = db_->GetSnapshot();
|
|
|
|
ManagedSnapshot snapshot_guard(db_, snapshot);
|
|
|
|
IngestExternalFileOptions ifo;
|
|
|
|
ifo.fail_if_not_bottommost_level = true;
|
|
|
|
ifo.snapshot_consistency = true;
|
|
|
|
const Status s = db_->IngestExternalFile({file_path}, ifo);
|
2024-10-11 18:18:45 +00:00
|
|
|
ASSERT_TRUE(s.ok());
|
2022-04-16 01:12:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Test level compaction
|
|
|
|
options.compaction_style = CompactionStyle::kCompactionStyleLevel;
|
|
|
|
options.num_levels = 2;
|
|
|
|
DestroyAndReopen(options);
|
|
|
|
ASSERT_OK(db_->Put(WriteOptions(), "a", "dontcare"));
|
|
|
|
ASSERT_OK(db_->Put(WriteOptions(), "c", "dontcare"));
|
|
|
|
ASSERT_OK(db_->Flush(FlushOptions()));
|
|
|
|
|
|
|
|
ASSERT_OK(db_->Put(WriteOptions(), "b", "dontcare"));
|
|
|
|
ASSERT_OK(db_->Put(WriteOptions(), "d", "dontcare"));
|
|
|
|
ASSERT_OK(db_->Flush(FlushOptions()));
|
|
|
|
|
|
|
|
{
|
|
|
|
CompactRangeOptions cro;
|
|
|
|
cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
|
|
|
|
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
|
|
|
|
|
|
IngestExternalFileOptions ifo;
|
|
|
|
ifo.fail_if_not_bottommost_level = true;
|
|
|
|
const Status s = db_->IngestExternalFile({file_path}, ifo);
|
|
|
|
ASSERT_TRUE(s.IsTryAgain());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-05-02 17:22:08 +00:00
|
|
|
TEST_F(ExternalSSTFileBasicTest, VerifyChecksum) {
|
|
|
|
const std::string kPutVal = "put_val";
|
|
|
|
const std::string kIngestedVal = "ingested_val";
|
|
|
|
|
|
|
|
ASSERT_OK(Put("k", kPutVal, WriteOptions()));
|
|
|
|
ASSERT_OK(Flush());
|
|
|
|
|
|
|
|
std::string external_file = sst_files_dir_ + "/file_to_ingest.sst";
|
|
|
|
{
|
|
|
|
SstFileWriter sst_file_writer{EnvOptions(), CurrentOptions()};
|
|
|
|
|
|
|
|
ASSERT_OK(sst_file_writer.Open(external_file));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("k", kIngestedVal));
|
|
|
|
ASSERT_OK(sst_file_writer.Finish());
|
|
|
|
}
|
|
|
|
|
|
|
|
ASSERT_OK(db_->IngestExternalFile(db_->DefaultColumnFamily(), {external_file},
|
|
|
|
IngestExternalFileOptions()));
|
|
|
|
|
|
|
|
ASSERT_OK(db_->VerifyChecksum());
|
|
|
|
}
|
|
|
|
|
2022-05-19 18:04:21 +00:00
|
|
|
TEST_F(ExternalSSTFileBasicTest, VerifySstUniqueId) {
|
|
|
|
const std::string kPutVal = "put_val";
|
|
|
|
const std::string kIngestedVal = "ingested_val";
|
|
|
|
|
|
|
|
ASSERT_OK(Put("k", kPutVal, WriteOptions()));
|
|
|
|
ASSERT_OK(Flush());
|
|
|
|
|
|
|
|
std::string external_file = sst_files_dir_ + "/file_to_ingest.sst";
|
|
|
|
{
|
|
|
|
SstFileWriter sst_file_writer{EnvOptions(), CurrentOptions()};
|
|
|
|
|
|
|
|
ASSERT_OK(sst_file_writer.Open(external_file));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("k", kIngestedVal));
|
|
|
|
ASSERT_OK(sst_file_writer.Finish());
|
|
|
|
}
|
|
|
|
|
|
|
|
ASSERT_OK(db_->IngestExternalFile(db_->DefaultColumnFamily(), {external_file},
|
|
|
|
IngestExternalFileOptions()));
|
|
|
|
|
|
|
|
// Test ingest file without session_id and db_id (for example generated by an
|
|
|
|
// older version of sst_writer)
|
|
|
|
SyncPoint::GetInstance()->SetCallBack(
|
|
|
|
"PropertyBlockBuilder::AddTableProperty:Start", [&](void* props_vs) {
|
|
|
|
auto props = static_cast<TableProperties*>(props_vs);
|
|
|
|
// clear the table properties' session_id and db_id to simulate an older writer
|
|
|
|
props->db_session_id = "";
|
|
|
|
props->db_id = "";
|
|
|
|
});
|
Always verify SST unique IDs on SST file open (#10532)
Summary:
Although we've been tracking SST unique IDs in the DB manifest
unconditionally, checking has been opt-in and with an extra pass at DB::Open
time. This changes the behavior of `verify_sst_unique_id_in_manifest` to
check unique ID against manifest every time an SST file is opened through
table cache (normal DB operations), replacing the explicit pass over files
at DB::Open time. This change also enables the option by default and
removes the "EXPERIMENTAL" designation.
One possible criticism is that the option no longer ensures the integrity
of a DB at Open time. This is far from an all-or-nothing issue. Verifying
the IDs of all SST files hardly ensures all the data in the DB is readable.
(VerifyChecksum is supposed to do that.) Also, with
max_open_files=-1 (default, extremely common), all SST files are
opened at DB::Open time anyway.
Implementation details:
* `VerifySstUniqueIdInManifest()` functions are the extra/explicit pass
that is now removed.
* Unit tests that manipulate/corrupt table properties have to opt out of
this check, because that corrupts the "actual" unique id. (And even for
testing we don't currently have a mechanism to set "no unique id"
in the in-memory file metadata for new files.)
* A lot of other unit test churn relates to (a) default checking on, and
(b) checking on SST open even without DB::Open (e.g. on flush)
* Use `FileMetaData` for more `TableCache` operations (in place of
`FileDescriptor`) so that we have access to the unique_id whenever
we might need to open an SST file. **There is the possibility of
performance impact because we can no longer use the more
localized `fd` part of an `FdWithKeyRange` but instead follow the
`file_metadata` pointer. However, this change (possible regression)
is only done for `GetMemoryUsageByTableReaders`.**
* Removed a completely unnecessary constructor overload of
`TableReaderOptions`
Possible follow-up:
* Verification only happens when opening through table cache. Are there
more places where this should happen?
* Improve error message when there is a file size mismatch vs. manifest
(FIXME added in the appropriate place).
* I'm not sure there's a justification for `FileDescriptor` to be distinct from
`FileMetaData`.
* I'm skeptical that `FdWithKeyRange` really still makes sense for
optimizing some data locality by duplicating some data in memory, but I
could be wrong.
* An unnecessary overload of NewTableReader was recently added, in
the public API nonetheless (though unusable there). It should be cleaned
up to put most things under `TableReaderOptions`.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10532
Test Plan:
updated unit tests
Performance test showing no significant difference (just noise I think):
`./db_bench -benchmarks=readwhilewriting[-X10] -num=3000000 -disable_wal=1 -bloom_bits=8 -write_buffer_size=1000000 -target_file_size_base=1000000`
Before: readwhilewriting [AVG 10 runs] : 68702 (± 6932) ops/sec
After: readwhilewriting [AVG 10 runs] : 68239 (± 7198) ops/sec
Reviewed By: jay-zhuang
Differential Revision: D38765551
Pulled By: pdillinger
fbshipit-source-id: a827a708155f12344ab2a5c16e7701c7636da4c2
2022-09-08 05:52:42 +00:00
|
|
|
std::atomic_int skipped = 0, passed = 0;
|
|
|
|
SyncPoint::GetInstance()->SetCallBack(
|
|
|
|
"BlockBasedTable::Open::SkippedVerifyUniqueId",
|
|
|
|
[&](void* /*arg*/) { skipped++; });
|
|
|
|
SyncPoint::GetInstance()->SetCallBack(
|
|
|
|
"BlockBasedTable::Open::PassedVerifyUniqueId",
|
|
|
|
[&](void* /*arg*/) { passed++; });
|
2022-05-19 18:04:21 +00:00
|
|
|
SyncPoint::GetInstance()->EnableProcessing();
|
|
|
|
|
2022-09-08 05:52:42 +00:00
|
|
|
auto options = CurrentOptions();
|
|
|
|
ASSERT_TRUE(options.verify_sst_unique_id_in_manifest);
|
|
|
|
Reopen(options);
|
|
|
|
ASSERT_EQ(skipped, 0);
|
|
|
|
ASSERT_EQ(passed, 2); // one flushed + one ingested
|
|
|
|
|
2022-05-19 18:04:21 +00:00
|
|
|
external_file = sst_files_dir_ + "/file_to_ingest2.sst";
|
|
|
|
{
|
|
|
|
SstFileWriter sst_file_writer{EnvOptions(), CurrentOptions()};
|
|
|
|
|
|
|
|
ASSERT_OK(sst_file_writer.Open(external_file));
|
|
|
|
ASSERT_OK(sst_file_writer.Put("k", kIngestedVal));
|
|
|
|
ASSERT_OK(sst_file_writer.Finish());
|
|
|
|
}
|
|
|
|
|
|
|
|
ASSERT_OK(db_->IngestExternalFile(db_->DefaultColumnFamily(), {external_file},
|
|
|
|
IngestExternalFileOptions()));
|
|
|
|
|
2022-09-08 05:52:42 +00:00
|
|
|
// Two table file opens skipping verification:
|
|
|
|
// * ExternalSstFileIngestionJob::GetIngestedFileInfo
|
|
|
|
// * TableCache::GetTableReader
|
|
|
|
ASSERT_EQ(skipped, 2);
|
|
|
|
ASSERT_EQ(passed, 2);
|
|
|
|
|
|
|
|
// Check same after re-open (except no GetIngestedFileInfo)
|
|
|
|
skipped = 0;
|
|
|
|
passed = 0;
|
2022-05-19 18:04:21 +00:00
|
|
|
Reopen(options);
|
|
|
|
ASSERT_EQ(skipped, 1);
|
2022-09-08 05:52:42 +00:00
|
|
|
  ASSERT_EQ(passed, 2);
}

TEST_F(ExternalSSTFileBasicTest, StableSnapshotWhileLoggingToManifest) {
  const std::string kPutVal = "put_val";
  const std::string kIngestedVal = "ingested_val";

  ASSERT_OK(Put("k", kPutVal, WriteOptions()));
  ASSERT_OK(Flush());

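  // Write a one-key external SST file that will be ingested below.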
  std::string external_file = sst_files_dir_ + "/file_to_ingest.sst";
  {
    SstFileWriter sst_file_writer{EnvOptions(), CurrentOptions()};
    ASSERT_OK(sst_file_writer.Open(external_file));
    ASSERT_OK(sst_file_writer.Put("k", kIngestedVal));
    ASSERT_OK(sst_file_writer.Finish());
  }

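  // Arm a sync point inside the ingestion's manifest write
  // (VersionSet::LogAndApply): take a snapshot there and verify it still sees
  // the flushed value rather than the ingested one.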
  const Snapshot* snapshot = nullptr;
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "VersionSet::LogAndApply:WriteManifest", [&](void* /* arg */) {
        // Prevent the background compaction job from invoking this callback
        ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
        snapshot = db_->GetSnapshot();
        ReadOptions read_opts;
        read_opts.snapshot = snapshot;
        std::string value;
        ASSERT_OK(db_->Get(read_opts, "k", &value));
        ASSERT_EQ(kPutVal, value);
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

  ASSERT_OK(db_->IngestExternalFile(db_->DefaultColumnFamily(), {external_file},
                                    IngestExternalFileOptions()));
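  // The ingestion's manifest write fires the callback above, which disables
  // further sync-point processing before taking the snapshot.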
  auto ingested_file_seqno = db_->GetLatestSequenceNumber();
  ASSERT_NE(nullptr, snapshot);
  // The snapshot was taken before SST ingestion completed
  ASSERT_EQ(ingested_file_seqno, snapshot->GetSequenceNumber() + 1);

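  // Reads through the snapshot keep returning the pre-ingestion value.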
  ReadOptions read_opts;
  read_opts.snapshot = snapshot;
  std::string value;
  ASSERT_OK(db_->Get(read_opts, "k", &value));
  ASSERT_EQ(kPutVal, value);
  db_->ReleaseSnapshot(snapshot);

  // After reopen, the sequence number should be caught up so that the
  // ingested value is read
  Reopen(CurrentOptions());
  ASSERT_OK(db_->Get(ReadOptions(), "k", &value));
  ASSERT_EQ(kIngestedVal, value);

  // A new write should get a higher seqno than the ingested file
  ASSERT_OK(Put("k", kPutVal, WriteOptions()));
  ASSERT_EQ(db_->GetLatestSequenceNumber(), ingested_file_seqno + 1);
}

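// A minimal sketch (not part of the test above): the snapshot stability
// exercised here is the default ingestion behavior, which a caller could make
// explicit via IngestExternalFileOptions::snapshot_consistency:
//   IngestExternalFileOptions ifo;
//   ifo.snapshot_consistency = true;  // default: ingested keys must not
//                                     // appear in pre-existing snapshots
//   ASSERT_OK(db_->IngestExternalFile(db_->DefaultColumnFamily(),
//                                     {external_file}, ifo));
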
INSTANTIATE_TEST_CASE_P(ExternalSSTFileBasicTest, ExternalSSTFileBasicTest,
                        testing::Values(std::make_tuple(true, true),
                                        std::make_tuple(true, false),
                                        std::make_tuple(false, true),
                                        std::make_tuple(false, false)));

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  RegisterCustomObjects(argc, argv);
  return RUN_ALL_TESTS();
}