// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "db/flush_job.h"

#include <algorithm>
#include <array>
#include <map>
#include <string>

#include "db/blob/blob_index.h"
#include "db/column_family.h"
#include "db/db_impl/db_impl.h"
#include "db/version_set.h"
#include "file/writable_file_writer.h"
#include "rocksdb/cache.h"
#include "rocksdb/file_system.h"
#include "rocksdb/write_buffer_manager.h"
#include "table/mock_table.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/random.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {
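
// FlushJobTestBase sets up a real VersionSet and on-disk MANIFEST in a
// scratch directory, but writes table files through MockTableFactory so that
// flushed contents can be verified in memory.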
// TODO(icanadi) Mock out everything else:
// 1. VersionSet
// 2. Memtable
class FlushJobTestBase : public testing::Test {
 protected:
  FlushJobTestBase(std::string dbname, const Comparator* ucmp)
      : env_(Env::Default()),
        fs_(env_->GetFileSystem()),
        dbname_(std::move(dbname)),
        ucmp_(ucmp),
        options_(),
        db_options_(options_),
        column_family_names_({kDefaultColumnFamilyName, "foo", "bar"}),
        table_cache_(NewLRUCache(50000, 16)),
        write_buffer_manager_(db_options_.db_write_buffer_size),
        shutting_down_(false),
        mock_table_factory_(new mock::MockTableFactory()) {}

  virtual ~FlushJobTestBase() {
    if (getenv("KEEP_DB")) {
      fprintf(stdout, "db is still in %s\n", dbname_.c_str());
    } else {
      // destroy versions_ to release all file handles
      versions_.reset();
      EXPECT_OK(DestroyDir(env_, dbname_));
    }
  }
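
  // Creates the on-disk layout of a fresh DB: an IDENTITY file, a MANIFEST
  // describing the default and extra column families, and a CURRENT file
  // pointing at that MANIFEST, so VersionSet::Recover() can run in SetUp().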
  void NewDB() {
    ASSERT_OK(SetIdentityFile(env_, dbname_));
    VersionEdit new_db;

    new_db.SetLogNumber(0);
    new_db.SetNextFile(2);
    new_db.SetLastSequence(0);

    autovector<VersionEdit> new_cfs;
    SequenceNumber last_seq = 1;
    uint32_t cf_id = 1;
    for (size_t i = 1; i != column_family_names_.size(); ++i) {
      VersionEdit new_cf;
      new_cf.AddColumnFamily(column_family_names_[i]);
      new_cf.SetColumnFamily(cf_id++);
      new_cf.SetComparatorName(ucmp_->Name());
      new_cf.SetPersistUserDefinedTimestamps(persist_udt_);
      new_cf.SetLogNumber(0);
      new_cf.SetNextFile(2);
      new_cf.SetLastSequence(last_seq++);
      new_cfs.emplace_back(new_cf);
    }

    const std::string manifest = DescriptorFileName(dbname_, 1);
    const auto& fs = env_->GetFileSystem();
    std::unique_ptr<WritableFileWriter> file_writer;
    Status s = WritableFileWriter::Create(
        fs, manifest, fs->OptimizeForManifestWrite(env_options_), &file_writer,
        nullptr);
    ASSERT_OK(s);

    {
      log::Writer log(std::move(file_writer), 0, false);
      std::string record;
      new_db.EncodeTo(&record);
      s = log.AddRecord(record);
      ASSERT_OK(s);

      for (const auto& e : new_cfs) {
        record.clear();
        e.EncodeTo(&record);
        s = log.AddRecord(record);
        ASSERT_OK(s);
      }
    }
    ASSERT_OK(s);
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(fs_.get(), dbname_, 1, nullptr);
    ASSERT_OK(s);
  }
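
  // Sets up the test directory, writes the initial MANIFEST via NewDB(), and
  // recovers a VersionSet whose column families all use the mock table
  // factory.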
  void SetUp() override {
    EXPECT_OK(env_->CreateDirIfMissing(dbname_));

    // TODO(icanadi) Remove this once we mock out VersionSet
    NewDB();

    db_options_.env = env_;
    db_options_.fs = fs_;
    db_options_.db_paths.emplace_back(dbname_,
                                      std::numeric_limits<uint64_t>::max());
    db_options_.statistics = CreateDBStatistics();

    cf_options_.comparator = ucmp_;
    cf_options_.persist_user_defined_timestamps = persist_udt_;
    cf_options_.paranoid_file_checks = paranoid_file_checks_;

    std::vector<ColumnFamilyDescriptor> column_families;
    cf_options_.table_factory = mock_table_factory_;
    for (const auto& cf_name : column_family_names_) {
      column_families.emplace_back(cf_name, cf_options_);
    }

    versions_.reset(
        new VersionSet(dbname_, &db_options_, env_options_, table_cache_.get(),
                       &write_buffer_manager_, &write_controller_,
                       /*block_cache_tracer=*/nullptr, /*io_tracer=*/nullptr,
                       /*db_id*/ "", /*db_session_id*/ ""));
    EXPECT_OK(versions_->Recover(column_families, false));
  }

  Env* env_;
  std::shared_ptr<FileSystem> fs_;
  std::string dbname_;
  const Comparator* const ucmp_;
  EnvOptions env_options_;
  Options options_;
  ImmutableDBOptions db_options_;
  const std::vector<std::string> column_family_names_;
  std::shared_ptr<Cache> table_cache_;
  WriteController write_controller_;
  WriteBufferManager write_buffer_manager_;
  ColumnFamilyOptions cf_options_;
  std::unique_ptr<VersionSet> versions_;
  InstrumentedMutex mutex_;
  std::atomic<bool> shutting_down_;
  std::shared_ptr<mock::MockTableFactory> mock_table_factory_;

  bool persist_udt_ = true;
  bool paranoid_file_checks_ = false;

  SeqnoToTimeMapping empty_seqno_to_time_mapping_;
};

class FlushJobTest : public FlushJobTestBase {
 public:
  FlushJobTest()
      : FlushJobTestBase(test::PerThreadDBPath("flush_job_test"),
                         BytewiseComparator()) {}
};
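
// Flushing a column family whose immutable memtable list is empty should
// still succeed as a no-op.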
TEST_F(FlushJobTest, Empty) {
  JobContext job_context(0);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  EventLogger event_logger(db_options_.info_log.get());
  SnapshotChecker* snapshot_checker = nullptr;  // not relevant
  FlushJob flush_job(
      dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
      *cfd->GetLatestMutableCFOptions(),
      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
      versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
      snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr,
      nullptr, kNoCompression, nullptr, &event_logger, false,
      true /* sync_output_directory */, true /* write_manifest */,
      Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_);
  {
    InstrumentedMutexLock l(&mutex_);
    flush_job.PickMemTable();
    ASSERT_OK(flush_job.Run());
  }
  job_context.Clean();
}
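
// Flushes a single memtable holding ~10000 point writes, one range tombstone,
// and several blob references, then verifies the resulting file's key range,
// sequence numbers, oldest referenced blob file, and contents.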
TEST_F(FlushJobTest, NonEmpty) {
  JobContext job_context(0);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  auto new_mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
                                           kMaxSequenceNumber);
  new_mem->Ref();
  auto inserted_keys = mock::MakeMockFile();
  // Test data:
  //   seqno [    1,    2 ... 8998, 8999, 9000, 9001, 9002 ... 9999 ]
  //   key   [ 1001, 1002 ... 9998, 9999,    0,    1,    2 ...  999 ]
  //   range-delete "9995" -> "9999" at seqno 10000
  //   blob references with seqnos 10001..10006
  for (int i = 1; i < 10000; ++i) {
    std::string key(std::to_string((i + 1000) % 10000));
    std::string value("value" + key);
    ASSERT_OK(new_mem->Add(SequenceNumber(i), kTypeValue, key, value,
                           nullptr /* kv_prot_info */));
    if ((i + 1000) % 10000 < 9995) {
      InternalKey internal_key(key, SequenceNumber(i), kTypeValue);
      inserted_keys.push_back({internal_key.Encode().ToString(), value});
    }
  }

  {
    ASSERT_OK(new_mem->Add(SequenceNumber(10000), kTypeRangeDeletion, "9995",
                           "9999a", nullptr /* kv_prot_info */));
    InternalKey internal_key("9995", SequenceNumber(10000),
                             kTypeRangeDeletion);
    inserted_keys.push_back({internal_key.Encode().ToString(), "9999a"});
  }

  // Note: the first two blob references will not be considered when resolving
  // the oldest blob file referenced (the first one is inlined TTL, while the
  // second one is TTL and thus points to a TTL blob file).
  constexpr std::array<uint64_t, 6> blob_file_numbers{
      {kInvalidBlobFileNumber, 5, 103, 17, 102, 101}};
  for (size_t i = 0; i < blob_file_numbers.size(); ++i) {
    std::string key(std::to_string(i + 10001));
    std::string blob_index;
    if (i == 0) {
      BlobIndex::EncodeInlinedTTL(&blob_index, /* expiration */ 1234567890ULL,
                                  "foo");
    } else if (i == 1) {
      BlobIndex::EncodeBlobTTL(&blob_index, /* expiration */ 1234567890ULL,
                               blob_file_numbers[i], /* offset */ i << 10,
                               /* size */ i << 20, kNoCompression);
    } else {
      BlobIndex::EncodeBlob(&blob_index, blob_file_numbers[i],
                            /* offset */ i << 10, /* size */ i << 20,
                            kNoCompression);
    }

    const SequenceNumber seq(i + 10001);
    ASSERT_OK(new_mem->Add(seq, kTypeBlobIndex, key, blob_index,
                           nullptr /* kv_prot_info */));

    InternalKey internal_key(key, seq, kTypeBlobIndex);
    inserted_keys.push_back({internal_key.Encode().ToString(), blob_index});
  }
  mock::SortKVVector(&inserted_keys);

  autovector<MemTable*> to_delete;
  new_mem->ConstructFragmentedRangeTombstones();
  cfd->imm()->Add(new_mem, &to_delete);
  for (auto& m : to_delete) {
    delete m;
  }

  EventLogger event_logger(db_options_.info_log.get());
  SnapshotChecker* snapshot_checker = nullptr;  // not relevant
  FlushJob flush_job(
      dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
      *cfd->GetLatestMutableCFOptions(),
      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
      versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
      snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr,
      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
      true, true /* sync_output_directory */, true /* write_manifest */,
      Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_);

  HistogramData hist;
  FileMetaData file_meta;
  mutex_.Lock();
  flush_job.PickMemTable();
  ASSERT_OK(flush_job.Run(nullptr, &file_meta));
  mutex_.Unlock();
  db_options_.statistics->histogramData(FLUSH_TIME, &hist);
  ASSERT_GT(hist.average, 0.0);

  ASSERT_EQ(std::to_string(0), file_meta.smallest.user_key().ToString());
  ASSERT_EQ("9999a", file_meta.largest.user_key().ToString());
  ASSERT_EQ(1, file_meta.fd.smallest_seqno);
  ASSERT_EQ(10006, file_meta.fd.largest_seqno);
  ASSERT_EQ(17, file_meta.oldest_blob_file_number);
  mock_table_factory_->AssertSingleFile(inserted_keys);
  job_context.Clean();
}
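
// Adds two memtables to the default column family but caps the flush job at
// the first memtable's ID, then verifies the output file covers only that
// memtable's keys and sequence numbers.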
TEST_F(FlushJobTest, FlushMemTablesSingleColumnFamily) {
  const size_t num_mems = 2;
  const size_t num_mems_to_flush = 1;
  const size_t num_keys_per_table = 100;
  JobContext job_context(0);
  ColumnFamilyData* cfd = versions_->GetColumnFamilySet()->GetDefault();
  std::vector<uint64_t> memtable_ids;
  std::vector<MemTable*> new_mems;
  for (size_t i = 0; i != num_mems; ++i) {
    MemTable* mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
                                              kMaxSequenceNumber);
    mem->SetID(i);
    mem->Ref();
    new_mems.emplace_back(mem);
    memtable_ids.push_back(mem->GetID());

    for (size_t j = 0; j < num_keys_per_table; ++j) {
      std::string key(std::to_string(j + i * num_keys_per_table));
      std::string value("value" + key);
      ASSERT_OK(mem->Add(SequenceNumber(j + i * num_keys_per_table), kTypeValue,
                         key, value, nullptr /* kv_prot_info */));
    }
  }

  autovector<MemTable*> to_delete;
  for (auto mem : new_mems) {
    mem->ConstructFragmentedRangeTombstones();
    cfd->imm()->Add(mem, &to_delete);
  }

  EventLogger event_logger(db_options_.info_log.get());
  SnapshotChecker* snapshot_checker = nullptr;  // not relevant

  assert(memtable_ids.size() == num_mems);
  uint64_t smallest_memtable_id = memtable_ids.front();
  uint64_t flush_memtable_id = smallest_memtable_id + num_mems_to_flush - 1;
  FlushJob flush_job(
      dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
      *cfd->GetLatestMutableCFOptions(), flush_memtable_id, env_options_,
      versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
      snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr,
      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
      true, true /* sync_output_directory */, true /* write_manifest */,
      Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_);
  HistogramData hist;
  FileMetaData file_meta;
  mutex_.Lock();
  flush_job.PickMemTable();
  ASSERT_OK(flush_job.Run(nullptr /* prep_tracker */, &file_meta));
  mutex_.Unlock();
  db_options_.statistics->histogramData(FLUSH_TIME, &hist);
  ASSERT_GT(hist.average, 0.0);

  ASSERT_EQ(std::to_string(0), file_meta.smallest.user_key().ToString());
  ASSERT_EQ("99", file_meta.largest.user_key().ToString());
  ASSERT_EQ(0, file_meta.fd.smallest_seqno);
  ASSERT_EQ(SequenceNumber(num_mems_to_flush * num_keys_per_table - 1),
            file_meta.fd.largest_seqno);
  ASSERT_EQ(kInvalidBlobFileNumber, file_meta.oldest_blob_file_number);

  for (auto m : to_delete) {
    delete m;
  }
  to_delete.clear();
  job_context.Clean();
}
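
// Builds several memtables in each of the three column families and flushes
// them with one FlushJob per family, installing all results in a single
// atomic step at the end.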
TEST_F(FlushJobTest, FlushMemtablesMultipleColumnFamilies) {
  autovector<ColumnFamilyData*> all_cfds;
  for (auto cfd : *versions_->GetColumnFamilySet()) {
    all_cfds.push_back(cfd);
  }
  const std::vector<size_t> num_memtables = {2, 1, 3};
  assert(num_memtables.size() == column_family_names_.size());
  const size_t num_keys_per_memtable = 1000;
  JobContext job_context(0);
  std::vector<uint64_t> memtable_ids;
  std::vector<SequenceNumber> smallest_seqs;
  std::vector<SequenceNumber> largest_seqs;
  autovector<MemTable*> to_delete;
  SequenceNumber curr_seqno = 0;
  size_t k = 0;
  for (auto cfd : all_cfds) {
    smallest_seqs.push_back(curr_seqno);
    for (size_t i = 0; i != num_memtables[k]; ++i) {
      MemTable* mem = cfd->ConstructNewMemtable(
          *cfd->GetLatestMutableCFOptions(), kMaxSequenceNumber);
      mem->SetID(i);
      mem->Ref();

      for (size_t j = 0; j != num_keys_per_memtable; ++j) {
        std::string key(std::to_string(j + i * num_keys_per_memtable));
        std::string value("value" + key);
        ASSERT_OK(mem->Add(curr_seqno++, kTypeValue, key, value,
                           nullptr /* kv_prot_info */));
      }
      mem->ConstructFragmentedRangeTombstones();
      cfd->imm()->Add(mem, &to_delete);
    }
    largest_seqs.push_back(curr_seqno - 1);
    memtable_ids.push_back(num_memtables[k++] - 1);
  }

  EventLogger event_logger(db_options_.info_log.get());
  SnapshotChecker* snapshot_checker = nullptr;  // not relevant
  std::vector<std::unique_ptr<FlushJob>> flush_jobs;
  k = 0;
  for (auto cfd : all_cfds) {
    std::vector<SequenceNumber> snapshot_seqs;
    flush_jobs.emplace_back(new FlushJob(
        dbname_, cfd, db_options_, *cfd->GetLatestMutableCFOptions(),
        memtable_ids[k], env_options_, versions_.get(), &mutex_,
        &shutting_down_, snapshot_seqs, kMaxSequenceNumber, snapshot_checker,
        &job_context, FlushReason::kTest, nullptr, nullptr, nullptr,
        kNoCompression, db_options_.statistics.get(), &event_logger, true,
        false /* sync_output_directory */, false /* write_manifest */,
        Env::Priority::USER, nullptr /*IOTracer*/,
        empty_seqno_to_time_mapping_));
    k++;
  }
  HistogramData hist;
  std::vector<FileMetaData> file_metas;
  // Call reserve to avoid auto-resizing
  file_metas.reserve(flush_jobs.size());
  mutex_.Lock();
  for (auto& job : flush_jobs) {
    job->PickMemTable();
  }
  for (auto& job : flush_jobs) {
    FileMetaData meta;
    // Run will release and re-acquire mutex
    ASSERT_OK(job->Run(nullptr /**/, &meta));
    file_metas.emplace_back(meta);
  }
  autovector<FileMetaData*> file_meta_ptrs;
  for (auto& meta : file_metas) {
    file_meta_ptrs.push_back(&meta);
  }
  autovector<const autovector<MemTable*>*> mems_list;
  for (size_t i = 0; i != all_cfds.size(); ++i) {
    const auto& mems = flush_jobs[i]->GetMemTables();
    mems_list.push_back(&mems);
  }
  autovector<const MutableCFOptions*> mutable_cf_options_list;
  for (auto cfd : all_cfds) {
    mutable_cf_options_list.push_back(cfd->GetLatestMutableCFOptions());
  }
  autovector<std::list<std::unique_ptr<FlushJobInfo>>*>
      committed_flush_jobs_info;
  for (auto& job : flush_jobs) {
    committed_flush_jobs_info.push_back(job->GetCommittedFlushJobsInfo());
  }
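
  // The jobs above were constructed with write_manifest=false, so commit all
  // of their results across column families with a single atomic install.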
  Status s = InstallMemtableAtomicFlushResults(
      nullptr /* imm_lists */, all_cfds, mutable_cf_options_list, mems_list,
      versions_.get(), nullptr /* prep_tracker */, &mutex_, file_meta_ptrs,
      committed_flush_jobs_info, &job_context.memtables_to_free,
      nullptr /* db_directory */, nullptr /* log_buffer */);
  ASSERT_OK(s);

  mutex_.Unlock();
  db_options_.statistics->histogramData(FLUSH_TIME, &hist);
  ASSERT_GT(hist.average, 0.0);
  k = 0;
  for (const auto& file_meta : file_metas) {
    ASSERT_EQ(std::to_string(0), file_meta.smallest.user_key().ToString());
    ASSERT_EQ("999", file_meta.largest.user_key()
                         .ToString());  // max key by bytewise comparator
    ASSERT_EQ(smallest_seqs[k], file_meta.fd.smallest_seqno);
    ASSERT_EQ(largest_seqs[k], file_meta.fd.largest_seqno);
    // Verify that imm is empty
    ASSERT_EQ(std::numeric_limits<uint64_t>::max(),
              all_cfds[k]->imm()->GetEarliestMemTableID());
    ASSERT_EQ(0, all_cfds[k]->imm()->GetLatestMemTableID(
                     false /* for_atomic_flush */));
    ++k;
  }

  for (auto m : to_delete) {
    delete m;
  }
  to_delete.clear();
  job_context.Clean();
}
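
// Writes several versions of each key and takes random snapshots; after the
// flush, only the newest version of each key plus any version pinned by a
// snapshot should remain in the output file.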
TEST_F(FlushJobTest, Snapshots) {
  JobContext job_context(0);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  auto new_mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
                                           kMaxSequenceNumber);

  std::set<SequenceNumber> snapshots_set;
  int keys = 10000;
  int max_inserts_per_keys = 8;

  Random rnd(301);
  for (int i = 0; i < keys / 2; ++i) {
    snapshots_set.insert(rnd.Uniform(keys * (max_inserts_per_keys / 2)) + 1);
  }
  // The std::set has already removed duplicate snapshots.
  std::vector<SequenceNumber> snapshots(snapshots_set.begin(),
                                        snapshots_set.end());

  new_mem->Ref();
  SequenceNumber current_seqno = 0;
  auto inserted_keys = mock::MakeMockFile();
  for (int i = 1; i < keys; ++i) {
    std::string key(std::to_string(i));
    int insertions = rnd.Uniform(max_inserts_per_keys);
    for (int j = 0; j < insertions; ++j) {
      std::string value(rnd.HumanReadableString(10));
      auto seqno = ++current_seqno;
      ASSERT_OK(new_mem->Add(SequenceNumber(seqno), kTypeValue, key, value,
                             nullptr /* kv_prot_info */));
      // A key is visible only if:
      // 1. it's the last one written (j == insertions - 1), or
      // 2. there's a snapshot pointing at it.
      bool visible = (j == insertions - 1) ||
                     (snapshots_set.find(seqno) != snapshots_set.end());
      if (visible) {
        InternalKey internal_key(key, seqno, kTypeValue);
        inserted_keys.push_back({internal_key.Encode().ToString(), value});
      }
    }
  }
  mock::SortKVVector(&inserted_keys);

  autovector<MemTable*> to_delete;
  new_mem->ConstructFragmentedRangeTombstones();
  cfd->imm()->Add(new_mem, &to_delete);
  for (auto& m : to_delete) {
    delete m;
  }

  EventLogger event_logger(db_options_.info_log.get());
  SnapshotChecker* snapshot_checker = nullptr;  // not relevant
  FlushJob flush_job(
      dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
      *cfd->GetLatestMutableCFOptions(),
      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
      snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr,
      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
      true, true /* sync_output_directory */, true /* write_manifest */,
      Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_);
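  // Pick and run the flush under the DB mutex, mirroring how DBImpl drives a
  // flush job.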
  mutex_.Lock();
  flush_job.PickMemTable();
  ASSERT_OK(flush_job.Run());
  mutex_.Unlock();
  mock_table_factory_->AssertSingleFile(inserted_keys);
  HistogramData hist;
  db_options_.statistics->histogramData(FLUSH_TIME, &hist);
  ASSERT_GT(hist.average, 0.0);
  job_context.Clean();
}

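// Builds two memtables but flushes only the first, then checks that the rate
// limiter priority used for flush writes follows the WriteController state:
// Env::IO_HIGH while writes are normal, Env::IO_USER once they are delayed or
// stopped.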
TEST_F(FlushJobTest, GetRateLimiterPriorityForWrite) {
  // Prepare a FlushJob that flushes the MemTables of a single column family.
  const size_t num_mems = 2;
  const size_t num_mems_to_flush = 1;
  const size_t num_keys_per_table = 100;
  JobContext job_context(0);
  ColumnFamilyData* cfd = versions_->GetColumnFamilySet()->GetDefault();
  std::vector<uint64_t> memtable_ids;
  std::vector<MemTable*> new_mems;
  for (size_t i = 0; i != num_mems; ++i) {
    MemTable* mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
                                              kMaxSequenceNumber);
    mem->SetID(i);
    mem->Ref();
    new_mems.emplace_back(mem);
    memtable_ids.push_back(mem->GetID());

    for (size_t j = 0; j < num_keys_per_table; ++j) {
      std::string key(std::to_string(j + i * num_keys_per_table));
      std::string value("value" + key);
      ASSERT_OK(mem->Add(SequenceNumber(j + i * num_keys_per_table), kTypeValue,
                         key, value, nullptr /* kv_prot_info */));
    }
  }

  autovector<MemTable*> to_delete;
  for (auto mem : new_mems) {
    mem->ConstructFragmentedRangeTombstones();
    cfd->imm()->Add(mem, &to_delete);
  }

  EventLogger event_logger(db_options_.info_log.get());
  SnapshotChecker* snapshot_checker = nullptr;  // not relevant

  assert(memtable_ids.size() == num_mems);
  uint64_t smallest_memtable_id = memtable_ids.front();
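  // FlushJob flushes memtables with IDs up to and including flush_memtable_id,
  // so with num_mems_to_flush == 1 only the first memtable is picked.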
  uint64_t flush_memtable_id = smallest_memtable_id + num_mems_to_flush - 1;
  FlushJob flush_job(
      dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
      *cfd->GetLatestMutableCFOptions(), flush_memtable_id, env_options_,
      versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
      snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr,
      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
      true, true /* sync_output_directory */, true /* write_manifest */,
      Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_);

  // When the state from WriteController is normal.
  ASSERT_EQ(flush_job.GetRateLimiterPriorityForWrite(), Env::IO_HIGH);

  WriteController* write_controller =
      flush_job.versions_->GetColumnFamilySet()->write_controller();

  {
    // When the state from WriteController is Delayed.
    std::unique_ptr<WriteControllerToken> delay_token =
        write_controller->GetDelayToken(1000000);
    ASSERT_EQ(flush_job.GetRateLimiterPriorityForWrite(), Env::IO_USER);
  }

  {
    // When the state from WriteController is Stopped.
    std::unique_ptr<WriteControllerToken> stop_token =
        write_controller->GetStopToken();
    ASSERT_EQ(flush_job.GetRateLimiterPriorityForWrite(), Env::IO_USER);
  }
}

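// The fixture below runs against a bytewise comparator wrapped with a 64-bit
// user-defined timestamp suffix; AddKeyValueToMemtable() appends the encoded
// timestamp to every user key.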
// Test parameters:
// Param 0: paranoid file check
// Param 1: user-defined timestamp test mode
class FlushJobTimestampTest
    : public FlushJobTestBase,
      public testing::WithParamInterface<
          std::tuple<bool, test::UserDefinedTimestampTestMode>> {
 public:
  FlushJobTimestampTest()
      : FlushJobTestBase(test::PerThreadDBPath("flush_job_ts_gc_test"),
                         test::BytewiseComparatorWithU64TsWrapper()) {}

  void AddKeyValueToMemtable(MemTable* memtable, std::string key, uint64_t ts,
                             SequenceNumber seq, ValueType value_type,
                             Slice value) {
    std::string key_str(std::move(key));
    PutFixed64(&key_str, ts);
    ASSERT_OK(memtable->Add(seq, value_type, key_str, value,
                            nullptr /* kv_prot_info */));
  }

 protected:
  void SetUp() override {
    paranoid_file_checks_ = std::get<0>(GetParam());
    auto udt_test_mode = std::get<1>(GetParam());
    persist_udt_ = test::ShouldPersistUDT(udt_test_mode);
    FlushJobTestBase::SetUp();
  }

  static constexpr uint64_t kStartTs = 10;
  static constexpr SequenceNumber kStartSeq = 0;
  SequenceNumber curr_seq_{kStartSeq};
  std::atomic<uint64_t> curr_ts_{kStartTs};

  void CheckFileMetaData(ColumnFamilyData* cfd,
                         const InternalKey& expected_smallest,
                         const InternalKey& expected_largest,
                         const FileMetaData* meta_from_flush) const {
    ASSERT_EQ(expected_smallest.Encode(), meta_from_flush->smallest.Encode());
    ASSERT_EQ(expected_largest.Encode(), meta_from_flush->largest.Encode());

    const VersionStorageInfo* storage_info = cfd->current()->storage_info();
    const std::vector<FileMetaData*>& l0_files = storage_info->LevelFiles(0);

    ASSERT_EQ(l0_files.size(), 1);
    auto installed_file_meta = l0_files[0];
    ASSERT_EQ(expected_smallest.Encode(),
              installed_file_meta->smallest.Encode());
    ASSERT_EQ(expected_largest.Encode(), installed_file_meta->largest.Encode());
  }

  void CheckFullHistoryTsLow(ColumnFamilyData* cfd,
                             const std::string& expected_full_history_ts_low) {
    ASSERT_EQ(expected_full_history_ts_low, cfd->GetFullHistoryTsLow());
  }
};

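// full_history_ts_low is set to the maximum timestamp, so every version of the
// key is eligible for collection; only the final deletion marker survives the
// flush, and the output file's smallest and largest keys are both that
// tombstone.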
TEST_P(FlushJobTimestampTest, AllKeysExpired) {
  ColumnFamilyData* cfd = versions_->GetColumnFamilySet()->GetDefault();
  autovector<MemTable*> to_delete;

  {
    MemTable* new_mem = cfd->ConstructNewMemtable(
        *cfd->GetLatestMutableCFOptions(), kMaxSequenceNumber);
    new_mem->Ref();
    for (int i = 0; i < 100; ++i) {
      uint64_t ts = curr_ts_.fetch_add(1);
      SequenceNumber seq = (curr_seq_++);
      AddKeyValueToMemtable(new_mem, test::EncodeInt(0), ts, seq,
                            ValueType::kTypeValue, "0_value");
    }
    uint64_t ts = curr_ts_.fetch_add(1);
    SequenceNumber seq = (curr_seq_++);
    AddKeyValueToMemtable(new_mem, test::EncodeInt(0), ts, seq,
                          ValueType::kTypeDeletionWithTimestamp, "");
    new_mem->ConstructFragmentedRangeTombstones();
    cfd->imm()->Add(new_mem, &to_delete);
  }

  std::vector<SequenceNumber> snapshots;
  constexpr SnapshotChecker* const snapshot_checker = nullptr;
  JobContext job_context(0);
  EventLogger event_logger(db_options_.info_log.get());
  std::string full_history_ts_low;
  PutFixed64(&full_history_ts_low, std::numeric_limits<uint64_t>::max());
  cfd->SetFullHistoryTsLow(full_history_ts_low);
  FlushJob flush_job(
      dbname_, cfd, db_options_, *cfd->GetLatestMutableCFOptions(),
      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
      snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr,
      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
      true, true /* sync_output_directory */, true /* write_manifest */,
      Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_,
      /*db_id=*/"",
      /*db_session_id=*/"", full_history_ts_low);

  FileMetaData fmeta;
  mutex_.Lock();
  flush_job.PickMemTable();
  ASSERT_OK(flush_job.Run(/*prep_tracker=*/nullptr, &fmeta));
  mutex_.Unlock();

  {
    std::string key = test::EncodeInt(0);
    if (!persist_udt_) {
      // When the `AdvancedColumnFamilyOptions.persist_user_defined_timestamps`
      // flag is set to false, the user-defined timestamp is stripped from the
      // user key during flush, so the user key logically contains the minimum
      // timestamp.
      key.append(test::EncodeInt(0));
    } else {
      key.append(test::EncodeInt(curr_ts_.load(std::memory_order_relaxed) - 1));
    }
    InternalKey ikey(key, curr_seq_ - 1, ValueType::kTypeDeletionWithTimestamp);
    CheckFileMetaData(cfd, ikey, ikey, &fmeta);
    CheckFullHistoryTsLow(cfd, full_history_ts_low);
  }

  job_context.Clean();
  ASSERT_TRUE(to_delete.empty());
}

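// full_history_ts_low stays at timestamp 0, so no version is collected. With
// persisted timestamps the output file spans the newest version (smallest key)
// to the oldest (largest key); with timestamp stripping all versions collapse
// onto the minimum timestamp and full_history_ts_low is raised above the
// flush's effective cutoff.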
TEST_P(FlushJobTimestampTest, NoKeyExpired) {
  ColumnFamilyData* cfd = versions_->GetColumnFamilySet()->GetDefault();
  autovector<MemTable*> to_delete;

  {
    MemTable* new_mem = cfd->ConstructNewMemtable(
        *cfd->GetLatestMutableCFOptions(), kMaxSequenceNumber);
    new_mem->Ref();
    for (int i = 0; i < 100; ++i) {
      uint64_t ts = curr_ts_.fetch_add(1);
      SequenceNumber seq = (curr_seq_++);
      AddKeyValueToMemtable(new_mem, test::EncodeInt(0), ts, seq,
                            ValueType::kTypeValue, "0_value");
    }
    new_mem->ConstructFragmentedRangeTombstones();
    cfd->imm()->Add(new_mem, &to_delete);
  }

  std::vector<SequenceNumber> snapshots;
  SnapshotChecker* const snapshot_checker = nullptr;
  JobContext job_context(0);
  EventLogger event_logger(db_options_.info_log.get());
  std::string full_history_ts_low;
  PutFixed64(&full_history_ts_low, 0);
  cfd->SetFullHistoryTsLow(full_history_ts_low);
  FlushJob flush_job(
      dbname_, cfd, db_options_, *cfd->GetLatestMutableCFOptions(),
      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
      snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr,
      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
      true, true /* sync_output_directory */, true /* write_manifest */,
      Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_,
      /*db_id=*/"",
      /*db_session_id=*/"", full_history_ts_low);

  FileMetaData fmeta;
  mutex_.Lock();
  flush_job.PickMemTable();
  ASSERT_OK(flush_job.Run(/*prep_tracker=*/nullptr, &fmeta));
  mutex_.Unlock();

  {
    std::string ukey = test::EncodeInt(0);
    std::string smallest_key;
    std::string largest_key;
    std::string expected_full_history_ts_low;
    if (!persist_udt_) {
      // When the `AdvancedColumnFamilyOptions.persist_user_defined_timestamps`
      // flag is set to false, the user-defined timestamp is stripped from the
      // user key during flush, so the user key logically contains the minimum
      // timestamp, which is hardcoded to be all zeros for now.
      smallest_key = ukey + test::EncodeInt(0);
      largest_key = ukey + test::EncodeInt(0);
      // When not all keys have expired and `persist_user_defined_timestamps`
      // is false, UDTs are removed during flush and `full_history_ts_low`
      // should be automatically raised above the effective cutoff UDT of the
      // flush.
      PutFixed64(&expected_full_history_ts_low, curr_ts_.fetch_add(1));
    } else {
      smallest_key =
          ukey + test::EncodeInt(curr_ts_.load(std::memory_order_relaxed) - 1);
      largest_key = ukey + test::EncodeInt(kStartTs);
      expected_full_history_ts_low = full_history_ts_low;
    }
    InternalKey smallest(smallest_key, curr_seq_ - 1, ValueType::kTypeValue);
    InternalKey largest(largest_key, kStartSeq, ValueType::kTypeValue);
    CheckFileMetaData(cfd, smallest, largest, &fmeta);
    CheckFullHistoryTsLow(cfd, expected_full_history_ts_low);
  }

  job_context.Clean();
  ASSERT_TRUE(to_delete.empty());
}

// Param 0: paranoid file check
// Param 1: test mode for the user-defined timestamp feature
INSTANTIATE_TEST_CASE_P(
    FlushJobTimestampTest, FlushJobTimestampTest,
    ::testing::Combine(
        ::testing::Bool(),
        ::testing::Values(
            test::UserDefinedTimestampTestMode::kStripUserDefinedTimestamp,
            test::UserDefinedTimestampTestMode::kNormal)));

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}