2016-02-09 23:12:00 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-15 23:03:42 +00:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2013-10-16 21:59:46 +00:00
|
|
|
//
|
2011-03-18 22:37:00 +00:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
|
|
|
#include "db/log_reader.h"
|
|
|
|
#include "db/log_writer.h"
|
2019-09-16 17:31:27 +00:00
|
|
|
#include "file/sequence_file_reader.h"
|
|
|
|
#include "file/writable_file_writer.h"
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/env.h"
|
2019-05-31 00:39:43 +00:00
|
|
|
#include "test_util/testharness.h"
|
|
|
|
#include "test_util/testutil.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
#include "util/coding.h"
|
|
|
|
#include "util/crc32c.h"
|
|
|
|
#include "util/random.h"
|
2022-02-24 07:45:04 +00:00
|
|
|
#include "utilities/memory_allocators.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2011-03-18 22:37:00 +00:00
|
|
|
namespace log {
|
|
|
|
|
|
|
|
// Construct a string of the specified length made out of the supplied
// partial string, repeated as many times as needed and truncated to n.
static std::string BigString(const std::string& partial_string, size_t n) {
  std::string out;
  out.reserve(n + partial_string.size());
  while (out.size() < n) {
    out.append(partial_string);
  }
  out.resize(n);
  return out;
}
|
|
|
|
|
|
|
|
// Construct a string from a number: decimal digits followed by a '.'.
static std::string NumberString(int n) { return std::to_string(n) + "."; }
|
|
|
|
|
|
|
|
// Return a skewed potentially long string
|
|
|
|
static std::string RandomSkewedString(int i, Random* rnd) {
|
|
|
|
return BigString(NumberString(i), rnd->Skewed(17));
|
|
|
|
}
|
|
|
|
|
2023-05-12 00:26:19 +00:00
|
|
|
// Param type is tuple<int, bool, CompressionType>
|
2019-03-26 23:41:31 +00:00
|
|
|
// get<0>(tuple): non-zero if recycling log, zero if regular log
|
|
|
|
// get<1>(tuple): true if allow retry after read EOF, false otherwise
|
2023-05-12 00:26:19 +00:00
|
|
|
// get<2>(tuple): type of compression used
|
2022-02-18 00:18:01 +00:00
|
|
|
class LogTest
|
|
|
|
: public ::testing::TestWithParam<std::tuple<int, bool, CompressionType>> {
|
2011-03-18 22:37:00 +00:00
|
|
|
private:
|
2021-01-04 23:59:52 +00:00
|
|
|
class StringSource : public FSSequentialFile {
|
2011-03-18 22:37:00 +00:00
|
|
|
public:
|
Fix UnmarkEOF for partial blocks
Summary:
Blocks in the transaction log are a fixed size, but the last block in the transaction log file is usually a partial block. When a new record is added after the reader hit the end of the file, a new physical record will be appended to the last block. ReadPhysicalRecord can only read full blocks and assumes that the file position indicator is aligned to the start of a block. If the reader is forced to read further by simply clearing the EOF flag, ReadPhysicalRecord will read a full block starting from somewhere in the middle of a real block, causing it to lose alignment and to have a partial physical record at the end of the read buffer. This will result in length mismatches and checksum failures. When the log file is tailed for replication this will cause the log iterator to become invalid, necessitating the creation of a new iterator which will have to read the log file from scratch.
This diff fixes this issue by reading the remaining portion of the last block we read from. This is done when the reader is forced to read further (UnmarkEOF is called).
Test Plan:
- Added unit tests
- Stress test (with replication). Check dbdir/LOG file for corruptions.
- Test on test tier
Reviewers: emayanke, haobo, dhruba
Reviewed By: haobo
CC: vamsi, sheki, dhruba, kailiu, igor
Differential Revision: https://reviews.facebook.net/D15249
2014-01-27 22:49:10 +00:00
|
|
|
Slice& contents_;
|
2011-03-18 22:37:00 +00:00
|
|
|
bool force_error_;
|
Fix UnmarkEOF for partial blocks
Summary:
Blocks in the transaction log are a fixed size, but the last block in the transaction log file is usually a partial block. When a new record is added after the reader hit the end of the file, a new physical record will be appended to the last block. ReadPhysicalRecord can only read full blocks and assumes that the file position indicator is aligned to the start of a block. If the reader is forced to read further by simply clearing the EOF flag, ReadPhysicalRecord will read a full block starting from somewhere in the middle of a real block, causing it to lose alignment and to have a partial physical record at the end of the read buffer. This will result in length mismatches and checksum failures. When the log file is tailed for replication this will cause the log iterator to become invalid, necessitating the creation of a new iterator which will have to read the log file from scratch.
This diff fixes this issue by reading the remaining portion of the last block we read from. This is done when the reader is forced to read further (UnmarkEOF is called).
Test Plan:
- Added unit tests
- Stress test (with replication). Check dbdir/LOG file for corruptions.
- Test on test tier
Reviewers: emayanke, haobo, dhruba
Reviewed By: haobo
CC: vamsi, sheki, dhruba, kailiu, igor
Differential Revision: https://reviews.facebook.net/D15249
2014-01-27 22:49:10 +00:00
|
|
|
size_t force_error_position_;
|
|
|
|
bool force_eof_;
|
|
|
|
size_t force_eof_position_;
|
2011-03-18 22:37:00 +00:00
|
|
|
bool returned_partial_;
|
2019-03-26 23:41:31 +00:00
|
|
|
bool fail_after_read_partial_;
|
|
|
|
explicit StringSource(Slice& contents, bool fail_after_read_partial)
|
|
|
|
: contents_(contents),
|
|
|
|
force_error_(false),
|
|
|
|
force_error_position_(0),
|
|
|
|
force_eof_(false),
|
|
|
|
force_eof_position_(0),
|
|
|
|
returned_partial_(false),
|
|
|
|
fail_after_read_partial_(fail_after_read_partial) {}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2021-01-04 23:59:52 +00:00
|
|
|
IOStatus Read(size_t n, const IOOptions& /*opts*/, Slice* result,
|
|
|
|
char* scratch, IODebugContext* /*dbg*/) override {
|
2019-03-26 23:41:31 +00:00
|
|
|
if (fail_after_read_partial_) {
|
|
|
|
EXPECT_TRUE(!returned_partial_) << "must not Read() after eof/error";
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
if (force_error_) {
|
Fix UnmarkEOF for partial blocks
Summary:
Blocks in the transaction log are a fixed size, but the last block in the transaction log file is usually a partial block. When a new record is added after the reader hit the end of the file, a new physical record will be appended to the last block. ReadPhysicalRecord can only read full blocks and assumes that the file position indicator is aligned to the start of a block. If the reader is forced to read further by simply clearing the EOF flag, ReadPhysicalRecord will read a full block starting from somewhere in the middle of a real block, causing it to lose alignment and to have a partial physical record at the end of the read buffer. This will result in length mismatches and checksum failures. When the log file is tailed for replication this will cause the log iterator to become invalid, necessitating the creation of a new iterator which will have to read the log file from scratch.
This diff fixes this issue by reading the remaining portion of the last block we read from. This is done when the reader is forced to read further (UnmarkEOF is called).
Test Plan:
- Added unit tests
- Stress test (with replication). Check dbdir/LOG file for corruptions.
- Test on test tier
Reviewers: emayanke, haobo, dhruba
Reviewed By: haobo
CC: vamsi, sheki, dhruba, kailiu, igor
Differential Revision: https://reviews.facebook.net/D15249
2014-01-27 22:49:10 +00:00
|
|
|
if (force_error_position_ >= n) {
|
|
|
|
force_error_position_ -= n;
|
|
|
|
} else {
|
|
|
|
*result = Slice(contents_.data(), force_error_position_);
|
|
|
|
contents_.remove_prefix(force_error_position_);
|
|
|
|
force_error_ = false;
|
|
|
|
returned_partial_ = true;
|
2021-01-04 23:59:52 +00:00
|
|
|
return IOStatus::Corruption("read error");
|
Fix UnmarkEOF for partial blocks
Summary:
Blocks in the transaction log are a fixed size, but the last block in the transaction log file is usually a partial block. When a new record is added after the reader hit the end of the file, a new physical record will be appended to the last block. ReadPhysicalRecord can only read full blocks and assumes that the file position indicator is aligned to the start of a block. If the reader is forced to read further by simply clearing the EOF flag, ReadPhysicalRecord will read a full block starting from somewhere in the middle of a real block, causing it to lose alignment and to have a partial physical record at the end of the read buffer. This will result in length mismatches and checksum failures. When the log file is tailed for replication this will cause the log iterator to become invalid, necessitating the creation of a new iterator which will have to read the log file from scratch.
This diff fixes this issue by reading the remaining portion of the last block we read from. This is done when the reader is forced to read further (UnmarkEOF is called).
Test Plan:
- Added unit tests
- Stress test (with replication). Check dbdir/LOG file for corruptions.
- Test on test tier
Reviewers: emayanke, haobo, dhruba
Reviewed By: haobo
CC: vamsi, sheki, dhruba, kailiu, igor
Differential Revision: https://reviews.facebook.net/D15249
2014-01-27 22:49:10 +00:00
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (contents_.size() < n) {
|
|
|
|
n = contents_.size();
|
|
|
|
returned_partial_ = true;
|
|
|
|
}
|
Fix UnmarkEOF for partial blocks
Summary:
Blocks in the transaction log are a fixed size, but the last block in the transaction log file is usually a partial block. When a new record is added after the reader hit the end of the file, a new physical record will be appended to the last block. ReadPhysicalRecord can only read full blocks and assumes that the file position indicator is aligned to the start of a block. If the reader is forced to read further by simply clearing the EOF flag, ReadPhysicalRecord will read a full block starting from somewhere in the middle of a real block, causing it to lose alignment and to have a partial physical record at the end of the read buffer. This will result in length mismatches and checksum failures. When the log file is tailed for replication this will cause the log iterator to become invalid, necessitating the creation of a new iterator which will have to read the log file from scratch.
This diff fixes this issue by reading the remaining portion of the last block we read from. This is done when the reader is forced to read further (UnmarkEOF is called).
Test Plan:
- Added unit tests
- Stress test (with replication). Check dbdir/LOG file for corruptions.
- Test on test tier
Reviewers: emayanke, haobo, dhruba
Reviewed By: haobo
CC: vamsi, sheki, dhruba, kailiu, igor
Differential Revision: https://reviews.facebook.net/D15249
2014-01-27 22:49:10 +00:00
|
|
|
|
|
|
|
if (force_eof_) {
|
|
|
|
if (force_eof_position_ >= n) {
|
|
|
|
force_eof_position_ -= n;
|
|
|
|
} else {
|
|
|
|
force_eof_ = false;
|
|
|
|
n = force_eof_position_;
|
|
|
|
returned_partial_ = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// By using scratch we ensure that caller has control over the
|
|
|
|
// lifetime of result.data()
|
|
|
|
memcpy(scratch, contents_.data(), n);
|
|
|
|
*result = Slice(scratch, n);
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
contents_.remove_prefix(n);
|
2021-01-04 23:59:52 +00:00
|
|
|
return IOStatus::OK();
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2011-05-21 02:17:43 +00:00
|
|
|
|
2021-01-04 23:59:52 +00:00
|
|
|
IOStatus Skip(uint64_t n) override {
|
2011-05-21 02:17:43 +00:00
|
|
|
if (n > contents_.size()) {
|
|
|
|
contents_.clear();
|
2021-01-04 23:59:52 +00:00
|
|
|
return IOStatus::NotFound("in-memory file skipepd past end");
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
contents_.remove_prefix(n);
|
|
|
|
|
2021-01-04 23:59:52 +00:00
|
|
|
return IOStatus::OK();
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
class ReportCollector : public Reader::Reporter {
|
|
|
|
public:
|
|
|
|
size_t dropped_bytes_;
|
|
|
|
std::string message_;
|
|
|
|
|
2022-11-02 21:34:24 +00:00
|
|
|
ReportCollector() : dropped_bytes_(0) {}
|
2019-02-14 21:52:47 +00:00
|
|
|
void Corruption(size_t bytes, const Status& status) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
dropped_bytes_ += bytes;
|
|
|
|
message_.append(status.ToString());
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2021-01-04 23:59:52 +00:00
|
|
|
std::string& dest_contents() { return sink_->contents_; }
|
2013-01-20 10:07:13 +00:00
|
|
|
|
2021-01-04 23:59:52 +00:00
|
|
|
const std::string& dest_contents() const { return sink_->contents_; }
|
2013-01-20 10:07:13 +00:00
|
|
|
|
2021-01-04 23:59:52 +00:00
|
|
|
void reset_source_contents() { source_->contents_ = dest_contents(); }
|
2013-01-20 10:07:13 +00:00
|
|
|
|
Fix UnmarkEOF for partial blocks
Summary:
Blocks in the transaction log are a fixed size, but the last block in the transaction log file is usually a partial block. When a new record is added after the reader hit the end of the file, a new physical record will be appended to the last block. ReadPhysicalRecord can only read full blocks and assumes that the file position indicator is aligned to the start of a block. If the reader is forced to read further by simply clearing the EOF flag, ReadPhysicalRecord will read a full block starting from somewhere in the middle of a real block, causing it to lose alignment and to have a partial physical record at the end of the read buffer. This will result in length mismatches and checksum failures. When the log file is tailed for replication this will cause the log iterator to become invalid, necessitating the creation of a new iterator which will have to read the log file from scratch.
This diff fixes this issue by reading the remaining portion of the last block we read from. This is done when the reader is forced to read further (UnmarkEOF is called).
Test Plan:
- Added unit tests
- Stress test (with replication). Check dbdir/LOG file for corruptions.
- Test on test tier
Reviewers: emayanke, haobo, dhruba
Reviewed By: haobo
CC: vamsi, sheki, dhruba, kailiu, igor
Differential Revision: https://reviews.facebook.net/D15249
2014-01-27 22:49:10 +00:00
|
|
|
Slice reader_contents_;
|
2021-01-04 23:59:52 +00:00
|
|
|
test::StringSink* sink_;
|
|
|
|
StringSource* source_;
|
2011-03-18 22:37:00 +00:00
|
|
|
ReportCollector report_;
|
|
|
|
|
2019-03-26 23:41:31 +00:00
|
|
|
protected:
|
2022-02-18 00:18:01 +00:00
|
|
|
std::unique_ptr<Writer> writer_;
|
|
|
|
std::unique_ptr<Reader> reader_;
|
2019-03-26 23:41:31 +00:00
|
|
|
bool allow_retry_read_;
|
2022-02-18 00:18:01 +00:00
|
|
|
CompressionType compression_type_;
|
2011-05-21 02:17:43 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
public:
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-17 23:16:11 +00:00
|
|
|
LogTest()
|
|
|
|
: reader_contents_(),
|
2021-01-04 23:59:52 +00:00
|
|
|
sink_(new test::StringSink(&reader_contents_)),
|
|
|
|
source_(new StringSource(reader_contents_, !std::get<1>(GetParam()))),
|
2022-02-18 00:18:01 +00:00
|
|
|
allow_retry_read_(std::get<1>(GetParam())),
|
|
|
|
compression_type_(std::get<2>(GetParam())) {
|
2021-01-04 23:59:52 +00:00
|
|
|
std::unique_ptr<FSWritableFile> sink_holder(sink_);
|
|
|
|
std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
|
|
|
|
std::move(sink_holder), "" /* don't care */, FileOptions()));
|
2022-02-18 00:18:01 +00:00
|
|
|
Writer* writer =
|
|
|
|
new Writer(std::move(file_writer), 123, std::get<0>(GetParam()), false,
|
|
|
|
compression_type_);
|
|
|
|
writer_.reset(writer);
|
2021-01-04 23:59:52 +00:00
|
|
|
std::unique_ptr<FSSequentialFile> source_holder(source_);
|
|
|
|
std::unique_ptr<SequentialFileReader> file_reader(
|
|
|
|
new SequentialFileReader(std::move(source_holder), "" /* file name */));
|
2019-03-26 23:41:31 +00:00
|
|
|
if (allow_retry_read_) {
|
2021-01-04 23:59:52 +00:00
|
|
|
reader_.reset(new FragmentBufferedReader(nullptr, std::move(file_reader),
|
|
|
|
&report_, true /* checksum */,
|
|
|
|
123 /* log_number */));
|
2019-03-26 23:41:31 +00:00
|
|
|
} else {
|
2021-01-04 23:59:52 +00:00
|
|
|
reader_.reset(new Reader(nullptr, std::move(file_reader), &report_,
|
2019-03-26 23:41:31 +00:00
|
|
|
true /* checksum */, 123 /* log_number */));
|
|
|
|
}
|
2015-10-19 21:24:05 +00:00
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2015-12-11 19:12:03 +00:00
|
|
|
Slice* get_reader_contents() { return &reader_contents_; }
|
|
|
|
|
2023-06-05 20:36:26 +00:00
|
|
|
void Write(const std::string& msg,
|
|
|
|
const UnorderedMap<uint32_t, size_t>* cf_to_ts_sz = nullptr) {
|
2023-05-12 00:26:19 +00:00
|
|
|
if (cf_to_ts_sz != nullptr && !cf_to_ts_sz->empty()) {
|
|
|
|
ASSERT_OK(writer_->MaybeAddUserDefinedTimestampSizeRecord(*cf_to_ts_sz));
|
|
|
|
}
|
2021-01-04 23:59:52 +00:00
|
|
|
ASSERT_OK(writer_->AddRecord(Slice(msg)));
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2022-11-02 21:34:24 +00:00
|
|
|
size_t WrittenBytes() const { return dest_contents().size(); }
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2023-06-05 20:36:26 +00:00
|
|
|
std::string Read(const WALRecoveryMode wal_recovery_mode =
|
|
|
|
WALRecoveryMode::kTolerateCorruptedTailRecords,
|
|
|
|
UnorderedMap<uint32_t, size_t>* cf_to_ts_sz = nullptr) {
|
2011-03-18 22:37:00 +00:00
|
|
|
std::string scratch;
|
|
|
|
Slice record;
|
2019-03-26 23:41:31 +00:00
|
|
|
bool ret = false;
|
2022-07-25 23:27:26 +00:00
|
|
|
uint64_t record_checksum;
|
|
|
|
ret = reader_->ReadRecord(&record, &scratch, wal_recovery_mode,
|
|
|
|
&record_checksum);
|
2023-05-12 00:26:19 +00:00
|
|
|
if (cf_to_ts_sz != nullptr) {
|
|
|
|
*cf_to_ts_sz = reader_->GetRecordedTimestampSize();
|
|
|
|
}
|
2019-03-26 23:41:31 +00:00
|
|
|
if (ret) {
|
2022-07-25 23:27:26 +00:00
|
|
|
if (!allow_retry_read_) {
|
|
|
|
// allow_retry_read_ means using FragmentBufferedReader which does not
|
|
|
|
// support record checksum yet.
|
|
|
|
uint64_t actual_record_checksum =
|
|
|
|
XXH3_64bits(record.data(), record.size());
|
|
|
|
assert(actual_record_checksum == record_checksum);
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
return record.ToString();
|
|
|
|
} else {
|
|
|
|
return "EOF";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-19 17:48:47 +00:00
|
|
|
void IncrementByte(int offset, char delta) {
|
2013-01-20 10:07:13 +00:00
|
|
|
dest_contents()[offset] += delta;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void SetByte(int offset, char new_byte) {
|
2013-01-20 10:07:13 +00:00
|
|
|
dest_contents()[offset] = new_byte;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2021-01-04 23:59:52 +00:00
|
|
|
void ShrinkSize(int bytes) { sink_->Drop(bytes); }
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2015-10-19 21:24:05 +00:00
|
|
|
void FixChecksum(int header_offset, int len, bool recyclable) {
|
2011-03-18 22:37:00 +00:00
|
|
|
// Compute crc of type/len/data
|
2015-10-19 21:24:05 +00:00
|
|
|
int header_size = recyclable ? kRecyclableHeaderSize : kHeaderSize;
|
|
|
|
uint32_t crc = crc32c::Value(&dest_contents()[header_offset + 6],
|
|
|
|
header_size - 6 + len);
|
2011-03-18 22:37:00 +00:00
|
|
|
crc = crc32c::Mask(crc);
|
2013-01-20 10:07:13 +00:00
|
|
|
EncodeFixed32(&dest_contents()[header_offset], crc);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
Fix UnmarkEOF for partial blocks
Summary:
Blocks in the transaction log are a fixed size, but the last block in the transaction log file is usually a partial block. When a new record is added after the reader hit the end of the file, a new physical record will be appended to the last block. ReadPhysicalRecord can only read full blocks and assumes that the file position indicator is aligned to the start of a block. If the reader is forced to read further by simply clearing the EOF flag, ReadPhysicalRecord will read a full block starting from somewhere in the middle of a real block, causing it to lose alignment and to have a partial physical record at the end of the read buffer. This will result in length mismatches and checksum failures. When the log file is tailed for replication this will cause the log iterator to become invalid, necessitating the creation of a new iterator which will have to read the log file from scratch.
This diff fixes this issue by reading the remaining portion of the last block we read from. This is done when the reader is forced to read further (UnmarkEOF is called).
Test Plan:
- Added unit tests
- Stress test (with replication). Check dbdir/LOG file for corruptions.
- Test on test tier
Reviewers: emayanke, haobo, dhruba
Reviewed By: haobo
CC: vamsi, sheki, dhruba, kailiu, igor
Differential Revision: https://reviews.facebook.net/D15249
2014-01-27 22:49:10 +00:00
|
|
|
void ForceError(size_t position = 0) {
|
2021-01-04 23:59:52 +00:00
|
|
|
source_->force_error_ = true;
|
|
|
|
source_->force_error_position_ = position;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2022-11-02 21:34:24 +00:00
|
|
|
size_t DroppedBytes() const { return report_.dropped_bytes_; }
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2022-11-02 21:34:24 +00:00
|
|
|
std::string ReportMessage() const { return report_.message_; }
|
2011-05-21 02:17:43 +00:00
|
|
|
|
Fix UnmarkEOF for partial blocks
Summary:
Blocks in the transaction log are a fixed size, but the last block in the transaction log file is usually a partial block. When a new record is added after the reader hit the end of the file, a new physical record will be appended to the last block. ReadPhysicalRecord can only read full blocks and assumes that the file position indicator is aligned to the start of a block. If the reader is forced to read further by simply clearing the EOF flag, ReadPhysicalRecord will read a full block starting from somewhere in the middle of a real block, causing it to lose alignment and to have a partial physical record at the end of the read buffer. This will result in length mismatches and checksum failures. When the log file is tailed for replication this will cause the log iterator to become invalid, necessitating the creation of a new iterator which will have to read the log file from scratch.
This diff fixes this issue by reading the remaining portion of the last block we read from. This is done when the reader is forced to read further (UnmarkEOF is called).
Test Plan:
- Added unit tests
- Stress test (with replication). Check dbdir/LOG file for corruptions.
- Test on test tier
Reviewers: emayanke, haobo, dhruba
Reviewed By: haobo
CC: vamsi, sheki, dhruba, kailiu, igor
Differential Revision: https://reviews.facebook.net/D15249
2014-01-27 22:49:10 +00:00
|
|
|
void ForceEOF(size_t position = 0) {
|
2021-01-04 23:59:52 +00:00
|
|
|
source_->force_eof_ = true;
|
|
|
|
source_->force_eof_position_ = position;
|
Fix UnmarkEOF for partial blocks
Summary:
Blocks in the transaction log are a fixed size, but the last block in the transaction log file is usually a partial block. When a new record is added after the reader hit the end of the file, a new physical record will be appended to the last block. ReadPhysicalRecord can only read full blocks and assumes that the file position indicator is aligned to the start of a block. If the reader is forced to read further by simply clearing the EOF flag, ReadPhysicalRecord will read a full block starting from somewhere in the middle of a real block, causing it to lose alignment and to have a partial physical record at the end of the read buffer. This will result in length mismatches and checksum failures. When the log file is tailed for replication this will cause the log iterator to become invalid, necessitating the creation of a new iterator which will have to read the log file from scratch.
This diff fixes this issue by reading the remaining portion of the last block we read from. This is done when the reader is forced to read further (UnmarkEOF is called).
Test Plan:
- Added unit tests
- Stress test (with replication). Check dbdir/LOG file for corruptions.
- Test on test tier
Reviewers: emayanke, haobo, dhruba
Reviewed By: haobo
CC: vamsi, sheki, dhruba, kailiu, igor
Differential Revision: https://reviews.facebook.net/D15249
2014-01-27 22:49:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void UnmarkEOF() {
|
2021-01-04 23:59:52 +00:00
|
|
|
source_->returned_partial_ = false;
|
2019-03-26 23:41:31 +00:00
|
|
|
reader_->UnmarkEOF();
|
Fix UnmarkEOF for partial blocks
Summary:
Blocks in the transaction log are a fixed size, but the last block in the transaction log file is usually a partial block. When a new record is added after the reader hit the end of the file, a new physical record will be appended to the last block. ReadPhysicalRecord can only read full blocks and assumes that the file position indicator is aligned to the start of a block. If the reader is forced to read further by simply clearing the EOF flag, ReadPhysicalRecord will read a full block starting from somewhere in the middle of a real block, causing it to lose alignment and to have a partial physical record at the end of the read buffer. This will result in length mismatches and checksum failures. When the log file is tailed for replication this will cause the log iterator to become invalid, necessitating the creation of a new iterator which will have to read the log file from scratch.
This diff fixes this issue by reading the remaining portion of the last block we read from. This is done when the reader is forced to read further (UnmarkEOF is called).
Test Plan:
- Added unit tests
- Stress test (with replication). Check dbdir/LOG file for corruptions.
- Test on test tier
Reviewers: emayanke, haobo, dhruba
Reviewed By: haobo
CC: vamsi, sheki, dhruba, kailiu, igor
Differential Revision: https://reviews.facebook.net/D15249
2014-01-27 22:49:10 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 23:41:31 +00:00
|
|
|
bool IsEOF() { return reader_->IsEOF(); }
|
Fix UnmarkEOF for partial blocks
Summary:
Blocks in the transaction log are a fixed size, but the last block in the transaction log file is usually a partial block. When a new record is added after the reader hit the end of the file, a new physical record will be appended to the last block. ReadPhysicalRecord can only read full blocks and assumes that the file position indicator is aligned to the start of a block. If the reader is forced to read further by simply clearing the EOF flag, ReadPhysicalRecord will read a full block starting from somewhere in the middle of a real block, causing it to lose alignment and to have a partial physical record at the end of the read buffer. This will result in length mismatches and checksum failures. When the log file is tailed for replication this will cause the log iterator to become invalid, necessitating the creation of a new iterator which will have to read the log file from scratch.
This diff fixes this issue by reading the remaining portion of the last block we read from. This is done when the reader is forced to read further (UnmarkEOF is called).
Test Plan:
- Added unit tests
- Stress test (with replication). Check dbdir/LOG file for corruptions.
- Test on test tier
Reviewers: emayanke, haobo, dhruba
Reviewed By: haobo
CC: vamsi, sheki, dhruba, kailiu, igor
Differential Revision: https://reviews.facebook.net/D15249
2014-01-27 22:49:10 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Returns OK iff recorded error message contains "msg"
|
|
|
|
std::string MatchError(const std::string& msg) const {
|
|
|
|
if (report_.message_.find(msg) == std::string::npos) {
|
|
|
|
return report_.message_;
|
|
|
|
} else {
|
|
|
|
return "OK";
|
|
|
|
}
|
|
|
|
}
|
2023-05-12 00:26:19 +00:00
|
|
|
|
|
|
|
void CheckRecordAndTimestampSize(
|
2023-06-05 20:36:26 +00:00
|
|
|
std::string record, UnorderedMap<uint32_t, size_t>& expected_ts_sz) {
|
|
|
|
UnorderedMap<uint32_t, size_t> recorded_ts_sz;
|
2023-05-12 00:26:19 +00:00
|
|
|
ASSERT_EQ(record,
|
|
|
|
Read(WALRecoveryMode::
|
|
|
|
kTolerateCorruptedTailRecords /* wal_recovery_mode */,
|
|
|
|
&recorded_ts_sz));
|
|
|
|
EXPECT_EQ(expected_ts_sz, recorded_ts_sz);
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
};
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, Empty) {
  // A freshly created log yields no records.
  ASSERT_EQ("EOF", Read());
}
|
2011-05-21 02:17:43 +00:00
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, ReadWrite) {
  // Records — including an empty one — come back in write order.
  static const char* const kRecords[] = {"foo", "bar", "", "xxxx"};
  for (const char* rec : kRecords) {
    Write(rec);
  }
  for (const char* rec : kRecords) {
    ASSERT_EQ(rec, Read());
  }
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ("EOF", Read());  // Make sure reads at eof work
}
|
|
|
|
|
2023-05-12 00:26:19 +00:00
|
|
|
TEST_P(LogTest, ReadWriteWithTimestampSize) {
  // A timestamp-size mapping for column family 1 precedes the first record.
  UnorderedMap<uint32_t, size_t> first_ts_sz = {{1, sizeof(uint64_t)}};
  Write("foo", &first_ts_sz);
  Write("bar");
  // A second mapping, for column family 2, precedes the third record.
  UnorderedMap<uint32_t, size_t> second_ts_sz = {{2, sizeof(char)}};
  Write("", &second_ts_sz);
  Write("xxxx");

  CheckRecordAndTimestampSize("foo", first_ts_sz);
  CheckRecordAndTimestampSize("bar", first_ts_sz);
  // User-defined timestamp size records are accumulated and applied to
  // subsequent records, so the later records see both mappings.
  UnorderedMap<uint32_t, size_t> accumulated_ts_sz(first_ts_sz.begin(),
                                                   first_ts_sz.end());
  accumulated_ts_sz.insert(second_ts_sz.begin(), second_ts_sz.end());
  CheckRecordAndTimestampSize("", accumulated_ts_sz);
  CheckRecordAndTimestampSize("xxxx", accumulated_ts_sz);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ("EOF", Read());  // Make sure reads at eof work
}
|
|
|
|
|
|
|
|
TEST_P(LogTest, ReadWriteWithTimestampSizeZeroTimestampIgnored) {
  UnorderedMap<uint32_t, size_t> base_ts_sz = {{1, sizeof(uint64_t)}};
  Write("foo", &base_ts_sz);
  // Add a zero-sized timestamp entry for column family 2; it should be
  // dropped rather than recorded.
  UnorderedMap<uint32_t, size_t> with_zero_entry(base_ts_sz.begin(),
                                                 base_ts_sz.end());
  with_zero_entry.emplace(2, 0);
  Write("bar", &with_zero_entry);

  CheckRecordAndTimestampSize("foo", base_ts_sz);
  CheckRecordAndTimestampSize("bar", base_ts_sz);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ("EOF", Read());  // Make sure reads at eof work
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, ManyBlocks) {
  // Enough small records to span many physical blocks.
  const int kNumRecords = 100000;
  for (int i = 0; i < kNumRecords; i++) {
    Write(NumberString(i));
  }
  for (int i = 0; i < kNumRecords; i++) {
    ASSERT_EQ(NumberString(i), Read());
  }
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, Fragmentation) {
  // Payloads large enough to be fragmented across blocks.
  const std::string medium = BigString("medium", 50000);
  const std::string large = BigString("large", 100000);
  Write("small");
  Write(medium);
  Write(large);
  ASSERT_EQ("small", Read());
  ASSERT_EQ(medium, Read());
  ASSERT_EQ(large, Read());
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, MarginalTrailer) {
  // Make a trailer that is exactly the same length as an empty record.
  const bool recyclable = std::get<0>(GetParam()) != 0;
  const int header_size = recyclable ? kRecyclableHeaderSize : kHeaderSize;
  const int n = kBlockSize - 2 * header_size;
  Write(BigString("foo", n));
  ASSERT_EQ(static_cast<unsigned int>(kBlockSize - header_size),
            WrittenBytes());
  Write("");
  Write("bar");
  ASSERT_EQ(BigString("foo", n), Read());
  ASSERT_EQ("", Read());
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, MarginalTrailer2) {
  // Make a trailer that is exactly the same length as an empty record.
  int header_size =
      std::get<0>(GetParam()) ? kRecyclableHeaderSize : kHeaderSize;
  const int n = kBlockSize - 2 * header_size;
  Write(BigString("foo", n));
  // Prefer static_cast over the old C-style cast for the expected size.
  ASSERT_EQ(static_cast<unsigned int>(kBlockSize - header_size),
            WrittenBytes());
  Write("bar");
  ASSERT_EQ(BigString("foo", n), Read());
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
  // No corruption should be reported for a well-formed marginal trailer.
  ASSERT_EQ(0U, DroppedBytes());
  ASSERT_EQ("", ReportMessage());
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, ShortTrailer) {
  // Leave a trailer that is shorter than a header; the writer must pad it
  // and start the next record in a fresh block.
  int header_size =
      std::get<0>(GetParam()) ? kRecyclableHeaderSize : kHeaderSize;
  const int n = kBlockSize - 2 * header_size + 4;
  Write(BigString("foo", n));
  // Prefer static_cast over the old C-style cast for the expected size.
  ASSERT_EQ(static_cast<unsigned int>(kBlockSize - header_size + 4),
            WrittenBytes());
  Write("");
  Write("bar");
  ASSERT_EQ(BigString("foo", n), Read());
  ASSERT_EQ("", Read());
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, AlignedEof) {
  // A file ending on a short-trailer boundary must still read cleanly to EOF.
  int header_size =
      std::get<0>(GetParam()) ? kRecyclableHeaderSize : kHeaderSize;
  const int n = kBlockSize - 2 * header_size + 4;
  Write(BigString("foo", n));
  // Prefer static_cast over the old C-style cast for the expected size.
  ASSERT_EQ(static_cast<unsigned int>(kBlockSize - header_size + 4),
            WrittenBytes());
  ASSERT_EQ(BigString("foo", n), Read());
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, RandomRead) {
  // Write skewed-length random records, then regenerate them with an
  // identically seeded generator and verify each read matches.
  constexpr int kNumRecords = 500;
  Random write_rnd(301);
  for (int idx = 0; idx < kNumRecords; ++idx) {
    Write(RandomSkewedString(idx, &write_rnd));
  }
  Random read_rnd(301);
  for (int idx = 0; idx < kNumRecords; ++idx) {
    ASSERT_EQ(RandomSkewedString(idx, &read_rnd), Read());
  }
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
|
|
|
// Tests of all the error paths in log_reader.cc follow:
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, ReadError) {
  // A forced read error drops the whole block and is reported once.
  Write("foo");
  ForceError();
  ASSERT_EQ("EOF", Read());
  // Prefer static_cast over the old C-style cast for the expected size.
  ASSERT_EQ(static_cast<unsigned int>(kBlockSize), DroppedBytes());
  ASSERT_EQ("OK", MatchError("read error"));
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, BadRecordType) {
  // Corrupt the record's type byte (with a re-fixed checksum) and verify the
  // reader drops the record and reports "unknown record type".
  Write("foo");
  // Type is stored in header[6]
  IncrementByte(6, 100);
  // Recompute the checksum so only the type is invalid, not the CRC.
  FixChecksum(0, 3, false);
  ASSERT_EQ("EOF", Read());
  // Only the 3-byte payload is counted as dropped.
  ASSERT_EQ(3U, DroppedBytes());
  ASSERT_EQ("OK", MatchError("unknown record type"));
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, TruncatedTrailingRecordIsIgnored) {
  // In the default recovery mode a record truncated at the tail of the file
  // is silently treated as EOF (e.g. an in-progress write).
  Write("foo");
  ShrinkSize(4);  // Drop all payload as well as a header byte
  ASSERT_EQ("EOF", Read());
  // Truncated last record is ignored, not treated as an error
  ASSERT_EQ(0U, DroppedBytes());
  ASSERT_EQ("", ReportMessage());
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, TruncatedTrailingRecordIsNotIgnored) {
  if (allow_retry_read_) {
    // If read retry is allowed, then truncated trailing record should not
    // raise an error.
    return;
  }
  Write("foo");
  ShrinkSize(4);  // Drop all payload as well as a header byte
  ASSERT_EQ("EOF", Read(WALRecoveryMode::kAbsoluteConsistency));
  // In kAbsoluteConsistency mode the truncated trailing record IS reported
  // as a corruption rather than being silently ignored.
  ASSERT_GT(DroppedBytes(), 0U);
  ASSERT_EQ("OK", MatchError("Corruption: truncated header"));
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, BadLength) {
  if (allow_retry_read_) {
    // If read retry is allowed, then we should not raise an error when the
    // record length specified in header is longer than data currently
    // available. It's possible that the body of the record is not written yet.
    return;
  }
  bool recyclable_log = (std::get<0>(GetParam()) != 0);
  int header_size = recyclable_log ? kRecyclableHeaderSize : kHeaderSize;
  // Fill exactly one block so "foo" starts the second block.
  const int kPayloadSize = kBlockSize - header_size;
  Write(BigString("bar", kPayloadSize));
  Write("foo");
  // Least significant size byte is stored in header[4].
  IncrementByte(4, 1);
  if (!recyclable_log) {
    // The oversized first record is dropped (one whole block) but the
    // following record is still readable.
    ASSERT_EQ("foo", Read());
    ASSERT_EQ(kBlockSize, DroppedBytes());
    ASSERT_EQ("OK", MatchError("bad record length"));
  } else {
    // Recyclable-log readers treat the corruption as end of log.
    ASSERT_EQ("EOF", Read());
  }
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, BadLengthAtEndIsIgnored) {
  if (allow_retry_read_) {
    // If read retry is allowed, then we should not raise an error when the
    // record length specified in header is longer than data currently
    // available. It's possible that the body of the record is not written yet.
    return;
  }
  // A record whose body is one byte short at the very end of the file is
  // treated as EOF in the default recovery mode, with nothing reported.
  Write("foo");
  ShrinkSize(1);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(0U, DroppedBytes());
  ASSERT_EQ("", ReportMessage());
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, BadLengthAtEndIsNotIgnored) {
  if (allow_retry_read_) {
    // If read retry is allowed, then we should not raise an error when the
    // record length specified in header is longer than data currently
    // available. It's possible that the body of the record is not written yet.
    return;
  }
  // Same truncation as BadLengthAtEndIsIgnored, but in absolute-consistency
  // mode the short record body must be reported as a corruption.
  Write("foo");
  ShrinkSize(1);
  ASSERT_EQ("EOF", Read(WALRecoveryMode::kAbsoluteConsistency));
  ASSERT_GT(DroppedBytes(), 0U);
  ASSERT_EQ("OK", MatchError("Corruption: truncated record body"));
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, ChecksumMismatch) {
  // Corrupt the stored CRC (header byte 0) without touching the payload.
  Write("foooooo");
  IncrementByte(0, 14);
  ASSERT_EQ("EOF", Read());
  bool recyclable_log = (std::get<0>(GetParam()) != 0);
  if (!recyclable_log) {
    // Legacy format: payload (7) + extra header bytes (7) = 14 dropped.
    ASSERT_EQ(14U, DroppedBytes());
    ASSERT_EQ("OK", MatchError("checksum mismatch"));
  } else {
    // Recyclable format: the mismatch reads as end-of-log, nothing reported.
    ASSERT_EQ(0U, DroppedBytes());
    ASSERT_EQ("", ReportMessage());
  }
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, UnexpectedMiddleType) {
  // Rewrite the lone record's type to MIDDLE; a MIDDLE fragment with no
  // preceding FIRST must be dropped with "missing start".
  Write("foo");
  bool recyclable_log = (std::get<0>(GetParam()) != 0);
  SetByte(6, static_cast<char>(recyclable_log ? kRecyclableMiddleType
                                              : kMiddleType));
  // recyclable_log is already a bool, so the old !! double-negation was
  // redundant; pass it directly.
  FixChecksum(0, 3, recyclable_log);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3U, DroppedBytes());
  ASSERT_EQ("OK", MatchError("missing start"));
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, UnexpectedLastType) {
  // Rewrite the lone record's type to LAST; a LAST fragment with no
  // preceding FIRST must be dropped with "missing start".
  Write("foo");
  bool recyclable_log = (std::get<0>(GetParam()) != 0);
  SetByte(6,
          static_cast<char>(recyclable_log ? kRecyclableLastType : kLastType));
  // recyclable_log is already a bool, so the old !! double-negation was
  // redundant; pass it directly.
  FixChecksum(0, 3, recyclable_log);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3U, DroppedBytes());
  ASSERT_EQ("OK", MatchError("missing start"));
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, UnexpectedFullType) {
  // Turn the first record ("foo") into a FIRST fragment with no closing
  // fragment; the reader should drop it and still return "bar".
  Write("foo");
  Write("bar");
  bool recyclable_log = (std::get<0>(GetParam()) != 0);
  SetByte(
      6, static_cast<char>(recyclable_log ? kRecyclableFirstType : kFirstType));
  // recyclable_log is already a bool, so the old !! double-negation was
  // redundant; pass it directly.
  FixChecksum(0, 3, recyclable_log);
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3U, DroppedBytes());
  ASSERT_EQ("OK", MatchError("partial record without end"));
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, UnexpectedFirstType) {
  // Turn "foo" into an unterminated FIRST fragment followed by a large
  // multi-fragment record; only the large record should be returned.
  Write("foo");
  Write(BigString("bar", 100000));
  bool recyclable_log = (std::get<0>(GetParam()) != 0);
  SetByte(
      6, static_cast<char>(recyclable_log ? kRecyclableFirstType : kFirstType));
  // recyclable_log is already a bool, so the old !! double-negation was
  // redundant; pass it directly.
  FixChecksum(0, 3, recyclable_log);
  ASSERT_EQ(BigString("bar", 100000), Read());
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3U, DroppedBytes());
  ASSERT_EQ("OK", MatchError("partial record without end"));
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, MissingLastIsIgnored) {
  // Truncating away the final fragment of a multi-block record is treated
  // as EOF in the default recovery mode.
  Write(BigString("bar", kBlockSize));
  // Remove the LAST block, including header.
  ShrinkSize(14);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ("", ReportMessage());
  ASSERT_EQ(0U, DroppedBytes());
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, MissingLastIsNotIgnored) {
  if (allow_retry_read_) {
    // If read retry is allowed, then truncated trailing record should not
    // raise an error.
    return;
  }
  // Same truncation as MissingLastIsIgnored, but absolute-consistency mode
  // must surface the missing LAST fragment as a corruption.
  Write(BigString("bar", kBlockSize));
  // Remove the LAST block, including header.
  ShrinkSize(14);
  ASSERT_EQ("EOF", Read(WALRecoveryMode::kAbsoluteConsistency));
  ASSERT_GT(DroppedBytes(), 0U);
  ASSERT_EQ("OK", MatchError("Corruption: error reading trailing data"));
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, PartialLastIsIgnored) {
  // A LAST fragment whose body is one byte short at file end is treated as
  // EOF in the default recovery mode.
  Write(BigString("bar", kBlockSize));
  // Cause a bad record length in the LAST block.
  ShrinkSize(1);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ("", ReportMessage());
  ASSERT_EQ(0U, DroppedBytes());
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, PartialLastIsNotIgnored) {
  if (allow_retry_read_) {
    // If read retry is allowed, then truncated trailing record should not
    // raise an error.
    return;
  }
  // Same truncation as PartialLastIsIgnored, but absolute-consistency mode
  // must report the short record body as a corruption.
  Write(BigString("bar", kBlockSize));
  // Cause a bad record length in the LAST block.
  ShrinkSize(1);
  ASSERT_EQ("EOF", Read(WALRecoveryMode::kAbsoluteConsistency));
  ASSERT_GT(DroppedBytes(), 0U);
  ASSERT_EQ("OK", MatchError("Corruption: truncated record body"));
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, ErrorJoinsRecords) {
  // Consider two fragmented records:
  //    first(R1) last(R1) first(R2) last(R2)
  // where the middle two fragments disappear.  We do not want
  // first(R1),last(R2) to get joined and returned as a valid record.

  // Write records that span two blocks
  Write(BigString("foo", kBlockSize));
  Write(BigString("bar", kBlockSize));
  Write("correct");

  // Wipe the middle block
  for (unsigned int offset = kBlockSize; offset < 2 * kBlockSize; offset++) {
    SetByte(offset, 'x');
  }

  bool recyclable_log = (std::get<0>(GetParam()) != 0);
  if (!recyclable_log) {
    // Both fragmented records are dropped; the intact record still reads.
    ASSERT_EQ("correct", Read());
    ASSERT_EQ("EOF", Read());
    // Roughly two blocks' worth of data should be reported dropped.
    size_t dropped = DroppedBytes();
    ASSERT_LE(dropped, 2 * kBlockSize + 100);
    ASSERT_GE(dropped, 2 * kBlockSize);
  } else {
    // Recyclable-log readers stop at the corruption.
    ASSERT_EQ("EOF", Read());
  }
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, ClearEofSingleBlock) {
  // Exercise UnmarkEOF within a single partially-read block: after hitting a
  // forced EOF mid-block, clearing the EOF flag must let the reader continue
  // from the correct offset.
  Write("foo");
  Write("bar");
  bool recyclable_log = (std::get<0>(GetParam()) != 0);
  int header_size = recyclable_log ? kRecyclableHeaderSize : kHeaderSize;
  // Force EOF partway into the second record's header+payload.
  ForceEOF(3 + header_size + 2);
  ASSERT_EQ("foo", Read());
  UnmarkEOF();
  ASSERT_EQ("bar", Read());
  ASSERT_TRUE(IsEOF());
  ASSERT_EQ("EOF", Read());
  // Data appended after EOF becomes readable once EOF is cleared again.
  Write("xxx");
  UnmarkEOF();
  ASSERT_EQ("xxx", Read());
  ASSERT_TRUE(IsEOF());
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, ClearEofMultiBlock) {
  // Exercise UnmarkEOF when records span several full blocks plus a partial
  // block, verifying block alignment is preserved across EOF clearing.
  size_t num_full_blocks = 5;
  bool recyclable_log = (std::get<0>(GetParam()) != 0);
  int header_size = recyclable_log ? kRecyclableHeaderSize : kHeaderSize;
  // Payload sized to fill num_full_blocks blocks plus 25 trailing bytes.
  size_t n = (kBlockSize - header_size) * num_full_blocks + 25;
  Write(BigString("foo", n));
  Write(BigString("bar", n));
  // Force EOF just after the first record plus a sliver of the second.
  ForceEOF(n + num_full_blocks * header_size + header_size + 3);
  ASSERT_EQ(BigString("foo", n), Read());
  ASSERT_TRUE(IsEOF());
  UnmarkEOF();
  ASSERT_EQ(BigString("bar", n), Read());
  ASSERT_TRUE(IsEOF());
  // Data appended after EOF becomes readable once EOF is cleared again.
  Write(BigString("xxx", n));
  UnmarkEOF();
  ASSERT_EQ(BigString("xxx", n), Read());
  ASSERT_TRUE(IsEOF());
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, ClearEofError) {
  // If an error occurs during Read() in UnmarkEOF(), the records contained
  // in the buffer should be returned on subsequent calls of ReadRecord()
  // until no more full records are left, whereafter ReadRecord() should return
  // false to indicate that it cannot read any further.

  Write("foo");
  Write("bar");
  UnmarkEOF();
  ASSERT_EQ("foo", Read());
  ASSERT_TRUE(IsEOF());
  Write("xxx");
  // Force a read error at offset 0 before clearing EOF again.
  ForceError(0);
  UnmarkEOF();
  // "bar" was already buffered and is still returned; then reading stops.
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2015-10-08 17:07:15 +00:00
|
|
|
TEST_P(LogTest, ClearEofError2) {
  // Variant of ClearEofError with the error forced at offset 3: the buffered
  // record is still returned, and the error is reported with dropped bytes.
  Write("foo");
  Write("bar");
  UnmarkEOF();
  ASSERT_EQ("foo", Read());
  Write("xxx");
  ForceError(3);
  UnmarkEOF();
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3U, DroppedBytes());
  ASSERT_EQ("OK", MatchError("read error"));
}
|
|
|
|
|
2015-12-11 19:12:03 +00:00
|
|
|
TEST_P(LogTest, Recycle) {
  bool recyclable_log = (std::get<0>(GetParam()) != 0);
  if (!recyclable_log) {
    return;  // test is only valid for recycled logs
  }
  // Fill the original log with at least two blocks of records.
  Write("foo");
  Write("bar");
  Write("baz");
  Write("bif");
  Write("blitz");
  while (get_reader_contents()->size() < log::kBlockSize * 2) {
    Write("xxxxxxxxxxxxxxxx");
  }
  // Overwrite the same backing contents with a new writer using a different
  // log number (123), simulating WAL file recycling.
  std::unique_ptr<FSWritableFile> sink(
      new test::OverwritingStringSink(get_reader_contents()));
  std::unique_ptr<WritableFileWriter> dest_holder(new WritableFileWriter(
      std::move(sink), "" /* don't care */, FileOptions()));
  Writer recycle_writer(std::move(dest_holder), 123, true);
  ASSERT_OK(recycle_writer.AddRecord(Slice("foooo")));
  ASSERT_OK(recycle_writer.AddRecord(Slice("bar")));
  // The old contents remain beyond the new records...
  ASSERT_GE(get_reader_contents()->size(), log::kBlockSize * 2);
  // ...but the reader must return only the new log's records.
  ASSERT_EQ("foooo", Read());
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2023-05-12 00:26:19 +00:00
|
|
|
TEST_P(LogTest, RecycleWithTimestampSize) {
  bool recyclable_log = (std::get<0>(GetParam()) != 0);
  if (!recyclable_log) {
    return;  // test is only valid for recycled logs
  }
  // Write a timestamp-size record plus enough data to span two blocks.
  UnorderedMap<uint32_t, size_t> ts_sz_one = {
      {1, sizeof(uint32_t)},
  };
  Write("foo", &ts_sz_one);
  Write("bar");
  Write("baz");
  Write("bif");
  Write("blitz");
  while (get_reader_contents()->size() < log::kBlockSize * 2) {
    Write("xxxxxxxxxxxxxxxx");
  }
  // Recycle the file with a new writer (log number 123) over the same bytes.
  std::unique_ptr<FSWritableFile> sink(
      new test::OverwritingStringSink(get_reader_contents()));
  std::unique_ptr<WritableFileWriter> dest_holder(new WritableFileWriter(
      std::move(sink), "" /* don't care */, FileOptions()));
  Writer recycle_writer(std::move(dest_holder), 123, true);
  // The recycled log carries its own timestamp-size record; the old one from
  // the pre-recycle log must not leak through.
  UnorderedMap<uint32_t, size_t> ts_sz_two = {
      {2, sizeof(uint64_t)},
  };
  ASSERT_OK(recycle_writer.MaybeAddUserDefinedTimestampSizeRecord(ts_sz_two));
  ASSERT_OK(recycle_writer.AddRecord(Slice("foooo")));
  ASSERT_OK(recycle_writer.AddRecord(Slice("bar")));
  ASSERT_GE(get_reader_contents()->size(), log::kBlockSize * 2);
  CheckRecordAndTimestampSize("foooo", ts_sz_two);
  CheckRecordAndTimestampSize("bar", ts_sz_two);
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2022-02-18 00:18:01 +00:00
|
|
|
// Do NOT enable compression for this instantiation.
// Tuple parameters: <0> non-zero enables recyclable log format,
// <1> a bool flag (presumably wired to allow_retry_read_ in the LogTest
// fixture -- confirm against the fixture constructor), <2> compression type.
INSTANTIATE_TEST_CASE_P(
    Log, LogTest,
    ::testing::Combine(::testing::Values(0, 1), ::testing::Bool(),
                       ::testing::Values(CompressionType::kNoCompression)));
|
2015-10-08 17:07:15 +00:00
|
|
|
|
2018-10-19 18:51:13 +00:00
|
|
|
// Fixture for tailing a live WAL with FragmentBufferedReader: tests write a
// record in pieces to an on-disk file and verify the reader retries until a
// complete record becomes available.
// The int parameter is forwarded to Writer's last constructor argument
// (0 selects the legacy record format; non-zero selects the recyclable
// format, as seen in the tests' kRecyclableHeaderSize selection).
class RetriableLogTest : public ::testing::TestWithParam<int> {
 private:
  // Accumulates corruption reports delivered by the log reader.
  class ReportCollector : public Reader::Reporter {
   public:
    size_t dropped_bytes_;  // running total of bytes reported dropped
    std::string message_;   // concatenation of all corruption status strings

    ReportCollector() : dropped_bytes_(0) {}
    void Corruption(size_t bytes, const Status& status) override {
      dropped_bytes_ += bytes;
      message_.append(status.ToString());
    }
  };

  // Byte image of the in-memory log produced by Encode(); backed by sink_.
  Slice contents_;
  // Non-owning alias: ownership of the StringSink is transferred to the
  // WritableFileWriter inside log_writer_ (see the constructor).
  test::StringSink* sink_;
  // Writes well-formed log records into contents_ (memory only).
  std::unique_ptr<Writer> log_writer_;
  Env* env_;
  const std::string test_dir_;
  const std::string log_file_;
  // On-disk pipeline created by SetupTestEnv(): writer_ appends raw bytes to
  // log_file_, reader_/log_reader_ tail it.
  std::unique_ptr<WritableFileWriter> writer_;
  std::unique_ptr<SequentialFileReader> reader_;
  ReportCollector report_;
  std::unique_ptr<FragmentBufferedReader> log_reader_;

 public:
  // Sets up only the in-memory log writer; the on-disk file, writer, and
  // tailing reader are created later by SetupTestEnv().
  RetriableLogTest()
      : contents_(),
        sink_(new test::StringSink(&contents_)),
        log_writer_(nullptr),
        env_(Env::Default()),
        test_dir_(test::PerThreadDBPath("retriable_log_test")),
        log_file_(test_dir_ + "/log"),
        writer_(nullptr),
        reader_(nullptr),
        log_reader_(nullptr) {
    std::unique_ptr<FSWritableFile> sink_holder(sink_);
    std::unique_ptr<WritableFileWriter> wfw(new WritableFileWriter(
        std::move(sink_holder), "" /* file name */, FileOptions()));
    log_writer_.reset(new Writer(std::move(wfw), 123, GetParam()));
  }

  // Creates the test directory, the on-disk log file, and the tailing
  // FragmentBufferedReader over it. Returns the first failure encountered.
  Status SetupTestEnv() {
    Status s;
    FileOptions fopts;
    auto fs = env_->GetFileSystem();
    s = fs->CreateDirIfMissing(test_dir_, IOOptions(), nullptr);
    std::unique_ptr<FSWritableFile> writable_file;
    if (s.ok()) {
      s = fs->NewWritableFile(log_file_, fopts, &writable_file, nullptr);
    }
    if (s.ok()) {
      writer_.reset(
          new WritableFileWriter(std::move(writable_file), log_file_, fopts));
      EXPECT_NE(writer_, nullptr);
    }
    std::unique_ptr<FSSequentialFile> seq_file;
    if (s.ok()) {
      s = fs->NewSequentialFile(log_file_, fopts, &seq_file, nullptr);
    }
    if (s.ok()) {
      reader_.reset(new SequentialFileReader(std::move(seq_file), log_file_));
      EXPECT_NE(reader_, nullptr);
      log_reader_.reset(new FragmentBufferedReader(
          nullptr, std::move(reader_), &report_, true /* checksum */,
          123 /* log_number */));
      EXPECT_NE(log_reader_, nullptr);
    }
    return s;
  }

  // Current bytes of the in-memory log written via Encode().
  std::string contents() { return sink_->contents_; }

  // Appends msg as a properly framed record to the in-memory log.
  void Encode(const std::string& msg) {
    ASSERT_OK(log_writer_->AddRecord(Slice(msg)));
  }

  // Appends raw bytes (possibly a partial record) to the on-disk log and
  // syncs, so the tailing reader can observe them immediately.
  void Write(const Slice& data) {
    ASSERT_OK(writer_->Append(data));
    ASSERT_OK(writer_->Sync(true));
  }

  // Non-blocking read attempt: returns true and fills *result when a complete
  // record is available; returns false (with *result cleared) otherwise.
  bool TryRead(std::string* result) {
    assert(result != nullptr);
    result->clear();
    std::string scratch;
    Slice record;
    bool r = log_reader_->ReadRecord(&record, &scratch);
    if (r) {
      result->assign(record.data(), record.size());
      return true;
    } else {
      return false;
    }
  }
};
|
|
|
|
|
|
|
|
// Writes one record in two parts, splitting inside the record header
// (header_size - 1 bytes first). A tailing FragmentBufferedReader must hit
// EOF on the partial header, then reassemble the record once the remainder
// is written. Sync points enforce the interleaving: part1 is on disk before
// the reader starts, and part2 is written only after the reader's first EOF.
TEST_P(RetriableLogTest, TailLog_PartialHeader) {
  ASSERT_OK(SetupTestEnv());
  size_t header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
  bool eof = false;
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->LoadDependency(
      {{"RetriableLogTest::TailLog:AfterPart1",
        "RetriableLogTest::TailLog:BeforeReadRecord"},
       {"FragmentBufferedLogReader::TryReadMore:FirstEOF",
        "RetriableLogTest::TailLog:BeforePart2"}});
  SyncPoint::GetInstance()->ClearAllCallBacks();
  SyncPoint::GetInstance()->SetCallBack(
      "FragmentBufferedLogReader::TryReadMore:FirstEOF",
      [&](void* /*arg*/) { eof = true; });
  SyncPoint::GetInstance()->EnableProcessing();

  // Stop one byte short of a full header so the reader's first pass sees an
  // incomplete header.
  size_t delta = header_size - 1;
  port::Thread log_writer_thread([&]() {
    size_t old_sz = contents().size();
    Encode("foo");
    size_t new_sz = contents().size();
    std::string part1 = contents().substr(old_sz, delta);
    std::string part2 =
        contents().substr(old_sz + delta, new_sz - old_sz - delta);
    Write(Slice(part1));
    TEST_SYNC_POINT("RetriableLogTest::TailLog:AfterPart1");
    TEST_SYNC_POINT("RetriableLogTest::TailLog:BeforePart2");
    Write(Slice(part2));
  });

  std::string record;
  port::Thread log_reader_thread([&]() {
    TEST_SYNC_POINT("RetriableLogTest::TailLog:BeforeReadRecord");
    // Busy-retry until the complete record becomes readable.
    while (!TryRead(&record)) {
    }
  });
  log_reader_thread.join();
  log_writer_thread.join();
  ASSERT_EQ("foo", record);
  ASSERT_TRUE(eof);
}
|
|
|
|
|
|
|
|
// Same as TailLog_PartialHeader, except the split falls one byte past the
// header (header_size + 1): the reader's first pass sees a complete header
// with a truncated payload, hits EOF, and must reassemble the record once
// the rest of the payload is written.
TEST_P(RetriableLogTest, TailLog_FullHeader) {
  ASSERT_OK(SetupTestEnv());
  size_t header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
  bool eof = false;
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->LoadDependency(
      {{"RetriableLogTest::TailLog:AfterPart1",
        "RetriableLogTest::TailLog:BeforeReadRecord"},
       {"FragmentBufferedLogReader::TryReadMore:FirstEOF",
        "RetriableLogTest::TailLog:BeforePart2"}});
  SyncPoint::GetInstance()->ClearAllCallBacks();
  SyncPoint::GetInstance()->SetCallBack(
      "FragmentBufferedLogReader::TryReadMore:FirstEOF",
      [&](void* /*arg*/) { eof = true; });
  SyncPoint::GetInstance()->EnableProcessing();

  // Write one byte past the header so the first pass sees a complete header
  // but a truncated payload.
  size_t delta = header_size + 1;
  port::Thread log_writer_thread([&]() {
    size_t old_sz = contents().size();
    Encode("foo");
    size_t new_sz = contents().size();
    std::string part1 = contents().substr(old_sz, delta);
    std::string part2 =
        contents().substr(old_sz + delta, new_sz - old_sz - delta);
    Write(Slice(part1));
    TEST_SYNC_POINT("RetriableLogTest::TailLog:AfterPart1");
    TEST_SYNC_POINT("RetriableLogTest::TailLog:BeforePart2");
    Write(Slice(part2));
    // The sync-point dependency guarantees the reader reported EOF before
    // part2 was written.
    ASSERT_TRUE(eof);
  });

  std::string record;
  port::Thread log_reader_thread([&]() {
    TEST_SYNC_POINT("RetriableLogTest::TailLog:BeforeReadRecord");
    // Busy-retry until the complete record becomes readable.
    while (!TryRead(&record)) {
    }
  });
  log_reader_thread.join();
  log_writer_thread.join();
  ASSERT_EQ("foo", record);
}
|
|
|
|
|
2019-03-26 23:41:31 +00:00
|
|
|
// Single-threaded check of the non-blocking read path: with only a partial
// header on disk, TryRead() must fail without producing a record; once the
// remainder is appended, the very next TryRead() returns the full record.
TEST_P(RetriableLogTest, NonBlockingReadFullRecord) {
  // Clear all sync point callbacks even if this test does not use sync point.
  // It is necessary, otherwise the execute of this test may hit a sync point
  // with which a callback is registered. The registered callback may access
  // some dead variable, causing segfault.
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();
  ASSERT_OK(SetupTestEnv());

  const size_t header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
  const size_t split_pos = header_size - 1;  // split inside the header
  const size_t before = contents().size();
  Encode("foo-bar");
  const size_t after = contents().size();
  const std::string first_half = contents().substr(before, split_pos);
  const std::string second_half =
      contents().substr(before + split_pos, after - before - split_pos);

  // Partial header only: the read fails and reports no record.
  Write(Slice(first_half));
  std::string record;
  ASSERT_FALSE(TryRead(&record));
  ASSERT_TRUE(record.empty());

  // Remainder written: the same read now succeeds.
  Write(Slice(second_half));
  ASSERT_TRUE(TryRead(&record));
  ASSERT_EQ("foo-bar", record);
}
|
|
|
|
|
2020-06-03 22:53:09 +00:00
|
|
|
// GetParam() is forwarded as the Writer constructor's last argument and
// selects kRecyclableHeaderSize when non-zero; run with both 0 (legacy
// format) and 2 (recyclable format).
INSTANTIATE_TEST_CASE_P(bool, RetriableLogTest, ::testing::Values(0, 2));
|
2018-10-19 18:51:13 +00:00
|
|
|
|
2022-02-18 00:18:01 +00:00
|
|
|
// LogTest variant whose setup prepends a compression-type record to the log,
// enabling (streaming) compression for all subsequently written records.
class CompressionLogTest : public LogTest {
 public:
  Status SetupTestEnv() { return writer_->AddCompressionTypeRecord(); }
};
|
|
|
|
|
|
|
|
// A freshly set-up log contains only the compression-type record (when
// compression is enabled) or nothing at all, and reading it yields EOF.
TEST_P(CompressionLogTest, Empty) {
  CompressionType compression_type = std::get<2>(GetParam());
  if (!StreamingCompressionTypeSupported(compression_type)) {
    ROCKSDB_GTEST_SKIP("Test requires support for compression type");
    return;
  }
  ASSERT_OK(SetupTestEnv());
  // Reuse the already-extracted parameter instead of re-reading GetParam().
  const bool compression_enabled = compression_type != kNoCompression;
  // If WAL compression is enabled, a record is added for the compression type
  const int compression_record_size = compression_enabled ? kHeaderSize + 4 : 0;
  ASSERT_EQ(compression_record_size, WrittenBytes());
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2022-03-09 23:49:53 +00:00
|
|
|
// Round-trips a handful of records — including an empty one — through a
// compressed log and verifies they read back in order, followed by EOF.
TEST_P(CompressionLogTest, ReadWrite) {
  CompressionType compression_type = std::get<2>(GetParam());
  if (!StreamingCompressionTypeSupported(compression_type)) {
    ROCKSDB_GTEST_SKIP("Test requires support for compression type");
    return;
  }
  ASSERT_OK(SetupTestEnv());
  const std::vector<std::string> records = {"foo", "bar", "", "xxxx"};
  for (const std::string& rec : records) {
    Write(rec);
  }
  for (const std::string& rec : records) {
    ASSERT_EQ(rec, Read());
  }
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ("EOF", Read());  // Make sure reads at eof work
}
|
|
|
|
|
2023-05-12 00:26:19 +00:00
|
|
|
// Verifies that user-defined timestamp size records written into a
// compressed log are attached to the right data records on read-back, and
// that later timestamp-size records accumulate on top of earlier ones.
TEST_P(CompressionLogTest, ReadWriteWithTimestampSize) {
  CompressionType compression_type = std::get<2>(GetParam());
  if (!StreamingCompressionTypeSupported(compression_type)) {
    ROCKSDB_GTEST_SKIP("Test requires support for compression type");
    return;
  }
  ASSERT_OK(SetupTestEnv());
  // Column family 1 uses 8-byte timestamps, declared just before "foo".
  UnorderedMap<uint32_t, size_t> ts_sz_one = {
      {1, sizeof(uint64_t)},
  };
  Write("foo", &ts_sz_one);
  Write("bar");
  // Column family 2 uses 1-byte timestamps, declared just before "".
  UnorderedMap<uint32_t, size_t> ts_sz_two = {{2, sizeof(char)}};
  Write("", &ts_sz_two);
  Write("xxxx");

  // "foo" and "bar" should both see only the first declaration.
  CheckRecordAndTimestampSize("foo", ts_sz_one);
  CheckRecordAndTimestampSize("bar", ts_sz_one);
  UnorderedMap<uint32_t, size_t> expected_ts_sz_two;
  // User-defined timestamp size records are accumulated and applied to
  // subsequent records.
  expected_ts_sz_two.insert(ts_sz_one.begin(), ts_sz_one.end());
  expected_ts_sz_two.insert(ts_sz_two.begin(), ts_sz_two.end());
  CheckRecordAndTimestampSize("", expected_ts_sz_two);
  CheckRecordAndTimestampSize("xxxx", expected_ts_sz_two);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ("EOF", Read());  // Make sure reads at eof work
}
|
|
|
|
|
2022-03-09 23:49:53 +00:00
|
|
|
// Writes enough small records to span many log blocks and verifies each one
// reads back intact, in order, followed by EOF.
TEST_P(CompressionLogTest, ManyBlocks) {
  CompressionType compression_type = std::get<2>(GetParam());
  if (!StreamingCompressionTypeSupported(compression_type)) {
    ROCKSDB_GTEST_SKIP("Test requires support for compression type");
    return;
  }
  ASSERT_OK(SetupTestEnv());
  constexpr int kNumRecords = 100000;
  for (int rec = 0; rec < kNumRecords; ++rec) {
    Write(NumberString(rec));
  }
  for (int rec = 0; rec < kNumRecords; ++rec) {
    ASSERT_EQ(NumberString(rec), Read());
  }
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
|
|
|
// Round-trips records large enough to be fragmented across log block
// boundaries, alongside a tiny record, and verifies reassembly on read.
TEST_P(CompressionLogTest, Fragmentation) {
  CompressionType compression_type = std::get<2>(GetParam());
  if (!StreamingCompressionTypeSupported(compression_type)) {
    ROCKSDB_GTEST_SKIP("Test requires support for compression type");
    return;
  }
  ASSERT_OK(SetupTestEnv());
  Random rnd(301);
  const std::vector<std::string> payloads = {
      "small",
      rnd.RandomBinaryString(3 * kBlockSize / 2),  // Spans into block 2
      rnd.RandomBinaryString(3 * kBlockSize),      // Spans into block 5
  };
  for (const std::string& payload : payloads) {
    Write(payload);
  }

  for (const std::string& payload : payloads) {
    ASSERT_EQ(payload, Read());
  }
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2023-02-08 20:05:49 +00:00
|
|
|
// Verifies correct reassembly when a large fragmented record's first
// fragment begins exactly at a block boundary.
TEST_P(CompressionLogTest, AlignedFragmentation) {
  CompressionType compression_type = std::get<2>(GetParam());
  if (!StreamingCompressionTypeSupported(compression_type)) {
    ROCKSDB_GTEST_SKIP("Test requires support for compression type");
    return;
  }
  ASSERT_OK(SetupTestEnv());
  Random rnd(301);
  int num_filler_records = 0;
  // Keep writing small records until the next record will be aligned at the
  // beginning of the block. The mask (kBlockSize - 1) yields the offset
  // within the current block — assumes kBlockSize is a power of two.
  while ((WrittenBytes() & (kBlockSize - 1)) >= kHeaderSize) {
    char entry = 'a';
    ASSERT_OK(writer_->AddRecord(Slice(&entry, 1)));
    num_filler_records++;
  }
  // A 3-block payload that now starts flush against a block boundary.
  const std::vector<std::string> wal_entries = {
      rnd.RandomBinaryString(3 * kBlockSize),
  };
  for (const std::string& wal_entry : wal_entries) {
    Write(wal_entry);
  }

  // Read back the fillers first, then the block-aligned large record.
  for (int i = 0; i < num_filler_records; ++i) {
    ASSERT_EQ("a", Read());
  }
  for (const std::string& wal_entry : wal_entries) {
    ASSERT_EQ(wal_entry, Read());
  }
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2022-02-18 00:18:01 +00:00
|
|
|
// Third tuple element is the compression type (read via std::get<2> in the
// tests). NOTE(review): the first two elements are consumed by the LogTest
// base fixture, which is outside this view — confirm their meanings there.
INSTANTIATE_TEST_CASE_P(
    Compression, CompressionLogTest,
    ::testing::Combine(::testing::Values(0, 1), ::testing::Bool(),
                       ::testing::Values(CompressionType::kNoCompression,
                                         CompressionType::kZSTD)));
|
|
|
|
|
2022-02-24 07:45:04 +00:00
|
|
|
// Parameters: (input size in bytes, compression type). Exercises the raw
// StreamingCompress/StreamingUncompress interfaces directly, outside of any
// log file.
class StreamingCompressionTest
    : public ::testing::TestWithParam<std::tuple<int, CompressionType>> {};
|
|
|
|
|
|
|
|
// Round-trips BigString("abc", input_size) through the streaming
// compress/uncompress interfaces, one kBlockSize output buffer at a time,
// and verifies the reassembled output equals the original input.
TEST_P(StreamingCompressionTest, Basic) {
  size_t input_size = std::get<0>(GetParam());
  CompressionType compression_type = std::get<1>(GetParam());
  if (!StreamingCompressionTypeSupported(compression_type)) {
    ROCKSDB_GTEST_SKIP("Test requires support for compression type");
    return;
  }
  CompressionOptions opts;
  constexpr uint32_t compression_format_version = 2;
  // Own every heap object with unique_ptr so an early return or a failed
  // assertion cannot leak it (the original used raw new/delete).
  std::unique_ptr<StreamingCompress> compress(StreamingCompress::Create(
      compression_type, opts, compression_format_version, kBlockSize));
  std::unique_ptr<StreamingUncompress> uncompress(StreamingUncompress::Create(
      compression_type, compression_format_version, kBlockSize));
  std::unique_ptr<MemoryAllocator> allocator(new DefaultMemoryAllocator());
  std::string input_buffer = BigString("abc", input_size);
  std::vector<std::string> compressed_buffers;
  size_t remaining;
  // Call compress till the entire input is consumed; each call produces at
  // most one output buffer's worth of compressed bytes.
  do {
    char* output_buffer = static_cast<char*>(allocator->Allocate(kBlockSize));
    size_t output_pos = 0;
    remaining = compress->Compress(input_buffer.c_str(), input_size,
                                   output_buffer, &output_pos);
    if (output_pos > 0) {
      std::string compressed_buffer;
      compressed_buffer.assign(output_buffer, output_pos);
      compressed_buffers.emplace_back(std::move(compressed_buffer));
    }
    allocator->Deallocate(static_cast<void*>(output_buffer));
  } while (remaining > 0);

  std::string uncompressed_buffer = "";
  int ret_val = 0;
  size_t output_pos = 0;
  char* uncompressed_output_buffer =
      static_cast<char*>(allocator->Allocate(kBlockSize));
  // Uncompress the fragments and concatenate them.
  for (size_t i = 0; i < compressed_buffers.size(); i++) {
    // Call uncompress till either the entire input is consumed or the output
    // buffer size is equal to the allocated output buffer size.
    const char* input = compressed_buffers[i].c_str();
    do {
      ret_val = uncompress->Uncompress(input, compressed_buffers[i].size(),
                                       uncompressed_output_buffer, &output_pos);
      // Only the first call for a fragment passes the input; later
      // iterations drain the uncompressor's internally buffered data.
      input = nullptr;
      if (output_pos > 0) {
        std::string uncompressed_fragment;
        uncompressed_fragment.assign(uncompressed_output_buffer, output_pos);
        uncompressed_buffer += uncompressed_fragment;
      }
    } while (ret_val > 0 || output_pos == kBlockSize);
  }
  allocator->Deallocate(static_cast<void*>(uncompressed_output_buffer));
  // The final return value from uncompress() should be 0.
  ASSERT_EQ(ret_val, 0);
  ASSERT_EQ(input_buffer, uncompressed_buffer);
}
|
|
|
|
|
|
|
|
// Exercise input sizes well below, near, at, and above the kBlockSize
// output buffer, with ZSTD streaming compression.
INSTANTIATE_TEST_CASE_P(
    StreamingCompression, StreamingCompressionTest,
    ::testing::Combine(::testing::Values(10, 100, 1000, kBlockSize,
                                         kBlockSize * 2),
                       ::testing::Values(CompressionType::kZSTD)));
|
|
|
|
|
2011-10-31 17:22:06 +00:00
|
|
|
} // namespace log
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
int main(int argc, char** argv) {
  // Print a stack trace on crashes to aid debugging of test failures.
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|