// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "db/blob/blob_log_writer.h"

#include <cstdint>
#include <string>

#include "db/blob/blob_log_format.h"
#include "file/writable_file_writer.h"
#include "monitoring/statistics.h"
#include "rocksdb/system_clock.h"
#include "test_util/sync_point.h"
#include "util/coding.h"
#include "util/stop_watch.h"

namespace ROCKSDB_NAMESPACE {
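
// BlobLogWriter appends the header, records, and footer that make up a single
// blob file to the given WritableFileWriter. When do_flush is true, each
// write is followed by an explicit Flush(): the stacked BlobDB treats blob
// files as logs and flushes after every blob to protect against process
// crashes, while the integrated BlobDB builds blob files in background jobs
// and passes false to avoid the per-record flush overhead (see #7892).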
BlobLogWriter::BlobLogWriter(std::unique_ptr<WritableFileWriter>&& dest,
                             SystemClock* clock, Statistics* statistics,
                             uint64_t log_number, bool use_fs, bool do_flush,
                             uint64_t boffset)
    : dest_(std::move(dest)),
      clock_(clock),
      statistics_(statistics),
      log_number_(log_number),
      block_offset_(boffset),
      use_fsync_(use_fs),
      do_flush_(do_flush),
      last_elem_type_(kEtNone) {}
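
// Expected call sequence, enforced by the last_elem_type_ assertions below
// (a minimal sketch; constructing the WritableFileWriter and the
// BlobLogHeader/BlobLogFooter objects is the caller's responsibility):
//
//   BlobLogWriter writer(std::move(file), clock, stats, log_number,
//                        /* use_fs */ false, /* do_flush */ true,
//                        /* boffset */ 0);
//   Status s = writer.WriteHeader(header);  // kEtNone -> kEtFileHdr
//
//   uint64_t key_offset = 0;
//   uint64_t blob_offset = 0;
//   if (s.ok()) {
//     s = writer.AddRecord(key, value, &key_offset, &blob_offset);
//   }                                       // -> kEtRecord, repeatable
//
//   std::string checksum_method;
//   std::string checksum_value;
//   if (s.ok()) {
//     s = writer.AppendFooter(footer, &checksum_method, &checksum_value);
//   }                                       // -> kEtFileFooter, closes file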
BlobLogWriter::~BlobLogWriter() = default;
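// Syncs the blob file through the underlying WritableFileWriter (using fsync
// rather than fdatasync when use_fsync_ is set), recording the latency and a
// BLOB_DB_BLOB_FILE_SYNCED tick in the statistics object.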
Status BlobLogWriter::Sync() {
  TEST_SYNC_POINT("BlobLogWriter::Sync");

  StopWatch sync_sw(clock_, statistics_, BLOB_DB_BLOB_FILE_SYNC_MICROS);
  Status s = dest_->Sync(use_fsync_);
  RecordTick(statistics_, BLOB_DB_BLOB_FILE_SYNCED);
  return s;
}
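// Writes the fixed-size blob file header. This must be the first write to
// the file: the writer has to be at offset 0 and must not have emitted any
// other element yet.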
Status BlobLogWriter::WriteHeader(BlobLogHeader& header) {
  assert(block_offset_ == 0);
  assert(last_elem_type_ == kEtNone);
  std::string str;
  header.EncodeTo(&str);

  Status s = dest_->Append(Slice(str));
  if (s.ok()) {
    block_offset_ += str.size();
    if (do_flush_) {
      s = dest_->Flush();
    }
  }
  last_elem_type_ = kEtFileHdr;
  RecordTick(statistics_, BLOB_DB_BLOB_FILE_BYTES_WRITTEN,
             BlobLogHeader::kSize);
  return s;
}
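// Writes the blob file footer, then syncs and closes the file and releases
// the WritableFileWriter. On success, the file checksum method and value are
// returned through the optional output parameters, which must either both be
// provided or both be null. No further writes are possible afterwards.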
Status BlobLogWriter::AppendFooter(BlobLogFooter& footer,
                                   std::string* checksum_method,
                                   std::string* checksum_value) {
  assert(block_offset_ != 0);
  assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtRecord);

  std::string str;
  footer.EncodeTo(&str);

  Status s = dest_->Append(Slice(str));
  if (s.ok()) {
    block_offset_ += str.size();

    s = Sync();

    if (s.ok()) {
      s = dest_->Close();

      if (s.ok()) {
        assert(!!checksum_method == !!checksum_value);

        if (checksum_method) {
          assert(checksum_method->empty());

          std::string method = dest_->GetFileChecksumFuncName();
          if (method != kUnknownFileChecksumFuncName) {
            *checksum_method = std::move(method);
          }
        }
        if (checksum_value) {
          assert(checksum_value->empty());

          std::string value = dest_->GetFileChecksum();
          if (value != kUnknownFileChecksum) {
            *checksum_value = std::move(value);
          }
        }
      }
    }

    dest_.reset();
  }

  last_elem_type_ = kEtFileFooter;
  RecordTick(statistics_, BLOB_DB_BLOB_FILE_BYTES_WRITTEN,
             BlobLogFooter::kSize);
  return s;
}
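// Appends a single blob record with the given TTL expiration and returns the
// offsets of the key and the blob within the file through key_offset and
// blob_offset.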
Status BlobLogWriter::AddRecord(const Slice& key, const Slice& val,
                                uint64_t expiration, uint64_t* key_offset,
                                uint64_t* blob_offset) {
  assert(block_offset_ != 0);
  assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtRecord);

  std::string buf;
  ConstructBlobHeader(&buf, key, val, expiration);

  Status s = EmitPhysicalRecord(buf, key, val, key_offset, blob_offset);
  return s;
}
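// Same as above, but for records without TTL; the expiration is encoded
// as 0.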
Status BlobLogWriter::AddRecord(const Slice& key, const Slice& val,
                                uint64_t* key_offset, uint64_t* blob_offset) {
  assert(block_offset_ != 0);
  assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtRecord);

  std::string buf;
  ConstructBlobHeader(&buf, key, val, 0);

  Status s = EmitPhysicalRecord(buf, key, val, key_offset, blob_offset);
  return s;
}
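// Serializes the fixed-size record header (key and value sizes, expiration,
// and checksums) into *buf; the key and value payloads themselves are
// appended separately by EmitPhysicalRecord.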
void BlobLogWriter::ConstructBlobHeader(std::string* buf, const Slice& key,
                                        const Slice& val, uint64_t expiration) {
  BlobLogRecord record;
  record.key = key;
  record.value = val;
  record.expiration = expiration;
  record.EncodeHeaderTo(buf);
}
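// Writes the encoded record header followed by the key and the value, then
// advances block_offset_ and reports the resulting key and blob offsets.
// Note that the offsets are updated even on failure, so they are only
// meaningful when the returned Status is OK.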
Status BlobLogWriter::EmitPhysicalRecord(const std::string& headerbuf,
                                         const Slice& key, const Slice& val,
                                         uint64_t* key_offset,
                                         uint64_t* blob_offset) {
  StopWatch write_sw(clock_, statistics_, BLOB_DB_BLOB_FILE_WRITE_MICROS);
  Status s = dest_->Append(Slice(headerbuf));
  if (s.ok()) {
    s = dest_->Append(key);
  }
  if (s.ok()) {
    s = dest_->Append(val);
  }
  if (do_flush_ && s.ok()) {
    s = dest_->Flush();
  }

  *key_offset = block_offset_ + BlobLogRecord::kHeaderSize;
  *blob_offset = *key_offset + key.size();
  block_offset_ = *blob_offset + val.size();
  last_elem_type_ = kEtRecord;
  RecordTick(statistics_, BLOB_DB_BLOB_FILE_BYTES_WRITTEN,
             BlobLogRecord::kHeaderSize + key.size() + val.size());
  return s;
}

}  // namespace ROCKSDB_NAMESPACE