mirror of
https://github.com/facebook/rocksdb.git
synced 2024-11-30 22:41:48 +00:00
06e593376c
Summary:
## Context/Summary
Similar to https://github.com/facebook/rocksdb/pull/11288 and https://github.com/facebook/rocksdb/pull/11444, categorizing SST/blob file writes according to different IO activities gives more insight into those activities. To that end, this PR does the following:
- Tag different write IOs by passing down and converting WriteOptions to IOOptions (a minimal sketch of this idea follows the lists below)
- Add a new SST_WRITE_MICROS histogram in WritableFileWriter::Append() and break it down into FILE_WRITE_{FLUSH|COMPACTION|DB_OPEN}_MICROS

Some related code refactoring to make the implementation cleaner:
- Blob stats
  - Replace the high-level write measurement with the low-level WritableFileWriter::Append() measurement for BLOB_DB_BLOB_FILE_WRITE_MICROS. This makes FILE_WRITE_{FLUSH|COMPACTION|DB_OPEN}_MICROS include blob files. As a consequence, it introduces some behavioral changes; see HISTORY and the db_bench test plan below for more info.
  - Fix bugs where BLOB_DB_BLOB_FILE_SYNCED/BLOB_DB_BLOB_FILE_BYTES_WRITTEN included files that failed to sync and bytes that failed to write.
- Refactor the WriteOptions constructor for easier construction with io_activity and rate_limiter_priority
- Refactor DBImpl::~DBImpl()/BlobDBImpl::Close() to bypass thread op verification
- Build table
  - TableBuilderOptions now includes Read/WriteOptions so BuildTable() does not need to take these two variables
  - Replace the io_priority passed into BuildTable() with TableBuilderOptions::WriteOptions::rate_limiter_priority. Similar for BlobFileBuilder. This parameter is used for dynamically changing file IO priority for flush; see https://github.com/facebook/rocksdb/pull/9988?fbclid=IwAR1DtKel6c-bRJAdesGo0jsbztRtciByNlvokbxkV6h_L-AE9MACzqRTT5s for more
- Update ThreadStatus::FLUSH_BYTES_WRITTEN to use io_activity instead of io_priority to track flush IO in the flush job and db open
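To illustrate the first bullet above, here is a minimal, self-contained C++ sketch of the tagging idea. It is not the actual RocksDB API: `IOActivity`, `MyWriteOptions`, `MyIOOptions`, `ToIOOptions`, and `TimedAppend` are hypothetical stand-ins for the real WriteOptions/IOOptions/WritableFileWriter plumbing. The point is only that carrying the activity tag down to the file-writer layer lets write timing be bucketed per activity.

```cpp
#include <array>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for RocksDB's WriteOptions/IOOptions.
enum class IOActivity : uint8_t { kFlush = 0, kCompaction = 1, kDBOpen = 2, kUnknown = 3 };

struct MyWriteOptions {
  IOActivity io_activity = IOActivity::kUnknown;
};

struct MyIOOptions {
  IOActivity io_activity = IOActivity::kUnknown;
};

// Analogous to converting WriteOptions to IOOptions before a file write.
MyIOOptions ToIOOptions(const MyWriteOptions& write_options) {
  MyIOOptions io_options;
  io_options.io_activity = write_options.io_activity;
  return io_options;
}

// Per-activity totals, analogous to FILE_WRITE_{FLUSH|COMPACTION|DB_OPEN}_MICROS.
std::array<uint64_t, 4> write_micros_by_activity{};

void TimedAppend(const MyIOOptions& io_options, const char* /*data*/, size_t /*len*/) {
  const auto start = std::chrono::steady_clock::now();
  // ... the actual file write would happen here ...
  const auto end = std::chrono::steady_clock::now();
  const uint64_t micros =
      std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
  write_micros_by_activity[static_cast<size_t>(io_options.io_activity)] += micros;
}

int main() {
  MyWriteOptions flush_write_options;
  flush_write_options.io_activity = IOActivity::kFlush;  // set by the flush path
  TimedAppend(ToIOOptions(flush_write_options), "block", 5);
  std::printf("flush write micros: %llu\n",
              static_cast<unsigned long long>(
                  write_micros_by_activity[static_cast<size_t>(IOActivity::kFlush)]));
  return 0;
}
```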
## Test
### db bench

Flush
```
./db_bench --statistics=1 --benchmarks=fillseq --num=100000 --write_buffer_size=100

rocksdb.sst.write.micros P50 : 1.830863 P95 : 4.094720 P99 : 6.578947 P100 : 26.000000 COUNT : 7875 SUM : 20377
rocksdb.file.write.flush.micros P50 : 1.830863 P95 : 4.094720 P99 : 6.578947 P100 : 26.000000 COUNT : 7875 SUM : 20377
rocksdb.file.write.compaction.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
rocksdb.file.write.db.open.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
```

Compaction, db open
```
Setup: ./db_bench --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench
Run: ./db_bench --statistics=1 --benchmarks=compact --db=../db_bench --use_existing_db=1

rocksdb.sst.write.micros P50 : 2.675325 P95 : 9.578788 P99 : 18.780000 P100 : 314.000000 COUNT : 638 SUM : 3279
rocksdb.file.write.flush.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
rocksdb.file.write.compaction.micros P50 : 2.757353 P95 : 9.610687 P99 : 19.316667 P100 : 314.000000 COUNT : 615 SUM : 3213
rocksdb.file.write.db.open.micros P50 : 2.055556 P95 : 3.925000 P99 : 9.000000 P100 : 9.000000 COUNT : 23 SUM : 66
```

Blob stats - just to make sure they aren't broken by this PR
```
Integrated Blob DB

Setup: ./db_bench --enable_blob_files=1 --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench
Run: ./db_bench --enable_blob_files=1 --statistics=1 --benchmarks=compact --db=../db_bench --use_existing_db=1

pre-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 7.298246 P95 : 9.771930 P99 : 9.991813 P100 : 16.000000 COUNT : 235 SUM : 1600
rocksdb.blobdb.blob.file.synced COUNT : 1
rocksdb.blobdb.blob.file.bytes.written COUNT : 34842

post-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 2.000000 P95 : 2.829360 P99 : 2.993779 P100 : 9.000000 COUNT : 707 SUM : 1614
- COUNT is higher and values are smaller as it now includes header and footer writes
- COUNT is 3X higher because each Append() counts as one post-PR, while pre-PR three Append()s counted as one. See https://github.com/facebook/rocksdb/pull/11910/files#diff-32b811c0a1c000768cfb2532052b44dc0b3bf82253f3eab078e15ff201a0dabfL157-L164

rocksdb.blobdb.blob.file.synced COUNT : 1 (stays the same)
rocksdb.blobdb.blob.file.bytes.written COUNT : 34842 (stays the same)
```

```
Stacked Blob DB

Run: ./db_bench --use_blob_db=1 --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench

pre-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 12.808042 P95 : 19.674497 P99 : 28.539683 P100 : 51.000000 COUNT : 10000 SUM : 140876
rocksdb.blobdb.blob.file.synced COUNT : 8
rocksdb.blobdb.blob.file.bytes.written COUNT : 1043445

post-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 1.657370 P95 : 2.952175 P99 : 3.877519 P100 : 24.000000 COUNT : 30001 SUM : 67924
- COUNT is higher and values are smaller as it now includes header and footer writes
- COUNT is 3X higher because each Append() counts as one post-PR, while pre-PR three Append()s counted as one. See https://github.com/facebook/rocksdb/pull/11910/files#diff-32b811c0a1c000768cfb2532052b44dc0b3bf82253f3eab078e15ff201a0dabfL157-L164

rocksdb.blobdb.blob.file.synced COUNT : 8 (stays the same)
rocksdb.blobdb.blob.file.bytes.written COUNT : 1043445 (stays the same)
```

### Rehearsal CI stress test
Trigger 3 full runs of all our CI stress tests.
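As a complement to the db_bench readouts above, here is a small sketch of reading the same histograms programmatically through the Statistics API. It assumes a RocksDB build that already contains this PR (so that SST_WRITE_MICROS and FILE_WRITE_FLUSH_MICROS exist in the Histograms enum); the database path is made up for the example.

```cpp
#include <cassert>
#include <cstdio>
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Collect statistics so the new write histograms are populated.
  options.statistics = rocksdb::CreateDBStatistics();

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/sst_write_stats_demo", &db);
  assert(s.ok());

  // Generate some SST writes via the flush path.
  for (int i = 0; i < 1000; ++i) {
    s = db->Put(rocksdb::WriteOptions(), "key" + std::to_string(i), "value");
    assert(s.ok());
  }
  s = db->Flush(rocksdb::FlushOptions());
  assert(s.ok());

  // rocksdb.sst.write.micros and its flush breakdown, as introduced by this PR.
  rocksdb::HistogramData sst_write;
  rocksdb::HistogramData flush_write;
  options.statistics->histogramData(rocksdb::SST_WRITE_MICROS, &sst_write);
  options.statistics->histogramData(rocksdb::FILE_WRITE_FLUSH_MICROS, &flush_write);
  std::printf("sst.write.micros: P50=%f COUNT=%llu SUM=%llu\n", sst_write.median,
              static_cast<unsigned long long>(sst_write.count),
              static_cast<unsigned long long>(sst_write.sum));
  std::printf("file.write.flush.micros: P50=%f COUNT=%llu SUM=%llu\n",
              flush_write.median,
              static_cast<unsigned long long>(flush_write.count),
              static_cast<unsigned long long>(flush_write.sum));

  delete db;
  return 0;
}
```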
"ManualCompaction" include flush so the isolated regression for compaction should be around 1.85-0.88 = 0.97% 502465338,502485945,502541789,502909283,503438601,504143885,506113087,506629423,507160414,507393007 ``` Put with WAL (in case passing WriteOptions slows down this path even without collecting SST write stats) ``` TEST_TMPDIR=/dev/shm ./db_basic_bench_pre_pr --benchmark_filter=DBPut/comp_style:0/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:1 --benchmark_repetitions=1000 -- default: 1 thread is used to run benchmark Pre-pr: avg 3848.10 ns 3814,3838,3839,3848,3854,3854,3854,3860,3860,3860 Post-pr: avg 3874.20 ns, regressed 0.68% 3863,3867,3871,3874,3875,3877,3877,3877,3880,3881 ``` Pull Request resolved: https://github.com/facebook/rocksdb/pull/11910 Reviewed By: ajkr Differential Revision: D49788060 Pulled By: hx235 fbshipit-source-id: 79e73699cda5be3b66461687e5147c2484fc5eff
196 lines
5.6 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// Logger implementation that uses custom Env object for logging.

#pragma once

#include <time.h>

#include <atomic>
#include <memory>

#include "file/writable_file_writer.h"
#include "monitoring/iostats_context_imp.h"
#include "port/sys_time.h"
#include "rocksdb/env.h"
#include "rocksdb/file_system.h"
#include "rocksdb/perf_level.h"
#include "rocksdb/slice.h"
#include "test_util/sync_point.h"
#include "util/mutexlock.h"

namespace ROCKSDB_NAMESPACE {

class EnvLogger : public Logger {
 public:
  EnvLogger(std::unique_ptr<FSWritableFile>&& writable_file,
            const std::string& fname, const EnvOptions& options, Env* env,
            InfoLogLevel log_level = InfoLogLevel::ERROR_LEVEL)
      : Logger(log_level),
        env_(env),
        clock_(env_->GetSystemClock().get()),
        file_(std::move(writable_file), fname, options, clock_),
        last_flush_micros_(0),
        flush_pending_(false) {}

  ~EnvLogger() {
    if (!closed_) {
      closed_ = true;
      CloseHelper().PermitUncheckedError();
    }
  }

 private:
  // A guard to prepare file operations, such as mutex and skip
  // I/O context.
  class FileOpGuard {
   public:
    explicit FileOpGuard(EnvLogger& logger)
        : logger_(logger), prev_perf_level_(GetPerfLevel()) {
      // Preserve iostats not to pollute writes from user writes. We might
      // need a better solution than this.
      SetPerfLevel(PerfLevel::kDisable);
      IOSTATS_SET_DISABLE(true);
      logger.mutex_.Lock();
    }
    ~FileOpGuard() {
      logger_.mutex_.Unlock();
      IOSTATS_SET_DISABLE(false);
      SetPerfLevel(prev_perf_level_);
    }

   private:
    EnvLogger& logger_;
    PerfLevel prev_perf_level_;
  };

  void FlushLocked() {
    mutex_.AssertHeld();
    if (flush_pending_) {
      flush_pending_ = false;
      file_.Flush(IOOptions()).PermitUncheckedError();
      file_.reset_seen_error();
    }
    last_flush_micros_ = clock_->NowMicros();
  }

  void Flush() override {
    TEST_SYNC_POINT("EnvLogger::Flush:Begin1");
    TEST_SYNC_POINT("EnvLogger::Flush:Begin2");

    FileOpGuard guard(*this);
    FlushLocked();
  }

  Status CloseImpl() override { return CloseHelper(); }

  Status CloseHelper() {
    FileOpGuard guard(*this);
    const auto close_status = file_.Close(IOOptions());

    if (close_status.ok()) {
      return close_status;
    }
    return Status::IOError("Close of log file failed with error:" +
                           (close_status.getState()
                                ? std::string(close_status.getState())
                                : std::string()));
  }

  using Logger::Logv;
  void Logv(const char* format, va_list ap) override {
    IOSTATS_TIMER_GUARD(logger_nanos);

    const uint64_t thread_id = env_->GetThreadID();

    // We try twice: the first time with a fixed-size stack allocated buffer,
    // and the second time with a much larger dynamically allocated buffer.
    char buffer[500];
    for (int iter = 0; iter < 2; iter++) {
      char* base;
      int bufsize;
      if (iter == 0) {
        bufsize = sizeof(buffer);
        base = buffer;
      } else {
        bufsize = 65536;
        base = new char[bufsize];
      }
      char* p = base;
      char* limit = base + bufsize;

      port::TimeVal now_tv;
      port::GetTimeOfDay(&now_tv, nullptr);
      const time_t seconds = now_tv.tv_sec;
      struct tm t;
      port::LocalTimeR(&seconds, &t);
      p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llu ",
                    t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour,
                    t.tm_min, t.tm_sec, static_cast<int>(now_tv.tv_usec),
                    static_cast<long long unsigned int>(thread_id));

      // Print the message
      if (p < limit) {
        va_list backup_ap;
        va_copy(backup_ap, ap);
        p += vsnprintf(p, limit - p, format, backup_ap);
        va_end(backup_ap);
      }

      // Truncate to available space if necessary
      if (p >= limit) {
        if (iter == 0) {
          continue;  // Try again with larger buffer
        } else {
          p = limit - 1;
        }
      }

      // Add newline if necessary
      if (p == base || p[-1] != '\n') {
        *p++ = '\n';
      }

      assert(p <= limit);
      {
        FileOpGuard guard(*this);
        // We will ignore any error returned by Append().
        file_.Append(IOOptions(), Slice(base, p - base)).PermitUncheckedError();
        file_.reset_seen_error();
        flush_pending_ = true;
        const uint64_t now_micros = clock_->NowMicros();
        if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) {
          FlushLocked();
        }
      }
      if (base != buffer) {
        delete[] base;
      }
      break;
    }
  }

  size_t GetLogFileSize() const override {
    MutexLock l(&mutex_);
    return file_.GetFileSize();
  }

 private:
  Env* env_;
  SystemClock* clock_;
  WritableFileWriter file_;
  mutable port::Mutex mutex_;  // Mutex to protect the shared variables below.
  const static uint64_t flush_every_seconds_ = 5;
  std::atomic_uint_fast64_t last_flush_micros_;
  std::atomic<bool> flush_pending_;
};

}  // namespace ROCKSDB_NAMESPACE