// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/env.h"

#include <sys/time.h>

#include "rocksdb/options.h"
#include "util/arena.h"
#include "util/autovector.h"

namespace rocksdb {

Env::~Env() {
}

SequentialFile::~SequentialFile() {
}

RandomAccessFile::~RandomAccessFile() {
}

WritableFile::~WritableFile() {
}

Logger::~Logger() {
}

// LogBuffer batches info log entries in memory so that code holding a mutex
// (e.g. the background thread picking compactions, which can emit many log
// lines for universal compaction) does not block on log I/O while the mutex
// is held; the buffered entries are written out by FlushBufferToLog() after
// the mutex is released.
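
// A typical usage pattern (a sketch only; the mutex, the options object, and
// the work done while holding the lock are illustrative, not defined here):
//
//   LogBuffer log_buffer(InfoLogLevel::INFO, options.info_log);
//   mutex.Lock();
//   // ... pick compactions etc., calling LogToBuffer(&log_buffer, "...") ...
//   mutex.Unlock();
//   log_buffer.FlushBufferToLog();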

// One log entry with its timestamp
struct BufferedLog {
  struct timeval now_tv;  // Timestamp of the log
  char message[1];        // Beginning of log message
};

struct LogBuffer::Rep {
  Arena arena_;
  autovector<BufferedLog*> logs_;
};

// Lazily initialize Rep to avoid allocations when new log is added.
LogBuffer::LogBuffer(const InfoLogLevel log_level,
                     const shared_ptr<Logger>& info_log)
    : rep_(nullptr), log_level_(log_level), info_log_(info_log) {}

LogBuffer::~LogBuffer() { delete rep_; }
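
// Formats one message into arena-backed storage and records its timestamp;
// nothing is written to the underlying logger until FlushBufferToLog().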
void LogBuffer::AddLogToBuffer(const char* format, va_list ap) {
  if (!info_log_ || log_level_ < info_log_->GetInfoLogLevel()) {
    // Skip the message because of its log level.
    return;
  }
  if (rep_ == nullptr) {
    rep_ = new Rep();
  }

  const size_t kLogSizeLimit = 512;
  char* alloc_mem = rep_->arena_.AllocateAligned(kLogSizeLimit);
  BufferedLog* buffered_log = new (alloc_mem) BufferedLog();
  char* p = buffered_log->message;
  char* limit = alloc_mem + kLogSizeLimit - 1;

  // Store the time
  gettimeofday(&(buffered_log->now_tv), nullptr);

  // Print the message
  if (p < limit) {
    va_list backup_ap;
    va_copy(backup_ap, ap);
    p += vsnprintf(p, limit - p, format, backup_ap);
    va_end(backup_ap);
    // vsnprintf returns the length the full message would have had, which can
    // exceed the buffer when the message is truncated; clamp p so the
    // terminating '\0' below stays inside the allocation.
    if (p > limit) {
      p = limit;
    }
  }

  // Add '\0' to the end
  *p = '\0';

  rep_->logs_.push_back(buffered_log);
}
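
// Writes every buffered entry to the logger, prefixing each with the time at
// which it was originally buffered.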
void LogBuffer::FlushBufferToLog() const {
  if (rep_ != nullptr) {
    for (BufferedLog* log : rep_->logs_) {
      const time_t seconds = log->now_tv.tv_sec;
      struct tm t;
      localtime_r(&seconds, &t);
      Log(log_level_, info_log_,
          "(Original Log Time %04d/%02d/%02d-%02d:%02d:%02d.%06d) %s",
          t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min,
          t.tm_sec, static_cast<int>(log->now_tv.tv_usec), log->message);
    }
  }
}

FileLock::~FileLock() {
}

void LogToBuffer(LogBuffer* log_buffer, const char* format, ...) {
  if (log_buffer != nullptr) {
    va_list ap;
    va_start(ap, format);
    log_buffer->AddLogToBuffer(format, ap);
    va_end(ap);
  }
}

void LogFlush(Logger* info_log) {
  if (info_log) {
    info_log->Flush();
  }
}

void Log(Logger* info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::INFO, format, ap);
    va_end(ap);
  }
}

void Log(const InfoLogLevel log_level, Logger* info_log, const char* format,
         ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(log_level, format, ap);
    va_end(ap);
  }
}

void Debug(Logger* info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::DEBUG, format, ap);
    va_end(ap);
  }
}

void Info(Logger* info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::INFO, format, ap);
    va_end(ap);
  }
}

void Warn(Logger* info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::WARN, format, ap);
    va_end(ap);
  }
}

void Error(Logger* info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::ERROR, format, ap);
    va_end(ap);
  }
}

void Fatal(Logger* info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::FATAL, format, ap);
    va_end(ap);
  }
}

void LogFlush(const shared_ptr<Logger>& info_log) {
  if (info_log) {
    info_log->Flush();
  }
}

void Log(const InfoLogLevel log_level, const shared_ptr<Logger>& info_log,
         const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(log_level, format, ap);
    va_end(ap);
  }
}

void Debug(const shared_ptr<Logger>& info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::DEBUG, format, ap);
    va_end(ap);
  }
}

void Info(const shared_ptr<Logger>& info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::INFO, format, ap);
    va_end(ap);
  }
}

void Warn(const shared_ptr<Logger>& info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::WARN, format, ap);
    va_end(ap);
  }
}

void Error(const shared_ptr<Logger>& info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::ERROR, format, ap);
    va_end(ap);
  }
}

void Fatal(const shared_ptr<Logger>& info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::FATAL, format, ap);
    va_end(ap);
  }
}

void Log(const shared_ptr<Logger>& info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::INFO, format, ap);
    va_end(ap);
  }
}

static Status DoWriteStringToFile(Env* env, const Slice& data,
                                  const std::string& fname,
                                  bool should_sync) {
  unique_ptr<WritableFile> file;
  EnvOptions soptions;
  Status s = env->NewWritableFile(fname, &file, soptions);
  if (!s.ok()) {
    return s;
  }
  s = file->Append(data);
  if (s.ok() && should_sync) {
    s = file->Sync();
  }
  if (!s.ok()) {
    env->DeleteFile(fname);
  }
  return s;
}

Status WriteStringToFile(Env* env, const Slice& data,
                         const std::string& fname) {
  return DoWriteStringToFile(env, data, fname, false);
}

Status WriteStringToFileSync(Env* env, const Slice& data,
                             const std::string& fname) {
  return DoWriteStringToFile(env, data, fname, true);
}

Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
  EnvOptions soptions;
  data->clear();
  unique_ptr<SequentialFile> file;
  Status s = env->NewSequentialFile(fname, &file, soptions);
  if (!s.ok()) {
    return s;
  }
  static const int kBufferSize = 8192;
  char* space = new char[kBufferSize];
  while (true) {
    Slice fragment;
    s = file->Read(kBufferSize, &fragment, space);
    if (!s.ok()) {
      break;
    }
    data->append(fragment.data(), fragment.size());
    if (fragment.empty()) {
      break;
    }
  }
  delete[] space;
  return s;
}

EnvWrapper::~EnvWrapper() {
}

namespace {  // anonymous namespace

void AssignEnvOptions(EnvOptions* env_options, const Options& options) {
  env_options->use_os_buffer = options.allow_os_buffer;
  env_options->use_mmap_reads = options.allow_mmap_reads;
  env_options->use_mmap_writes = options.allow_mmap_writes;
  env_options->set_fd_cloexec = options.is_fd_close_on_exec;
  env_options->bytes_per_sync = options.bytes_per_sync;
}

}  // anonymous namespace

EnvOptions EnvOptions::AdaptForLogWrite() const {
  EnvOptions adapted = *this;
  adapted.use_mmap_writes = false;
  return adapted;
}

EnvOptions::EnvOptions(const Options& options) {
  AssignEnvOptions(this, options);
}

EnvOptions::EnvOptions() {
  Options options;
  AssignEnvOptions(this, options);
}

}  // namespace rocksdb