2016-02-09 23:12:00 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-15 23:03:42 +00:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2013-10-16 21:59:46 +00:00
|
|
|
//
|
2015-09-24 22:29:05 +00:00
|
|
|
|
|
|
|
#include "rocksdb/env.h"
|
|
|
|
#include "hdfs/env_hdfs.h"
|
|
|
|
|
2012-06-08 08:11:14 +00:00
|
|
|
#ifdef USE_HDFS
|
2013-10-05 05:32:05 +00:00
|
|
|
#ifndef ROCKSDB_HDFS_FILE_C
|
|
|
|
#define ROCKSDB_HDFS_FILE_C
|
2012-06-08 08:11:14 +00:00
|
|
|
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <time.h>
|
2019-03-29 02:16:58 +00:00
|
|
|
#include <algorithm>
|
2012-06-08 08:11:14 +00:00
|
|
|
#include <iostream>
|
|
|
|
#include <sstream>
|
2019-06-01 00:19:43 +00:00
|
|
|
#include "logging/logging.h"
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/status.h"
|
2015-12-02 13:45:28 +00:00
|
|
|
#include "util/string_util.h"
|
2012-06-08 08:11:14 +00:00
|
|
|
|
2014-05-14 19:14:18 +00:00
|
|
|
#define HDFS_EXISTS 0
|
2014-05-20 21:22:12 +00:00
|
|
|
#define HDFS_DOESNT_EXIST -1
|
|
|
|
#define HDFS_SUCCESS 0
|
2014-05-14 19:14:18 +00:00
|
|
|
|
2012-06-08 08:11:14 +00:00
|
|
|
//
|
2013-10-05 05:32:05 +00:00
|
|
|
// This file defines an HDFS environment for rocksdb. It uses the libhdfs
|
|
|
|
// api to access HDFS. All HDFS files created by one instance of rocksdb
|
2012-11-29 00:42:36 +00:00
|
|
|
// will reside on the same HDFS cluster.
|
2012-06-08 08:11:14 +00:00
|
|
|
//
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2012-06-08 08:11:14 +00:00
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
// Log error message
|
|
|
|
static Status IOError(const std::string& context, int err_number) {
|
2019-03-26 23:41:31 +00:00
|
|
|
return (err_number == ENOSPC)
|
2021-03-25 06:06:31 +00:00
|
|
|
? Status::NoSpace(context, errnoStr(err_number).c_str())
|
2019-03-26 23:41:31 +00:00
|
|
|
: (err_number == ENOENT)
|
2021-03-25 06:06:31 +00:00
|
|
|
? Status::PathNotFound(context, errnoStr(err_number).c_str())
|
|
|
|
: Status::IOError(context, errnoStr(err_number).c_str());
|
2012-06-08 08:11:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// assume that there is one global logger for now. It is not thread-safe,
// but need not be because the logger is initialized at db-open time.
// Consumed by the ROCKS_LOG_* macros throughout this file; it stays null
// unless the commented-out assignment in HdfsEnv::NewLogger is enabled.
static Logger* mylog = nullptr;
|
2012-06-08 08:11:14 +00:00
|
|
|
|
|
|
|
// Used for reading a file from HDFS. It implements both sequential-read
|
|
|
|
// access methods as well as random read access methods.
|
2014-05-14 19:14:18 +00:00
|
|
|
class HdfsReadableFile : virtual public SequentialFile,
|
|
|
|
virtual public RandomAccessFile {
|
2012-06-08 08:11:14 +00:00
|
|
|
private:
|
|
|
|
hdfsFS fileSys_;
|
|
|
|
std::string filename_;
|
|
|
|
hdfsFile hfile_;
|
|
|
|
|
|
|
|
public:
|
|
|
|
HdfsReadableFile(hdfsFS fileSys, const std::string& fname)
|
2013-03-01 02:04:58 +00:00
|
|
|
: fileSys_(fileSys), filename_(fname), hfile_(nullptr) {
|
2017-03-16 02:22:52 +00:00
|
|
|
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile opening file %s\n",
|
|
|
|
filename_.c_str());
|
2012-06-08 08:11:14 +00:00
|
|
|
hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_RDONLY, 0, 0, 0);
|
2017-03-16 02:22:52 +00:00
|
|
|
ROCKS_LOG_DEBUG(mylog,
|
|
|
|
"[hdfs] HdfsReadableFile opened file %s hfile_=0x%p\n",
|
|
|
|
filename_.c_str(), hfile_);
|
2012-06-08 08:11:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
virtual ~HdfsReadableFile() {
|
2017-03-16 02:22:52 +00:00
|
|
|
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile closing file %s\n",
|
|
|
|
filename_.c_str());
|
2012-06-08 08:11:14 +00:00
|
|
|
hdfsCloseFile(fileSys_, hfile_);
|
2017-03-16 02:22:52 +00:00
|
|
|
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile closed file %s\n",
|
|
|
|
filename_.c_str());
|
2013-03-01 02:04:58 +00:00
|
|
|
hfile_ = nullptr;
|
2012-06-08 08:11:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bool isValid() {
|
2013-03-01 02:04:58 +00:00
|
|
|
return hfile_ != nullptr;
|
2012-06-08 08:11:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// sequential access, read data at current offset in file
|
|
|
|
virtual Status Read(size_t n, Slice* result, char* scratch) {
|
|
|
|
Status s;
|
2017-03-16 02:22:52 +00:00
|
|
|
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile reading %s %ld\n",
|
|
|
|
filename_.c_str(), n);
|
2014-05-14 19:14:18 +00:00
|
|
|
|
|
|
|
char* buffer = scratch;
|
|
|
|
size_t total_bytes_read = 0;
|
|
|
|
tSize bytes_read = 0;
|
|
|
|
tSize remaining_bytes = (tSize)n;
|
|
|
|
|
|
|
|
// Read a total of n bytes repeatedly until we hit error or eof
|
|
|
|
while (remaining_bytes > 0) {
|
|
|
|
bytes_read = hdfsRead(fileSys_, hfile_, buffer, remaining_bytes);
|
|
|
|
if (bytes_read <= 0) {
|
|
|
|
break;
|
2012-06-08 08:11:14 +00:00
|
|
|
}
|
2014-05-14 19:14:18 +00:00
|
|
|
assert(bytes_read <= remaining_bytes);
|
|
|
|
|
|
|
|
total_bytes_read += bytes_read;
|
|
|
|
remaining_bytes -= bytes_read;
|
|
|
|
buffer += bytes_read;
|
|
|
|
}
|
|
|
|
assert(total_bytes_read <= n);
|
|
|
|
|
2017-03-16 02:22:52 +00:00
|
|
|
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile read %s\n",
|
|
|
|
filename_.c_str());
|
2014-05-14 19:14:18 +00:00
|
|
|
|
|
|
|
if (bytes_read < 0) {
|
|
|
|
s = IOError(filename_, errno);
|
|
|
|
} else {
|
|
|
|
*result = Slice(scratch, total_bytes_read);
|
2012-06-08 08:11:14 +00:00
|
|
|
}
|
2014-05-14 19:14:18 +00:00
|
|
|
|
2012-06-08 08:11:14 +00:00
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
// random access, read data from specified offset in file
|
|
|
|
virtual Status Read(uint64_t offset, size_t n, Slice* result,
|
|
|
|
char* scratch) const {
|
|
|
|
Status s;
|
2017-03-16 02:22:52 +00:00
|
|
|
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile preading %s\n",
|
|
|
|
filename_.c_str());
|
2020-06-12 23:19:56 +00:00
|
|
|
tSize bytes_read =
|
|
|
|
hdfsPread(fileSys_, hfile_, offset, static_cast<void*>(scratch),
|
|
|
|
static_cast<tSize>(n));
|
2017-03-16 02:22:52 +00:00
|
|
|
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile pread %s\n",
|
|
|
|
filename_.c_str());
|
2012-06-08 08:11:14 +00:00
|
|
|
*result = Slice(scratch, (bytes_read < 0) ? 0 : bytes_read);
|
|
|
|
if (bytes_read < 0) {
|
|
|
|
// An error: return a non-ok status
|
|
|
|
s = IOError(filename_, errno);
|
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual Status Skip(uint64_t n) {
|
2017-03-16 02:22:52 +00:00
|
|
|
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile skip %s\n",
|
|
|
|
filename_.c_str());
|
2012-06-08 08:11:14 +00:00
|
|
|
// get current offset from file
|
|
|
|
tOffset current = hdfsTell(fileSys_, hfile_);
|
|
|
|
if (current < 0) {
|
|
|
|
return IOError(filename_, errno);
|
|
|
|
}
|
|
|
|
// seek to new offset in file
|
|
|
|
tOffset newoffset = current + n;
|
|
|
|
int val = hdfsSeek(fileSys_, hfile_, newoffset);
|
|
|
|
if (val < 0) {
|
|
|
|
return IOError(filename_, errno);
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
|
|
|
|
// returns true if we are at the end of file, false otherwise
|
|
|
|
bool feof() {
|
2017-03-16 02:22:52 +00:00
|
|
|
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile feof %s\n",
|
|
|
|
filename_.c_str());
|
2012-06-08 08:11:14 +00:00
|
|
|
if (hdfsTell(fileSys_, hfile_) == fileSize()) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// the current size of the file
|
|
|
|
tOffset fileSize() {
|
2017-03-16 02:22:52 +00:00
|
|
|
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile fileSize %s\n",
|
|
|
|
filename_.c_str());
|
2012-06-08 08:11:14 +00:00
|
|
|
hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, filename_.c_str());
|
|
|
|
tOffset size = 0L;
|
2013-03-01 02:04:58 +00:00
|
|
|
if (pFileInfo != nullptr) {
|
2012-06-08 08:11:14 +00:00
|
|
|
size = pFileInfo->mSize;
|
|
|
|
hdfsFreeFileInfo(pFileInfo, 1);
|
|
|
|
} else {
|
2014-05-14 19:14:18 +00:00
|
|
|
throw HdfsFatalException("fileSize on unknown file " + filename_);
|
2012-06-08 08:11:14 +00:00
|
|
|
}
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Appends to an existing file in HDFS.
// Also serves as the backing sink for HdfsLogger below (via the raw
// Append(const char*, size_t) overload).
class HdfsWritableFile: public WritableFile {
 private:
  hdfsFS fileSys_;        // borrowed connection handle
  std::string filename_;  // full path, kept for logging and error text
  hdfsFile hfile_;        // reset to nullptr after Close()

 public:
  HdfsWritableFile(hdfsFS fileSys, const std::string& fname,
                   const EnvOptions& options)
      : WritableFile(options),
        fileSys_(fileSys),
        filename_(fname),
        hfile_(nullptr) {
    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile opening %s\n",
                    filename_.c_str());
    hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_WRONLY, 0, 0, 0);
    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile opened %s\n",
                    filename_.c_str());
    assert(hfile_ != nullptr);
  }
  virtual ~HdfsWritableFile() {
    // Guard avoids double-close: Close() nulls hfile_ on success.
    if (hfile_ != nullptr) {
      ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closing %s\n",
                      filename_.c_str());
      hdfsCloseFile(fileSys_, hfile_);
      ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closed %s\n",
                      filename_.c_str());
      hfile_ = nullptr;
    }
  }

  using WritableFile::Append;

  // If the file was successfully created, then this returns true.
  // Otherwise returns false.
  bool isValid() {
    return hfile_ != nullptr;
  }

  // The name of the file, mostly needed for debug logging.
  const std::string& getName() {
    return filename_;
  }

  virtual Status Append(const Slice& data) {
    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Append %s\n",
                    filename_.c_str());
    const char* src = data.data();
    size_t left = data.size();
    // NOTE(review): a negative (error) return from hdfsWrite wraps to a
    // huge size_t here, so `ret != left` still detects the failure —
    // presumably intentional, but worth confirming against libhdfs docs.
    size_t ret = hdfsWrite(fileSys_, hfile_, src, static_cast<tSize>(left));
    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Appended %s\n",
                    filename_.c_str());
    // Any short or failed write is reported as an I/O error.
    if (ret != left) {
      return IOError(filename_, errno);
    }
    return Status::OK();
  }

  // No user-space buffering is done in this class, so Flush is a no-op;
  // durability is provided by Sync() below.
  virtual Status Flush() {
    return Status::OK();
  }

  virtual Status Sync() {
    Status s;
    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Sync %s\n",
                    filename_.c_str());
    // Flush client-side buffers first, then ask HDFS to sync replicas.
    if (hdfsFlush(fileSys_, hfile_) == -1) {
      return IOError(filename_, errno);
    }
    if (hdfsHSync(fileSys_, hfile_) == -1) {
      return IOError(filename_, errno);
    }
    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Synced %s\n",
                    filename_.c_str());
    return Status::OK();
  }

  // This is used by HdfsLogger to write data to the debug log file
  virtual Status Append(const char* src, size_t size) {
    if (hdfsWrite(fileSys_, hfile_, src, static_cast<tSize>(size)) !=
        static_cast<tSize>(size)) {
      return IOError(filename_, errno);
    }
    return Status::OK();
  }

  virtual Status Close() {
    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closing %s\n",
                    filename_.c_str());
    if (hdfsCloseFile(fileSys_, hfile_) != 0) {
      return IOError(filename_, errno);
    }
    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closed %s\n",
                    filename_.c_str());
    // Mark closed so the destructor does not close the handle again.
    hfile_ = nullptr;
    return Status::OK();
  }
};
|
|
|
|
|
|
|
|
// The object that implements the debug logs to reside in HDFS.
// Wraps an HdfsWritableFile and formats each log line with a timestamp
// and thread id before appending it.
class HdfsLogger : public Logger {
 private:
  HdfsWritableFile* file_;  // destination log file (not owned here)
  uint64_t (*gettid_)();  // Return the thread id for the current thread

  // Shared close path used by both CloseImpl() and the destructor.
  // Clears the global `mylog` if this logger is the one registered there.
  Status HdfsCloseHelper() {
    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsLogger closed %s\n",
                    file_->getName().c_str());
    if (mylog != nullptr && mylog == this) {
      mylog = nullptr;
    }
    return Status::OK();
  }

 protected:
  virtual Status CloseImpl() override { return HdfsCloseHelper(); }

 public:
  HdfsLogger(HdfsWritableFile* f, uint64_t (*gettid)())
      : file_(f), gettid_(gettid) {
    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsLogger opened %s\n",
                    file_->getName().c_str());
  }

  ~HdfsLogger() override {
    // `closed_` is inherited from Logger; avoid a second close if the
    // caller already invoked Close().
    if (!closed_) {
      closed_ = true;
      HdfsCloseHelper();
    }
  }

  using Logger::Logv;

  // Format one log record (timestamp, thread id, user message) and append
  // it to the HDFS file, flushing afterwards.
  void Logv(const char* format, va_list ap) override {
    const uint64_t thread_id = (*gettid_)();

    // We try twice: the first time with a fixed-size stack allocated buffer,
    // and the second time with a much larger dynamically allocated buffer.
    char buffer[500];
    for (int iter = 0; iter < 2; iter++) {
      char* base;
      int bufsize;
      if (iter == 0) {
        bufsize = sizeof(buffer);
        base = buffer;
      } else {
        bufsize = 30000;
        base = new char[bufsize];
      }
      char* p = base;
      char* limit = base + bufsize;

      // Timestamp prefix: yyyy/mm/dd-hh:mm:ss.usec plus hex thread id.
      struct timeval now_tv;
      gettimeofday(&now_tv, nullptr);
      const time_t seconds = now_tv.tv_sec;
      struct tm t;
      localtime_r(&seconds, &t);
      p += snprintf(p, limit - p,
                    "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
                    t.tm_year + 1900,
                    t.tm_mon + 1,
                    t.tm_mday,
                    t.tm_hour,
                    t.tm_min,
                    t.tm_sec,
                    static_cast<int>(now_tv.tv_usec),
                    static_cast<long long unsigned int>(thread_id));

      // Print the message
      if (p < limit) {
        va_list backup_ap;
        // va_copy so the original `ap` stays usable for the retry pass.
        va_copy(backup_ap, ap);
        p += vsnprintf(p, limit - p, format, backup_ap);
        va_end(backup_ap);
      }

      // Truncate to available space if necessary
      if (p >= limit) {
        if (iter == 0) {
          continue;       // Try again with larger buffer
        } else {
          p = limit - 1;  // second pass: give up and truncate
        }
      }

      // Add newline if necessary
      if (p == base || p[-1] != '\n') {
        *p++ = '\n';
      }

      assert(p <= limit);
      file_->Append(base, p-base);
      file_->Flush();
      if (base != buffer) {
        delete[] base;
      }
      break;
    }
  }
};
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
// Finally, the hdfs environment

// URL scheme prefix an HDFS path is expected to carry (e.g.
// "hdfs://host:port/dir") and the path separator used when splitting.
const std::string HdfsEnv::kProto = "hdfs://";
const std::string HdfsEnv::pathsep = "/";
|
|
|
|
|
2012-06-08 08:11:14 +00:00
|
|
|
// open a file for sequential reading
|
|
|
|
Status HdfsEnv::NewSequentialFile(const std::string& fname,
|
2018-11-09 19:17:34 +00:00
|
|
|
std::unique_ptr<SequentialFile>* result,
|
2019-03-29 02:16:58 +00:00
|
|
|
const EnvOptions& /*options*/) {
|
2014-05-14 19:14:18 +00:00
|
|
|
result->reset();
|
2012-06-08 08:11:14 +00:00
|
|
|
HdfsReadableFile* f = new HdfsReadableFile(fileSys_, fname);
|
2014-05-14 19:14:18 +00:00
|
|
|
if (f == nullptr || !f->isValid()) {
|
|
|
|
delete f;
|
2013-03-01 02:04:58 +00:00
|
|
|
*result = nullptr;
|
2012-06-08 08:11:14 +00:00
|
|
|
return IOError(fname, errno);
|
|
|
|
}
|
2014-05-14 19:14:18 +00:00
|
|
|
result->reset(dynamic_cast<SequentialFile*>(f));
|
2012-06-08 08:11:14 +00:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
// open a file for random reading
|
|
|
|
Status HdfsEnv::NewRandomAccessFile(const std::string& fname,
|
2018-11-09 19:17:34 +00:00
|
|
|
std::unique_ptr<RandomAccessFile>* result,
|
2019-03-29 02:16:58 +00:00
|
|
|
const EnvOptions& /*options*/) {
|
2014-05-14 19:14:18 +00:00
|
|
|
result->reset();
|
2012-06-08 08:11:14 +00:00
|
|
|
HdfsReadableFile* f = new HdfsReadableFile(fileSys_, fname);
|
2014-05-14 19:14:18 +00:00
|
|
|
if (f == nullptr || !f->isValid()) {
|
|
|
|
delete f;
|
2013-03-01 02:04:58 +00:00
|
|
|
*result = nullptr;
|
2012-06-08 08:11:14 +00:00
|
|
|
return IOError(fname, errno);
|
|
|
|
}
|
2014-05-14 19:14:18 +00:00
|
|
|
result->reset(dynamic_cast<RandomAccessFile*>(f));
|
2012-06-08 08:11:14 +00:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
// create a new file for writing
|
|
|
|
Status HdfsEnv::NewWritableFile(const std::string& fname,
|
2018-11-09 19:17:34 +00:00
|
|
|
std::unique_ptr<WritableFile>* result,
|
2019-06-18 21:52:44 +00:00
|
|
|
const EnvOptions& options) {
|
2014-05-14 19:14:18 +00:00
|
|
|
result->reset();
|
2012-06-08 08:11:14 +00:00
|
|
|
Status s;
|
Optionally wait on bytes_per_sync to smooth I/O (#5183)
Summary:
The existing implementation does not guarantee bytes reach disk every `bytes_per_sync` when writing SST files, or every `wal_bytes_per_sync` when writing WALs. This can cause confusing behavior for users who enable this feature to avoid large syncs during flush and compaction, but then end up hitting them anyways.
My understanding of the existing behavior is we used `sync_file_range` with `SYNC_FILE_RANGE_WRITE` to submit ranges for async writeback, such that we could continue processing the next range of bytes while that I/O is happening. I believe we can preserve that benefit while also limiting how far the processing can get ahead of the I/O, which prevents huge syncs from happening when the file finishes.
Consider this `sync_file_range` usage: `sync_file_range(fd_, 0, static_cast<off_t>(offset + nbytes), SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE)`. Expanding the range to start at 0 and adding the `SYNC_FILE_RANGE_WAIT_BEFORE` flag causes any pending writeback (like from a previous call to `sync_file_range`) to finish before it proceeds to submit the latest `nbytes` for writeback. The latest `nbytes` are still written back asynchronously, unless processing exceeds I/O speed, in which case the following `sync_file_range` will need to wait on it.
There is a second change in this PR to use `fdatasync` when `sync_file_range` is unavailable (determined statically) or has some known problem with the underlying filesystem (determined dynamically).
The above two changes only apply when the user enables a new option, `strict_bytes_per_sync`.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5183
Differential Revision: D14953553
Pulled By: siying
fbshipit-source-id: 445c3862e019fb7b470f9c7f314fc231b62706e9
2019-04-22 18:48:45 +00:00
|
|
|
HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname, options);
|
2013-03-01 02:04:58 +00:00
|
|
|
if (f == nullptr || !f->isValid()) {
|
2014-05-14 19:14:18 +00:00
|
|
|
delete f;
|
2013-03-01 02:04:58 +00:00
|
|
|
*result = nullptr;
|
2012-06-08 08:11:14 +00:00
|
|
|
return IOError(fname, errno);
|
|
|
|
}
|
2014-05-14 19:14:18 +00:00
|
|
|
result->reset(dynamic_cast<WritableFile*>(f));
|
2012-06-08 08:11:14 +00:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2014-05-20 21:22:12 +00:00
|
|
|
class HdfsDirectory : public Directory {
|
|
|
|
public:
|
|
|
|
explicit HdfsDirectory(int fd) : fd_(fd) {}
|
2014-05-21 11:50:37 +00:00
|
|
|
~HdfsDirectory() {}
|
2014-05-20 21:22:12 +00:00
|
|
|
|
2019-03-29 02:16:58 +00:00
|
|
|
Status Fsync() override { return Status::OK(); }
|
|
|
|
|
|
|
|
int GetFd() const { return fd_; }
|
2014-05-20 21:22:12 +00:00
|
|
|
|
|
|
|
private:
|
|
|
|
int fd_;
|
|
|
|
};
|
|
|
|
|
2014-05-14 19:14:18 +00:00
|
|
|
// Open a directory handle; the directory must already exist. Any result
// from hdfsExists other than HDFS_EXISTS is treated as fatal.
Status HdfsEnv::NewDirectory(const std::string& name,
                             std::unique_ptr<Directory>* result) {
  const int exists_rc = hdfsExists(fileSys_, name.c_str());
  if (exists_rc == HDFS_EXISTS) {
    result->reset(new HdfsDirectory(0));
    return Status::OK();
  }
  // fail if the directory doesn't exist
  ROCKS_LOG_FATAL(mylog, "NewDirectory hdfsExists call failed");
  throw HdfsFatalException("hdfsExists call failed with error " +
                           ToString(exists_rc) + " on path " + name +
                           ".\n");
}
|
|
|
|
|
2015-07-21 00:20:40 +00:00
|
|
|
// Probe for fname: OK when present, NotFound when absent, and IOError for
// any other return code from hdfsExists.
Status HdfsEnv::FileExists(const std::string& fname) {
  const int exists_rc = hdfsExists(fileSys_, fname.c_str());
  if (exists_rc == HDFS_EXISTS) {
    return Status::OK();
  }
  if (exists_rc == HDFS_DOESNT_EXIST) {
    return Status::NotFound();
  }
  // anything else should be an error
  ROCKS_LOG_FATAL(mylog, "FileExists hdfsExists call failed");
  return Status::IOError("hdfsExists call failed with error " +
                         ToString(exists_rc) + " on path " + fname + ".\n");
}
|
|
|
|
|
|
|
|
// List the entries of `path`, appending each entry's basename to *result.
// Returns NotFound if the directory does not exist; throws
// HdfsFatalException on unexpected libhdfs failures.
Status HdfsEnv::GetChildren(const std::string& path,
                            std::vector<std::string>* result) {
  int value = hdfsExists(fileSys_, path.c_str());
  switch (value) {
    case HDFS_EXISTS: {  // directory exists
      int numEntries = 0;
      hdfsFileInfo* pHdfsFileInfo = 0;
      pHdfsFileInfo = hdfsListDirectory(fileSys_, path.c_str(), &numEntries);
      if (numEntries >= 0) {
        for(int i = 0; i < numEntries; i++) {
          // mName is a full path; keep only the component after the
          // last '/' so callers get bare child names.
          std::string pathname(pHdfsFileInfo[i].mName);
          size_t pos = pathname.rfind("/");
          if (std::string::npos != pos) {
            result->push_back(pathname.substr(pos + 1));
          }
        }
        // pHdfsFileInfo may be null for an empty directory; only free a
        // real array.
        if (pHdfsFileInfo != nullptr) {
          hdfsFreeFileInfo(pHdfsFileInfo, numEntries);
        }
      } else {
        // numEntries < 0 indicates error
        ROCKS_LOG_FATAL(mylog, "hdfsListDirectory call failed with error ");
        throw HdfsFatalException(
            "hdfsListDirectory call failed negative error.\n");
      }
      break;
    }
    case HDFS_DOESNT_EXIST:  // directory does not exist, exit
      return Status::NotFound();
    default:  // anything else should be an error
      ROCKS_LOG_FATAL(mylog, "GetChildren hdfsExists call failed");
      throw HdfsFatalException("hdfsExists call failed with error " +
                               ToString(value) + ".\n");
  }
  return Status::OK();
}
|
|
|
|
|
|
|
|
// Delete the named file. The third argument to hdfsDelete requests
// recursive deletion, so this also removes a directory tree (DeleteDir
// below relies on that).
Status HdfsEnv::DeleteFile(const std::string& fname) {
  if (hdfsDelete(fileSys_, fname.c_str(), 1) == 0) {
    return Status::OK();
  }
  return IOError(fname, errno);
}  // fixed: dropped stray ';' after the function body (namespace-scope
   // extra semicolon triggers -Wextra-semi/pedantic warnings)
|
|
|
|
|
|
|
|
// Create the named directory (including missing parents, per
// hdfsCreateDirectory semantics). Returns IOError with errno on failure.
Status HdfsEnv::CreateDir(const std::string& name) {
  if (hdfsCreateDirectory(fileSys_, name.c_str()) == 0) {
    return Status::OK();
  }
  return IOError(name, errno);
}  // fixed: dropped stray ';' after the function body
|
|
|
|
|
2012-11-26 21:56:45 +00:00
|
|
|
// Create the directory only when it does not already exist.
Status HdfsEnv::CreateDirIfMissing(const std::string& name) {
  const int value = hdfsExists(fileSys_, name.c_str());
  // Not atomic. state might change b/w hdfsExists and CreateDir.
  switch (value) {
    case HDFS_EXISTS:
      return Status::OK();
    case HDFS_DOESNT_EXIST:
      return CreateDir(name);
    default:  // anything else should be an error
      ROCKS_LOG_FATAL(mylog, "CreateDirIfMissing hdfsExists call failed");
      throw HdfsFatalException("hdfsExists call failed with error " +
                               ToString(value) + ".\n");
  }
}  // fixed: dropped stray ';' after the function body
|
|
|
|
|
2012-06-08 08:11:14 +00:00
|
|
|
// Remove a directory. Delegates to DeleteFile, whose recursive
// hdfsDelete call handles directory trees as well as plain files.
Status HdfsEnv::DeleteDir(const std::string& name) {
  return DeleteFile(name);
}  // fixed: dropped stray ';' after the function body
|
|
|
|
|
|
|
|
// Fetch the file's size in bytes from HDFS path metadata. *size is left
// at 0 when the lookup fails.
Status HdfsEnv::GetFileSize(const std::string& fname, uint64_t* size) {
  *size = 0L;
  hdfsFileInfo* info = hdfsGetPathInfo(fileSys_, fname.c_str());
  if (info == nullptr) {
    return IOError(fname, errno);
  }
  *size = info->mSize;
  hdfsFreeFileInfo(info, 1);
  return Status::OK();
}
|
|
|
|
|
2012-11-26 21:56:45 +00:00
|
|
|
// Fetch the file's last-modification time from HDFS path metadata.
Status HdfsEnv::GetFileModificationTime(const std::string& fname,
                                        uint64_t* time) {
  hdfsFileInfo* info = hdfsGetPathInfo(fileSys_, fname.c_str());
  if (info == nullptr) {
    return IOError(fname, errno);
  }
  *time = static_cast<uint64_t>(info->mLastMod);
  hdfsFreeFileInfo(info, 1);
  return Status::OK();
}
|
|
|
|
|
2012-06-08 08:11:14 +00:00
|
|
|
// The rename is not atomic. HDFS does not allow a renaming if the
|
2015-04-25 09:14:27 +00:00
|
|
|
// target already exists. So, we delete the target before attempting the
|
2012-06-08 08:11:14 +00:00
|
|
|
// rename.
|
|
|
|
Status HdfsEnv::RenameFile(const std::string& src, const std::string& target) {
|
2014-05-20 21:22:12 +00:00
|
|
|
hdfsDelete(fileSys_, target.c_str(), 1);
|
2012-06-08 08:11:14 +00:00
|
|
|
if (hdfsRename(fileSys_, src.c_str(), target.c_str()) == 0) {
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
return IOError(src, errno);
|
|
|
|
}
|
|
|
|
|
2019-03-29 02:16:58 +00:00
|
|
|
// File locking is effectively unsupported:
// there isn's a very good way to atomically check and create
// a file via libhdfs
// so a null lock is handed back and the call always succeeds.
Status HdfsEnv::LockFile(const std::string& /*fname*/, FileLock** lock) {
  *lock = nullptr;
  return Status::OK();
}
|
|
|
|
|
2019-03-29 02:16:58 +00:00
|
|
|
// No-op: LockFile never hands out a real lock, so there is nothing to free.
Status HdfsEnv::UnlockFile(FileLock* /*lock*/) { return Status::OK(); }
|
2012-06-08 08:11:14 +00:00
|
|
|
|
2013-01-20 10:07:13 +00:00
|
|
|
Status HdfsEnv::NewLogger(const std::string& fname,
|
2018-11-09 19:17:34 +00:00
|
|
|
std::shared_ptr<Logger>* result) {
|
2019-06-18 21:52:44 +00:00
|
|
|
// EnvOptions is used exclusively for its `strict_bytes_per_sync` value. That
|
|
|
|
// option is only intended for WAL/flush/compaction writes, so turn it off in
|
|
|
|
// the logger.
|
|
|
|
EnvOptions options;
|
|
|
|
options.strict_bytes_per_sync = false;
|
Optionally wait on bytes_per_sync to smooth I/O (#5183)
Summary:
The existing implementation does not guarantee bytes reach disk every `bytes_per_sync` when writing SST files, or every `wal_bytes_per_sync` when writing WALs. This can cause confusing behavior for users who enable this feature to avoid large syncs during flush and compaction, but then end up hitting them anyways.
My understanding of the existing behavior is we used `sync_file_range` with `SYNC_FILE_RANGE_WRITE` to submit ranges for async writeback, such that we could continue processing the next range of bytes while that I/O is happening. I believe we can preserve that benefit while also limiting how far the processing can get ahead of the I/O, which prevents huge syncs from happening when the file finishes.
Consider this `sync_file_range` usage: `sync_file_range(fd_, 0, static_cast<off_t>(offset + nbytes), SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE)`. Expanding the range to start at 0 and adding the `SYNC_FILE_RANGE_WAIT_BEFORE` flag causes any pending writeback (like from a previous call to `sync_file_range`) to finish before it proceeds to submit the latest `nbytes` for writeback. The latest `nbytes` are still written back asynchronously, unless processing exceeds I/O speed, in which case the following `sync_file_range` will need to wait on it.
There is a second change in this PR to use `fdatasync` when `sync_file_range` is unavailable (determined statically) or has some known problem with the underlying filesystem (determined dynamically).
The above two changes only apply when the user enables a new option, `strict_bytes_per_sync`.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5183
Differential Revision: D14953553
Pulled By: siying
fbshipit-source-id: 445c3862e019fb7b470f9c7f314fc231b62706e9
2019-04-22 18:48:45 +00:00
|
|
|
HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname, options);
|
2013-03-01 02:04:58 +00:00
|
|
|
if (f == nullptr || !f->isValid()) {
|
2014-05-14 19:14:18 +00:00
|
|
|
delete f;
|
2013-03-01 02:04:58 +00:00
|
|
|
*result = nullptr;
|
2012-06-08 08:11:14 +00:00
|
|
|
return IOError(fname, errno);
|
|
|
|
}
|
|
|
|
HdfsLogger* h = new HdfsLogger(f, &HdfsEnv::gettid);
|
2014-05-14 19:14:18 +00:00
|
|
|
result->reset(h);
|
2013-03-01 02:04:58 +00:00
|
|
|
if (mylog == nullptr) {
|
2012-06-08 08:11:14 +00:00
|
|
|
// mylog = h; // uncomment this for detailed logging
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2020-04-17 21:36:51 +00:00
|
|
|
// Determine whether `path` names a directory; *is_dir (when non-null) is
// set from the HDFS object kind. Fails with IOError if the path cannot
// be stat'ed.
Status HdfsEnv::IsDirectory(const std::string& path, bool* is_dir) {
  hdfsFileInfo* info = hdfsGetPathInfo(fileSys_, path.c_str());
  if (info == nullptr) {
    return IOError(path, errno);
  }
  if (is_dir != nullptr) {
    *is_dir = (info->mKind == kObjectKindDirectory);
  }
  hdfsFreeFileInfo(info, 1);
  return Status::OK();
}
|
|
|
|
|
2015-12-25 04:32:29 +00:00
|
|
|
// The factory method for creating an HDFS Env
|
2015-12-24 01:26:50 +00:00
|
|
|
Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname) {
|
2015-12-25 04:38:35 +00:00
|
|
|
*hdfs_env = new HdfsEnv(fsname);
|
|
|
|
return Status::OK();
|
2015-12-24 01:26:50 +00:00
|
|
|
}
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2012-06-08 08:11:14 +00:00
|
|
|
|
2013-10-05 05:32:05 +00:00
|
|
|
#endif // ROCKSDB_HDFS_FILE_C
|
2012-06-08 08:11:14 +00:00
|
|
|
|
|
|
|
#else // USE_HDFS
|
|
|
|
|
|
|
|
// dummy placeholders used when HDFS is not available
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2018-03-05 21:08:17 +00:00
|
|
|
// Stub used when the build does not define USE_HDFS: always refuses.
Status HdfsEnv::NewSequentialFile(const std::string& /*fname*/,
                                  std::unique_ptr<SequentialFile>* /*result*/,
                                  const EnvOptions& /*options*/) {
  return Status::NotSupported("Not compiled with hdfs support");
}
|
2015-12-24 01:26:50 +00:00
|
|
|
|
2018-03-05 21:08:17 +00:00
|
|
|
// Stub factory used when the build does not define USE_HDFS: always refuses.
Status NewHdfsEnv(Env** /*hdfs_env*/, const std::string& /*fsname*/) {
  return Status::NotSupported("Not compiled with hdfs support");
}
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2012-06-08 08:11:14 +00:00
|
|
|
|
|
|
|
#endif
|