2013-10-16 21:59:46 +00:00
|
|
|
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under the BSD-style license found in the
|
|
|
|
// LICENSE file in the root directory of this source tree. An additional grant
|
|
|
|
// of patent rights can be found in the PATENTS file in the same directory.
|
|
|
|
//
|
2011-03-18 22:37:00 +00:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
|
|
|
#include <deque>
|
2012-08-18 07:26:50 +00:00
|
|
|
#include <set>
|
2011-03-18 22:37:00 +00:00
|
|
|
#include <dirent.h>
|
|
|
|
#include <errno.h>
|
|
|
|
#include <fcntl.h>
|
|
|
|
#include <pthread.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
2013-01-31 23:20:24 +00:00
|
|
|
#include <sys/ioctl.h>
|
2011-03-18 22:37:00 +00:00
|
|
|
#include <sys/mman.h>
|
|
|
|
#include <sys/stat.h>
|
2013-11-17 07:44:39 +00:00
|
|
|
#ifdef OS_LINUX
|
2013-03-13 20:50:26 +00:00
|
|
|
#include <sys/statfs.h>
|
2014-08-14 03:49:58 +00:00
|
|
|
#include <sys/syscall.h>
|
2013-11-17 07:44:39 +00:00
|
|
|
#endif
|
2011-03-18 22:37:00 +00:00
|
|
|
#include <sys/time.h>
|
|
|
|
#include <sys/types.h>
|
|
|
|
#include <time.h>
|
|
|
|
#include <unistd.h>
|
2013-01-31 23:20:24 +00:00
|
|
|
#if defined(OS_LINUX)
|
|
|
|
#include <linux/fs.h>
|
2011-03-18 22:37:00 +00:00
|
|
|
#endif
|
2014-07-08 19:31:49 +00:00
|
|
|
#include <signal.h>
|
|
|
|
#include <algorithm>
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/env.h"
|
|
|
|
#include "rocksdb/slice.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
#include "port/port.h"
|
2013-01-31 23:20:24 +00:00
|
|
|
#include "util/coding.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
#include "util/logging.h"
|
2011-07-21 02:40:18 +00:00
|
|
|
#include "util/posix_logger.h"
|
2013-04-05 06:49:43 +00:00
|
|
|
#include "util/random.h"
|
2014-07-03 23:28:03 +00:00
|
|
|
#include "util/iostats_context_imp.h"
|
2015-07-21 00:20:40 +00:00
|
|
|
#include "util/string_util.h"
|
2015-03-17 01:49:14 +00:00
|
|
|
#include "util/sync_point.h"
|
2014-12-22 20:20:17 +00:00
|
|
|
#include "util/thread_status_updater.h"
|
|
|
|
#include "util/thread_status_util.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2015-03-18 18:26:10 +00:00
|
|
|
// Get nano time includes
|
|
|
|
#if defined(OS_LINUX) || defined(OS_FREEBSD)
|
|
|
|
#elif defined(__MACH__)
|
2013-11-17 07:44:39 +00:00
|
|
|
#include <mach/clock.h>
|
|
|
|
#include <mach/mach.h>
|
2015-03-18 18:26:10 +00:00
|
|
|
#else
|
|
|
|
#include <chrono>
|
2013-11-17 07:44:39 +00:00
|
|
|
#endif
|
|
|
|
|
2013-03-13 20:50:26 +00:00
|
|
|
#if !defined(TMPFS_MAGIC)
|
|
|
|
#define TMPFS_MAGIC 0x01021994
|
|
|
|
#endif
|
|
|
|
#if !defined(XFS_SUPER_MAGIC)
|
|
|
|
#define XFS_SUPER_MAGIC 0x58465342
|
|
|
|
#endif
|
|
|
|
#if !defined(EXT4_SUPER_MAGIC)
|
|
|
|
#define EXT4_SUPER_MAGIC 0xEF53
|
|
|
|
#endif
|
|
|
|
|
2013-11-17 07:44:39 +00:00
|
|
|
// For non linux platform, the following macros are used only as place
|
|
|
|
// holder.
|
2015-04-24 02:17:57 +00:00
|
|
|
#if !(defined OS_LINUX) && !(defined CYGWIN)
|
2013-11-17 07:44:39 +00:00
|
|
|
#define POSIX_FADV_NORMAL 0 /* [MC1] no further special treatment */
|
|
|
|
#define POSIX_FADV_RANDOM 1 /* [MC1] expect random page refs */
|
|
|
|
#define POSIX_FADV_SEQUENTIAL 2 /* [MC1] expect sequential page refs */
|
|
|
|
#define POSIX_FADV_WILLNEED 3 /* [MC1] will need these pages */
|
|
|
|
#define POSIX_FADV_DONTNEED 4 /* [MC1] dont need these pages */
|
|
|
|
#endif
|
|
|
|
|
2013-04-05 06:49:43 +00:00
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
namespace rocksdb {
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
2013-11-17 07:44:39 +00:00
|
|
|
// A wrapper for posix_fadvise(2). On platforms without fadvise support it
// is a no-op that returns 0 (success). Returns posix_fadvise's result on
// Linux: 0 on success, a positive error number on failure.
int Fadvise(int fd, off_t offset, size_t len, int advice) {
#ifdef OS_LINUX
  return posix_fadvise(fd, offset, len, advice);
#else
  return 0;  // simply do nothing.
#endif
}
|
|
|
|
|
2014-12-22 20:20:17 +00:00
|
|
|
// Factory for ThreadStatusUpdater instances. Ownership of the returned
// object is transferred to the caller.
ThreadStatusUpdater* CreateThreadStatusUpdater() {
  auto* updater = new ThreadStatusUpdater();
  return updater;
}
|
|
|
|
|
2012-08-18 07:26:50 +00:00
|
|
|
// list of pathnames that are locked
// NOTE(review): presumably maintained by the Env's LockFile/UnlockFile
// implementation (not visible in this chunk) to prevent double-locking the
// same file within one process — confirm against the rest of the file.
static std::set<std::string> lockedFiles;
// Guards all access to lockedFiles.
static port::Mutex mutex_lockedFiles;
|
|
|
|
|
2011-07-15 00:20:57 +00:00
|
|
|
// Builds a Status::IOError from an errno value, combining the caller's
// context string with the human-readable system error message.
static Status IOError(const std::string& context, int err_number) {
  const char* error_message = strerror(err_number);
  return Status::IOError(context, error_message);
}
|
|
|
|
|
2013-09-02 06:23:40 +00:00
|
|
|
#if defined(OS_LINUX)
|
|
|
|
namespace {
|
|
|
|
static size_t GetUniqueIdFromFile(int fd, char* id, size_t max_size) {
|
|
|
|
if (max_size < kMaxVarint64Length*3) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct stat buf;
|
|
|
|
int result = fstat(fd, &buf);
|
|
|
|
if (result == -1) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
long version = 0;
|
|
|
|
result = ioctl(fd, FS_IOC_GETVERSION, &version);
|
|
|
|
if (result == -1) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
uint64_t uversion = (uint64_t)version;
|
|
|
|
|
|
|
|
char* rid = id;
|
|
|
|
rid = EncodeVarint64(rid, buf.st_dev);
|
|
|
|
rid = EncodeVarint64(rid, buf.st_ino);
|
|
|
|
rid = EncodeVarint64(rid, uversion);
|
|
|
|
assert(rid >= id);
|
|
|
|
return static_cast<size_t>(rid-id);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// SequentialFile implementation backed by buffered stdio (FILE*).
class PosixSequentialFile: public SequentialFile {
 private:
  std::string filename_;  // pathname, retained for error messages
  FILE* file_;            // owned; closed in the destructor
  int fd_;                // raw descriptor of file_, used for fadvise calls
  bool use_os_buffer_;    // when false, drop OS page cache after each read

 public:
  PosixSequentialFile(const std::string& fname, FILE* f,
      const EnvOptions& options)
      : filename_(fname), file_(f), fd_(fileno(f)),
        use_os_buffer_(options.use_os_buffer) {
  }
  virtual ~PosixSequentialFile() { fclose(file_); }

  // Reads up to n bytes into scratch and points *result at the bytes read.
  // A short read at end-of-file is not an error. On EOF the stream state is
  // cleared so reading can resume if the file grows later (e.g. a live WAL).
  virtual Status Read(size_t n, Slice* result, char* scratch) override {
    Status s;
    size_t r = 0;
    do {
      // fread_unlocked avoids stdio's per-call locking; this object is not
      // meant to be shared between threads.
      r = fread_unlocked(scratch, 1, n, file_);
    } while (r == 0 && ferror(file_) && errno == EINTR);  // retry on signal
    *result = Slice(scratch, r);
    if (r < n) {
      if (feof(file_)) {
        // We leave status as ok if we hit the end of the file
        // We also clear the error so that the reads can continue
        // if a new data is written to the file
        clearerr(file_);
      } else {
        // A partial read with an error: return a non-ok status
        s = IOError(filename_, errno);
      }
    }
    if (!use_os_buffer_) {
      // we need to fadvise away the entire range of pages because
      // we do not want readahead pages to be cached.
      Fadvise(fd_, 0, 0, POSIX_FADV_DONTNEED); // free OS pages
    }
    return s;
  }

  // Advances the read position by n bytes.
  virtual Status Skip(uint64_t n) override {
    if (fseek(file_, static_cast<long int>(n), SEEK_CUR)) {
      return IOError(filename_, errno);
    }
    return Status::OK();
  }

  // Drops [offset, offset+length) from the OS page cache; no-op off Linux.
  virtual Status InvalidateCache(size_t offset, size_t length) override {
#ifndef OS_LINUX
    return Status::OK();
#else
    // free OS pages
    int ret = Fadvise(fd_, offset, length, POSIX_FADV_DONTNEED);
    if (ret == 0) {
      return Status::OK();
    }
    return IOError(filename_, errno);
#endif
  }
};
|
|
|
|
|
2012-03-15 16:14:00 +00:00
|
|
|
// pread() based random-access
|
2011-03-18 22:37:00 +00:00
|
|
|
class PosixRandomAccessFile: public RandomAccessFile {
 private:
  std::string filename_;  // pathname, retained for error messages
  int fd_;                // owned; closed in the destructor
  bool use_os_buffer_;    // when false, drop OS page cache after each read

 public:
  PosixRandomAccessFile(const std::string& fname, int fd,
                        const EnvOptions& options)
      : filename_(fname), fd_(fd), use_os_buffer_(options.use_os_buffer) {
    // This pread-based reader is only expected when mmap reads are off, or
    // on 32-bit platforms (where mmap reads are not used).
    assert(!options.use_mmap_reads || sizeof(void*) < 8);
  }
  virtual ~PosixRandomAccessFile() { close(fd_); }

  // Reads up to n bytes starting at `offset` via pread(2), retrying on
  // EINTR and continuing across short reads until n bytes are read, EOF
  // (pread returns 0), or an error occurs.
  virtual Status Read(uint64_t offset, size_t n, Slice* result,
                      char* scratch) const override {
    Status s;
    ssize_t r = -1;
    size_t left = n;
    char* ptr = scratch;
    while (left > 0) {
      r = pread(fd_, ptr, left, static_cast<off_t>(offset));

      if (r <= 0) {
        if (errno == EINTR) {
          // Interrupted by a signal; retry the same range.
          continue;
        }
        break;  // EOF (r == 0) or hard error (r < 0)
      }
      ptr += r;
      offset += r;
      left -= r;
    }

    // On error report zero bytes; on EOF report the bytes actually read.
    *result = Slice(scratch, (r < 0) ? 0 : n - left);
    if (r < 0) {
      // An error: return a non-ok status
      s = IOError(filename_, errno);
    }
    if (!use_os_buffer_) {
      // we need to fadvise away the entire range of pages because
      // we do not want readahead pages to be cached.
      Fadvise(fd_, 0, 0, POSIX_FADV_DONTNEED); // free OS pages
    }
    return s;
  }

#ifdef OS_LINUX
  virtual size_t GetUniqueId(char* id, size_t max_size) const override {
    return GetUniqueIdFromFile(fd_, id, max_size);
  }
#endif

  // Forwards the caller's expected access pattern to the kernel readahead
  // logic via fadvise.
  virtual void Hint(AccessPattern pattern) override {
    switch(pattern) {
      case NORMAL:
        Fadvise(fd_, 0, 0, POSIX_FADV_NORMAL);
        break;
      case RANDOM:
        Fadvise(fd_, 0, 0, POSIX_FADV_RANDOM);
        break;
      case SEQUENTIAL:
        Fadvise(fd_, 0, 0, POSIX_FADV_SEQUENTIAL);
        break;
      case WILLNEED:
        Fadvise(fd_, 0, 0, POSIX_FADV_WILLNEED);
        break;
      case DONTNEED:
        Fadvise(fd_, 0, 0, POSIX_FADV_DONTNEED);
        break;
      default:
        assert(false);
        break;
    }
  }

  // Drops [offset, offset+length) from the OS page cache; no-op off Linux.
  virtual Status InvalidateCache(size_t offset, size_t length) override {
#ifndef OS_LINUX
    return Status::OK();
#else
    // free OS pages
    int ret = Fadvise(fd_, offset, length, POSIX_FADV_DONTNEED);
    if (ret == 0) {
      return Status::OK();
    }
    return IOError(filename_, errno);
#endif
  }
};
|
|
|
|
|
2012-03-15 16:14:00 +00:00
|
|
|
// mmap() based random-access
|
|
|
|
class PosixMmapReadableFile: public RandomAccessFile {
 private:
  int fd_;                // kept only for InvalidateCache's fadvise call
  std::string filename_;  // pathname, retained for error messages
  void* mmapped_region_;  // owned mapping; unmapped in the destructor
  size_t length_;         // size of the mapping in bytes

 public:
  // base[0,length-1] contains the mmapped contents of the file.
  PosixMmapReadableFile(const int fd, const std::string& fname,
                        void* base, size_t length,
                        const EnvOptions& options)
      : fd_(fd), filename_(fname), mmapped_region_(base), length_(length) {
    fd_ = fd_ + 0;  // suppress the warning for used variables
    assert(options.use_mmap_reads);
    assert(options.use_os_buffer);
  }
  virtual ~PosixMmapReadableFile() {
    int ret = munmap(mmapped_region_, length_);
    if (ret != 0) {
      // Cannot return a status from a destructor; just report the failure.
      fprintf(stdout, "failed to munmap %p length %" ROCKSDB_PRIszt " \n",
              mmapped_region_, length_);
    }
  }

  // Zero-copy read: *result points directly into the mapped region, so it
  // stays valid only as long as this object is alive. Reads past EOF are
  // clamped; an offset beyond EOF is an error.
  virtual Status Read(uint64_t offset, size_t n, Slice* result,
                      char* scratch) const override {
    Status s;
    if (offset > length_) {
      *result = Slice();
      return IOError(filename_, EINVAL);
    } else if (offset + n > length_) {
      n = length_ - offset;
    }
    *result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n);
    return s;
  }

  // Drops [offset, offset+length) from the OS page cache; no-op off Linux.
  virtual Status InvalidateCache(size_t offset, size_t length) override {
#ifndef OS_LINUX
    return Status::OK();
#else
    // free OS pages
    int ret = Fadvise(fd_, offset, length, POSIX_FADV_DONTNEED);
    if (ret == 0) {
      return Status::OK();
    }
    return IOError(filename_, errno);
#endif
  }
};
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// We preallocate up to an extra megabyte and use memcpy to append new
|
|
|
|
// data to the file. This is safe since we either properly close the
|
|
|
|
// file before reading from it, or for log files, the reading code
|
|
|
|
// knows enough to skip zero suffixes.
|
|
|
|
class PosixMmapFile : public WritableFile {
|
|
|
|
private:
|
|
|
|
std::string filename_;
|
|
|
|
int fd_;
|
|
|
|
size_t page_size_;
|
|
|
|
size_t map_size_; // How much extra memory to map at a time
|
|
|
|
char* base_; // The mapped region
|
|
|
|
char* limit_; // Limit of the mapped region
|
|
|
|
char* dst_; // Where to write next (in range [base_,limit_])
|
|
|
|
char* last_sync_; // Where have we synced up to
|
|
|
|
uint64_t file_offset_; // Offset of base_ in file
|
2014-03-19 22:40:12 +00:00
|
|
|
#ifdef ROCKSDB_FALLOCATE_PRESENT
|
2014-03-18 04:52:14 +00:00
|
|
|
bool fallocate_with_keep_size_;
|
2014-03-19 22:40:12 +00:00
|
|
|
#endif
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
// Roundup x to a multiple of y
|
|
|
|
static size_t Roundup(size_t x, size_t y) {
|
|
|
|
return ((x + y - 1) / y) * y;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t TruncateToPageBoundary(size_t s) {
|
|
|
|
s -= (s & (page_size_ - 1));
|
|
|
|
assert((s % page_size_) == 0);
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2015-02-18 01:54:02 +00:00
|
|
|
Status UnmapCurrentRegion() {
|
2013-10-05 05:32:05 +00:00
|
|
|
TEST_KILL_RANDOM(rocksdb_kill_odds);
|
2013-03-01 02:04:58 +00:00
|
|
|
if (base_ != nullptr) {
|
2015-02-18 01:54:02 +00:00
|
|
|
int munmap_status = munmap(base_, limit_ - base_);
|
|
|
|
if (munmap_status != 0) {
|
|
|
|
return IOError(filename_, munmap_status);
|
2011-07-15 00:20:57 +00:00
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
file_offset_ += limit_ - base_;
|
2013-03-01 02:04:58 +00:00
|
|
|
base_ = nullptr;
|
|
|
|
limit_ = nullptr;
|
|
|
|
last_sync_ = nullptr;
|
|
|
|
dst_ = nullptr;
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
// Increase the amount we map the next time, but capped at 1MB
|
|
|
|
if (map_size_ < (1<<20)) {
|
|
|
|
map_size_ *= 2;
|
|
|
|
}
|
|
|
|
}
|
2015-02-18 01:54:02 +00:00
|
|
|
return Status::OK();
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2013-03-13 20:50:26 +00:00
|
|
|
Status MapNewRegion() {
|
2013-12-11 06:34:19 +00:00
|
|
|
#ifdef ROCKSDB_FALLOCATE_PRESENT
|
2013-03-01 02:04:58 +00:00
|
|
|
assert(base_ == nullptr);
|
2013-03-13 20:50:26 +00:00
|
|
|
|
2013-10-05 05:32:05 +00:00
|
|
|
TEST_KILL_RANDOM(rocksdb_kill_odds);
|
2014-03-18 04:52:14 +00:00
|
|
|
// we can't fallocate with FALLOC_FL_KEEP_SIZE here
|
2015-07-03 00:23:41 +00:00
|
|
|
{
|
|
|
|
IOSTATS_TIMER_GUARD(allocate_nanos);
|
|
|
|
int alloc_status = fallocate(fd_, 0, file_offset_, map_size_);
|
|
|
|
if (alloc_status != 0) {
|
|
|
|
// fallback to posix_fallocate
|
|
|
|
alloc_status = posix_fallocate(fd_, file_offset_, map_size_);
|
|
|
|
}
|
|
|
|
if (alloc_status != 0) {
|
|
|
|
return Status::IOError("Error allocating space to file : " + filename_ +
|
|
|
|
"Error : " + strerror(alloc_status));
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2013-03-13 20:50:26 +00:00
|
|
|
|
2013-10-05 05:32:05 +00:00
|
|
|
TEST_KILL_RANDOM(rocksdb_kill_odds);
|
2013-03-01 02:04:58 +00:00
|
|
|
void* ptr = mmap(nullptr, map_size_, PROT_READ | PROT_WRITE, MAP_SHARED,
|
2011-03-18 22:37:00 +00:00
|
|
|
fd_, file_offset_);
|
|
|
|
if (ptr == MAP_FAILED) {
|
2013-03-13 20:50:26 +00:00
|
|
|
return Status::IOError("MMap failed on " + filename_);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2013-10-05 05:32:05 +00:00
|
|
|
TEST_KILL_RANDOM(rocksdb_kill_odds);
|
2013-04-05 06:49:43 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
base_ = reinterpret_cast<char*>(ptr);
|
|
|
|
limit_ = base_ + map_size_;
|
|
|
|
dst_ = base_;
|
|
|
|
last_sync_ = base_;
|
2013-03-13 20:50:26 +00:00
|
|
|
return Status::OK();
|
2013-11-17 07:44:39 +00:00
|
|
|
#else
|
|
|
|
return Status::NotSupported("This platform doesn't support fallocate()");
|
|
|
|
#endif
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2015-07-22 19:27:39 +00:00
|
|
|
Status Msync() {
|
|
|
|
if (dst_ == last_sync_) {
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
// Find the beginnings of the pages that contain the first and last
|
|
|
|
// bytes to be synced.
|
|
|
|
size_t p1 = TruncateToPageBoundary(last_sync_ - base_);
|
|
|
|
size_t p2 = TruncateToPageBoundary(dst_ - base_ - 1);
|
|
|
|
last_sync_ = dst_;
|
|
|
|
TEST_KILL_RANDOM(rocksdb_kill_odds);
|
|
|
|
if (msync(base_ + p1, p2 - p1 + page_size_, MS_SYNC) < 0) {
|
|
|
|
return IOError(filename_, errno);
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
public:
|
2013-03-15 00:00:04 +00:00
|
|
|
PosixMmapFile(const std::string& fname, int fd, size_t page_size,
|
|
|
|
const EnvOptions& options)
|
2011-03-18 22:37:00 +00:00
|
|
|
: filename_(fname),
|
|
|
|
fd_(fd),
|
|
|
|
page_size_(page_size),
|
|
|
|
map_size_(Roundup(65536, page_size)),
|
2013-03-01 02:04:58 +00:00
|
|
|
base_(nullptr),
|
|
|
|
limit_(nullptr),
|
|
|
|
dst_(nullptr),
|
|
|
|
last_sync_(nullptr),
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-17 23:16:11 +00:00
|
|
|
file_offset_(0) {
|
2014-03-19 22:40:12 +00:00
|
|
|
#ifdef ROCKSDB_FALLOCATE_PRESENT
|
|
|
|
fallocate_with_keep_size_ = options.fallocate_with_keep_size;
|
|
|
|
#endif
|
2011-03-18 22:37:00 +00:00
|
|
|
assert((page_size & (page_size - 1)) == 0);
|
2013-06-07 22:35:17 +00:00
|
|
|
assert(options.use_mmap_writes);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
~PosixMmapFile() {
|
|
|
|
if (fd_ >= 0) {
|
|
|
|
PosixMmapFile::Close();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status Append(const Slice& data) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
const char* src = data.data();
|
|
|
|
size_t left = data.size();
|
|
|
|
while (left > 0) {
|
|
|
|
assert(base_ <= dst_);
|
|
|
|
assert(dst_ <= limit_);
|
|
|
|
size_t avail = limit_ - dst_;
|
|
|
|
if (avail == 0) {
|
2015-02-18 01:54:02 +00:00
|
|
|
Status s = UnmapCurrentRegion();
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
s = MapNewRegion();
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
2011-07-15 00:20:57 +00:00
|
|
|
}
|
2015-02-18 01:54:02 +00:00
|
|
|
TEST_KILL_RANDOM(rocksdb_kill_odds);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
size_t n = (left <= avail) ? left : avail;
|
|
|
|
memcpy(dst_, src, n);
|
|
|
|
dst_ += n;
|
|
|
|
src += n;
|
|
|
|
left -= n;
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2015-09-11 16:57:02 +00:00
|
|
|
// Means Close() will properly take care of truncate
|
|
|
|
// and it does not need any additional information
|
|
|
|
virtual Status Truncate(uint64_t size) override {
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status Close() override {
|
2011-03-18 22:37:00 +00:00
|
|
|
Status s;
|
|
|
|
size_t unused = limit_ - dst_;
|
2013-04-05 06:49:43 +00:00
|
|
|
|
2015-02-18 01:54:02 +00:00
|
|
|
s = UnmapCurrentRegion();
|
|
|
|
if (!s.ok()) {
|
2011-07-15 00:20:57 +00:00
|
|
|
s = IOError(filename_, errno);
|
|
|
|
} else if (unused > 0) {
|
2011-03-18 22:37:00 +00:00
|
|
|
// Trim the extra space at the end of the file
|
|
|
|
if (ftruncate(fd_, file_offset_ - unused) < 0) {
|
2011-07-15 00:20:57 +00:00
|
|
|
s = IOError(filename_, errno);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (close(fd_) < 0) {
|
|
|
|
if (s.ok()) {
|
2011-07-15 00:20:57 +00:00
|
|
|
s = IOError(filename_, errno);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fd_ = -1;
|
2013-03-01 02:04:58 +00:00
|
|
|
base_ = nullptr;
|
|
|
|
limit_ = nullptr;
|
2011-03-18 22:37:00 +00:00
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status Flush() override {
|
2011-03-18 22:37:00 +00:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status Sync() override {
|
2015-07-22 19:27:39 +00:00
|
|
|
if (fdatasync(fd_) < 0) {
|
|
|
|
return IOError(filename_, errno);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2015-07-22 19:27:39 +00:00
|
|
|
return Msync();
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2012-08-27 19:10:26 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Flush data as well as metadata to stable storage.
|
|
|
|
*/
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status Fsync() override {
|
2015-07-22 19:27:39 +00:00
|
|
|
if (fsync(fd_) < 0) {
|
|
|
|
return IOError(filename_, errno);
|
|
|
|
}
|
|
|
|
|
|
|
|
return Msync();
|
2012-08-27 19:10:26 +00:00
|
|
|
}
|
2012-09-24 21:01:01 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Get the size of valid data in the file. This will not match the
|
|
|
|
* size that is returned from the filesystem because we use mmap
|
|
|
|
* to extend file by map_size every time.
|
|
|
|
*/
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual uint64_t GetFileSize() override {
|
2012-09-24 21:01:01 +00:00
|
|
|
size_t used = dst_ - base_;
|
|
|
|
return file_offset_ + used;
|
|
|
|
}
|
2013-01-15 22:05:42 +00:00
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status InvalidateCache(size_t offset, size_t length) override {
|
2013-11-17 07:44:39 +00:00
|
|
|
#ifndef OS_LINUX
|
|
|
|
return Status::OK();
|
|
|
|
#else
|
2013-09-21 06:00:13 +00:00
|
|
|
// free OS pages
|
2013-11-17 07:44:39 +00:00
|
|
|
int ret = Fadvise(fd_, offset, length, POSIX_FADV_DONTNEED);
|
2013-09-21 06:00:13 +00:00
|
|
|
if (ret == 0) {
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
return IOError(filename_, errno);
|
2013-11-17 07:44:39 +00:00
|
|
|
#endif
|
2013-09-21 06:00:13 +00:00
|
|
|
}
|
|
|
|
|
2013-12-11 06:34:19 +00:00
|
|
|
#ifdef ROCKSDB_FALLOCATE_PRESENT
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status Allocate(off_t offset, off_t len) override {
|
2013-10-05 05:32:05 +00:00
|
|
|
TEST_KILL_RANDOM(rocksdb_kill_odds);
|
2014-03-18 04:52:14 +00:00
|
|
|
int alloc_status = fallocate(
|
|
|
|
fd_, fallocate_with_keep_size_ ? FALLOC_FL_KEEP_SIZE : 0, offset, len);
|
|
|
|
if (alloc_status == 0) {
|
2013-01-15 22:05:42 +00:00
|
|
|
return Status::OK();
|
|
|
|
} else {
|
|
|
|
return IOError(filename_, errno);
|
|
|
|
}
|
|
|
|
}
|
2013-01-28 19:18:50 +00:00
|
|
|
#endif
|
2011-03-18 22:37:00 +00:00
|
|
|
};
|
|
|
|
|
2012-10-01 22:41:44 +00:00
|
|
|
// Use posix write to write data to a file.
|
|
|
|
class PosixWritableFile : public WritableFile {
|
|
|
|
private:
|
|
|
|
const std::string filename_;
|
|
|
|
int fd_;
|
|
|
|
uint64_t filesize_;
|
2014-03-19 22:40:12 +00:00
|
|
|
#ifdef ROCKSDB_FALLOCATE_PRESENT
|
2014-03-18 04:52:14 +00:00
|
|
|
bool fallocate_with_keep_size_;
|
2014-03-19 22:40:12 +00:00
|
|
|
#endif
|
2012-10-01 22:41:44 +00:00
|
|
|
|
|
|
|
public:
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-17 23:16:11 +00:00
|
|
|
  // Takes ownership of `fd`, which must have been opened for writing.
  // Only valid when mmap writes are disabled (asserted below).
  PosixWritableFile(const std::string& fname, int fd, const EnvOptions& options)
      : filename_(fname), fd_(fd), filesize_(0) {
#ifdef ROCKSDB_FALLOCATE_PRESENT
    fallocate_with_keep_size_ = options.fallocate_with_keep_size;
#endif
    assert(!options.use_mmap_writes);
  }
|
|
|
|
|
|
|
|
  // Close the file if the caller did not do so explicitly; any error from
  // the implicit Close() is dropped since destructors cannot report it.
  ~PosixWritableFile() {
    if (fd_ >= 0) {
      PosixWritableFile::Close();
    }
  }
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
  // Appends data with write(2), retrying on EINTR and looping over short
  // writes until every byte is written. Tracks the logical size in filesize_.
  virtual Status Append(const Slice& data) override {
    const char* src = data.data();
    size_t left = data.size();
    while (left != 0) {
      ssize_t done = write(fd_, src, left);
      if (done < 0) {
        if (errno == EINTR) {
          // Interrupted by a signal before any byte was written; retry.
          continue;
        }
        return IOError(filename_, errno);
      }
      left -= done;
      src += done;
    }
    filesize_ += data.size();
    return Status::OK();
  }
|
|
|
|
|
2015-09-11 16:57:02 +00:00
|
|
|
  // Means Close() will properly take care of truncate
  // and it does not need any additional information
  virtual Status Truncate(uint64_t size) override {
    return Status::OK();
  }
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
  // Trims any preallocated tail beyond filesize_ and closes the descriptor.
  // Only the close(2) failure is surfaced; trimming is best-effort.
  virtual Status Close() override {
    Status s;

    size_t block_size;
    size_t last_allocated_block;
    GetPreallocationStatus(&block_size, &last_allocated_block);
    if (last_allocated_block > 0) {
      // trim the extra space preallocated at the end of the file
      // NOTE(ljin): we probably don't want to surface failure as an IOError,
      // but it will be nice to log these errors.
      int dummy __attribute__((unused));
      dummy = ftruncate(fd_, filesize_);
#ifdef ROCKSDB_FALLOCATE_PRESENT
      // in some file systems, ftruncate only trims trailing space if the
      // new file size is smaller than the current size. Calling fallocate
      // with FALLOC_FL_PUNCH_HOLE flag to explicitly release these unused
      // blocks. FALLOC_FL_PUNCH_HOLE is supported on at least the following
      // filesystems:
      //   XFS (since Linux 2.6.38)
      //   ext4 (since Linux 3.0)
      //   Btrfs (since Linux 3.7)
      //   tmpfs (since Linux 3.5)
      // We ignore error since failure of this operation does not affect
      // correctness.
      IOSTATS_TIMER_GUARD(allocate_nanos);
      fallocate(fd_, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
                filesize_, block_size * last_allocated_block - filesize_);
#endif
    }

    if (close(fd_) < 0) {
      s = IOError(filename_, errno);
    }
    fd_ = -1;
    return s;
  }
|
|
|
|
|
|
|
|
// write out the cached data to the OS cache
|
2015-02-26 19:28:41 +00:00
|
|
|
  // Flush is a no-op: writes already go straight to the OS page cache
  // (there is no user-space buffer in this class to flush).
  virtual Status Flush() override {
    return Status::OK();
  }
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status Sync() override {
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-17 23:16:11 +00:00
|
|
|
if (fdatasync(fd_) < 0) {
|
2012-10-01 22:41:44 +00:00
|
|
|
return IOError(filename_, errno);
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status Fsync() override {
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-17 23:16:11 +00:00
|
|
|
if (fsync(fd_) < 0) {
|
2012-10-01 22:41:44 +00:00
|
|
|
return IOError(filename_, errno);
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
[wal changes 3/3] method in DB to sync WAL without blocking writers
Summary:
Subj. We really need this feature.
Previous diff D40899 has most of the changes to make this possible, this diff just adds the method.
Test Plan: `make check`, the new test fails without this diff; ran with ASAN, TSAN and valgrind.
Reviewers: igor, rven, IslamAbdelRahman, anthony, kradhakrishnan, tnovak, yhchiang, sdong
Reviewed By: sdong
Subscribers: MarkCallaghan, maykov, hermanlee4, yoshinorim, tnovak, dhruba
Differential Revision: https://reviews.facebook.net/D40905
2015-08-05 13:06:39 +00:00
|
|
|
  // Sync()/Fsync() here only issue fdatasync/fsync on a stable fd, so they
  // may safely be called concurrently with Append() from another thread.
  virtual bool IsSyncThreadSafe() const override {
    return true;
  }
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
  // Returns the logical file size tracked by this writer (not the possibly
  // larger preallocated on-disk size).
  virtual uint64_t GetFileSize() override { return filesize_; }
|
2013-01-15 22:05:42 +00:00
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
  // Hint the OS to drop the given byte range of this file from the page
  // cache. A no-op (always OK) on non-Linux platforms.
  virtual Status InvalidateCache(size_t offset, size_t length) override {
#ifndef OS_LINUX
    return Status::OK();
#else
    // free OS pages
    int ret = Fadvise(fd_, offset, length, POSIX_FADV_DONTNEED);
    if (ret == 0) {
      return Status::OK();
    }
    return IOError(filename_, errno);
#endif
  }
|
|
|
|
|
2013-12-11 06:34:19 +00:00
|
|
|
#ifdef ROCKSDB_FALLOCATE_PRESENT
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status Allocate(off_t offset, off_t len) override {
|
2013-10-05 05:32:05 +00:00
|
|
|
TEST_KILL_RANDOM(rocksdb_kill_odds);
|
2015-07-03 00:23:41 +00:00
|
|
|
IOSTATS_TIMER_GUARD(allocate_nanos);
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-17 23:16:11 +00:00
|
|
|
int alloc_status;
|
2015-07-03 00:23:41 +00:00
|
|
|
alloc_status = fallocate(
|
2014-03-18 04:52:14 +00:00
|
|
|
fd_, fallocate_with_keep_size_ ? FALLOC_FL_KEEP_SIZE : 0, offset, len);
|
|
|
|
if (alloc_status == 0) {
|
2013-01-15 22:05:42 +00:00
|
|
|
return Status::OK();
|
|
|
|
} else {
|
|
|
|
return IOError(filename_, errno);
|
|
|
|
}
|
|
|
|
}
|
[RocksDB] Sync file to disk incrementally
Summary:
During compaction, we sync the output files after they are fully written out. This causes unnecessary blocking of the compaction thread and burstiness of the write traffic.
This diff simply asks the OS to sync data incrementally as they are written, on the background. The hope is that, at the final sync, most of the data are already on disk and we would block less on the sync call. Thus, each compaction runs faster and we could use fewer number of compaction threads to saturate IO.
In addition, the write traffic will be smoothed out, hopefully reducing the IO P99 latency too.
Some quick tests show 10~20% improvement in per thread compaction throughput. Combined with posix advice on compaction read, just 5 threads are enough to almost saturate the udb flash bandwidth for 800 bytes write only benchmark.
What's more promising is that, with saturated IO, iostat shows average wait time is actually smoother and much smaller.
For the write only test 800bytes test:
Before the change: await occillate between 10ms and 3ms
After the change: await ranges 1-3ms
Will test against read-modify-write workload too, see if high read latency P99 could be resolved.
Will introduce a parameter to control the sync interval in a follow up diff after cleaning up EnvOptions.
Test Plan: make check; db_bench; db_stress
Reviewers: dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11115
2013-06-04 22:51:50 +00:00
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status RangeSync(off_t offset, off_t nbytes) override {
|
[RocksDB] Sync file to disk incrementally
Summary:
During compaction, we sync the output files after they are fully written out. This causes unnecessary blocking of the compaction thread and burstiness of the write traffic.
This diff simply asks the OS to sync data incrementally as they are written, on the background. The hope is that, at the final sync, most of the data are already on disk and we would block less on the sync call. Thus, each compaction runs faster and we could use fewer number of compaction threads to saturate IO.
In addition, the write traffic will be smoothed out, hopefully reducing the IO P99 latency too.
Some quick tests show 10~20% improvement in per thread compaction throughput. Combined with posix advice on compaction read, just 5 threads are enough to almost saturate the udb flash bandwidth for 800 bytes write only benchmark.
What's more promising is that, with saturated IO, iostat shows average wait time is actually smoother and much smaller.
For the write only test 800bytes test:
Before the change: await occillate between 10ms and 3ms
After the change: await ranges 1-3ms
Will test against read-modify-write workload too, see if high read latency P99 could be resolved.
Will introduce a parameter to control the sync interval in a follow up diff after cleaning up EnvOptions.
Test Plan: make check; db_bench; db_stress
Reviewers: dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11115
2013-06-04 22:51:50 +00:00
|
|
|
if (sync_file_range(fd_, offset, nbytes, SYNC_FILE_RANGE_WRITE) == 0) {
|
|
|
|
return Status::OK();
|
|
|
|
} else {
|
|
|
|
return IOError(filename_, errno);
|
|
|
|
}
|
|
|
|
}
|
2015-02-26 19:28:41 +00:00
|
|
|
  // Fill `id` with a filesystem-level unique identifier for this file;
  // returns the number of bytes written (0 on failure). Delegates to the
  // shared fd-based helper.
  virtual size_t GetUniqueId(char* id, size_t max_size) const override {
    return GetUniqueIdFromFile(fd_, id, max_size);
  }
|
2013-01-28 19:18:50 +00:00
|
|
|
#endif
|
2012-10-01 22:41:44 +00:00
|
|
|
};
|
|
|
|
|
2014-01-27 19:02:21 +00:00
|
|
|
class PosixDirectory : public Directory {
|
|
|
|
public:
|
|
|
|
explicit PosixDirectory(int fd) : fd_(fd) {}
|
|
|
|
~PosixDirectory() {
|
|
|
|
close(fd_);
|
|
|
|
}
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status Fsync() override {
|
2014-01-27 19:02:21 +00:00
|
|
|
if (fsync(fd_) == -1) {
|
|
|
|
return IOError("directory", errno);
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
int fd_;
|
|
|
|
};
|
|
|
|
|
2012-08-18 07:26:50 +00:00
|
|
|
// Acquire (lock=true) or release (lock=false) an exclusive advisory lock on
// `fname` via fcntl(F_SETLK), additionally tracking locked paths in the
// process-wide `lockedFiles` set. Returns 0/-1 like fcntl; on failure errno
// is set (ENOLCK for same-process double-lock / unlock-without-lock).
static int LockOrUnlock(const std::string& fname, int fd, bool lock) {
  // The mutex is held across both the set update and the fcntl call so the
  // set stays consistent with the kernel lock state.
  mutex_lockedFiles.Lock();
  if (lock) {
    // If it already exists in the lockedFiles set, then it is already locked,
    // and fail this lock attempt. Otherwise, insert it into lockedFiles.
    // This check is needed because fcntl() does not detect lock conflict
    // if the fcntl is issued by the same thread that earlier acquired
    // this lock.
    if (lockedFiles.insert(fname).second == false) {
      mutex_lockedFiles.Unlock();
      errno = ENOLCK;
      return -1;
    }
  } else {
    // If we are unlocking, then verify that we had locked it earlier,
    // it should already exist in lockedFiles. Remove it from lockedFiles.
    if (lockedFiles.erase(fname) != 1) {
      mutex_lockedFiles.Unlock();
      errno = ENOLCK;
      return -1;
    }
  }
  errno = 0;
  struct flock f;
  memset(&f, 0, sizeof(f));
  f.l_type = (lock ? F_WRLCK : F_UNLCK);
  f.l_whence = SEEK_SET;
  f.l_start = 0;
  f.l_len = 0;        // Lock/unlock entire file
  int value = fcntl(fd, F_SETLK, &f);
  if (value == -1 && lock) {
    // if there is an error in locking, then remove the pathname from lockedfiles
    lockedFiles.erase(fname);
  }
  mutex_lockedFiles.Unlock();
  return value;
}
|
|
|
|
|
|
|
|
// Concrete FileLock returned by PosixEnv::LockFile and consumed by
// UnlockFile. Plain data holder: the fd holding the fcntl lock plus the
// path (needed to update the process-wide lockedFiles set on unlock).
class PosixFileLock : public FileLock {
 public:
  // Initialize fd_ so an unfilled lock object cannot close a random
  // descriptor by accident.
  int fd_ = -1;
  std::string filename;
};
|
|
|
|
|
2013-09-12 07:53:30 +00:00
|
|
|
// Check the return code of a pthread_* call: on any nonzero result, report
// the failing operation (`label`) to stderr and abort the process, since
// thread-primitive failures are unrecoverable here.
void PthreadCall(const char* label, int result) {
  if (result == 0) {
    return;
  }
  fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
  abort();
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
class PosixEnv : public Env {
|
|
|
|
public:
|
|
|
|
PosixEnv();
|
2013-03-19 21:39:28 +00:00
|
|
|
|
2014-12-22 20:20:17 +00:00
|
|
|
  virtual ~PosixEnv() {
    // Join threads launched via StartThread().
    for (const auto tid : threads_to_join_) {
      pthread_join(tid, nullptr);
    }
    // Drain and join every background thread pool (LOW/HIGH priorities).
    for (int pool_id = 0; pool_id < Env::Priority::TOTAL; ++pool_id) {
      thread_pools_[pool_id].JoinAllThreads();
    }
    // All threads must be joined before the deletion of
    // thread_status_updater_.
    delete thread_status_updater_;
  }
|
|
|
|
|
2013-04-10 02:42:07 +00:00
|
|
|
void SetFD_CLOEXEC(int fd, const EnvOptions* options) {
|
2013-06-07 22:35:17 +00:00
|
|
|
if ((options == nullptr || options->set_fd_cloexec) && fd > 0) {
|
2013-04-10 02:42:07 +00:00
|
|
|
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
  // Open `fname` for buffered sequential reads. On success stores a
  // PosixSequentialFile in *result; on failure returns an IOError and
  // leaves *result empty.
  virtual Status NewSequentialFile(const std::string& fname,
                                   unique_ptr<SequentialFile>* result,
                                   const EnvOptions& options) override {
    result->reset();
    FILE* f = nullptr;
    // Retry fopen if interrupted by a signal (EINTR).
    do {
      IOSTATS_TIMER_GUARD(open_nanos);
      f = fopen(fname.c_str(), "r");
    } while (f == nullptr && errno == EINTR);
    if (f == nullptr) {
      *result = nullptr;
      return IOError(fname, errno);
    } else {
      int fd = fileno(f);
      SetFD_CLOEXEC(fd, &options);
      result->reset(new PosixSequentialFile(fname, f, options));
      return Status::OK();
    }
  }
|
|
|
|
|
|
|
|
  // Open `fname` for random reads. Uses a read-only mmap when
  // options.use_mmap_reads is set and the address space is 64-bit;
  // otherwise uses pread-based PosixRandomAccessFile.
  virtual Status NewRandomAccessFile(const std::string& fname,
                                     unique_ptr<RandomAccessFile>* result,
                                     const EnvOptions& options) override {
    result->reset();
    Status s;
    int fd;
    {
      IOSTATS_TIMER_GUARD(open_nanos);
      fd = open(fname.c_str(), O_RDONLY);
    }
    // Safe even when open failed: SetFD_CLOEXEC ignores fd <= 0.
    SetFD_CLOEXEC(fd, &options);
    if (fd < 0) {
      s = IOError(fname, errno);
    } else if (options.use_mmap_reads && sizeof(void*) >= 8) {
      // Use of mmap for random reads has been removed because it
      // kills performance when storage is fast.
      // Use mmap when virtual address-space is plentiful.
      uint64_t size;
      s = GetFileSize(fname, &size);
      if (s.ok()) {
        void* base = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd, 0);
        if (base != MAP_FAILED) {
          result->reset(new PosixMmapReadableFile(fd, fname, base,
                                                  size, options));
        } else {
          s = IOError(fname, errno);
        }
      }
      // The fd is closed here in the mmap path; the mapping itself stays
      // valid after close. NOTE(review): PosixMmapReadableFile still holds
      // this fd value — confirm it does not use it after this point.
      close(fd);
    } else {
      result->reset(new PosixRandomAccessFile(fname, fd, options));
    }
    return s;
  }
|
|
|
|
|
|
|
|
  // Create (truncating if present) `fname` for writing. Chooses between an
  // mmap-backed writer and a plain fd-based writer depending on options and
  // a one-time filesystem capability probe.
  virtual Status NewWritableFile(const std::string& fname,
                                 unique_ptr<WritableFile>* result,
                                 const EnvOptions& options) override {
    result->reset();
    Status s;
    int fd = -1;
    // Retry open if interrupted by a signal (EINTR).
    do {
      IOSTATS_TIMER_GUARD(open_nanos);
      fd = open(fname.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644);
    } while (fd < 0 && errno == EINTR);
    if (fd < 0) {
      s = IOError(fname, errno);
    } else {
      SetFD_CLOEXEC(fd, &options);
      if (options.use_mmap_writes) {
        if (!checkedDiskForMmap_) {
          // this will be executed once in the program's lifetime.
          // do not use mmapWrite on non ext-3/xfs/tmpfs systems.
          if (!SupportsFastAllocate(fname)) {
            forceMmapOff = true;
          }
          checkedDiskForMmap_ = true;
        }
      }
      if (options.use_mmap_writes && !forceMmapOff) {
        result->reset(new PosixMmapFile(fname, fd, page_size_, options));
      } else {
        // disable mmap writes
        EnvOptions no_mmap_writes_options = options;
        no_mmap_writes_options.use_mmap_writes = false;

        result->reset(new PosixWritableFile(fname, fd, no_mmap_writes_options));
      }
    }
    return s;
  }
|
|
|
|
|
2014-01-27 19:02:21 +00:00
|
|
|
virtual Status NewDirectory(const std::string& name,
|
2015-02-26 19:28:41 +00:00
|
|
|
unique_ptr<Directory>* result) override {
|
2014-01-27 19:02:21 +00:00
|
|
|
result->reset();
|
2015-07-03 00:23:41 +00:00
|
|
|
int fd;
|
|
|
|
{
|
|
|
|
IOSTATS_TIMER_GUARD(open_nanos);
|
|
|
|
fd = open(name.c_str(), 0);
|
|
|
|
}
|
2014-01-27 19:02:21 +00:00
|
|
|
if (fd < 0) {
|
|
|
|
return IOError(name, errno);
|
|
|
|
} else {
|
|
|
|
result->reset(new PosixDirectory(fd));
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2015-07-21 00:20:40 +00:00
|
|
|
virtual Status FileExists(const std::string& fname) override {
|
|
|
|
int result = access(fname.c_str(), F_OK);
|
|
|
|
|
|
|
|
if (result == 0) {
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (errno) {
|
|
|
|
case EACCES:
|
|
|
|
case ELOOP:
|
|
|
|
case ENAMETOOLONG:
|
|
|
|
case ENOENT:
|
|
|
|
case ENOTDIR:
|
|
|
|
return Status::NotFound();
|
|
|
|
default:
|
|
|
|
assert(result == EIO || result == ENOMEM);
|
|
|
|
return Status::IOError("Unexpected error(" + ToString(result) +
|
|
|
|
") accessing file `" + fname + "' ");
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
virtual Status GetChildren(const std::string& dir,
|
2015-02-26 19:28:41 +00:00
|
|
|
std::vector<std::string>* result) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
result->clear();
|
|
|
|
DIR* d = opendir(dir.c_str());
|
2013-03-01 02:04:58 +00:00
|
|
|
if (d == nullptr) {
|
2011-07-15 00:20:57 +00:00
|
|
|
return IOError(dir, errno);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
struct dirent* entry;
|
2013-03-01 02:04:58 +00:00
|
|
|
while ((entry = readdir(d)) != nullptr) {
|
2011-03-18 22:37:00 +00:00
|
|
|
result->push_back(entry->d_name);
|
|
|
|
}
|
|
|
|
closedir(d);
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status DeleteFile(const std::string& fname) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
Status result;
|
|
|
|
if (unlink(fname.c_str()) != 0) {
|
2011-07-15 00:20:57 +00:00
|
|
|
result = IOError(fname, errno);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
return result;
|
|
|
|
};
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status CreateDir(const std::string& name) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
Status result;
|
|
|
|
if (mkdir(name.c_str(), 0755) != 0) {
|
2011-07-15 00:20:57 +00:00
|
|
|
result = IOError(name, errno);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
return result;
|
|
|
|
};
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status CreateDirIfMissing(const std::string& name) override {
|
2012-11-26 21:56:45 +00:00
|
|
|
Status result;
|
|
|
|
if (mkdir(name.c_str(), 0755) != 0) {
|
|
|
|
if (errno != EEXIST) {
|
|
|
|
result = IOError(name, errno);
|
2013-01-07 18:11:18 +00:00
|
|
|
} else if (!DirExists(name)) { // Check that name is actually a
|
|
|
|
// directory.
|
|
|
|
// Message is taken from mkdir
|
|
|
|
result = Status::IOError("`"+name+"' exists but is not a directory");
|
2012-11-26 21:56:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
};
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status DeleteDir(const std::string& name) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
Status result;
|
|
|
|
if (rmdir(name.c_str()) != 0) {
|
2011-07-15 00:20:57 +00:00
|
|
|
result = IOError(name, errno);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
return result;
|
|
|
|
};
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status GetFileSize(const std::string& fname,
|
|
|
|
uint64_t* size) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
Status s;
|
|
|
|
struct stat sbuf;
|
|
|
|
if (stat(fname.c_str(), &sbuf) != 0) {
|
|
|
|
*size = 0;
|
2011-07-15 00:20:57 +00:00
|
|
|
s = IOError(fname, errno);
|
2011-03-18 22:37:00 +00:00
|
|
|
} else {
|
|
|
|
*size = sbuf.st_size;
|
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2012-11-26 21:56:45 +00:00
|
|
|
virtual Status GetFileModificationTime(const std::string& fname,
|
2015-02-26 19:28:41 +00:00
|
|
|
uint64_t* file_mtime) override {
|
2012-11-26 21:56:45 +00:00
|
|
|
struct stat s;
|
|
|
|
if (stat(fname.c_str(), &s) !=0) {
|
|
|
|
return IOError(fname, errno);
|
|
|
|
}
|
|
|
|
*file_mtime = static_cast<uint64_t>(s.st_mtime);
|
|
|
|
return Status::OK();
|
|
|
|
}
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status RenameFile(const std::string& src,
|
|
|
|
const std::string& target) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
Status result;
|
|
|
|
if (rename(src.c_str(), target.c_str()) != 0) {
|
2011-07-15 00:20:57 +00:00
|
|
|
result = IOError(src, errno);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status LinkFile(const std::string& src,
|
|
|
|
const std::string& target) override {
|
2014-11-14 19:38:26 +00:00
|
|
|
Status result;
|
|
|
|
if (link(src.c_str(), target.c_str()) != 0) {
|
|
|
|
if (errno == EXDEV) {
|
|
|
|
return Status::NotSupported("No cross FS links allowed");
|
|
|
|
}
|
|
|
|
result = IOError(src, errno);
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
  // Acquire an exclusive advisory lock on `fname`, creating the file if
  // needed. On success *lock receives a heap-allocated PosixFileLock that
  // must be released with UnlockFile(); on failure *lock stays null.
  virtual Status LockFile(const std::string& fname, FileLock** lock) override {
    *lock = nullptr;
    Status result;
    int fd;
    {
      IOSTATS_TIMER_GUARD(open_nanos);
      fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644);
    }
    if (fd < 0) {
      result = IOError(fname, errno);
    } else if (LockOrUnlock(fname, fd, true) == -1) {
      result = IOError("lock " + fname, errno);
      // Locking failed: don't leak the descriptor.
      close(fd);
    } else {
      SetFD_CLOEXEC(fd, nullptr);
      // Ownership of fd transfers to the lock object; UnlockFile closes it.
      PosixFileLock* my_lock = new PosixFileLock;
      my_lock->fd_ = fd;
      my_lock->filename = fname;
      *lock = my_lock;
    }
    return result;
  }
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
  // Release a lock obtained from LockFile. Always closes the fd and deletes
  // the lock object, even if the fcntl unlock itself failed.
  virtual Status UnlockFile(FileLock* lock) override {
    PosixFileLock* my_lock = reinterpret_cast<PosixFileLock*>(lock);
    Status result;
    if (LockOrUnlock(my_lock->filename, my_lock->fd_, false) == -1) {
      result = IOError("unlock", errno);
    }
    close(my_lock->fd_);
    delete my_lock;
    return result;
  }
|
|
|
|
|
2015-03-17 01:49:14 +00:00
|
|
|
  // Queue `function(arg)` on the thread pool of the given priority; `tag`
  // lets UnSchedule cancel not-yet-started items. Defined out of line.
  virtual void Schedule(void (*function)(void* arg1), void* arg,
                        Priority pri = LOW, void* tag = nullptr) override;

  // Remove queued-but-unstarted items matching `tag`; returns the count.
  virtual int UnSchedule(void* arg, Priority pri) override;

  // Launch a detached-until-destructor thread running function(arg);
  // joined in ~PosixEnv via threads_to_join_.
  virtual void StartThread(void (*function)(void* arg), void* arg) override;

  // Block until all StartThread threads have finished.
  virtual void WaitForJoin() override;

  // Number of items currently queued in the given priority pool.
  virtual unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const override;
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status GetTestDirectory(std::string* result) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
const char* env = getenv("TEST_TMPDIR");
|
|
|
|
if (env && env[0] != '\0') {
|
|
|
|
*result = env;
|
|
|
|
} else {
|
|
|
|
char buf[100];
|
2013-10-05 05:32:05 +00:00
|
|
|
snprintf(buf, sizeof(buf), "/tmp/rocksdbtest-%d", int(geteuid()));
|
2011-03-18 22:37:00 +00:00
|
|
|
*result = buf;
|
|
|
|
}
|
|
|
|
// Directory may already exist
|
|
|
|
CreateDir(*result);
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2014-12-22 20:20:17 +00:00
|
|
|
  // Snapshot the status of all RocksDB-managed threads into *thread_list.
  virtual Status GetThreadList(
      std::vector<ThreadStatus>* thread_list) override {
    assert(thread_status_updater_);
    return thread_status_updater_->GetThreadList(thread_list);
  }
|
|
|
|
|
2014-05-29 17:57:22 +00:00
|
|
|
static uint64_t gettid(pthread_t tid) {
|
2011-03-18 22:37:00 +00:00
|
|
|
uint64_t thread_id = 0;
|
2011-03-21 19:40:57 +00:00
|
|
|
memcpy(&thread_id, &tid, std::min(sizeof(thread_id), sizeof(tid)));
|
2011-07-21 02:40:18 +00:00
|
|
|
return thread_id;
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2014-05-29 17:57:22 +00:00
|
|
|
static uint64_t gettid() {
|
|
|
|
pthread_t tid = pthread_self();
|
|
|
|
return gettid(tid);
|
|
|
|
}
|
|
|
|
|
2015-06-11 21:32:10 +00:00
|
|
|
  // Env interface: numeric id of the calling thread (from pthread_self).
  virtual uint64_t GetThreadID() const override {
    return gettid(pthread_self());
  }
|
|
|
|
|
2013-01-20 10:07:13 +00:00
|
|
|
  // Create an info-log writer at `fname` (truncating any existing file).
  virtual Status NewLogger(const std::string& fname,
                           shared_ptr<Logger>* result) override {
    FILE* f;
    {
      IOSTATS_TIMER_GUARD(open_nanos);
      f = fopen(fname.c_str(), "w");
    }
    if (f == nullptr) {
      result->reset();
      return IOError(fname, errno);
    } else {
      int fd = fileno(f);
#ifdef ROCKSDB_FALLOCATE_PRESENT
      // Preallocate 4MB for the log file up front; KEEP_SIZE leaves the
      // reported size at zero. Errors are ignored (best effort).
      fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 4 * 1024 * 1024);
#endif
      SetFD_CLOEXEC(fd, nullptr);
      // PosixLogger takes ownership of the FILE*.
      result->reset(new PosixLogger(f, &PosixEnv::gettid, this));
      return Status::OK();
    }
  }
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual uint64_t NowMicros() override {
|
2015-03-03 19:29:31 +00:00
|
|
|
struct timeval tv;
|
|
|
|
gettimeofday(&tv, nullptr);
|
|
|
|
return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
  // Monotonic-ish time in nanoseconds, platform-dependent:
  // Linux/FreeBSD use CLOCK_MONOTONIC; macOS uses a Mach clock service
  // (NOTE(review): CALENDAR_CLOCK is wall-clock, not monotonic — confirm
  // intent); elsewhere fall back to std::chrono::steady_clock.
  virtual uint64_t NowNanos() override {
#if defined(OS_LINUX) || defined(OS_FREEBSD)
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return static_cast<uint64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(__MACH__)
    clock_serv_t cclock;
    mach_timespec_t ts;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
    clock_get_time(cclock, &ts);
    mach_port_deallocate(mach_task_self(), cclock);
    return static_cast<uint64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#else
    return std::chrono::duration_cast<std::chrono::nanoseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
#endif
  }
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
  // Block the calling thread for roughly `micros` microseconds.
  virtual void SleepForMicroseconds(int micros) override { usleep(micros); }
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status GetHostName(char* name, uint64_t len) override {
|
2014-11-13 19:39:30 +00:00
|
|
|
int ret = gethostname(name, static_cast<size_t>(len));
|
2012-08-14 22:20:36 +00:00
|
|
|
if (ret < 0) {
|
|
|
|
if (errno == EFAULT || errno == EINVAL)
|
|
|
|
return Status::InvalidArgument(strerror(errno));
|
|
|
|
else
|
|
|
|
return IOError("GetHostName", errno);
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual Status GetCurrentTime(int64_t* unix_time) override {
|
2013-03-01 02:04:58 +00:00
|
|
|
time_t ret = time(nullptr);
|
2012-08-14 22:20:36 +00:00
|
|
|
if (ret == (time_t) -1) {
|
|
|
|
return IOError("GetCurrentTime", errno);
|
|
|
|
}
|
|
|
|
*unix_time = (int64_t) ret;
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual Status GetAbsolutePath(const std::string& db_path,
|
2015-02-26 19:28:41 +00:00
|
|
|
std::string* output_path) override {
|
2012-08-14 22:20:36 +00:00
|
|
|
if (db_path.find('/') == 0) {
|
|
|
|
*output_path = db_path;
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
char the_path[256];
|
|
|
|
char* ret = getcwd(the_path, 256);
|
2013-03-01 02:04:58 +00:00
|
|
|
if (ret == nullptr) {
|
2012-08-14 22:20:36 +00:00
|
|
|
return Status::IOError(strerror(errno));
|
|
|
|
}
|
|
|
|
|
|
|
|
*output_path = ret;
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2012-11-29 00:42:36 +00:00
|
|
|
// Allow increasing the number of worker threads.
|
2015-02-26 19:28:41 +00:00
|
|
|
  // Allow increasing the number of worker threads.
  virtual void SetBackgroundThreads(int num, Priority pri) override {
    assert(pri >= Priority::LOW && pri <= Priority::HIGH);
    thread_pools_[pri].SetBackgroundThreads(num);
  }
|
|
|
|
|
2014-11-03 22:11:33 +00:00
|
|
|
// Allow increasing the number of worker threads.
|
2015-02-26 19:28:41 +00:00
|
|
|
  // Grow the pool to at least `num` threads; never shrinks an existing pool.
  virtual void IncBackgroundThreadsIfNeeded(int num, Priority pri) override {
    assert(pri >= Priority::LOW && pri <= Priority::HIGH);
    thread_pools_[pri].IncBackgroundThreadsIfNeeded(num);
  }
|
|
|
|
|
2014-08-14 03:49:58 +00:00
|
|
|
  // Lower the I/O scheduling priority of the given pool's threads.
  // Only effective on Linux (uses ioprio); a no-op elsewhere.
  virtual void LowerThreadPoolIOPriority(Priority pool = LOW) override {
    assert(pool >= Priority::LOW && pool <= Priority::HIGH);
#ifdef OS_LINUX
    thread_pools_[pool].LowerIOPriority();
#endif
  }
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
virtual std::string TimeToString(uint64_t secondsSince1970) override {
|
2012-10-19 21:00:53 +00:00
|
|
|
const time_t seconds = (time_t)secondsSince1970;
|
|
|
|
struct tm t;
|
|
|
|
int maxsize = 64;
|
|
|
|
std::string dummy;
|
|
|
|
dummy.reserve(maxsize);
|
|
|
|
dummy.resize(maxsize);
|
|
|
|
char* p = &dummy[0];
|
|
|
|
localtime_r(&seconds, &t);
|
|
|
|
snprintf(p, maxsize,
|
|
|
|
"%04d/%02d/%02d-%02d:%02d:%02d ",
|
|
|
|
t.tm_year + 1900,
|
|
|
|
t.tm_mon + 1,
|
|
|
|
t.tm_mday,
|
|
|
|
t.tm_hour,
|
|
|
|
t.tm_min,
|
|
|
|
t.tm_sec);
|
|
|
|
return dummy;
|
|
|
|
}
|
|
|
|
|
2015-05-19 00:03:59 +00:00
|
|
|
EnvOptions OptimizeForLogWrite(const EnvOptions& env_options,
|
|
|
|
const DBOptions& db_options) const override {
|
2014-03-18 04:52:14 +00:00
|
|
|
EnvOptions optimized = env_options;
|
|
|
|
optimized.use_mmap_writes = false;
|
2015-05-19 00:03:59 +00:00
|
|
|
optimized.bytes_per_sync = db_options.wal_bytes_per_sync;
|
2014-03-28 22:04:11 +00:00
|
|
|
// TODO(icanadi) it's faster if fallocate_with_keep_size is false, but it
|
|
|
|
// breaks TransactionLogIteratorStallAtLastRecord unit test. Fix the unit
|
|
|
|
// test and make this false
|
|
|
|
optimized.fallocate_with_keep_size = true;
|
2014-03-18 04:52:14 +00:00
|
|
|
return optimized;
|
|
|
|
}
|
|
|
|
|
2015-02-26 19:28:41 +00:00
|
|
|
EnvOptions OptimizeForManifestWrite(
|
|
|
|
const EnvOptions& env_options) const override {
|
2014-03-18 04:52:14 +00:00
|
|
|
EnvOptions optimized = env_options;
|
|
|
|
optimized.use_mmap_writes = false;
|
|
|
|
optimized.fallocate_with_keep_size = true;
|
|
|
|
return optimized;
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
private:
|
2013-03-21 18:12:42 +00:00
|
|
|
bool checkedDiskForMmap_;
|
|
|
|
bool forceMmapOff; // do we override Env options?
|
2013-03-13 20:50:26 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2013-01-07 18:11:18 +00:00
|
|
|
// Returns true iff the named directory exists and is a directory.
|
|
|
|
virtual bool DirExists(const std::string& dname) {
|
|
|
|
struct stat statbuf;
|
|
|
|
if (stat(dname.c_str(), &statbuf) == 0) {
|
|
|
|
return S_ISDIR(statbuf.st_mode);
|
|
|
|
}
|
|
|
|
return false; // stat() failed return false
|
|
|
|
}
|
|
|
|
|
2013-03-13 20:50:26 +00:00
|
|
|
bool SupportsFastAllocate(const std::string& path) {
|
2013-12-11 19:18:00 +00:00
|
|
|
#ifdef ROCKSDB_FALLOCATE_PRESENT
|
2013-03-13 20:50:26 +00:00
|
|
|
struct statfs s;
|
|
|
|
if (statfs(path.c_str(), &s)){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
switch (s.f_type) {
|
|
|
|
case EXT4_SUPER_MAGIC:
|
|
|
|
return true;
|
|
|
|
case XFS_SUPER_MAGIC:
|
|
|
|
return true;
|
|
|
|
case TMPFS_MAGIC:
|
|
|
|
return true;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
2013-11-17 07:44:39 +00:00
|
|
|
#else
|
|
|
|
return false;
|
|
|
|
#endif
|
2013-03-13 20:50:26 +00:00
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
size_t page_size_;
|
|
|
|
|
|
|
|
|
2013-09-12 07:53:30 +00:00
|
|
|
  // A pool of pthreads draining a FIFO queue of (function, arg) work items.
  // All mutable state (queue_, bgthreads_, total_threads_limit_, ...) is
  // guarded by mu_; idle workers block on bgsignal_. The pool can shrink:
  // when total_threads_limit_ drops below the number of live threads, the
  // newest threads detach themselves and exit from BGThread(), one at a
  // time, newest-first.
  class ThreadPool {
   public:
    ThreadPool()
        : total_threads_limit_(1),
          bgthreads_(0),
          queue_(),
          queue_len_(0),
          exit_all_threads_(false),
          low_io_priority_(false),
          env_(nullptr) {
      PthreadCall("mutex_init", pthread_mutex_init(&mu_, nullptr));
      PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, nullptr));
    }

    // JoinAllThreads() must have been called before destruction.
    ~ThreadPool() {
      assert(bgthreads_.size() == 0U);
    }

    // Signal all workers to exit, join them, and drop their handles.
    // Must be called at most once (asserts !exit_all_threads_).
    void JoinAllThreads() {
      PthreadCall("lock", pthread_mutex_lock(&mu_));
      assert(!exit_all_threads_);
      exit_all_threads_ = true;
      PthreadCall("signalall", pthread_cond_broadcast(&bgsignal_));
      PthreadCall("unlock", pthread_mutex_unlock(&mu_));
      for (const auto tid : bgthreads_) {
        pthread_join(tid, nullptr);
      }
      bgthreads_.clear();
    }

    // Record the owning Env so worker threads can reference it
    // (used for thread-status registration in BGThreadWrapper).
    void SetHostEnv(Env* env) {
      env_ = env;
    }

    // Ask workers to drop to the idle I/O-priority class (Linux only).
    // Each worker applies the change lazily in its work loop.
    void LowerIOPriority() {
#ifdef OS_LINUX
      PthreadCall("lock", pthread_mutex_lock(&mu_));
      low_io_priority_ = true;
      PthreadCall("unlock", pthread_mutex_unlock(&mu_));
#endif
    }

    // Return true if there is at least one thread needs to terminate.
    bool HasExcessiveThread() {
      return static_cast<int>(bgthreads_.size()) > total_threads_limit_;
    }

    // Return true iff the current thread is the excessive thread to terminate.
    // Always terminate the running thread that is added last, even if there are
    // more than one thread to terminate.
    bool IsLastExcessiveThread(size_t thread_id) {
      return HasExcessiveThread() && thread_id == bgthreads_.size() - 1;
    }

    // Is one of the threads to terminate.
    bool IsExcessiveThread(size_t thread_id) {
      return static_cast<int>(thread_id) >= total_threads_limit_;
    }

    // Return the thread priority.
    // This would allow its member-thread to know its priority.
    Env::Priority GetThreadPriority() {
      return priority_;
    }

    // Set the thread priority.
    void SetThreadPriority(Env::Priority priority) {
      priority_ = priority;
    }

    // Worker main loop: wait for work (or a termination condition), run one
    // item at a time, and exit when the pool is shut down or this thread
    // becomes excessive.
    void BGThread(size_t thread_id) {
      // Tracks whether THIS thread has already lowered its I/O class, so
      // ioprio_set is issued at most once per thread.
      bool low_io_priority = false;
      while (true) {
        // Wait until there is an item that is ready to run
        PthreadCall("lock", pthread_mutex_lock(&mu_));
        // Stop waiting if the thread needs to do work or needs to terminate.
        while (!exit_all_threads_ && !IsLastExcessiveThread(thread_id) &&
               (queue_.empty() || IsExcessiveThread(thread_id))) {
          PthreadCall("wait", pthread_cond_wait(&bgsignal_, &mu_));
        }
        if (exit_all_threads_) { // mechanism to let BG threads exit safely
          PthreadCall("unlock", pthread_mutex_unlock(&mu_));
          break;
        }
        if (IsLastExcessiveThread(thread_id)) {
          // Current thread is the last generated one and is excessive.
          // We always terminate excessive thread in the reverse order of
          // generation time.
          auto terminating_thread = bgthreads_.back();
          pthread_detach(terminating_thread);
          bgthreads_.pop_back();
          if (HasExcessiveThread()) {
            // There is still at least more excessive thread to terminate.
            WakeUpAllThreads();
          }
          PthreadCall("unlock", pthread_mutex_unlock(&mu_));
          break;
        }
        void (*function)(void*) = queue_.front().function;
        void* arg = queue_.front().arg;
        queue_.pop_front();
        queue_len_.store(static_cast<unsigned int>(queue_.size()),
                         std::memory_order_relaxed);

        bool decrease_io_priority = (low_io_priority != low_io_priority_);
        PthreadCall("unlock", pthread_mutex_unlock(&mu_));

#ifdef OS_LINUX
        if (decrease_io_priority) {
#define IOPRIO_CLASS_SHIFT (13)
#define IOPRIO_PRIO_VALUE(class, data) \
          (((class) << IOPRIO_CLASS_SHIFT) | data)
          // Put schedule into IOPRIO_CLASS_IDLE class (lowest)
          // These system calls only have an effect when used in conjunction
          // with an I/O scheduler that supports I/O priorities. As at
          // kernel 2.6.17 the only such scheduler is the Completely
          // Fair Queuing (CFQ) I/O scheduler.
          // To change scheduler:
          //  echo cfq > /sys/block/<device_name>/queue/schedule
          // Tunables to consider:
          //  /sys/block/<device_name>/queue/slice_idle
          //  /sys/block/<device_name>/queue/slice_sync
          syscall(SYS_ioprio_set,
                  1,  // IOPRIO_WHO_PROCESS
                  0,  // current thread
                  IOPRIO_PRIO_VALUE(3, 0));
          low_io_priority = true;
        }
#else
        (void)decrease_io_priority; // avoid 'unused variable' error
#endif
        // Run the work item outside the lock.
        (*function)(arg);
      }
    }

    // Helper struct for passing arguments when creating threads.
    // Heap-allocated by StartBGThreads(); freed by BGThreadWrapper().
    struct BGThreadMetadata {
      ThreadPool* thread_pool_;
      size_t thread_id_;  // Thread count in the thread.
      explicit BGThreadMetadata(ThreadPool* thread_pool, size_t thread_id)
          : thread_pool_(thread_pool), thread_id_(thread_id) {}
    };

    // pthread entry point: unpack the metadata, optionally register for
    // thread-status tracking, run the worker loop, then unregister.
    static void* BGThreadWrapper(void* arg) {
      BGThreadMetadata* meta = reinterpret_cast<BGThreadMetadata*>(arg);
      size_t thread_id = meta->thread_id_;
      ThreadPool* tp = meta->thread_pool_;
#if ROCKSDB_USING_THREAD_STATUS
      // for thread-status
      ThreadStatusUtil::RegisterThread(tp->env_,
          (tp->GetThreadPriority() == Env::Priority::HIGH ?
              ThreadStatus::HIGH_PRIORITY :
              ThreadStatus::LOW_PRIORITY));
#endif
      delete meta;
      tp->BGThread(thread_id);
#if ROCKSDB_USING_THREAD_STATUS
      ThreadStatusUtil::UnregisterThread();
#endif
      return nullptr;
    }

    // Broadcast bgsignal_ so every sleeping worker re-evaluates its state.
    void WakeUpAllThreads() {
      PthreadCall("signalall", pthread_cond_broadcast(&bgsignal_));
    }

    // Change the thread limit to max(1, num). Shrinking only happens when
    // allow_reduce is true; excess threads then retire themselves from
    // BGThread(). No-op after JoinAllThreads() has begun.
    void SetBackgroundThreadsInternal(int num, bool allow_reduce) {
      PthreadCall("lock", pthread_mutex_lock(&mu_));
      if (exit_all_threads_) {
        PthreadCall("unlock", pthread_mutex_unlock(&mu_));
        return;
      }
      if (num > total_threads_limit_ ||
          (num < total_threads_limit_ && allow_reduce)) {
        total_threads_limit_ = std::max(1, num);
        WakeUpAllThreads();
        StartBGThreads();
      }
      PthreadCall("unlock", pthread_mutex_unlock(&mu_));
    }

    // Grow the pool to at least `num` threads; never shrinks.
    void IncBackgroundThreadsIfNeeded(int num) {
      SetBackgroundThreadsInternal(num, false);
    }

    // Set the pool to exactly max(1, num) threads; may shrink.
    void SetBackgroundThreads(int num) {
      SetBackgroundThreadsInternal(num, true);
    }

    // Spawn workers until the live-thread count reaches the limit.
    // Caller must hold mu_.
    void StartBGThreads() {
      // Start background thread if necessary
      while ((int)bgthreads_.size() < total_threads_limit_) {
        pthread_t t;
        PthreadCall(
            "create thread",
            pthread_create(&t, nullptr, &ThreadPool::BGThreadWrapper,
                           new BGThreadMetadata(this, bgthreads_.size())));

        // Set the thread name to aid debugging
#if defined(_GNU_SOURCE) && defined(__GLIBC_PREREQ)
#if __GLIBC_PREREQ(2, 12)
        char name_buf[16];
        snprintf(name_buf, sizeof name_buf, "rocksdb:bg%" ROCKSDB_PRIszt,
                 bgthreads_.size());
        name_buf[sizeof name_buf - 1] = '\0';
        pthread_setname_np(t, name_buf);
#endif
#endif

        bgthreads_.push_back(t);
      }
    }

    // Enqueue one work item. `tag` is an opaque grouping key so that
    // UnSchedule(tag) can later cancel items that have not started yet.
    // Silently dropped if the pool is shutting down.
    void Schedule(void (*function)(void* arg1), void* arg, void* tag) {
      PthreadCall("lock", pthread_mutex_lock(&mu_));

      if (exit_all_threads_) {
        PthreadCall("unlock", pthread_mutex_unlock(&mu_));
        return;
      }

      StartBGThreads();

      // Add to priority queue
      queue_.push_back(BGItem());
      queue_.back().function = function;
      queue_.back().arg = arg;
      queue_.back().tag = tag;
      queue_len_.store(static_cast<unsigned int>(queue_.size()),
                       std::memory_order_relaxed);

      if (!HasExcessiveThread()) {
        // Wake up at least one waiting thread.
        PthreadCall("signal", pthread_cond_signal(&bgsignal_));
      } else {
        // Need to wake up all threads to make sure the one woken
        // up is not the one to terminate.
        WakeUpAllThreads();
      }

      PthreadCall("unlock", pthread_mutex_unlock(&mu_));
    }

    // Remove all queued (not yet running) items whose tag matches `arg`.
    // Returns the number of items removed; items already running are
    // unaffected.
    int UnSchedule(void* arg) {
      int count = 0;
      PthreadCall("lock", pthread_mutex_lock(&mu_));

      // Remove from priority queue
      BGQueue::iterator it = queue_.begin();
      while (it != queue_.end()) {
        if (arg == (*it).tag) {
          it = queue_.erase(it);
          count++;
        } else {
          it++;
        }
      }
      queue_len_.store(static_cast<unsigned int>(queue_.size()),
                       std::memory_order_relaxed);
      PthreadCall("unlock", pthread_mutex_unlock(&mu_));
      return count;
    }

    // Lock-free snapshot of the queue length, for stats reporting.
    unsigned int GetQueueLen() const {
      return queue_len_.load(std::memory_order_relaxed);
    }

   private:
    // Entry per Schedule() call
    struct BGItem {
      void* arg;
      void (*function)(void*);
      void* tag;
    };
    typedef std::deque<BGItem> BGQueue;

    pthread_mutex_t mu_;
    pthread_cond_t bgsignal_;
    int total_threads_limit_;  // desired number of live worker threads
    std::vector<pthread_t> bgthreads_;
    BGQueue queue_;
    std::atomic_uint queue_len_;  // Queue length. Used for stats reporting
    bool exit_all_threads_;
    bool low_io_priority_;     // workers should switch to idle I/O class
    Env::Priority priority_;   // priority of this pool's threads
    Env* env_;                 // owning Env, set via SetHostEnv()
  };
|
|
|
|
|
|
|
|
std::vector<ThreadPool> thread_pools_;
|
|
|
|
|
|
|
|
pthread_mutex_t mu_;
|
|
|
|
std::vector<pthread_t> threads_to_join_;
|
|
|
|
|
|
|
|
};
|
|
|
|
|
|
|
|
// Construct the default POSIX environment: one thread pool per priority,
// each told its own priority and given a back-pointer to this Env.
PosixEnv::PosixEnv() : checkedDiskForMmap_(false),
                       forceMmapOff(false),
                       page_size_(getpagesize()),
                       thread_pools_(Priority::TOTAL) {
  PthreadCall("mutex_init", pthread_mutex_init(&mu_, nullptr));
  for (int id = 0; id < Env::Priority::TOTAL; id++) {
    thread_pools_[id].SetThreadPriority(static_cast<Env::Priority>(id));
    // This allows later initializing the thread-local-env of each thread.
    thread_pools_[id].SetHostEnv(this);
  }
  thread_status_updater_ = CreateThreadStatusUpdater();
}
|
|
|
|
|
2015-03-17 01:49:14 +00:00
|
|
|
// Submit a work item to the pool for `pri`. `tag` is an opaque grouping
// key that UnSchedule() can later use to cancel items not yet started.
void PosixEnv::Schedule(void (*function)(void* arg1), void* arg, Priority pri,
                        void* tag) {
  assert(pri >= Priority::LOW && pri <= Priority::HIGH);
  thread_pools_[pri].Schedule(function, arg, tag);
}
|
|
|
|
|
|
|
|
int PosixEnv::UnSchedule(void* arg, Priority pri) {
|
|
|
|
return thread_pools_[pri].UnSchedule(arg);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2014-03-10 23:14:48 +00:00
|
|
|
// Number of work items queued (not yet running) in the pool for `pri`.
// Lock-free; intended for stats reporting.
unsigned int PosixEnv::GetThreadPoolQueueLen(Priority pri) const {
  assert(pri >= Priority::LOW && pri <= Priority::HIGH);
  return thread_pools_[pri].GetQueueLen();
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Bundle of a user callback and its argument, heap-allocated by
// StartThread() and released by StartThreadWrapper() after the callback
// has run.
struct StartThreadState {
  void (*user_function)(void*);
  void* arg;
};

// pthread entry point: run the user's function with its argument, then
// free the state object that carried them across the thread boundary.
static void* StartThreadWrapper(void* arg) {
  StartThreadState* state = static_cast<StartThreadState*>(arg);
  state->user_function(state->arg);
  delete state;
  return nullptr;
}
|
|
|
|
|
|
|
|
void PosixEnv::StartThread(void (*function)(void* arg), void* arg) {
|
|
|
|
pthread_t t;
|
|
|
|
StartThreadState* state = new StartThreadState;
|
|
|
|
state->user_function = function;
|
|
|
|
state->arg = arg;
|
|
|
|
PthreadCall("start thread",
|
2013-03-01 02:04:58 +00:00
|
|
|
pthread_create(&t, nullptr, &StartThreadWrapper, state));
|
2013-09-12 07:53:30 +00:00
|
|
|
PthreadCall("lock", pthread_mutex_lock(&mu_));
|
2013-03-19 21:39:28 +00:00
|
|
|
threads_to_join_.push_back(t);
|
2013-09-12 07:53:30 +00:00
|
|
|
PthreadCall("unlock", pthread_mutex_unlock(&mu_));
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2014-02-26 01:47:37 +00:00
|
|
|
// Block until every thread launched via StartThread() has exited, then
// forget their handles.
void PosixEnv::WaitForJoin() {
  for (size_t i = 0; i < threads_to_join_.size(); ++i) {
    pthread_join(threads_to_join_[i], nullptr);
  }
  threads_to_join_.clear();
}
|
|
|
|
|
2011-10-31 17:22:06 +00:00
|
|
|
} // namespace
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2013-10-18 21:50:54 +00:00
|
|
|
std::string Env::GenerateUniqueId() {
|
|
|
|
std::string uuid_file = "/proc/sys/kernel/random/uuid";
|
2015-07-21 00:20:40 +00:00
|
|
|
|
|
|
|
Status s = FileExists(uuid_file);
|
|
|
|
if (s.ok()) {
|
2013-10-18 21:50:54 +00:00
|
|
|
std::string uuid;
|
2015-07-21 00:20:40 +00:00
|
|
|
s = ReadFileToString(this, uuid_file, &uuid);
|
2013-10-18 21:50:54 +00:00
|
|
|
if (s.ok()) {
|
|
|
|
return uuid;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Could not read uuid_file - generate uuid using "nanos-random"
|
|
|
|
Random64 r(time(nullptr));
|
|
|
|
uint64_t random_uuid_portion =
|
|
|
|
r.Uniform(std::numeric_limits<uint64_t>::max());
|
|
|
|
uint64_t nanos_uuid_portion = NowNanos();
|
|
|
|
char uuid2[200];
|
2013-11-17 07:44:39 +00:00
|
|
|
snprintf(uuid2,
|
|
|
|
200,
|
|
|
|
"%lx-%lx",
|
|
|
|
(unsigned long)nanos_uuid_portion,
|
|
|
|
(unsigned long)random_uuid_portion);
|
2013-10-18 21:50:54 +00:00
|
|
|
return uuid2;
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Return the process-wide default Env (a PosixEnv singleton).
// C++11 guarantees thread-safe initialization of the function-local
// static, so the instance is constructed exactly once and never destroyed
// before program exit.
Env* Env::Default() {
  static PosixEnv default_env;
  return &default_env;
}
|
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
} // namespace rocksdb
|