// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "env/mock_env.h"
#include <algorithm>
#include <chrono>
#include "port/sys_time.h"
#include "util/cast_util.h"
#include "util/murmurhash.h"
#include "util/random.h"
#include "util/rate_limiter.h"

namespace rocksdb {
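
// MemFile is the shared in-memory representation of a single "file". It is
// reference-counted: every handle created below Ref()s it and Unref()s it on
// destruction, and the object deletes itself when the last reference drops.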
class MemFile {
 public:
  explicit MemFile(Env* env, const std::string& fn, bool _is_lock_file = false)
      : env_(env),
        fn_(fn),
        refs_(0),
        is_lock_file_(_is_lock_file),
        locked_(false),
        size_(0),
        modified_time_(Now()),
        rnd_(static_cast<uint32_t>(
            MurmurHash(fn.data(), static_cast<int>(fn.size()), 0))),
        fsynced_bytes_(0) {}

  void Ref() {
    MutexLock lock(&mutex_);
    ++refs_;
  }

  bool is_lock_file() const { return is_lock_file_; }

  bool Lock() {
    assert(is_lock_file_);
    MutexLock lock(&mutex_);
    if (locked_) {
      return false;
    } else {
      locked_ = true;
      return true;
    }
  }

  void Unlock() {
    assert(is_lock_file_);
    MutexLock lock(&mutex_);
    locked_ = false;
  }

  void Unref() {
    bool do_delete = false;
    {
      MutexLock lock(&mutex_);
      --refs_;
      assert(refs_ >= 0);
      if (refs_ <= 0) {
        do_delete = true;
      }
    }

    if (do_delete) {
      delete this;
    }
  }

  uint64_t Size() const { return size_; }

  void Truncate(size_t size) {
    MutexLock lock(&mutex_);
    if (size < size_) {
      data_.resize(size);
      size_ = size;
    }
  }
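
  // Simulate losing buffered writes: overwrite up to 512 bytes, chosen at
  // random, within the portion of the file that has not yet been fsynced.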
  void CorruptBuffer() {
    if (fsynced_bytes_ >= size_) {
      return;
    }
    uint64_t buffered_bytes = size_ - fsynced_bytes_;
    uint64_t start =
        fsynced_bytes_ + rnd_.Uniform(static_cast<int>(buffered_bytes));
    uint64_t end = std::min(start + 512, size_.load());
    MutexLock lock(&mutex_);
    for (uint64_t pos = start; pos < end; ++pos) {
      data_[static_cast<size_t>(pos)] = static_cast<char>(rnd_.Uniform(256));
    }
  }

  Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
    MutexLock lock(&mutex_);
    const uint64_t available = Size() - std::min(Size(), offset);
    size_t offset_ = static_cast<size_t>(offset);
    if (n > available) {
      n = static_cast<size_t>(available);
    }
    if (n == 0) {
      *result = Slice();
      return Status::OK();
    }
    if (scratch) {
      memcpy(scratch, &(data_[offset_]), n);
      *result = Slice(scratch, n);
    } else {
      *result = Slice(&(data_[offset_]), n);
    }
    return Status::OK();
  }

  Status Write(uint64_t offset, const Slice& data) {
    MutexLock lock(&mutex_);
    size_t offset_ = static_cast<size_t>(offset);
    if (offset + data.size() > data_.size()) {
      data_.resize(offset_ + data.size());
    }
    data_.replace(offset_, data.size(), data.data(), data.size());
    size_ = data_.size();
    modified_time_ = Now();
    return Status::OK();
  }

  Status Append(const Slice& data) {
    MutexLock lock(&mutex_);
    data_.append(data.data(), data.size());
    size_ = data_.size();
    modified_time_ = Now();
    return Status::OK();
  }

  Status Fsync() {
    fsynced_bytes_ = size_.load();
    return Status::OK();
  }

  uint64_t ModifiedTime() const { return modified_time_; }

 private:
  uint64_t Now() {
    int64_t unix_time = 0;
    auto s = env_->GetCurrentTime(&unix_time);
    assert(s.ok());
    return static_cast<uint64_t>(unix_time);
  }

  // Private since only Unref() should be used to delete it.
  ~MemFile() { assert(refs_ == 0); }

  // No copying allowed.
  MemFile(const MemFile&);
  void operator=(const MemFile&);

  Env* env_;
  const std::string fn_;
  mutable port::Mutex mutex_;
  int refs_;
  bool is_lock_file_;
  bool locked_;

  // Data written into this file; all bytes before fsynced_bytes_ are
  // persistent.
  std::string data_;
  std::atomic<uint64_t> size_;
  std::atomic<uint64_t> modified_time_;

  Random rnd_;
  std::atomic<uint64_t> fsynced_bytes_;
};
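
// The adapters below expose a shared MemFile through RocksDB's SequentialFile,
// RandomAccessFile, RandomRWFile and WritableFile interfaces; each holds a
// reference for its lifetime and forwards all I/O. TestMemLogger builds on a
// WritableFile to provide an in-memory Logger.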
namespace {

class MockSequentialFile : public SequentialFile {
 public:
  explicit MockSequentialFile(MemFile* file) : file_(file), pos_(0) {
    file_->Ref();
  }

  ~MockSequentialFile() override { file_->Unref(); }

  Status Read(size_t n, Slice* result, char* scratch) override {
    Status s = file_->Read(pos_, n, result, scratch);
    if (s.ok()) {
      pos_ += result->size();
    }
    return s;
  }

  Status Skip(uint64_t n) override {
    if (pos_ > file_->Size()) {
      return Status::IOError("pos_ > file_->Size()");
    }
    const uint64_t available = file_->Size() - pos_;
    if (n > available) {
      n = available;
    }
    pos_ += static_cast<size_t>(n);
    return Status::OK();
  }

 private:
  MemFile* file_;
  size_t pos_;
};

class MockRandomAccessFile : public RandomAccessFile {
 public:
  explicit MockRandomAccessFile(MemFile* file) : file_(file) { file_->Ref(); }

  ~MockRandomAccessFile() override { file_->Unref(); }

  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    return file_->Read(offset, n, result, scratch);
  }

 private:
  MemFile* file_;
};

class MockRandomRWFile : public RandomRWFile {
 public:
  explicit MockRandomRWFile(MemFile* file) : file_(file) { file_->Ref(); }

  ~MockRandomRWFile() override { file_->Unref(); }

  Status Write(uint64_t offset, const Slice& data) override {
    return file_->Write(offset, data);
  }

  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    return file_->Read(offset, n, result, scratch);
  }

  Status Close() override { return file_->Fsync(); }

  Status Flush() override { return Status::OK(); }

  Status Sync() override { return file_->Fsync(); }

 private:
  MemFile* file_;
};

class MockWritableFile : public WritableFile {
 public:
  MockWritableFile(MemFile* file, RateLimiter* rate_limiter)
      : file_(file), rate_limiter_(rate_limiter) {
    file_->Ref();
  }

  ~MockWritableFile() override { file_->Unref(); }

  Status Append(const Slice& data) override {
    size_t bytes_written = 0;
    while (bytes_written < data.size()) {
      auto bytes = RequestToken(data.size() - bytes_written);
      Status s = file_->Append(Slice(data.data() + bytes_written, bytes));
      if (!s.ok()) {
        return s;
      }
      bytes_written += bytes;
    }
    return Status::OK();
  }

  Status Truncate(uint64_t size) override {
    file_->Truncate(static_cast<size_t>(size));
    return Status::OK();
  }

  Status Close() override { return file_->Fsync(); }

  Status Flush() override { return Status::OK(); }

  Status Sync() override { return file_->Fsync(); }

  uint64_t GetFileSize() override { return file_->Size(); }

 private:
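  // If a rate limiter was supplied, cap a single request at the limiter's
  // burst size and charge the tokens before the bytes are appended.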
  inline size_t RequestToken(size_t bytes) {
    if (rate_limiter_ && io_priority_ < Env::IO_TOTAL) {
      bytes = std::min(
          bytes, static_cast<size_t>(rate_limiter_->GetSingleBurstBytes()));
      rate_limiter_->Request(bytes, io_priority_);
    }
    return bytes;
  }

  MemFile* file_;
  RateLimiter* rate_limiter_;
};

class MockEnvDirectory : public Directory {
 public:
  Status Fsync() override { return Status::OK(); }
};

class MockEnvFileLock : public FileLock {
 public:
  explicit MockEnvFileLock(const std::string& fname) : fname_(fname) {}

  std::string FileName() const { return fname_; }

 private:
  const std::string fname_;
};
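
// TestMemLogger is an in-memory Logger: it timestamps each message, appends it
// to the supplied WritableFile, retries once with a larger heap buffer when
// the stack buffer is too small, and tracks when a flush becomes due.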
class TestMemLogger : public Logger {
 private:
  std::unique_ptr<WritableFile> file_;
  std::atomic_size_t log_size_;
  static const uint64_t flush_every_seconds_ = 5;
  std::atomic_uint_fast64_t last_flush_micros_;
  Env* env_;
  std::atomic<bool> flush_pending_;

 public:
  TestMemLogger(std::unique_ptr<WritableFile> f, Env* env,
                const InfoLogLevel log_level = InfoLogLevel::ERROR_LEVEL)
      : Logger(log_level),
        file_(std::move(f)),
        log_size_(0),
        last_flush_micros_(0),
        env_(env),
        flush_pending_(false) {}

  ~TestMemLogger() override {}

  void Flush() override {
    if (flush_pending_) {
      flush_pending_ = false;
    }
    last_flush_micros_ = env_->NowMicros();
  }

  using Logger::Logv;
  void Logv(const char* format, va_list ap) override {
    // We try twice: the first time with a fixed-size stack allocated buffer,
    // and the second time with a much larger dynamically allocated buffer.
    char buffer[500];
    for (int iter = 0; iter < 2; iter++) {
      char* base;
      int bufsize;
      if (iter == 0) {
        bufsize = sizeof(buffer);
        base = buffer;
      } else {
        bufsize = 30000;
        base = new char[bufsize];
      }
      char* p = base;
      char* limit = base + bufsize;

      struct timeval now_tv;
      gettimeofday(&now_tv, nullptr);
      const time_t seconds = now_tv.tv_sec;
      struct tm t;
      memset(&t, 0, sizeof(t));
      struct tm* ret __attribute__((__unused__));
      ret = localtime_r(&seconds, &t);
      assert(ret);
      p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d ",
                    t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour,
                    t.tm_min, t.tm_sec, static_cast<int>(now_tv.tv_usec));

      // Print the message
      if (p < limit) {
        va_list backup_ap;
        va_copy(backup_ap, ap);
        p += vsnprintf(p, limit - p, format, backup_ap);
        va_end(backup_ap);
      }

      // Truncate to available space if necessary
      if (p >= limit) {
        if (iter == 0) {
          continue;  // Try again with larger buffer
        } else {
          p = limit - 1;
        }
      }

      // Add newline if necessary
      if (p == base || p[-1] != '\n') {
        *p++ = '\n';
      }

      assert(p <= limit);
      const size_t write_size = p - base;

      file_->Append(Slice(base, write_size));
      flush_pending_ = true;
      log_size_ += write_size;
      uint64_t now_micros =
          static_cast<uint64_t>(now_tv.tv_sec) * 1000000 + now_tv.tv_usec;
      if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) {
        flush_pending_ = false;
        last_flush_micros_ = now_micros;
      }
      if (base != buffer) {
        delete[] base;
      }
      break;
    }
  }

  size_t GetLogFileSize() const override { return log_size_; }
};

}  // Anonymous namespace
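
// MockEnv keeps its entire "file system" in file_map_, a map from normalized
// path to reference-counted MemFile, guarded by mutex_.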
MockEnv::MockEnv(Env* base_env) : EnvWrapper(base_env), fake_sleep_micros_(0) {}

MockEnv::~MockEnv() {
  for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i) {
    i->second->Unref();
  }
}

// Partial implementation of the Env interface.
Status MockEnv::NewSequentialFile(const std::string& fname,
                                  std::unique_ptr<SequentialFile>* result,
                                  const EnvOptions& /*soptions*/) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  if (file_map_.find(fn) == file_map_.end()) {
    *result = nullptr;
    return Status::IOError(fn, "File not found");
  }
  auto* f = file_map_[fn];
  if (f->is_lock_file()) {
    return Status::InvalidArgument(fn, "Cannot open a lock file.");
  }
  result->reset(new MockSequentialFile(f));
  return Status::OK();
}

Status MockEnv::NewRandomAccessFile(const std::string& fname,
                                    std::unique_ptr<RandomAccessFile>* result,
                                    const EnvOptions& /*soptions*/) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  if (file_map_.find(fn) == file_map_.end()) {
    *result = nullptr;
    return Status::IOError(fn, "File not found");
  }
  auto* f = file_map_[fn];
  if (f->is_lock_file()) {
    return Status::InvalidArgument(fn, "Cannot open a lock file.");
  }
  result->reset(new MockRandomAccessFile(f));
  return Status::OK();
}

Status MockEnv::NewRandomRWFile(const std::string& fname,
                                std::unique_ptr<RandomRWFile>* result,
                                const EnvOptions& /*soptions*/) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  if (file_map_.find(fn) == file_map_.end()) {
    *result = nullptr;
    return Status::IOError(fn, "File not found");
  }
  auto* f = file_map_[fn];
  if (f->is_lock_file()) {
    return Status::InvalidArgument(fn, "Cannot open a lock file.");
  }
  result->reset(new MockRandomRWFile(f));
  return Status::OK();
}

Status MockEnv::ReuseWritableFile(const std::string& fname,
                                  const std::string& old_fname,
                                  std::unique_ptr<WritableFile>* result,
                                  const EnvOptions& options) {
  auto s = RenameFile(old_fname, fname);
  if (!s.ok()) {
    return s;
  }
  result->reset();
  return NewWritableFile(fname, result, options);
}

Status MockEnv::NewWritableFile(const std::string& fname,
                                std::unique_ptr<WritableFile>* result,
                                const EnvOptions& env_options) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  if (file_map_.find(fn) != file_map_.end()) {
    DeleteFileInternal(fn);
  }
  MemFile* file = new MemFile(this, fn, false);
  file->Ref();
  file_map_[fn] = file;

  result->reset(new MockWritableFile(file, env_options.rate_limiter));
  return Status::OK();
}

Status MockEnv::NewDirectory(const std::string& /*name*/,
                             std::unique_ptr<Directory>* result) {
  result->reset(new MockEnvDirectory());
  return Status::OK();
}

Status MockEnv::FileExists(const std::string& fname) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  if (file_map_.find(fn) != file_map_.end()) {
    // File exists
    return Status::OK();
  }
  // Now also check if fn exists as a dir
  for (const auto& iter : file_map_) {
    const std::string& filename = iter.first;
    if (filename.size() >= fn.size() + 1 && filename[fn.size()] == '/' &&
        Slice(filename).starts_with(Slice(fn))) {
      return Status::OK();
    }
  }
  return Status::NotFound();
}
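
// GetChildren lists the immediate children of dir: it scans every path in
// file_map_ that begins with dir + "/" and keeps only the first path
// component after the prefix, dropping adjacent duplicates at the end.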
Status MockEnv::GetChildren(const std::string& dir,
                            std::vector<std::string>* result) {
  auto d = NormalizePath(dir);
  bool found_dir = false;
  {
    MutexLock lock(&mutex_);
    result->clear();
    for (const auto& iter : file_map_) {
      const std::string& filename = iter.first;

      if (filename == d) {
        found_dir = true;
      } else if (filename.size() >= d.size() + 1 && filename[d.size()] == '/' &&
                 Slice(filename).starts_with(Slice(d))) {
        found_dir = true;
        size_t next_slash = filename.find('/', d.size() + 1);
        if (next_slash != std::string::npos) {
          result->push_back(
              filename.substr(d.size() + 1, next_slash - d.size() - 1));
        } else {
          result->push_back(filename.substr(d.size() + 1));
        }
      }
    }
  }
  result->erase(std::unique(result->begin(), result->end()), result->end());
  return found_dir ? Status::OK() : Status::NotFound();
}

void MockEnv::DeleteFileInternal(const std::string& fname) {
  assert(fname == NormalizePath(fname));
  const auto& pair = file_map_.find(fname);
  if (pair != file_map_.end()) {
    pair->second->Unref();
    file_map_.erase(fname);
  }
}

Status MockEnv::DeleteFile(const std::string& fname) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  if (file_map_.find(fn) == file_map_.end()) {
    return Status::IOError(fn, "File not found");
  }

  DeleteFileInternal(fn);
  return Status::OK();
}

Status MockEnv::Truncate(const std::string& fname, size_t size) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  auto iter = file_map_.find(fn);
  if (iter == file_map_.end()) {
    return Status::IOError(fn, "File not found");
  }
  iter->second->Truncate(size);
  return Status::OK();
}

Status MockEnv::CreateDir(const std::string& dirname) {
  auto dn = NormalizePath(dirname);
  if (file_map_.find(dn) == file_map_.end()) {
    MemFile* file = new MemFile(this, dn, false);
    file->Ref();
    file_map_[dn] = file;
  } else {
    return Status::IOError();
  }
  return Status::OK();
}

Status MockEnv::CreateDirIfMissing(const std::string& dirname) {
  CreateDir(dirname);
  return Status::OK();
}

Status MockEnv::DeleteDir(const std::string& dirname) {
  return DeleteFile(dirname);
}

Status MockEnv::GetFileSize(const std::string& fname, uint64_t* file_size) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  auto iter = file_map_.find(fn);
  if (iter == file_map_.end()) {
    return Status::IOError(fn, "File not found");
  }

  *file_size = iter->second->Size();
  return Status::OK();
}

Status MockEnv::GetFileModificationTime(const std::string& fname,
                                        uint64_t* time) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  auto iter = file_map_.find(fn);
  if (iter == file_map_.end()) {
    return Status::IOError(fn, "File not found");
  }
  *time = iter->second->ModifiedTime();
  return Status::OK();
}

Status MockEnv::RenameFile(const std::string& src, const std::string& dest) {
  auto s = NormalizePath(src);
  auto t = NormalizePath(dest);
  MutexLock lock(&mutex_);
  if (file_map_.find(s) == file_map_.end()) {
    return Status::IOError(s, "File not found");
  }

  DeleteFileInternal(t);
  file_map_[t] = file_map_[s];
  file_map_.erase(s);
  return Status::OK();
}

Status MockEnv::LinkFile(const std::string& src, const std::string& dest) {
  auto s = NormalizePath(src);
  auto t = NormalizePath(dest);
  MutexLock lock(&mutex_);
  if (file_map_.find(s) == file_map_.end()) {
    return Status::IOError(s, "File not found");
  }

  DeleteFileInternal(t);
  file_map_[t] = file_map_[s];
  file_map_[t]->Ref();  // Otherwise it might get deleted when no one uses s
  return Status::OK();
}

Status MockEnv::NewLogger(const std::string& fname,
                          std::shared_ptr<Logger>* result) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  auto iter = file_map_.find(fn);
  MemFile* file = nullptr;
  if (iter == file_map_.end()) {
    file = new MemFile(this, fn, false);
    file->Ref();
    file_map_[fn] = file;
  } else {
    file = iter->second;
  }
  std::unique_ptr<WritableFile> f(new MockWritableFile(file, nullptr));
  result->reset(new TestMemLogger(std::move(f), this));
  return Status::OK();
}
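
// Locks are modeled as MemFile entries created with is_lock_file_ set.
// LockFile creates such an entry (or re-locks an existing one) and fails if
// the lock is already held; UnlockFile releases it and deletes the handle.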
Status MockEnv::LockFile(const std::string& fname, FileLock** flock) {
  auto fn = NormalizePath(fname);
  {
    MutexLock lock(&mutex_);
    if (file_map_.find(fn) != file_map_.end()) {
      if (!file_map_[fn]->is_lock_file()) {
        return Status::InvalidArgument(fname, "Not a lock file.");
      }
      if (!file_map_[fn]->Lock()) {
        return Status::IOError(fn, "Lock is already held.");
      }
    } else {
      auto* file = new MemFile(this, fn, true);
      file->Ref();
      file->Lock();
      file_map_[fn] = file;
    }
  }
  *flock = new MockEnvFileLock(fn);
  return Status::OK();
}

Status MockEnv::UnlockFile(FileLock* flock) {
  std::string fn =
      static_cast_with_check<MockEnvFileLock, FileLock>(flock)->FileName();
  {
    MutexLock lock(&mutex_);
    if (file_map_.find(fn) != file_map_.end()) {
      if (!file_map_[fn]->is_lock_file()) {
        return Status::InvalidArgument(fn, "Not a lock file.");
      }
      file_map_[fn]->Unlock();
    }
  }
  delete flock;
  return Status::OK();
}

Status MockEnv::GetTestDirectory(std::string* path) {
  *path = "/test";
  return Status::OK();
}

Status MockEnv::GetCurrentTime(int64_t* unix_time) {
  auto s = EnvWrapper::GetCurrentTime(unix_time);
  if (s.ok()) {
    *unix_time += fake_sleep_micros_.load() / (1000 * 1000);
  }
  return s;
}

uint64_t MockEnv::NowMicros() {
  return EnvWrapper::NowMicros() + fake_sleep_micros_.load();
}

uint64_t MockEnv::NowNanos() {
  return EnvWrapper::NowNanos() + fake_sleep_micros_.load() * 1000;
}

Status MockEnv::CorruptBuffer(const std::string& fname) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  auto iter = file_map_.find(fn);
  if (iter == file_map_.end()) {
    return Status::IOError(fn, "File not found");
  }
  iter->second->CorruptBuffer();
  return Status::OK();
}
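
// Collapse repeated '/' characters so that different spellings of the same
// path map to a single key in file_map_.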
std::string MockEnv::NormalizePath(const std::string path) {
  std::string dst;
  for (auto c : path) {
    if (!dst.empty() && c == '/' && dst.back() == '/') {
      continue;
    }
    dst.push_back(c);
  }
  return dst;
}
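
// Advances the fake clock without actually sleeping; NowMicros(), NowNanos()
// and GetCurrentTime() add the accumulated offset to the real clock.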
void MockEnv::FakeSleepForMicroseconds(int64_t micros) {
  fake_sleep_micros_.fetch_add(micros);
}

#ifndef ROCKSDB_LITE
// This is to maintain the behavior before switching from InMemoryEnv to MockEnv
Env* NewMemEnv(Env* base_env) { return new MockEnv(base_env); }
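
// A minimal usage sketch (assumption: the public rocksdb/db.h API; this is
// illustrative only and not exercised by this file):
//
//   std::unique_ptr<rocksdb::Env> mem_env(rocksdb::NewMemEnv(rocksdb::Env::Default()));
//   rocksdb::Options options;
//   options.create_if_missing = true;
//   options.env = mem_env.get();
//   rocksdb::DB* db = nullptr;
//   rocksdb::Status s = rocksdb::DB::Open(options, "/dir/db", &db);
//   // ... read and write as usual; all files live only in memory ...
//   delete db;  // close the DB before destroying mem_env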

#else  // ROCKSDB_LITE

Env* NewMemEnv(Env* /*base_env*/) { return nullptr; }

#endif  // !ROCKSDB_LITE

}  // namespace rocksdb