// Copyright (c) 2015, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright 2014 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// This test uses a custom Env to keep track of the state of a filesystem as of
// the last "sync". It then checks for data loss errors by purposely dropping
// file data (or entire files) not protected by a "sync".

#include <map>
#include <set>
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/log_format.h"
#include "db/version_set.h"
#include "rocksdb/cache.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/table.h"
#include "rocksdb/write_batch.h"
#include "util/logging.h"
#include "util/mock_env.h"
#include "util/mutexlock.h"
#include "util/sync_point.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace rocksdb {

static const int kValueSize = 1000;
static const int kMaxNumValues = 2000;
static const size_t kNumIterations = 3;

class TestWritableFile;
class FaultInjectionTestEnv;

namespace {

// Assume `filename` is a file name, not a directory name like "/foo/bar/".
static std::string GetDirName(const std::string filename) {
  size_t found = filename.find_last_of("/\\");
  if (found == std::string::npos) {
    return "";
  } else {
    return filename.substr(0, found);
  }
}

// Trim the trailing "/" at the end of `str`.
static std::string TrimDirname(const std::string& str) {
  size_t found = str.find_last_not_of("/");
  if (found == std::string::npos) {
    return str;
  }
  return str.substr(0, found + 1);
}

// Return pair <parent directory name, file name> of a full path.
static std::pair<std::string, std::string> GetDirAndName(
    const std::string& name) {
  std::string dirname = GetDirName(name);
  std::string fname = name.substr(dirname.size() + 1);
  return std::make_pair(dirname, fname);
}

// A basic file truncation function suitable for this test.
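// It reads the first `length` bytes of `filename`, writes them to a temporary
// file in the same directory, and renames the temporary file over the
// original.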
Status Truncate(Env* env, const std::string& filename, uint64_t length) {
  unique_ptr<SequentialFile> orig_file;
  const EnvOptions options;
  Status s = env->NewSequentialFile(filename, &orig_file, options);
  if (!s.ok()) {
    fprintf(stderr, "Cannot truncate file %s: %s\n", filename.c_str(),
            s.ToString().c_str());
    return s;
  }

  std::unique_ptr<char[]> scratch(new char[length]);
  rocksdb::Slice result;
  s = orig_file->Read(length, &result, scratch.get());
#ifdef OS_WIN
  orig_file.reset();
#endif
  if (s.ok()) {
    std::string tmp_name = GetDirName(filename) + "/truncate.tmp";
    unique_ptr<WritableFile> tmp_file;
    s = env->NewWritableFile(tmp_name, &tmp_file, options);
    if (s.ok()) {
      s = tmp_file->Append(result);
      if (s.ok()) {
        s = env->RenameFile(tmp_name, filename);
      } else {
        fprintf(stderr, "Cannot rename file %s to %s: %s\n", tmp_name.c_str(),
                filename.c_str(), s.ToString().c_str());
        env->DeleteFile(tmp_name);
      }
    }
  }
  if (!s.ok()) {
    fprintf(stderr, "Cannot truncate file %s: %s\n", filename.c_str(),
            s.ToString().c_str());
  }

  return s;
}
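
// FileState records a file's current write position together with the
// positions at the last flush and the last sync; the sync position determines
// how much unsynced data may be dropped when a crash is simulated.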
struct FileState {
  std::string filename_;
  ssize_t pos_;
  ssize_t pos_at_last_sync_;
  ssize_t pos_at_last_flush_;

  explicit FileState(const std::string& filename)
      : filename_(filename),
        pos_(-1),
        pos_at_last_sync_(-1),
        pos_at_last_flush_(-1) { }

  FileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {}

  bool IsFullySynced() const { return pos_ <= 0 || pos_ == pos_at_last_sync_; }

  Status DropUnsyncedData(Env* env) const;

  Status DropRandomUnsyncedData(Env* env, Random* rand) const;
};

}  // anonymous namespace

// A wrapper around WritableFile which records the file position whenever the
// file is written to, flushed, or sync'ed, so that FaultInjectionTestEnv can
// later drop any data that was not protected by a sync.
class TestWritableFile : public WritableFile {
 public:
  explicit TestWritableFile(const std::string& fname,
                            unique_ptr<WritableFile>&& f,
                            FaultInjectionTestEnv* env);
  virtual ~TestWritableFile();
  virtual Status Append(const Slice& data) override;
  virtual Status Close() override;
  virtual Status Flush() override;
  virtual Status Sync() override;

 private:
  FileState state_;
  unique_ptr<WritableFile> target_;
  bool writable_file_opened_;
  FaultInjectionTestEnv* env_;
};
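
// A Directory wrapper that reports Fsync() calls back to
// FaultInjectionTestEnv so that files created since the last directory sync
// can be tracked.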
class TestDirectory : public Directory {
 public:
  explicit TestDirectory(FaultInjectionTestEnv* env, std::string dirname,
                         Directory* dir)
      : env_(env), dirname_(dirname), dir_(dir) {}
  ~TestDirectory() {}

  virtual Status Fsync() override;

 private:
  FaultInjectionTestEnv* env_;
  std::string dirname_;
  unique_ptr<Directory> dir_;
};
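
// An Env wrapper that tracks, for every file it creates, how much data has
// been synced, which files are still open, and which files were created since
// the last directory sync. The filesystem can be marked inactive to simulate
// a crash, after which unsynced data or entire unsynced files can be dropped.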
class FaultInjectionTestEnv : public EnvWrapper {
 public:
  explicit FaultInjectionTestEnv(Env* base)
      : EnvWrapper(base),
        filesystem_active_(true) {}
  virtual ~FaultInjectionTestEnv() { }

  Status NewDirectory(const std::string& name,
                      unique_ptr<Directory>* result) override {
    unique_ptr<Directory> r;
    Status s = target()->NewDirectory(name, &r);
    EXPECT_OK(s);
    if (!s.ok()) {
      return s;
    }
    result->reset(new TestDirectory(this, TrimDirname(name), r.release()));
    return Status::OK();
  }
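
  // Wrap every newly created writable file in a TestWritableFile and remember
  // it as both an open file and a file created since the last sync of its
  // directory.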
  Status NewWritableFile(const std::string& fname,
                         unique_ptr<WritableFile>* result,
                         const EnvOptions& soptions) override {
    if (!IsFilesystemActive()) {
      return Status::Corruption("Not Active");
    }
    // Do not allow overwriting files
    Status s = target()->FileExists(fname);
    if (s.ok()) {
      return Status::Corruption("File already exists.");
    } else if (!s.IsNotFound()) {
      assert(s.IsIOError());
      return s;
    }
    s = target()->NewWritableFile(fname, result, soptions);
    if (s.ok()) {
      result->reset(new TestWritableFile(fname, std::move(*result), this));
      // If the file is opened again then it will be truncated - so forget our
      // saved state.
      UntrackFile(fname);
      MutexLock l(&mutex_);
      open_files_.insert(fname);
      auto dir_and_name = GetDirAndName(fname);
      auto& list = dir_to_new_files_since_last_sync_[dir_and_name.first];
      list.insert(dir_and_name.second);
    }
    return s;
  }

  virtual Status DeleteFile(const std::string& f) override {
    if (!IsFilesystemActive()) {
      return Status::Corruption("Not Active");
    }
    Status s = EnvWrapper::DeleteFile(f);
    if (!s.ok()) {
      fprintf(stderr, "Cannot delete file %s: %s\n", f.c_str(),
              s.ToString().c_str());
    }
    EXPECT_OK(s);
    if (s.ok()) {
      UntrackFile(f);
    }
    return s;
  }

  virtual Status RenameFile(const std::string& s,
                            const std::string& t) override {
    if (!IsFilesystemActive()) {
      return Status::Corruption("Not Active");
    }
    Status ret = EnvWrapper::RenameFile(s, t);

    if (ret.ok()) {
      MutexLock l(&mutex_);
      if (db_file_state_.find(s) != db_file_state_.end()) {
        db_file_state_[t] = db_file_state_[s];
        db_file_state_.erase(s);
      }

      auto sdn = GetDirAndName(s);
      auto tdn = GetDirAndName(t);
      if (dir_to_new_files_since_last_sync_[sdn.first].erase(sdn.second) != 0) {
        auto& tlist = dir_to_new_files_since_last_sync_[tdn.first];
        assert(tlist.find(tdn.second) == tlist.end());
        tlist.insert(tdn.second);
      }
    }

    return ret;
  }
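
  // Record the final FileState of a file when its TestWritableFile is closed,
  // so that its unsynced tail can later be dropped.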
  void WritableFileClosed(const FileState& state) {
    MutexLock l(&mutex_);
    if (open_files_.find(state.filename_) != open_files_.end()) {
      db_file_state_[state.filename_] = state;
      open_files_.erase(state.filename_);
    }
  }

  // For every file that is not fully synced, make a call to `func` with
  // FileState of the file as the parameter.
  Status DropFileData(std::function<Status(Env*, FileState)> func) {
    Status s;
    MutexLock l(&mutex_);
    for (std::map<std::string, FileState>::const_iterator it =
             db_file_state_.begin();
         s.ok() && it != db_file_state_.end(); ++it) {
      const FileState& state = it->second;
      if (!state.IsFullySynced()) {
        s = func(target(), state);
      }
    }
    return s;
  }

  Status DropUnsyncedFileData() {
    return DropFileData([&](Env* env, const FileState& state) {
      return state.DropUnsyncedData(env);
    });
  }

  Status DropRandomUnsyncedFileData(Random* rnd) {
    return DropFileData([&](Env* env, const FileState& state) {
      return state.DropRandomUnsyncedData(env, rnd);
    });
  }
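
  // Delete every file created since the last sync of its parent directory,
  // simulating files whose directory entries were never persisted.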
  Status DeleteFilesCreatedAfterLastDirSync() {
    // Because DeleteFile accesses this container, make a copy to avoid
    // deadlock
    std::map<std::string, std::set<std::string>> map_copy;
    {
      MutexLock l(&mutex_);
      map_copy.insert(dir_to_new_files_since_last_sync_.begin(),
                      dir_to_new_files_since_last_sync_.end());
    }

    for (auto& pair : map_copy) {
      for (std::string name : pair.second) {
        Status s = DeleteFile(pair.first + "/" + name);
        if (!s.ok()) {
          return s;
        }
      }
    }
    return Status::OK();
  }

  void ResetState() {
    MutexLock l(&mutex_);
    db_file_state_.clear();
    dir_to_new_files_since_last_sync_.clear();
    SetFilesystemActiveNoLock(true);
  }

  void UntrackFile(const std::string& f) {
    MutexLock l(&mutex_);
    auto dir_and_name = GetDirAndName(f);
    dir_to_new_files_since_last_sync_[dir_and_name.first].erase(
        dir_and_name.second);
    db_file_state_.erase(f);
    open_files_.erase(f);
  }

  void SyncDir(const std::string& dirname) {
    MutexLock l(&mutex_);
    dir_to_new_files_since_last_sync_.erase(dirname);
  }

  // Setting the filesystem to inactive is the test's equivalent of simulating
  // a system reset. Once inactive, our saved filesystem state stops being
  // updated, and the files can later be rolled back to the state recorded at
  // the time of the simulated reset.
  bool IsFilesystemActive() {
    MutexLock l(&mutex_);
    return filesystem_active_;
  }
  void SetFilesystemActiveNoLock(bool active) { filesystem_active_ = active; }
  void SetFilesystemActive(bool active) {
    MutexLock l(&mutex_);
    SetFilesystemActiveNoLock(active);
  }
  void AssertNoOpenFile() { ASSERT_TRUE(open_files_.empty()); }

 private:
  port::Mutex mutex_;
  std::map<std::string, FileState> db_file_state_;
  std::set<std::string> open_files_;
  std::unordered_map<std::string, std::set<std::string>>
      dir_to_new_files_since_last_sync_;
  bool filesystem_active_;  // Record flushes, syncs, writes
};

Status FileState::DropUnsyncedData(Env* env) const {
  ssize_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
  return Truncate(env, filename_, sync_pos);
}

Status FileState::DropRandomUnsyncedData(Env* env, Random* rand) const {
  ssize_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
  assert(pos_ >= sync_pos);
  int range = static_cast<int>(pos_ - sync_pos);
  uint64_t truncated_size =
      static_cast<uint64_t>(sync_pos) + rand->Uniform(range);
  return Truncate(env, filename_, truncated_size);
}

Status TestDirectory::Fsync() {
  env_->SyncDir(dirname_);
  return dir_->Fsync();
}

TestWritableFile::TestWritableFile(const std::string& fname,
                                   unique_ptr<WritableFile>&& f,
                                   FaultInjectionTestEnv* env)
    : state_(fname),
      target_(std::move(f)),
      writable_file_opened_(true),
      env_(env) {
  assert(target_ != nullptr);
  state_.pos_ = 0;
}

TestWritableFile::~TestWritableFile() {
  if (writable_file_opened_) {
    Close();
  }
}

Status TestWritableFile::Append(const Slice& data) {
  if (!env_->IsFilesystemActive()) {
    return Status::Corruption("Not Active");
  }
  Status s = target_->Append(data);
  if (s.ok()) {
    state_.pos_ += data.size();
  }
  return s;
}

Status TestWritableFile::Close() {
  writable_file_opened_ = false;
  Status s = target_->Close();
  if (s.ok()) {
    env_->WritableFileClosed(state_);
  }
  return s;
}

Status TestWritableFile::Flush() {
  Status s = target_->Flush();
  if (s.ok() && env_->IsFilesystemActive()) {
    state_.pos_at_last_flush_ = state_.pos_;
  }
  return s;
}

Status TestWritableFile::Sync() {
  if (!env_->IsFilesystemActive()) {
    return Status::OK();
  }
  // No need to actually sync.
  state_.pos_at_last_sync_ = state_.pos_;
  return Status::OK();
}
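
// The test fixture is parameterized on key ordering: when the parameter is
// true, Build() writes keys sequentially; otherwise the key order is
// scrambled. FaultTest additionally cycles through the OptionConfig values
// via ChangeOptions().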
class FaultInjectionTest : public testing::Test,
                           public testing::WithParamInterface<bool> {
 protected:
  enum OptionConfig {
    kDefault,
    kDifferentDataDir,
    kWalDir,
    kSyncWal,
    kWalDirSyncWal,
    kMultiLevels,
    kEnd,
  };
  int option_config_;
  // When we need to make sure data is persistent, sync the WAL
  bool sync_use_wal_;
  // When we need to make sure data is persistent, call DB::CompactRange()
  bool sync_use_compact_;

  bool sequential_order_;

 protected:
 public:
  enum ExpectedVerifResult { kValExpectFound, kValExpectNoError };
  enum ResetMethod {
    kResetDropUnsyncedData,
    kResetDropRandomUnsyncedData,
    kResetDeleteUnsyncedFiles,
    kResetDropAndDeleteUnsynced
  };

  std::unique_ptr<Env> base_env_;
  FaultInjectionTestEnv* env_;
  std::string dbname_;
  shared_ptr<Cache> tiny_cache_;
  Options options_;
  DB* db_;

  FaultInjectionTest()
      : option_config_(kDefault),
        sync_use_wal_(false),
        sync_use_compact_(true),
        base_env_(nullptr),
        env_(NULL),
        db_(NULL) {
  }

  ~FaultInjectionTest() {
    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
    rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
  }

  bool ChangeOptions() {
    option_config_++;
    if (option_config_ >= kEnd) {
      return false;
    } else {
      if (option_config_ == kMultiLevels) {
        base_env_.reset(new MockEnv(Env::Default()));
      }
      return true;
    }
  }

  // Return the current option configuration.
  Options CurrentOptions() {
    sync_use_wal_ = false;
    sync_use_compact_ = true;
    Options options;
    switch (option_config_) {
      case kWalDir:
        options.wal_dir = test::TmpDir(env_) + "/fault_test_wal";
        break;
      case kDifferentDataDir:
        options.db_paths.emplace_back(test::TmpDir(env_) + "/fault_test_data",
                                      1000000U);
        break;
      case kSyncWal:
        sync_use_wal_ = true;
        sync_use_compact_ = false;
        break;
      case kWalDirSyncWal:
        options.wal_dir = test::TmpDir(env_) + "/fault_test_wal";
        sync_use_wal_ = true;
        sync_use_compact_ = false;
        break;
      case kMultiLevels:
        options.write_buffer_size = 64 * 1024;
        options.target_file_size_base = 64 * 1024;
        options.level0_file_num_compaction_trigger = 2;
        options.level0_slowdown_writes_trigger = 2;
        options.level0_stop_writes_trigger = 4;
        options.max_bytes_for_level_base = 128 * 1024;
        options.max_write_buffer_number = 2;
        options.max_background_compactions = 8;
        options.max_background_flushes = 8;
        sync_use_wal_ = true;
        sync_use_compact_ = false;
        break;
      default:
        break;
    }
    return options;
  }

  Status NewDB() {
    assert(db_ == NULL);
    assert(tiny_cache_ == nullptr);
    assert(env_ == NULL);

    env_ =
        new FaultInjectionTestEnv(base_env_ ? base_env_.get() : Env::Default());

    options_ = CurrentOptions();
    options_.env = env_;
    options_.paranoid_checks = true;

    BlockBasedTableOptions table_options;
    tiny_cache_ = NewLRUCache(100);
    table_options.block_cache = tiny_cache_;
    options_.table_factory.reset(NewBlockBasedTableFactory(table_options));

    dbname_ = test::TmpDir() + "/fault_test";

    EXPECT_OK(DestroyDB(dbname_, options_));

    options_.create_if_missing = true;
    Status s = OpenDB();
    options_.create_if_missing = false;
    return s;
  }

  void SetUp() override {
    sequential_order_ = GetParam();
    ASSERT_OK(NewDB());
  }

  void TearDown() override {
    CloseDB();

    Status s = DestroyDB(dbname_, options_);

    delete env_;
    env_ = NULL;

    tiny_cache_.reset();

    ASSERT_OK(s);
  }

  void Build(const WriteOptions& write_options, int start_idx, int num_vals) {
    std::string key_space, value_space;
    WriteBatch batch;
    for (int i = start_idx; i < start_idx + num_vals; i++) {
      Slice key = Key(i, &key_space);
      batch.Clear();
      batch.Put(key, Value(i, &value_space));
      ASSERT_OK(db_->Write(write_options, &batch));
    }
  }

  Status ReadValue(int i, std::string* val) const {
    std::string key_space, value_space;
    Slice key = Key(i, &key_space);
    Value(i, &value_space);
    ReadOptions options;
    return db_->Get(options, key, val);
  }
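
  // Verify keys [start_idx, start_idx + num_vals). With kValExpectFound every
  // key must be present with its expected value; with kValExpectNoError a key
  // may be missing, but any other read error fails the verification.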
  Status Verify(int start_idx, int num_vals,
                ExpectedVerifResult expected) const {
    std::string val;
    std::string value_space;
    Status s;
    for (int i = start_idx; i < start_idx + num_vals && s.ok(); i++) {
      Value(i, &value_space);
      s = ReadValue(i, &val);
      if (s.ok()) {
        EXPECT_EQ(value_space, val);
      }
      if (expected == kValExpectFound) {
        if (!s.ok()) {
          fprintf(stderr, "Error when read %dth record (expect found): %s\n", i,
                  s.ToString().c_str());
          return s;
        }
      } else if (!s.ok() && !s.IsNotFound()) {
        fprintf(stderr, "Error when read %dth record: %s\n", i,
                s.ToString().c_str());
        return s;
      }
    }
    return Status::OK();
  }

  // Return the ith key
  Slice Key(int i, std::string* storage) const {
    int num = i;
    if (!sequential_order_) {
      // Scramble the key order with a simple multiplicative hash so that keys
      // are not written in sequential order.
      const int m = 0x5bd1e995;
      num *= m;
      num ^= num << 24;
    }
    char buf[100];
    snprintf(buf, sizeof(buf), "%016d", num);
    storage->assign(buf, strlen(buf));
    return Slice(*storage);
  }

  // Return the value to associate with the specified key
  Slice Value(int k, std::string* storage) const {
    Random r(k);
    return test::RandomString(&r, kValueSize, storage);
  }

  Status OpenDB() {
    delete db_;
    db_ = NULL;
    env_->ResetState();
    return DB::Open(options_, dbname_, &db_);
  }

  void CloseDB() {
    delete db_;
    db_ = NULL;
  }

  void DeleteAllData() {
    Iterator* iter = db_->NewIterator(ReadOptions());
    WriteOptions options;
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
    }

    delete iter;

    FlushOptions flush_options;
    flush_options.wait = true;
    db_->Flush(flush_options);
  }

  // rnd cannot be null for kResetDropRandomUnsyncedData
  void ResetDBState(ResetMethod reset_method, Random* rnd = nullptr) {
    env_->AssertNoOpenFile();
    switch (reset_method) {
      case kResetDropUnsyncedData:
        ASSERT_OK(env_->DropUnsyncedFileData());
        break;
      case kResetDropRandomUnsyncedData:
        ASSERT_OK(env_->DropRandomUnsyncedFileData(rnd));
        break;
      case kResetDeleteUnsyncedFiles:
        ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync());
        break;
      case kResetDropAndDeleteUnsynced:
        ASSERT_OK(env_->DropUnsyncedFileData());
        ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync());
        break;
      default:
        assert(false);
    }
  }
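
  // Write num_pre_sync values that are made persistent (by syncing the WAL or
  // by compacting), followed by num_post_sync values that are left unsynced.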
  void PartialCompactTestPreFault(int num_pre_sync, int num_post_sync) {
    DeleteAllData();

    WriteOptions write_options;
    write_options.sync = sync_use_wal_;

    Build(write_options, 0, num_pre_sync);
    if (sync_use_compact_) {
      db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
    }
    write_options.sync = false;
    Build(write_options, num_pre_sync, num_post_sync);
  }
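
  // Simulate a crash, roll back unsynced state with `reset_method`, reopen
  // the DB, and check that all pre-sync values are still present while reads
  // of post-sync values hit no error other than "not found".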
  void PartialCompactTestReopenWithFault(ResetMethod reset_method,
                                         int num_pre_sync, int num_post_sync,
                                         Random* rnd = nullptr) {
    env_->SetFilesystemActive(false);
    CloseDB();
    ResetDBState(reset_method, rnd);
    ASSERT_OK(OpenDB());
    ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::kValExpectFound));
    ASSERT_OK(Verify(num_pre_sync, num_post_sync,
                     FaultInjectionTest::kValExpectNoError));
    WaitCompactionFinish();
    ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::kValExpectFound));
    ASSERT_OK(Verify(num_pre_sync, num_post_sync,
                     FaultInjectionTest::kValExpectNoError));
  }

  void NoWriteTestPreFault() {
  }

  void NoWriteTestReopenWithFault(ResetMethod reset_method) {
    CloseDB();
    ResetDBState(reset_method);
    ASSERT_OK(OpenDB());
  }

  void WaitCompactionFinish() {
    static_cast<DBImpl*>(db_)->TEST_WaitForCompact();
    ASSERT_OK(db_->Put(WriteOptions(), "", ""));
  }
};
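
// Repeatedly write a batch of synced values followed by a batch of unsynced
// values, then simulate a crash with each ResetMethod and verify that no
// synced value is lost.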
TEST_P(FaultInjectionTest, FaultTest) {
  do {
    Random rnd(301);

    for (size_t idx = 0; idx < kNumIterations; idx++) {
      int num_pre_sync = rnd.Uniform(kMaxNumValues);
      int num_post_sync = rnd.Uniform(kMaxNumValues);

      PartialCompactTestPreFault(num_pre_sync, num_post_sync);
      PartialCompactTestReopenWithFault(kResetDropUnsyncedData, num_pre_sync,
                                        num_post_sync);
      NoWriteTestPreFault();
      NoWriteTestReopenWithFault(kResetDropUnsyncedData);

      PartialCompactTestPreFault(num_pre_sync, num_post_sync);
      PartialCompactTestReopenWithFault(kResetDropRandomUnsyncedData,
                                        num_pre_sync, num_post_sync, &rnd);
      NoWriteTestPreFault();
      NoWriteTestReopenWithFault(kResetDropUnsyncedData);

      // Setting a separate data path won't pass the test as we don't sync
      // it after creating new files.
      PartialCompactTestPreFault(num_pre_sync, num_post_sync);
      PartialCompactTestReopenWithFault(kResetDropAndDeleteUnsynced,
                                        num_pre_sync, num_post_sync);
      NoWriteTestPreFault();
      NoWriteTestReopenWithFault(kResetDropAndDeleteUnsynced);

      PartialCompactTestPreFault(num_pre_sync, num_post_sync);
      // No new files created so we expect all values since no files will be
      // dropped.
      PartialCompactTestReopenWithFault(kResetDeleteUnsyncedFiles, num_pre_sync,
                                        num_post_sync);
      NoWriteTestPreFault();
      NoWriteTestReopenWithFault(kResetDeleteUnsyncedFiles);
    }
  } while (ChangeOptions());
}
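
// A background task that occupies a thread-pool worker until it is woken up.
// It is used to keep flush jobs from running.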
class SleepingBackgroundTask {
 public:
  SleepingBackgroundTask()
      : bg_cv_(&mutex_), should_sleep_(true), done_with_sleep_(false) {}
  void DoSleep() {
    MutexLock l(&mutex_);
    while (should_sleep_) {
      bg_cv_.Wait();
    }
    done_with_sleep_ = true;
    bg_cv_.SignalAll();
  }
  void WakeUp() {
    MutexLock l(&mutex_);
    should_sleep_ = false;
    bg_cv_.SignalAll();
    while (!done_with_sleep_) {
      bg_cv_.Wait();
    }
  }

  static void DoSleepTask(void* arg) {
    reinterpret_cast<SleepingBackgroundTask*>(arg)->DoSleep();
  }

 private:
  port::Mutex mutex_;
  port::CondVar bg_cv_;  // Signalled when background work finishes
  bool should_sleep_;
  bool done_with_sleep_;
};

// Previous log file is not fsynced if sync is forced after log rolling.
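// A non-sync write followed by a sync write must not lose the first write
// after a crash: the sync write has to persist any earlier, still-unsynced
// WAL data as well.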
TEST_P(FaultInjectionTest, WriteOptionSyncTest) {
  SleepingBackgroundTask sleeping_task_low;
  env_->SetBackgroundThreads(1, Env::HIGH);
  // Block the job queue to prevent flush job from running.
  env_->Schedule(&SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::HIGH);

  WriteOptions write_options;
  write_options.sync = false;

  std::string key_space, value_space;
  ASSERT_OK(
      db_->Put(write_options, Key(1, &key_space), Value(1, &value_space)));
  FlushOptions flush_options;
  flush_options.wait = false;
  ASSERT_OK(db_->Flush(flush_options));
  write_options.sync = true;
  ASSERT_OK(
      db_->Put(write_options, Key(2, &key_space), Value(2, &value_space)));

  env_->SetFilesystemActive(false);
  NoWriteTestReopenWithFault(kResetDropAndDeleteUnsynced);
  sleeping_task_low.WakeUp();

  ASSERT_OK(OpenDB());
  std::string val;
  Value(2, &value_space);
  ASSERT_OK(ReadValue(2, &val));
  ASSERT_EQ(value_space, val);

  Value(1, &value_space);
  ASSERT_OK(ReadValue(1, &val));
  ASSERT_EQ(value_space, val);
}
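
// Simulate a crash taken after a compaction has written its output but before
// the result is installed. After dropping unsynced data and reopening, every
// key must still be readable.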
TEST_P(FaultInjectionTest, UninstalledCompaction) {
  options_.target_file_size_base = 32 * 1024;
  options_.write_buffer_size = 100 << 10;  // 100KB
  options_.level0_file_num_compaction_trigger = 6;
  options_.level0_stop_writes_trigger = 1 << 10;
  options_.level0_slowdown_writes_trigger = 1 << 10;
  options_.max_background_compactions = 1;
  OpenDB();

  if (!sequential_order_) {
    rocksdb::SyncPoint::GetInstance()->LoadDependency({
        {"FaultInjectionTest::FaultTest:0", "DBImpl::BGWorkCompaction"},
        {"CompactionJob::Run():End", "FaultInjectionTest::FaultTest:1"},
        {"FaultInjectionTest::FaultTest:2",
         "DBImpl::BackgroundCompaction:NonTrivial:AfterRun"},
    });
  }
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

  int kNumKeys = 1000;
  Build(WriteOptions(), 0, kNumKeys);
  FlushOptions flush_options;
  flush_options.wait = true;
  db_->Flush(flush_options);
  ASSERT_OK(db_->Put(WriteOptions(), "", ""));
  TEST_SYNC_POINT("FaultInjectionTest::FaultTest:0");
  TEST_SYNC_POINT("FaultInjectionTest::FaultTest:1");
  env_->SetFilesystemActive(false);
  TEST_SYNC_POINT("FaultInjectionTest::FaultTest:2");
  CloseDB();
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  ResetDBState(kResetDropUnsyncedData);

  std::atomic<bool> opened(false);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::Open:Opened", [&](void* arg) { opened.store(true); });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::BGWorkCompaction",
      [&](void* arg) { ASSERT_TRUE(opened.load()); });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  ASSERT_OK(OpenDB());
  ASSERT_OK(Verify(0, kNumKeys, FaultInjectionTest::kValExpectFound));
  WaitCompactionFinish();
  ASSERT_OK(Verify(0, kNumKeys, FaultInjectionTest::kValExpectFound));
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
}

INSTANTIATE_TEST_CASE_P(FaultTest, FaultInjectionTest, ::testing::Bool());

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}