Allow users to disable some kill points in db_stress

Summary:
Give a name to every kill point, and allow users to disable kill points whose names start with given prefixes. The prefixes to skip can be passed to db_stress through a command-line parameter. This provides a way for users to boost the chance of triggering low-frequency kill points.
This allows follow-up changes to the crash test scripts to improve crash test coverage.
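
A minimal sketch of the prefix check this change introduces (the helper name
below is made up for illustration; in the change itself the loop sits inline
at the top of TestKillRandom(), as shown in the diff):

    #include <string>
    #include <vector>

    // A kill point is skipped when its name starts with any prefix on the
    // blacklist. This mirrors the check added to TestKillRandom().
    static bool KillPointBlacklisted(
        const std::string& kill_point,
        const std::vector<std::string>& blacklist) {
      for (const auto& prefix : blacklist) {
        if (kill_point.substr(0, prefix.length()) == prefix) {
          return true;
        }
      }
      return false;
    }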

Test Plan:
Manually run db_stress with various values of --kill_random_test and --kill_prefix_blacklist, like this:
 --kill_random_test=2 --kill_prefix_blacklist=Posix,WritableFileWriter::Append,WritableFileWriter::WriteBuffered,WritableFileWriter::Sync
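
With these values, each kill point fires with probability 1/2 (1/8 at the few
code paths scaled by REDUCE_ODDS2), except kill points whose names start with
one of the listed prefixes, which are skipped.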

Reviewers: igor, kradhakrishnan, rven, IslamAbdelRahman, yhchiang

Reviewed By: yhchiang

Subscribers: leveldb, dhruba

Differential Revision: https://reviews.facebook.net/D48735
sdong 2015-10-14 14:08:50 -07:00
parent d306a7ea85
commit e1a5ff857b
6 changed files with 68 additions and 25 deletions

@@ -963,7 +963,7 @@ class WinWritableFile : public WritableFile {
virtual Status Allocate(off_t offset, off_t len) override {
Status status;
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("WinWritableFile::Allocate", rocksdb_kill_odds);
// Make sure that we reserve an aligned amount of space
// since the reservation block size is driven outside so we want

@@ -277,6 +277,11 @@ static const bool FLAGS_kill_random_test_dummy __attribute__((unused)) =
RegisterFlagValidator(&FLAGS_kill_random_test, &ValidateInt32Positive);
extern int rocksdb_kill_odds;
DEFINE_string(kill_prefix_blacklist, "",
"If non-empty, kill points with prefix in the list given will be"
" skipped. Items are comma-separated.");
extern std::vector<std::string> rocksdb_kill_prefix_blacklist;
DEFINE_bool(disable_wal, false, "If true, do not write WAL for write.");
DEFINE_int32(target_file_size_base, 64 * KB,
@@ -356,6 +361,21 @@ enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
fprintf(stdout, "Cannot parse compression type '%s'\n", ctype);
return rocksdb::kSnappyCompression; //default value
}
std::vector<std::string> SplitString(std::string src) {
std::vector<std::string> ret;
if (src.empty()) {
return ret;
}
size_t pos = 0;
size_t pos_comma;
while ((pos_comma = src.find(',', pos)) != std::string::npos) {
ret.push_back(src.substr(pos, pos_comma - pos));
pos = pos_comma + 1;
}
ret.push_back(src.substr(pos, src.length()));
return ret;
}
} // namespace
DEFINE_string(compression_type, "snappy",
@@ -1921,6 +1941,14 @@ class StressTest {
fprintf(stdout, "Memtablerep : %s\n", memtablerep);
fprintf(stdout, "Test kill odd : %d\n", rocksdb_kill_odds);
if (!rocksdb_kill_prefix_blacklist.empty()) {
fprintf(stdout, "Skipping kill points prefixes:\n");
for (auto& p : rocksdb_kill_prefix_blacklist) {
fprintf(stdout, " %s\n", p.c_str());
}
}
fprintf(stdout, "------------------------------------------------\n");
}
@@ -1952,7 +1980,6 @@ class StressTest {
options_.disableDataSync = FLAGS_disable_data_sync;
options_.use_fsync = FLAGS_use_fsync;
options_.allow_mmap_reads = FLAGS_mmap_read;
rocksdb_kill_odds = FLAGS_kill_random_test;
options_.target_file_size_base = FLAGS_target_file_size_base;
options_.target_file_size_multiplier = FLAGS_target_file_size_multiplier;
options_.max_bytes_for_level_base = FLAGS_max_bytes_for_level_base;
@@ -2186,6 +2213,9 @@ int main(int argc, char** argv) {
FLAGS_db = default_db_path;
}
rocksdb_kill_odds = FLAGS_kill_random_test;
rocksdb_kill_prefix_blacklist = SplitString(FLAGS_kill_prefix_blacklist);
rocksdb::StressTest stress;
if (stress.Run()) {
return 0;

@@ -368,7 +368,7 @@ class PosixMmapFile : public WritableFile {
}
Status UnmapCurrentRegion() {
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("PosixMmapFile::UnmapCurrentRegion:0", rocksdb_kill_odds);
if (base_ != nullptr) {
int munmap_status = munmap(base_, limit_ - base_);
if (munmap_status != 0) {
@@ -392,7 +392,7 @@ class PosixMmapFile : public WritableFile {
#ifdef ROCKSDB_FALLOCATE_PRESENT
assert(base_ == nullptr);
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("PosixMmapFile::UnmapCurrentRegion:0", rocksdb_kill_odds);
// we can't fallocate with FALLOC_FL_KEEP_SIZE here
if (allow_fallocate_) {
IOSTATS_TIMER_GUARD(allocate_nanos);
@@ -407,13 +407,13 @@ class PosixMmapFile : public WritableFile {
}
}
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("PosixMmapFile::Append:1", rocksdb_kill_odds);
void* ptr = mmap(nullptr, map_size_, PROT_READ | PROT_WRITE, MAP_SHARED,
fd_, file_offset_);
if (ptr == MAP_FAILED) {
return Status::IOError("MMap failed on " + filename_);
}
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("PosixMmapFile::Append:2", rocksdb_kill_odds);
base_ = reinterpret_cast<char*>(ptr);
limit_ = base_ + map_size_;
@@ -434,7 +434,7 @@ class PosixMmapFile : public WritableFile {
size_t p1 = TruncateToPageBoundary(last_sync_ - base_);
size_t p2 = TruncateToPageBoundary(dst_ - base_ - 1);
last_sync_ = dst_;
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("PosixMmapFile::Msync:0", rocksdb_kill_odds);
if (msync(base_ + p1, p2 - p1 + page_size_, MS_SYNC) < 0) {
return IOError(filename_, errno);
}
@@ -484,7 +484,7 @@ class PosixMmapFile : public WritableFile {
if (!s.ok()) {
return s;
}
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("PosixMmapFile::Append:0", rocksdb_kill_odds);
}
size_t n = (left <= avail) ? left : avail;
@@ -576,7 +576,7 @@ class PosixMmapFile : public WritableFile {
#ifdef ROCKSDB_FALLOCATE_PRESENT
virtual Status Allocate(off_t offset, off_t len) override {
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("PosixMmapFile::Allocate:0", rocksdb_kill_odds);
int alloc_status = 0;
if (allow_fallocate_) {
alloc_status =
@@ -722,7 +722,7 @@ class PosixWritableFile : public WritableFile {
#ifdef ROCKSDB_FALLOCATE_PRESENT
virtual Status Allocate(off_t offset, off_t len) override {
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("PosixWritableFile::Allocate:0", rocksdb_kill_odds);
IOSTATS_TIMER_GUARD(allocate_nanos);
int alloc_status = 0;
if (allow_fallocate_) {

@@ -57,7 +57,8 @@ Status WritableFileWriter::Append(const Slice& data) {
pending_sync_ = true;
pending_fsync_ = true;
TEST_KILL_RANDOM(rocksdb_kill_odds * REDUCE_ODDS2);
TEST_KILL_RANDOM("WritableFileWriter::Append:0",
rocksdb_kill_odds * REDUCE_ODDS2);
{
IOSTATS_TIMER_GUARD(prepare_write_nanos);
@@ -114,7 +115,7 @@ Status WritableFileWriter::Append(const Slice& data) {
s = WriteBuffered(src, left);
}
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("WritableFileWriter::Append:1", rocksdb_kill_odds);
filesize_ += data.size();
return Status::OK();
}
@@ -141,7 +142,7 @@ Status WritableFileWriter::Close() {
s = interim;
}
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("WritableFileWriter::Close:0", rocksdb_kill_odds);
interim = writable_file_->Close();
if (!interim.ok() && s.ok()) {
s = interim;
@@ -156,7 +157,8 @@ Status WritableFileWriter::Close() {
// write out the cached data to the OS cache
Status WritableFileWriter::Flush() {
Status s;
TEST_KILL_RANDOM(rocksdb_kill_odds * REDUCE_ODDS2);
TEST_KILL_RANDOM("WritableFileWriter::Flush:0",
rocksdb_kill_odds * REDUCE_ODDS2);
if (buf_.CurrentSize() > 0) {
if (use_os_buffer_) {
@@ -209,14 +211,14 @@ Status WritableFileWriter::Sync(bool use_fsync) {
if (!s.ok()) {
return s;
}
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("WritableFileWriter::Sync:0", rocksdb_kill_odds);
if (!direct_io_ && pending_sync_) {
s = SyncInternal(use_fsync);
if (!s.ok()) {
return s;
}
}
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("WritableFileWriter::Sync:1", rocksdb_kill_odds);
pending_sync_ = false;
if (use_fsync) {
pending_fsync_ = false;
@@ -294,7 +296,7 @@ Status WritableFileWriter::WriteBuffered(const char* data, size_t size) {
}
IOSTATS_ADD(bytes_written, allowed);
TEST_KILL_RANDOM(rocksdb_kill_odds);
TEST_KILL_RANDOM("WritableFileWriter::WriteBuffered:0", rocksdb_kill_odds);
left -= allowed;
src += allowed;

@@ -8,11 +8,19 @@
#include "util/random.h"
int rocksdb_kill_odds = 0;
std::vector<std::string> rocksdb_kill_prefix_blacklist;
#ifndef NDEBUG
namespace rocksdb {
void TestKillRandom(int odds, const std::string& srcfile, int srcline) {
void TestKillRandom(std::string kill_point, int odds,
const std::string& srcfile, int srcline) {
for (auto& p : rocksdb_kill_prefix_blacklist) {
if (kill_point.substr(0, p.length()) == p) {
return;
}
}
time_t curtime = time(nullptr);
Random r((uint32_t)curtime);

@@ -15,26 +15,29 @@
// This is only set from db_stress.cc and for testing only.
// If non-zero, kill at various points in source code with probability 1/this
extern int rocksdb_kill_odds;
// If kill point has a prefix on this list, will skip killing.
extern std::vector<std::string> rocksdb_kill_prefix_blacklist;
#ifdef NDEBUG
// empty in release build
#define TEST_KILL_RANDOM(rocksdb_kill_odds)
#define TEST_KILL_RANDOM(kill_point, rocksdb_kill_odds)
#else
namespace rocksdb {
// Kill the process with probablity 1/odds for testing.
extern void TestKillRandom(int odds, const std::string& srcfile, int srcline);
extern void TestKillRandom(std::string kill_point, int odds,
const std::string& srcfile, int srcline);
// To avoid crashing always at some frequently executed codepaths (during
// kill random test), use this factor to reduce odds
#define REDUCE_ODDS 2
#define REDUCE_ODDS2 4
#define TEST_KILL_RANDOM(rocksdb_kill_odds) \
{ \
if (rocksdb_kill_odds > 0) { \
TestKillRandom(rocksdb_kill_odds, __FILE__, __LINE__); \
} \
#define TEST_KILL_RANDOM(kill_point, rocksdb_kill_odds) \
{ \
if (rocksdb_kill_odds > 0) { \
TestKillRandom(kill_point, rocksdb_kill_odds, __FILE__, __LINE__); \
} \
}
} // namespace rocksdb
#endif