mirror of https://github.com/facebook/rocksdb.git
FaultInjectionTestFS follow-up and clean-up (#12861)
Summary: In follow-up to https://github.com/facebook/rocksdb/issues/12852:
* Use std::copy in place of copy_n for potentially overlapping buffer
* Get rid of troublesome -1 idiom from `pos_at_last_append_` and `pos_at_last_sync_`
* Small improvements to test FaultInjectionFSTest.ReadUnsyncedData

Pull Request resolved: https://github.com/facebook/rocksdb/pull/12861

Test Plan: CI, crash test, etc.

Reviewed By: cbi42

Differential Revision: D59757484

Pulled By: pdillinger

fbshipit-source-id: c6fbdc2e97c959983184925a855cc8b0285fa23f
This commit is contained in:
parent b800b5eb6a
commit 0e3e43f4d1
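For the first bullet in the summary (copy_n replaced by std::copy), here is a minimal standalone sketch of the overlapping-copy scenario; the buffer contents, sizes, and main() harness are invented for illustration and are not code from this commit. std::copy is specified to copy elements starting from first and proceeding to last, so a destination that begins before an overlapping source is well defined; std::copy_n does not appear to give the same ordering guarantee, which is presumably the reason for the switch.

// Sketch only: names and data are hypothetical, not taken from the commit.
#include <algorithm>
#include <cassert>
#include <cstring>

int main() {
  char scratch[16] = {};
  std::memcpy(scratch, "xxxxunsynced", 12);
  const char* src = scratch + 4;  // the interesting bytes live later in scratch
  const size_t len = 8;
  // Destination (scratch) begins before the overlapping source (scratch + 4).
  // std::copy proceeds from first to last, so this forward overlap is safe.
  std::copy(src, src + len, scratch);
  assert(std::memcmp(scratch, "unsynced", len) == 0);
  return 0;
}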
@@ -573,72 +573,77 @@ TEST(FaultInjectionFSTest, ReadUnsyncedData) {
   // This is a randomized mini-stress test, to reduce the chances of bugs in
   // FaultInjectionTestFS being caught only in db_stress, where they are
-  // difficult to debug.
-  uint32_t len = Random::GetTLSInstance()->Uniform(10000) + 1;
-  Random rnd(len);
+  // difficult to debug. ~1000 iterations might be needed to debug relevant
+  // code changes. Limiting to 10 for each regular unit test run.
+  auto seed = Random::GetTLSInstance()->Next();
+  for (int i = 0; i < 10; i++, seed++) {
+    Random rnd(seed);
+    uint32_t len = rnd.Uniform(10000) + 1;
 
-  std::string f = test::PerThreadDBPath("read_unsynced.data");
-  std::string data = rnd.RandomString(len);
+    std::string f =
+        test::PerThreadDBPath("read_unsynced." + std::to_string(seed));
+    std::string data = rnd.RandomString(len);
 
-  // Create partially synced file
-  std::unique_ptr<FSWritableFile> w;
-  ASSERT_OK(fault_fs->NewWritableFile(f, {}, &w, nullptr));
-  uint32_t synced_len = rnd.Uniform(len + 1);
-  ASSERT_OK(w->Append(Slice(data.data(), synced_len), {}, nullptr));
-  if (synced_len > 0) {
-    ASSERT_OK(w->Sync({}, nullptr));
-  }
-  ASSERT_OK(w->Append(Slice(data.data() + synced_len, len - synced_len), {},
-                      nullptr));
+    // Create partially synced file
+    std::unique_ptr<FSWritableFile> w;
+    ASSERT_OK(fault_fs->NewWritableFile(f, {}, &w, nullptr));
+    uint32_t synced_len = rnd.Uniform(len + 1);
+    ASSERT_OK(w->Append(Slice(data.data(), synced_len), {}, nullptr));
+    if (synced_len > 0) {
+      ASSERT_OK(w->Sync({}, nullptr));
+    }
+    ASSERT_OK(w->Append(Slice(data.data() + synced_len, len - synced_len), {},
+                        nullptr));
 
-  // Test file size includes unsynced data
-  {
-    uint64_t file_size;
-    ASSERT_OK(fault_fs->GetFileSize(f, {}, &file_size, nullptr));
-    ASSERT_EQ(len, file_size);
-  }
+    // Test file size includes unsynced data
+    {
+      uint64_t file_size;
+      ASSERT_OK(fault_fs->GetFileSize(f, {}, &file_size, nullptr));
+      ASSERT_EQ(len, file_size);
+    }
 
-  // Test read file contents, with two reads that probably don't
-  // align with the unsynced split. And maybe a sync or write between
-  // the two reads.
-  std::unique_ptr<FSSequentialFile> r;
-  ASSERT_OK(fault_fs->NewSequentialFile(f, {}, &r, nullptr));
-  uint32_t first_read_len = rnd.Uniform(len + 1);
-  Slice sl;
-  std::unique_ptr<char[]> scratch(new char[first_read_len]);
-  ASSERT_OK(r->Read(first_read_len, {}, &sl, scratch.get(), nullptr));
-  ASSERT_EQ(first_read_len, sl.size());
-  ASSERT_EQ(0, sl.compare(Slice(data.data(), first_read_len)));
+    // Test read file contents, with two reads that probably don't
+    // align with the unsynced split. And maybe a sync or write between
+    // the two reads.
+    std::unique_ptr<FSSequentialFile> r;
+    ASSERT_OK(fault_fs->NewSequentialFile(f, {}, &r, nullptr));
+    uint32_t first_read_len = rnd.Uniform(len + 1);
+    Slice sl;
+    std::unique_ptr<char[]> scratch(new char[first_read_len]);
+    ASSERT_OK(r->Read(first_read_len, {}, &sl, scratch.get(), nullptr));
+    ASSERT_EQ(first_read_len, sl.size());
+    ASSERT_EQ(0, sl.compare(Slice(data.data(), first_read_len)));
 
-  // Maybe a sync and/or write and/or close between the two reads.
-  if (rnd.OneIn(2)) {
-    ASSERT_OK(w->Sync({}, nullptr));
-  }
-  if (rnd.OneIn(2)) {
-    uint32_t more_len = rnd.Uniform(1000) + 1;
-    std::string more_data = rnd.RandomString(more_len);
-    ASSERT_OK(w->Append(more_data, {}, nullptr));
-    data += more_data;
-    len += more_len;
-  }
-  if (rnd.OneIn(2)) {
-    ASSERT_OK(w->Sync({}, nullptr));
-  }
-  if (rnd.OneIn(2)) {
-    ASSERT_OK(w->Close({}, nullptr));
-    w.reset();
-  }
+    // Maybe a sync and/or write and/or close between the two reads.
+    if (rnd.OneIn(2)) {
+      ASSERT_OK(w->Sync({}, nullptr));
+    }
+    if (rnd.OneIn(2)) {
+      uint32_t more_len = rnd.Uniform(1000) + 1;
+      std::string more_data = rnd.RandomString(more_len);
+      ASSERT_OK(w->Append(more_data, {}, nullptr));
+      data += more_data;
+      len += more_len;
+    }
+    if (rnd.OneIn(2)) {
+      ASSERT_OK(w->Sync({}, nullptr));
+    }
+    if (rnd.OneIn(2)) {
+      ASSERT_OK(w->Close({}, nullptr));
+      w.reset();
+    }
 
-  // Second read some of, all of, or more than rest of file
-  uint32_t second_read_len = rnd.Uniform(len + 1);
-  scratch.reset(new char[second_read_len]);
-  ASSERT_OK(r->Read(second_read_len, {}, &sl, scratch.get(), nullptr));
-  if (len - first_read_len < second_read_len) {
-    ASSERT_EQ(len - first_read_len, sl.size());
-  } else {
-    ASSERT_EQ(second_read_len, sl.size());
-  }
-  ASSERT_EQ(0, sl.compare(Slice(data.data() + first_read_len, sl.size())));
+    // Second read some of, all of, or more than rest of file
+    uint32_t second_read_len = rnd.Uniform(len + 1);
+    scratch.reset(new char[second_read_len]);
+    ASSERT_OK(r->Read(second_read_len, {}, &sl, scratch.get(), nullptr));
+    if (len - first_read_len < second_read_len) {
+      ASSERT_EQ(len - first_read_len, sl.size());
+    } else {
+      ASSERT_EQ(second_read_len, sl.size());
+    }
+    ASSERT_EQ(0, sl.compare(Slice(data.data() + first_read_len, sl.size())));
+  }
 }
 
 }  // namespace ROCKSDB_NAMESPACE

@@ -151,7 +151,8 @@ TestFSWritableFile::TestFSWritableFile(const std::string& fname,
       fs_(fs),
       unsync_data_loss_(fs_->InjectUnsyncedDataLoss()) {
   assert(target_ != nullptr);
-  state_.pos_at_last_append_ = 0;
+  assert(state_.pos_at_last_append_ == 0);
+  assert(state_.pos_at_last_sync_ == 0);
 }
 
 TestFSWritableFile::~TestFSWritableFile() {

@@ -377,15 +378,13 @@ IOStatus TestFSWritableFile::RangeSync(uint64_t offset, uint64_t nbytes,
   }
   // Assumes caller passes consecutive byte ranges.
   uint64_t sync_limit = offset + nbytes;
-  uint64_t buf_begin =
-      state_.pos_at_last_sync_ < 0 ? 0 : state_.pos_at_last_sync_;
 
   IOStatus io_s;
-  if (sync_limit < buf_begin) {
+  if (sync_limit < state_.pos_at_last_sync_) {
     return io_s;
   }
   uint64_t num_to_sync = std::min(static_cast<uint64_t>(state_.buffer_.size()),
-                                  sync_limit - buf_begin);
+                                  sync_limit - state_.pos_at_last_sync_);
   Slice buf_to_sync(state_.buffer_.data(), num_to_sync);
   io_s = target_->Append(buf_to_sync, options, dbg);
   state_.buffer_ = state_.buffer_.substr(num_to_sync);

@@ -564,10 +563,10 @@ namespace {
 // copying data there if needed.
 void MoveToScratchIfNeeded(Slice* result, char* scratch) {
   if (result->data() != scratch) {
-    // NOTE: might overlap
-    std::copy_n(result->data(), result->size(), scratch);
+    // NOTE: might overlap, where result is later in scratch
+    std::copy(result->data(), result->data() + result->size(), scratch);
+    *result = Slice(scratch, result->size());
   }
-  *result = Slice(scratch, result->size());
 }
 }  // namespace

@@ -576,13 +575,13 @@ void FaultInjectionTestFS::ReadUnsynced(const std::string& fname,
                                         Slice* result, char* scratch,
                                         int64_t* pos_at_last_sync) {
   *result = Slice(scratch, 0);      // default empty result
   assert(*pos_at_last_sync == -1);  // default "unknown"
 
   MutexLock l(&mutex_);
   auto it = db_file_state_.find(fname);
   if (it != db_file_state_.end()) {
     auto& st = it->second;
     // Use 0 to mean "tracked but nothing synced"
-    *pos_at_last_sync = std::max(st.pos_at_last_sync_, int64_t{0});
+    *pos_at_last_sync = static_cast<int64_t>(st.pos_at_last_sync_);
     // Find overlap between [offset, offset + n) and
     // [*pos_at_last_sync, *pos_at_last_sync + st.buffer_.size())
     int64_t begin = std::max(static_cast<int64_t>(offset), *pos_at_last_sync);

@@ -661,6 +660,8 @@ IOStatus TestFSSequentialFile::Read(size_t n, const IOOptions& options,
   // any more from target because we established a "point in time" for
   // completing this Read in which we read as much tail data (unsynced) as
   // we could.
 
+  // We got pos_at_last_sync info if we got any unsynced data.
+  assert(pos_at_last_sync >= 0 || unsynced_result.size() == 0);
 
   // Combined data is already lined up in scratch.

@@ -41,17 +41,15 @@ enum class FaultInjectionIOType {
 
 struct FSFileState {
   std::string filename_;
-  int64_t pos_at_last_append_;
-  int64_t pos_at_last_sync_;
+  uint64_t pos_at_last_append_ = 0;
+  uint64_t pos_at_last_sync_ = 0;
   std::string buffer_;
 
-  explicit FSFileState(const std::string& filename)
-      : filename_(filename), pos_at_last_append_(-1), pos_at_last_sync_(-1) {}
-
-  FSFileState() : pos_at_last_append_(-1), pos_at_last_sync_(-1) {}
+  explicit FSFileState(const std::string& filename = {})
+      : filename_(filename) {}
 
   bool IsFullySynced() const {
-    return pos_at_last_append_ <= 0 || pos_at_last_append_ == pos_at_last_sync_;
+    return pos_at_last_append_ == pos_at_last_sync_;
   }
 
   IOStatus DropUnsyncedData();
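
To make the second bullet of the summary concrete, here is a condensed, hypothetical before/after of the FSFileState idea from the hunk above (struct names invented; not the real class). With signed positions initialized to -1 and `pos_at_last_append_` bumped to 0 when a file is opened (as the old TestFSWritableFile constructor did), IsFullySynced needs the `pos_at_last_append_ <= 0` escape hatch; with unsigned positions defaulting to 0, plain equality is enough.

// Hypothetical, condensed illustration of the -1 idiom vs. default-zero fields.
#include <cassert>
#include <cstdint>

struct OldStyleState {
  int64_t pos_at_last_append_ = -1;  // -1 used as "nothing appended yet"
  int64_t pos_at_last_sync_ = -1;
  bool IsFullySynced() const {
    // Needs a special case so an untouched file still counts as synced.
    return pos_at_last_append_ <= 0 ||
           pos_at_last_append_ == pos_at_last_sync_;
  }
};

struct NewStyleState {
  uint64_t pos_at_last_append_ = 0;  // 0 already means "nothing appended yet"
  uint64_t pos_at_last_sync_ = 0;
  bool IsFullySynced() const { return pos_at_last_append_ == pos_at_last_sync_; }
};

int main() {
  OldStyleState old_state;
  old_state.pos_at_last_append_ = 0;  // what the old constructor did on open
  assert(old_state.IsFullySynced());  // true only via the <= 0 escape hatch

  NewStyleState new_state;            // both positions already 0
  assert(new_state.IsFullySynced());  // plain equality suffices
  return 0;
}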