// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright 2014 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
// This test uses a custom FileSystem to keep track of the state of a file
// system since the last "Sync". The data being written is cached in a
// "buffer". Only when "Sync" is called will the data become persistent. It
// can simulate file data loss (or loss of entire files) not protected by a
// "Sync". For any of the FileSystem related operations, by specifying the
// "IOStatus Error", a specific error can be returned when the file system
// is not activated.
|
|
|
|
|
|
|
|
#pragma once
|
|
|
|
|
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>

#include "file/filename.h"
#include "rocksdb/file_system.h"
#include "util/mutexlock.h"
#include "util/random.h"
#include "util/thread_local.h"
|
2020-03-04 20:30:34 +00:00
|
|
|
|
|
|
|
namespace ROCKSDB_NAMESPACE {
|
|
|
|
|
|
|
|
class TestFSWritableFile;
|
|
|
|
class FaultInjectionTestFS;
|
|
|
|
|
|
|
|
struct FSFileState {
|
|
|
|
std::string filename_;
|
|
|
|
ssize_t pos_;
|
|
|
|
ssize_t pos_at_last_sync_;
|
|
|
|
ssize_t pos_at_last_flush_;
|
|
|
|
std::string buffer_;
|
|
|
|
|
|
|
|
explicit FSFileState(const std::string& filename)
|
|
|
|
: filename_(filename),
|
|
|
|
pos_(-1),
|
|
|
|
pos_at_last_sync_(-1),
|
|
|
|
pos_at_last_flush_(-1) {}
|
|
|
|
|
|
|
|
FSFileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {}
|
|
|
|
|
|
|
|
bool IsFullySynced() const { return pos_ <= 0 || pos_ == pos_at_last_sync_; }
|
|
|
|
|
|
|
|
IOStatus DropUnsyncedData();
|
|
|
|
|
|
|
|
IOStatus DropRandomUnsyncedData(Random* rand);
|
|
|
|
};
|
|
|
|
|
|
|
|
// A wrapper around WritableFileWriter* file
|
|
|
|
// is written to or sync'ed.
|
|
|
|
class TestFSWritableFile : public FSWritableFile {
|
|
|
|
public:
|
|
|
|
explicit TestFSWritableFile(const std::string& fname,
|
2021-02-11 06:18:33 +00:00
|
|
|
const FileOptions& file_opts,
|
2020-03-04 20:30:34 +00:00
|
|
|
std::unique_ptr<FSWritableFile>&& f,
|
|
|
|
FaultInjectionTestFS* fs);
|
|
|
|
virtual ~TestFSWritableFile();
|
|
|
|
virtual IOStatus Append(const Slice& data, const IOOptions&,
|
|
|
|
IODebugContext*) override;
|
Using existing crc32c checksum in checksum handoff for Manifest and WAL (#8412)
Summary:
In PR https://github.com/facebook/rocksdb/issues/7523 , checksum handoff is introduced in RocksDB for WAL, Manifest, and SST files. When user enable checksum handoff for a certain type of file, before the data is written to the lower layer storage system, we calculate the checksum (crc32c) of each piece of data and pass the checksum down with the data, such that data verification can be down by the lower layer storage system if it has the capability. However, it cannot cover the whole lifetime of the data in the memory and also it potentially introduces extra checksum calculation overhead.
In this PR, we introduce a new interface in WritableFileWriter::Append, which allows the caller be able to pass the data and the checksum (crc32c) together. In this way, WritableFileWriter can directly use the pass-in checksum (crc32c) to generate the checksum of data being passed down to the storage system. It saves the calculation overhead and achieves higher protection coverage. When a new checksum is added with the data, we use Crc32cCombine https://github.com/facebook/rocksdb/issues/8305 to combine the existing checksum and the new checksum. To avoid the segmenting of data by rate-limiter before it is stored, rate-limiter is called enough times to accumulate enough credits for a certain write. This design only support Manifest and WAL which use log_writer in the current stage.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8412
Test Plan: make check, add new testing cases.
Reviewed By: anand1976
Differential Revision: D29151545
Pulled By: zhichao-cao
fbshipit-source-id: 75e2278c5126cfd58393c67b1efd18dcc7a30772
2021-06-25 07:46:33 +00:00
|
|
|
virtual IOStatus Append(const Slice& data, const IOOptions& options,
|
2021-02-11 06:18:33 +00:00
|
|
|
const DataVerificationInfo& verification_info,
|
Using existing crc32c checksum in checksum handoff for Manifest and WAL (#8412)
Summary:
In PR https://github.com/facebook/rocksdb/issues/7523 , checksum handoff is introduced in RocksDB for WAL, Manifest, and SST files. When user enable checksum handoff for a certain type of file, before the data is written to the lower layer storage system, we calculate the checksum (crc32c) of each piece of data and pass the checksum down with the data, such that data verification can be down by the lower layer storage system if it has the capability. However, it cannot cover the whole lifetime of the data in the memory and also it potentially introduces extra checksum calculation overhead.
In this PR, we introduce a new interface in WritableFileWriter::Append, which allows the caller be able to pass the data and the checksum (crc32c) together. In this way, WritableFileWriter can directly use the pass-in checksum (crc32c) to generate the checksum of data being passed down to the storage system. It saves the calculation overhead and achieves higher protection coverage. When a new checksum is added with the data, we use Crc32cCombine https://github.com/facebook/rocksdb/issues/8305 to combine the existing checksum and the new checksum. To avoid the segmenting of data by rate-limiter before it is stored, rate-limiter is called enough times to accumulate enough credits for a certain write. This design only support Manifest and WAL which use log_writer in the current stage.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8412
Test Plan: make check, add new testing cases.
Reviewed By: anand1976
Differential Revision: D29151545
Pulled By: zhichao-cao
fbshipit-source-id: 75e2278c5126cfd58393c67b1efd18dcc7a30772
2021-06-25 07:46:33 +00:00
|
|
|
IODebugContext* dbg) override;
|
2020-03-04 20:30:34 +00:00
|
|
|
virtual IOStatus Truncate(uint64_t size, const IOOptions& options,
|
|
|
|
IODebugContext* dbg) override {
|
|
|
|
return target_->Truncate(size, options, dbg);
|
|
|
|
}
|
|
|
|
virtual IOStatus Close(const IOOptions& options,
|
|
|
|
IODebugContext* dbg) override;
|
|
|
|
virtual IOStatus Flush(const IOOptions&, IODebugContext*) override;
|
|
|
|
virtual IOStatus Sync(const IOOptions& options, IODebugContext* dbg) override;
|
|
|
|
virtual bool IsSyncThreadSafe() const override { return true; }
|
|
|
|
virtual IOStatus PositionedAppend(const Slice& data, uint64_t offset,
|
|
|
|
const IOOptions& options,
|
|
|
|
IODebugContext* dbg) override {
|
|
|
|
return target_->PositionedAppend(data, offset, options, dbg);
|
|
|
|
}
|
2020-09-24 02:00:30 +00:00
|
|
|
IOStatus PositionedAppend(const Slice& data, uint64_t offset,
|
|
|
|
const IOOptions& options,
|
Using existing crc32c checksum in checksum handoff for Manifest and WAL (#8412)
Summary:
In PR https://github.com/facebook/rocksdb/issues/7523 , checksum handoff is introduced in RocksDB for WAL, Manifest, and SST files. When user enable checksum handoff for a certain type of file, before the data is written to the lower layer storage system, we calculate the checksum (crc32c) of each piece of data and pass the checksum down with the data, such that data verification can be down by the lower layer storage system if it has the capability. However, it cannot cover the whole lifetime of the data in the memory and also it potentially introduces extra checksum calculation overhead.
In this PR, we introduce a new interface in WritableFileWriter::Append, which allows the caller be able to pass the data and the checksum (crc32c) together. In this way, WritableFileWriter can directly use the pass-in checksum (crc32c) to generate the checksum of data being passed down to the storage system. It saves the calculation overhead and achieves higher protection coverage. When a new checksum is added with the data, we use Crc32cCombine https://github.com/facebook/rocksdb/issues/8305 to combine the existing checksum and the new checksum. To avoid the segmenting of data by rate-limiter before it is stored, rate-limiter is called enough times to accumulate enough credits for a certain write. This design only support Manifest and WAL which use log_writer in the current stage.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8412
Test Plan: make check, add new testing cases.
Reviewed By: anand1976
Differential Revision: D29151545
Pulled By: zhichao-cao
fbshipit-source-id: 75e2278c5126cfd58393c67b1efd18dcc7a30772
2021-06-25 07:46:33 +00:00
|
|
|
const DataVerificationInfo& verification_info,
|
|
|
|
IODebugContext* dbg) override;
|
2020-04-11 00:18:56 +00:00
|
|
|
virtual size_t GetRequiredBufferAlignment() const override {
|
|
|
|
return target_->GetRequiredBufferAlignment();
|
|
|
|
}
|
2020-03-04 20:30:34 +00:00
|
|
|
virtual bool use_direct_io() const override {
|
|
|
|
return target_->use_direct_io();
|
|
|
|
};
|
|
|
|
|
|
|
|
private:
|
|
|
|
FSFileState state_;
|
2021-02-11 06:18:33 +00:00
|
|
|
FileOptions file_opts_;
|
2020-03-04 20:30:34 +00:00
|
|
|
std::unique_ptr<FSWritableFile> target_;
|
|
|
|
bool writable_file_opened_;
|
|
|
|
FaultInjectionTestFS* fs_;
|
|
|
|
port::Mutex mutex_;
|
|
|
|
};
|
|
|
|
|
|
|
|
// A wrapper around WritableFileWriter* file
|
|
|
|
// is written to or sync'ed.
|
|
|
|
class TestFSRandomRWFile : public FSRandomRWFile {
|
|
|
|
public:
|
|
|
|
explicit TestFSRandomRWFile(const std::string& fname,
|
|
|
|
std::unique_ptr<FSRandomRWFile>&& f,
|
|
|
|
FaultInjectionTestFS* fs);
|
|
|
|
virtual ~TestFSRandomRWFile();
|
|
|
|
IOStatus Write(uint64_t offset, const Slice& data, const IOOptions& options,
|
|
|
|
IODebugContext* dbg) override;
|
|
|
|
IOStatus Read(uint64_t offset, size_t n, const IOOptions& options,
|
|
|
|
Slice* result, char* scratch,
|
|
|
|
IODebugContext* dbg) const override;
|
|
|
|
IOStatus Close(const IOOptions& options, IODebugContext* dbg) override;
|
|
|
|
IOStatus Flush(const IOOptions& options, IODebugContext* dbg) override;
|
|
|
|
IOStatus Sync(const IOOptions& options, IODebugContext* dbg) override;
|
|
|
|
size_t GetRequiredBufferAlignment() const override {
|
|
|
|
return target_->GetRequiredBufferAlignment();
|
|
|
|
}
|
|
|
|
bool use_direct_io() const override { return target_->use_direct_io(); };
|
|
|
|
|
|
|
|
private:
|
|
|
|
std::unique_ptr<FSRandomRWFile> target_;
|
|
|
|
bool file_opened_;
|
|
|
|
FaultInjectionTestFS* fs_;
|
|
|
|
};
|
|
|
|
|
2020-04-11 00:18:56 +00:00
|
|
|
class TestFSRandomAccessFile : public FSRandomAccessFile {
|
|
|
|
public:
|
|
|
|
explicit TestFSRandomAccessFile(const std::string& fname,
|
|
|
|
std::unique_ptr<FSRandomAccessFile>&& f,
|
|
|
|
FaultInjectionTestFS* fs);
|
|
|
|
~TestFSRandomAccessFile() override {}
|
|
|
|
IOStatus Read(uint64_t offset, size_t n, const IOOptions& options,
|
|
|
|
Slice* result, char* scratch,
|
|
|
|
IODebugContext* dbg) const override;
|
2021-09-16 22:59:57 +00:00
|
|
|
IOStatus MultiRead(FSReadRequest* reqs, size_t num_reqs,
|
|
|
|
const IOOptions& options, IODebugContext* dbg) override;
|
2020-04-11 00:18:56 +00:00
|
|
|
size_t GetRequiredBufferAlignment() const override {
|
|
|
|
return target_->GetRequiredBufferAlignment();
|
|
|
|
}
|
|
|
|
bool use_direct_io() const override { return target_->use_direct_io(); }
|
|
|
|
|
2021-06-10 18:01:44 +00:00
|
|
|
size_t GetUniqueId(char* id, size_t max_size) const override;
|
|
|
|
|
2020-04-11 00:18:56 +00:00
|
|
|
private:
|
|
|
|
std::unique_ptr<FSRandomAccessFile> target_;
|
|
|
|
FaultInjectionTestFS* fs_;
|
|
|
|
};
|
|
|
|
|
2021-09-13 15:45:13 +00:00
|
|
|
class TestFSSequentialFile : public FSSequentialFileOwnerWrapper {
|
2021-07-06 18:04:04 +00:00
|
|
|
public:
|
2021-09-13 15:45:13 +00:00
|
|
|
explicit TestFSSequentialFile(std::unique_ptr<FSSequentialFile>&& f,
|
|
|
|
FaultInjectionTestFS* fs)
|
|
|
|
: FSSequentialFileOwnerWrapper(std::move(f)), fs_(fs) {}
|
2021-07-06 18:04:04 +00:00
|
|
|
IOStatus Read(size_t n, const IOOptions& options, Slice* result,
|
|
|
|
char* scratch, IODebugContext* dbg) override;
|
|
|
|
IOStatus PositionedRead(uint64_t offset, size_t n, const IOOptions& options,
|
|
|
|
Slice* result, char* scratch,
|
|
|
|
IODebugContext* dbg) override;
|
|
|
|
|
|
|
|
private:
|
|
|
|
FaultInjectionTestFS* fs_;
|
|
|
|
};
|
|
|
|
|
2020-03-04 20:30:34 +00:00
|
|
|
class TestFSDirectory : public FSDirectory {
|
|
|
|
public:
|
|
|
|
explicit TestFSDirectory(FaultInjectionTestFS* fs, std::string dirname,
|
|
|
|
FSDirectory* dir)
|
|
|
|
: fs_(fs), dirname_(dirname), dir_(dir) {}
|
|
|
|
~TestFSDirectory() {}
|
|
|
|
|
|
|
|
virtual IOStatus Fsync(const IOOptions& options,
|
|
|
|
IODebugContext* dbg) override;
|
|
|
|
|
2021-11-03 19:20:19 +00:00
|
|
|
virtual IOStatus FsyncWithDirOptions(
|
|
|
|
const IOOptions& options, IODebugContext* dbg,
|
|
|
|
const DirFsyncOptions& dir_fsync_options) override;
|
|
|
|
|
2020-03-04 20:30:34 +00:00
|
|
|
private:
|
|
|
|
FaultInjectionTestFS* fs_;
|
|
|
|
std::string dirname_;
|
|
|
|
std::unique_ptr<FSDirectory> dir_;
|
|
|
|
};
|
|
|
|
|
|
|
|
class FaultInjectionTestFS : public FileSystemWrapper {
|
|
|
|
public:
|
2020-07-29 05:58:28 +00:00
|
|
|
explicit FaultInjectionTestFS(const std::shared_ptr<FileSystem>& base)
|
2020-04-11 00:18:56 +00:00
|
|
|
: FileSystemWrapper(base),
|
|
|
|
filesystem_active_(true),
|
|
|
|
filesystem_writable_(false),
|
2020-12-17 19:51:04 +00:00
|
|
|
thread_local_error_(new ThreadLocalPtr(DeleteThreadLocalErrorContext)),
|
|
|
|
enable_write_error_injection_(false),
|
2021-04-28 17:57:11 +00:00
|
|
|
enable_metadata_write_error_injection_(false),
|
2021-02-11 06:18:33 +00:00
|
|
|
write_error_rand_(0),
|
2021-04-28 17:57:11 +00:00
|
|
|
write_error_one_in_(0),
|
|
|
|
metadata_write_error_one_in_(0),
|
2021-07-06 18:04:04 +00:00
|
|
|
read_error_one_in_(0),
|
2021-06-10 18:01:44 +00:00
|
|
|
ingest_data_corruption_before_write_(false),
|
|
|
|
fail_get_file_unique_id_(false) {}
|
2020-07-29 05:58:28 +00:00
|
|
|
virtual ~FaultInjectionTestFS() { error_.PermitUncheckedError(); }
|
2020-03-04 20:30:34 +00:00
|
|
|
|
2021-11-02 16:06:02 +00:00
|
|
|
static const char* kClassName() { return "FaultInjectionTestFS"; }
|
|
|
|
const char* Name() const override { return kClassName(); }
|
2020-03-04 20:30:34 +00:00
|
|
|
|
|
|
|
IOStatus NewDirectory(const std::string& name, const IOOptions& options,
|
|
|
|
std::unique_ptr<FSDirectory>* result,
|
|
|
|
IODebugContext* dbg) override;
|
|
|
|
|
|
|
|
IOStatus NewWritableFile(const std::string& fname,
|
|
|
|
const FileOptions& file_opts,
|
|
|
|
std::unique_ptr<FSWritableFile>* result,
|
|
|
|
IODebugContext* dbg) override;
|
|
|
|
|
|
|
|
IOStatus ReopenWritableFile(const std::string& fname,
|
|
|
|
const FileOptions& file_opts,
|
|
|
|
std::unique_ptr<FSWritableFile>* result,
|
|
|
|
IODebugContext* dbg) override;
|
|
|
|
|
|
|
|
IOStatus NewRandomRWFile(const std::string& fname,
|
|
|
|
const FileOptions& file_opts,
|
|
|
|
std::unique_ptr<FSRandomRWFile>* result,
|
|
|
|
IODebugContext* dbg) override;
|
|
|
|
|
|
|
|
IOStatus NewRandomAccessFile(const std::string& fname,
|
|
|
|
const FileOptions& file_opts,
|
|
|
|
std::unique_ptr<FSRandomAccessFile>* result,
|
|
|
|
IODebugContext* dbg) override;
|
2021-07-06 18:04:04 +00:00
|
|
|
IOStatus NewSequentialFile(const std::string& f, const FileOptions& file_opts,
|
|
|
|
std::unique_ptr<FSSequentialFile>* r,
|
|
|
|
IODebugContext* dbg) override;
|
2020-03-04 20:30:34 +00:00
|
|
|
|
|
|
|
virtual IOStatus DeleteFile(const std::string& f, const IOOptions& options,
|
|
|
|
IODebugContext* dbg) override;
|
|
|
|
|
|
|
|
virtual IOStatus RenameFile(const std::string& s, const std::string& t,
|
|
|
|
const IOOptions& options,
|
|
|
|
IODebugContext* dbg) override;
|
|
|
|
|
2021-10-11 23:22:10 +00:00
|
|
|
virtual IOStatus LinkFile(const std::string& src, const std::string& target,
|
|
|
|
const IOOptions& options,
|
|
|
|
IODebugContext* dbg) override;
|
|
|
|
|
2020-03-04 20:30:34 +00:00
|
|
|
// Undef to eliminate clash on Windows
|
|
|
|
#undef GetFreeSpace
|
|
|
|
virtual IOStatus GetFreeSpace(const std::string& path,
|
|
|
|
const IOOptions& options, uint64_t* disk_free,
|
|
|
|
IODebugContext* dbg) override {
|
2020-10-02 23:39:17 +00:00
|
|
|
IOStatus io_s;
|
2021-07-21 01:08:55 +00:00
|
|
|
if (!IsFilesystemActive() &&
|
|
|
|
error_.subcode() == IOStatus::SubCode::kNoSpace) {
|
2020-03-04 20:30:34 +00:00
|
|
|
*disk_free = 0;
|
|
|
|
} else {
|
2020-10-02 23:39:17 +00:00
|
|
|
io_s = target()->GetFreeSpace(path, options, disk_free, dbg);
|
2020-03-04 20:30:34 +00:00
|
|
|
}
|
2020-10-02 23:39:17 +00:00
|
|
|
return io_s;
|
2020-03-04 20:30:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void WritableFileClosed(const FSFileState& state);
|
|
|
|
|
|
|
|
void WritableFileSynced(const FSFileState& state);
|
|
|
|
|
|
|
|
void WritableFileAppended(const FSFileState& state);
|
|
|
|
|
|
|
|
IOStatus DropUnsyncedFileData();
|
|
|
|
|
|
|
|
IOStatus DropRandomUnsyncedFileData(Random* rnd);
|
|
|
|
|
|
|
|
IOStatus DeleteFilesCreatedAfterLastDirSync(const IOOptions& options,
|
|
|
|
IODebugContext* dbg);
|
|
|
|
|
|
|
|
void ResetState();
|
|
|
|
|
|
|
|
void UntrackFile(const std::string& f);
|
|
|
|
|
|
|
|
void SyncDir(const std::string& dirname) {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
dir_to_new_files_since_last_sync_.erase(dirname);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Setting the filesystem to inactive is the test equivalent to simulating a
|
|
|
|
// system reset. Setting to inactive will freeze our saved filesystem state so
|
|
|
|
// that it will stop being recorded. It can then be reset back to the state at
|
|
|
|
// the time of the reset.
|
|
|
|
bool IsFilesystemActive() {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
return filesystem_active_;
|
|
|
|
}
|
2020-04-11 00:18:56 +00:00
|
|
|
|
|
|
|
// Setting filesystem_writable_ makes NewWritableFile. ReopenWritableFile,
|
|
|
|
// and NewRandomRWFile bypass FaultInjectionTestFS and go directly to the
|
|
|
|
// target FS
|
|
|
|
bool IsFilesystemDirectWritable() {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
return filesystem_writable_;
|
|
|
|
}
|
2021-07-16 23:08:14 +00:00
|
|
|
bool ShouldUseDiretWritable(const std::string& file_name) {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
if (filesystem_writable_) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
FileType file_type = kTempFile;
|
|
|
|
uint64_t file_number = 0;
|
|
|
|
if (!TryParseFileName(file_name, &file_number, &file_type)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return skip_direct_writable_types_.find(file_type) !=
|
|
|
|
skip_direct_writable_types_.end();
|
|
|
|
}
|
2020-03-04 20:30:34 +00:00
|
|
|
void SetFilesystemActiveNoLock(
|
|
|
|
bool active, IOStatus error = IOStatus::Corruption("Not active")) {
|
2020-10-02 23:39:17 +00:00
|
|
|
error.PermitUncheckedError();
|
2020-03-04 20:30:34 +00:00
|
|
|
filesystem_active_ = active;
|
|
|
|
if (!active) {
|
|
|
|
error_ = error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
void SetFilesystemActive(
|
|
|
|
bool active, IOStatus error = IOStatus::Corruption("Not active")) {
|
|
|
|
MutexLock l(&mutex_);
|
2020-10-02 23:39:17 +00:00
|
|
|
error.PermitUncheckedError();
|
2020-03-04 20:30:34 +00:00
|
|
|
SetFilesystemActiveNoLock(active, error);
|
|
|
|
}
|
2020-04-11 00:18:56 +00:00
|
|
|
void SetFilesystemDirectWritable(
|
|
|
|
bool writable) {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
filesystem_writable_ = writable;
|
|
|
|
}
|
2021-10-11 23:22:10 +00:00
|
|
|
void AssertNoOpenFile() { assert(open_managed_files_.empty()); }
|
2020-03-04 20:30:34 +00:00
|
|
|
|
|
|
|
IOStatus GetError() { return error_; }
|
|
|
|
|
|
|
|
void SetFileSystemIOError(IOStatus io_error) {
|
|
|
|
MutexLock l(&mutex_);
|
2020-10-02 23:39:17 +00:00
|
|
|
io_error.PermitUncheckedError();
|
2020-03-04 20:30:34 +00:00
|
|
|
error_ = io_error;
|
|
|
|
}
|
|
|
|
|
2021-02-11 06:18:33 +00:00
|
|
|
// To simulate the data corruption before data is written in FS
|
|
|
|
void IngestDataCorruptionBeforeWrite() {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
ingest_data_corruption_before_write_ = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
void NoDataCorruptionBeforeWrite() {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
ingest_data_corruption_before_write_ = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool ShouldDataCorruptionBeforeWrite() {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
return ingest_data_corruption_before_write_;
|
|
|
|
}
|
|
|
|
|
|
|
|
void SetChecksumHandoffFuncType(const ChecksumType& func_type) {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
checksum_handoff_func_tpye_ = func_type;
|
|
|
|
}
|
|
|
|
|
|
|
|
const ChecksumType& GetChecksumHandoffFuncType() {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
return checksum_handoff_func_tpye_;
|
|
|
|
}
|
|
|
|
|
2021-06-10 18:01:44 +00:00
|
|
|
void SetFailGetUniqueId(bool flag) {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
fail_get_file_unique_id_ = flag;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool ShouldFailGetUniqueId() {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
return fail_get_file_unique_id_;
|
|
|
|
}
|
|
|
|
|
2020-04-11 00:18:56 +00:00
|
|
|
// Specify what the operation, so we can inject the right type of error
|
|
|
|
enum ErrorOperation : char {
|
|
|
|
kRead = 0,
|
2021-09-21 21:47:09 +00:00
|
|
|
kMultiReadSingleReq = 1,
|
|
|
|
kMultiRead = 2,
|
2020-04-11 00:18:56 +00:00
|
|
|
kOpen,
|
|
|
|
};
|
|
|
|
|
|
|
|
// Set thread-local parameters for error injection. The first argument,
|
|
|
|
// seed is the seed for the random number generator, and one_in determines
|
|
|
|
// the probability of injecting error (i.e an error is injected with
|
|
|
|
// 1/one_in probability)
|
|
|
|
void SetThreadLocalReadErrorContext(uint32_t seed, int one_in) {
|
|
|
|
struct ErrorContext* ctx =
|
|
|
|
static_cast<struct ErrorContext*>(thread_local_error_->Get());
|
|
|
|
if (ctx == nullptr) {
|
|
|
|
ctx = new ErrorContext(seed);
|
|
|
|
thread_local_error_->Reset(ctx);
|
|
|
|
}
|
|
|
|
ctx->one_in = one_in;
|
|
|
|
ctx->count = 0;
|
|
|
|
}
|
|
|
|
|
2020-04-13 17:58:43 +00:00
|
|
|
static void DeleteThreadLocalErrorContext(void *p) {
|
|
|
|
ErrorContext* ctx = static_cast<ErrorContext*>(p);
|
|
|
|
delete ctx;
|
|
|
|
}
|
|
|
|
|
2020-12-17 19:51:04 +00:00
|
|
|
// This is to set the parameters for the write error injection.
|
|
|
|
// seed is the seed for the random number generator, and one_in determines
|
|
|
|
// the probability of injecting error (i.e an error is injected with
|
|
|
|
// 1/one_in probability). For write error, we can specify the error we
|
|
|
|
// want to inject. Types decides the file types we want to inject the
|
|
|
|
// error (e.g., Wal files, SST files), which is empty by default.
|
|
|
|
void SetRandomWriteError(uint32_t seed, int one_in, IOStatus error,
|
2021-06-30 23:45:44 +00:00
|
|
|
bool inject_for_all_file_types,
|
2020-12-17 19:51:04 +00:00
|
|
|
const std::vector<FileType>& types) {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
Random tmp_rand(seed);
|
|
|
|
error.PermitUncheckedError();
|
|
|
|
error_ = error;
|
|
|
|
write_error_rand_ = tmp_rand;
|
|
|
|
write_error_one_in_ = one_in;
|
2021-06-30 23:45:44 +00:00
|
|
|
inject_for_all_file_types_ = inject_for_all_file_types;
|
2020-12-17 19:51:04 +00:00
|
|
|
write_error_allowed_types_ = types;
|
|
|
|
}
|
|
|
|
|
2021-07-16 23:08:14 +00:00
|
|
|
void SetSkipDirectWritableTypes(const std::set<FileType>& types) {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
skip_direct_writable_types_ = types;
|
|
|
|
}
|
|
|
|
|
2021-04-28 17:57:11 +00:00
|
|
|
void SetRandomMetadataWriteError(int one_in) {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
metadata_write_error_one_in_ = one_in;
|
|
|
|
}
|
2021-07-06 18:04:04 +00:00
|
|
|
// If the value is not 0, it is enabled. Otherwise, it is disabled.
|
|
|
|
void SetRandomReadError(int one_in) { read_error_one_in_ = one_in; }
|
|
|
|
|
|
|
|
bool ShouldInjectRandomReadError() {
|
|
|
|
return read_error_one_in() &&
|
|
|
|
Random::GetTLSInstance()->OneIn(read_error_one_in());
|
|
|
|
}
|
2021-04-28 17:57:11 +00:00
|
|
|
|
2020-12-17 19:51:04 +00:00
|
|
|
// Inject an write error with randomlized parameter and the predefined
|
|
|
|
// error type. Only the allowed file types will inject the write error
|
|
|
|
IOStatus InjectWriteError(const std::string& file_name);
|
|
|
|
|
2021-04-28 17:57:11 +00:00
|
|
|
// Ingest error to metadata operations.
|
|
|
|
IOStatus InjectMetadataWriteError();
|
|
|
|
|
2020-04-11 00:18:56 +00:00
|
|
|
// Inject an error. For a READ operation, a status of IOError(), a
|
|
|
|
// corruption in the contents of scratch, or truncation of slice
|
|
|
|
// are the types of error with equal probability. For OPEN,
|
|
|
|
// its always an IOError.
|
2021-09-21 21:47:09 +00:00
|
|
|
// fault_injected returns whether a fault is injected. It is needed
|
|
|
|
// because some fault is inected with IOStatus to be OK.
|
2021-07-06 18:04:04 +00:00
|
|
|
IOStatus InjectThreadSpecificReadError(ErrorOperation op, Slice* slice,
|
2021-09-21 21:47:09 +00:00
|
|
|
bool direct_io, char* scratch,
|
|
|
|
bool need_count_increase,
|
|
|
|
bool* fault_injected);
|
2020-04-11 00:18:56 +00:00
|
|
|
|
|
|
|
// Get the count of how many times we injected since the previous call
|
|
|
|
int GetAndResetErrorCount() {
|
|
|
|
ErrorContext* ctx =
|
|
|
|
static_cast<ErrorContext*>(thread_local_error_->Get());
|
|
|
|
int count = 0;
|
|
|
|
if (ctx != nullptr) {
|
|
|
|
count = ctx->count;
|
|
|
|
ctx->count = 0;
|
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
void EnableErrorInjection() {
|
|
|
|
ErrorContext* ctx =
|
|
|
|
static_cast<ErrorContext*>(thread_local_error_->Get());
|
|
|
|
if (ctx) {
|
|
|
|
ctx->enable_error_injection = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-12-17 19:51:04 +00:00
|
|
|
void EnableWriteErrorInjection() {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
enable_write_error_injection_ = true;
|
|
|
|
}
|
2021-04-28 17:57:11 +00:00
|
|
|
void EnableMetadataWriteErrorInjection() {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
enable_metadata_write_error_injection_ = true;
|
|
|
|
}
|
|
|
|
|
2020-12-17 19:51:04 +00:00
|
|
|
void DisableWriteErrorInjection() {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
enable_write_error_injection_ = false;
|
|
|
|
}
|
|
|
|
|
2020-04-11 00:18:56 +00:00
|
|
|
void DisableErrorInjection() {
|
|
|
|
ErrorContext* ctx =
|
|
|
|
static_cast<ErrorContext*>(thread_local_error_->Get());
|
|
|
|
if (ctx) {
|
|
|
|
ctx->enable_error_injection = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-28 17:57:11 +00:00
|
|
|
void DisableMetadataWriteErrorInjection() {
|
|
|
|
MutexLock l(&mutex_);
|
|
|
|
enable_metadata_write_error_injection_ = false;
|
|
|
|
}
|
|
|
|
|
2021-07-06 18:04:04 +00:00
|
|
|
int read_error_one_in() const { return read_error_one_in_.load(); }
|
|
|
|
|
2020-04-11 00:18:56 +00:00
|
|
|
// We capture a backtrace every time a fault is injected, for debugging
|
|
|
|
// purposes. This call prints the backtrace to stderr and frees the
|
|
|
|
// saved callstack
|
|
|
|
void PrintFaultBacktrace();
|
|
|
|
|
2020-03-04 20:30:34 +00:00
|
|
|
private:
|
|
|
|
port::Mutex mutex_;
|
|
|
|
std::map<std::string, FSFileState> db_file_state_;
|
2021-10-11 23:22:10 +00:00
|
|
|
std::set<std::string> open_managed_files_;
|
2021-07-07 23:20:40 +00:00
|
|
|
// directory -> (file name -> file contents to recover)
|
|
|
|
// When data is recovered from unsyned parent directory, the files with
|
|
|
|
// empty file contents to recover is deleted. Those with non-empty ones
|
|
|
|
// will be recovered to content accordingly.
|
|
|
|
std::unordered_map<std::string, std::map<std::string, std::string>>
|
2020-03-04 20:30:34 +00:00
|
|
|
dir_to_new_files_since_last_sync_;
|
|
|
|
bool filesystem_active_; // Record flushes, syncs, writes
|
2020-04-11 00:18:56 +00:00
|
|
|
bool filesystem_writable_; // Bypass FaultInjectionTestFS and go directly
|
|
|
|
// to underlying FS for writable files
|
2020-03-04 20:30:34 +00:00
|
|
|
IOStatus error_;
|
2020-04-11 00:18:56 +00:00
|
|
|
|
2020-04-24 20:03:08 +00:00
|
|
|
enum ErrorType : int {
|
|
|
|
kErrorTypeStatus = 0,
|
|
|
|
kErrorTypeCorruption,
|
|
|
|
kErrorTypeTruncated,
|
|
|
|
kErrorTypeMax
|
|
|
|
};
|
|
|
|
|
2020-04-11 00:18:56 +00:00
|
|
|
struct ErrorContext {
|
|
|
|
Random rand;
|
|
|
|
int one_in;
|
|
|
|
int count;
|
|
|
|
bool enable_error_injection;
|
|
|
|
void* callstack;
|
2021-09-21 21:47:09 +00:00
|
|
|
std::string message;
|
2020-04-11 00:18:56 +00:00
|
|
|
int frames;
|
2020-04-24 20:03:08 +00:00
|
|
|
ErrorType type;
|
2020-04-11 00:18:56 +00:00
|
|
|
|
|
|
|
explicit ErrorContext(uint32_t seed)
|
2020-04-17 21:36:51 +00:00
|
|
|
: rand(seed),
|
|
|
|
enable_error_injection(false),
|
|
|
|
callstack(nullptr),
|
|
|
|
frames(0) {}
|
2020-04-14 18:04:39 +00:00
|
|
|
~ErrorContext() {
|
|
|
|
if (callstack) {
|
|
|
|
free(callstack);
|
|
|
|
}
|
|
|
|
}
|
2020-04-11 00:18:56 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
std::unique_ptr<ThreadLocalPtr> thread_local_error_;
|
2020-12-17 19:51:04 +00:00
|
|
|
bool enable_write_error_injection_;
|
2021-04-28 17:57:11 +00:00
|
|
|
bool enable_metadata_write_error_injection_;
|
2020-12-17 19:51:04 +00:00
|
|
|
Random write_error_rand_;
|
|
|
|
int write_error_one_in_;
|
2021-04-28 17:57:11 +00:00
|
|
|
int metadata_write_error_one_in_;
|
2021-07-06 18:04:04 +00:00
|
|
|
std::atomic<int> read_error_one_in_;
|
2021-06-30 23:45:44 +00:00
|
|
|
bool inject_for_all_file_types_;
|
2020-12-17 19:51:04 +00:00
|
|
|
std::vector<FileType> write_error_allowed_types_;
|
2021-07-16 23:08:14 +00:00
|
|
|
// File types where direct writable is skipped.
|
|
|
|
std::set<FileType> skip_direct_writable_types_;
|
2021-02-11 06:18:33 +00:00
|
|
|
bool ingest_data_corruption_before_write_;
|
|
|
|
ChecksumType checksum_handoff_func_tpye_;
|
2021-06-10 18:01:44 +00:00
|
|
|
bool fail_get_file_unique_id_;
|
2021-07-16 23:08:14 +00:00
|
|
|
|
|
|
|
// Extract number of type from file name. Return false if failing to fine
|
|
|
|
// them.
|
|
|
|
bool TryParseFileName(const std::string& file_name, uint64_t* number,
|
|
|
|
FileType* type);
|
2020-03-04 20:30:34 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
} // namespace ROCKSDB_NAMESPACE
|