// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright 2014 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// This test uses a custom FileSystem to keep track of the state of a file
// system since the last "Sync". Data being written is cached in a "buffer";
// only when "Sync" is called does the data become persistent. This can
// simulate the loss of file data (or entire files) that was not protected
// by a "Sync". For any FileSystem-related operation, a specific error can be
// returned when the file system is deactivated, by specifying the
// "IOStatus Error".
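//
// Example usage (a minimal sketch, not the only pattern; `options` is a
// hypothetical Options object and the Env plumbing may differ per test):
//
//   auto fs = std::make_shared<FaultInjectionTestFS>(FileSystem::Default());
//   std::unique_ptr<Env> env(new CompositeEnvWrapper(Env::Default(), fs));
//   options.env = env.get();
//   // ... open a DB, write some data, maybe Sync ...
//   fs->SetFilesystemActive(false, IOStatus::IOError("injected"));
//   // ... subsequent I/O fails with the injected error ...
//   fs->SetFilesystemActive(true);
//   ASSERT_OK(fs->DropUnsyncedFileData());  // simulate a crash/power loss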
#pragma once
#include <algorithm>
#include <atomic>
#include <functional>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>

#include "file/filename.h"
#include "rocksdb/file_system.h"
#include "util/mutexlock.h"
#include "util/random.h"
#include "util/thread_local.h"

namespace ROCKSDB_NAMESPACE {

class TestFSWritableFile;
class FaultInjectionTestFS;

struct FSFileState {
  std::string filename_;
  ssize_t pos_at_last_append_;
  ssize_t pos_at_last_sync_;
  std::string buffer_;

  explicit FSFileState(const std::string& filename)
      : filename_(filename), pos_at_last_append_(-1), pos_at_last_sync_(-1) {}

  FSFileState() : pos_at_last_append_(-1), pos_at_last_sync_(-1) {}

  bool IsFullySynced() const {
    return pos_at_last_append_ <= 0 ||
           pos_at_last_append_ == pos_at_last_sync_;
  }

  IOStatus DropUnsyncedData();

  IOStatus DropRandomUnsyncedData(Random* rand);
};
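
// For intuition, a hedged example of the bookkeeping above (assuming
// unsynced writes are staged in buffer_ until "Sync"):
//   Append 100 bytes -> pos_at_last_append_ == 100, pos_at_last_sync_ == -1,
//                       so IsFullySynced() == false;
//   Sync             -> pos_at_last_sync_ == 100, buffered data persisted,
//                       so IsFullySynced() == true;
//   DropUnsyncedData() discards whatever was appended after the last "Sync".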

// A wrapper around an FSWritableFile that reports its state to
// FaultInjectionTestFS whenever the file is written to or sync'ed.
class TestFSWritableFile : public FSWritableFile {
 public:
  explicit TestFSWritableFile(const std::string& fname,
                              const FileOptions& file_opts,
                              std::unique_ptr<FSWritableFile>&& f,
                              FaultInjectionTestFS* fs);
  virtual ~TestFSWritableFile();
  IOStatus Append(const Slice& data, const IOOptions&,
                  IODebugContext*) override;
  IOStatus Append(const Slice& data, const IOOptions& options,
                  const DataVerificationInfo& verification_info,
                  IODebugContext* dbg) override;
  IOStatus Truncate(uint64_t size, const IOOptions& options,
                    IODebugContext* dbg) override;
  IOStatus Close(const IOOptions& options, IODebugContext* dbg) override;
  IOStatus Flush(const IOOptions&, IODebugContext*) override;
  IOStatus Sync(const IOOptions& options, IODebugContext* dbg) override;
  IOStatus RangeSync(uint64_t /*offset*/, uint64_t /*nbytes*/,
                     const IOOptions& options, IODebugContext* dbg) override;
  bool IsSyncThreadSafe() const override { return true; }
  IOStatus PositionedAppend(const Slice& data, uint64_t offset,
                            const IOOptions& options,
                            IODebugContext* dbg) override;
  IOStatus PositionedAppend(const Slice& data, uint64_t offset,
                            const IOOptions& options,
                            const DataVerificationInfo& verification_info,
                            IODebugContext* dbg) override;
  size_t GetRequiredBufferAlignment() const override {
    return target_->GetRequiredBufferAlignment();
  }
  bool use_direct_io() const override { return target_->use_direct_io(); }

  uint64_t GetFileSize(const IOOptions& options,
                       IODebugContext* dbg) override {
    MutexLock l(&mutex_);
    return target_->GetFileSize(options, dbg);
  }

 private:
  FSFileState state_;  // Protected by mutex_
  FileOptions file_opts_;
  std::unique_ptr<FSWritableFile> target_;
  bool writable_file_opened_;
  FaultInjectionTestFS* fs_;
  port::Mutex mutex_;
};

// A wrapper around an FSRandomRWFile that reports its state to
// FaultInjectionTestFS whenever the file is written to or sync'ed.
class TestFSRandomRWFile : public FSRandomRWFile {
 public:
  explicit TestFSRandomRWFile(const std::string& fname,
                              std::unique_ptr<FSRandomRWFile>&& f,
                              FaultInjectionTestFS* fs);
  virtual ~TestFSRandomRWFile();
  IOStatus Write(uint64_t offset, const Slice& data, const IOOptions& options,
                 IODebugContext* dbg) override;
  IOStatus Read(uint64_t offset, size_t n, const IOOptions& options,
                Slice* result, char* scratch,
                IODebugContext* dbg) const override;
  IOStatus Close(const IOOptions& options, IODebugContext* dbg) override;
  IOStatus Flush(const IOOptions& options, IODebugContext* dbg) override;
  IOStatus Sync(const IOOptions& options, IODebugContext* dbg) override;
  size_t GetRequiredBufferAlignment() const override {
    return target_->GetRequiredBufferAlignment();
  }
  bool use_direct_io() const override { return target_->use_direct_io(); }

 private:
  std::unique_ptr<FSRandomRWFile> target_;
  bool file_opened_;
  FaultInjectionTestFS* fs_;
};

class TestFSRandomAccessFile : public FSRandomAccessFile {
 public:
  explicit TestFSRandomAccessFile(const std::string& fname,
                                  std::unique_ptr<FSRandomAccessFile>&& f,
                                  FaultInjectionTestFS* fs);
  ~TestFSRandomAccessFile() override {}
  IOStatus Read(uint64_t offset, size_t n, const IOOptions& options,
                Slice* result, char* scratch,
                IODebugContext* dbg) const override;
  IOStatus ReadAsync(FSReadRequest& req, const IOOptions& opts,
                     std::function<void(FSReadRequest&, void*)> cb,
                     void* cb_arg, void** io_handle, IOHandleDeleter* del_fn,
                     IODebugContext* dbg) override;
  IOStatus MultiRead(FSReadRequest* reqs, size_t num_reqs,
                     const IOOptions& options, IODebugContext* dbg) override;
  size_t GetRequiredBufferAlignment() const override {
    return target_->GetRequiredBufferAlignment();
  }
  bool use_direct_io() const override { return target_->use_direct_io(); }

  size_t GetUniqueId(char* id, size_t max_size) const override;

 private:
  std::unique_ptr<FSRandomAccessFile> target_;
  FaultInjectionTestFS* fs_;
};

class TestFSSequentialFile : public FSSequentialFileOwnerWrapper {
 public:
  explicit TestFSSequentialFile(std::unique_ptr<FSSequentialFile>&& f,
                                FaultInjectionTestFS* fs, std::string fname)
      : FSSequentialFileOwnerWrapper(std::move(f)),
        fs_(fs),
        fname_(std::move(fname)) {}
  IOStatus Read(size_t n, const IOOptions& options, Slice* result,
                char* scratch, IODebugContext* dbg) override;
  IOStatus PositionedRead(uint64_t offset, size_t n, const IOOptions& options,
                          Slice* result, char* scratch,
                          IODebugContext* dbg) override;

 private:
  FaultInjectionTestFS* fs_;
  std::string fname_;
  size_t read_pos_ = 0;
};

class TestFSDirectory : public FSDirectory {
 public:
  explicit TestFSDirectory(FaultInjectionTestFS* fs, std::string dirname,
                           FSDirectory* dir)
      : fs_(fs), dirname_(std::move(dirname)), dir_(dir) {}
  ~TestFSDirectory() {}

  IOStatus Fsync(const IOOptions& options, IODebugContext* dbg) override;

  IOStatus Close(const IOOptions& options, IODebugContext* dbg) override;

  IOStatus FsyncWithDirOptions(
      const IOOptions& options, IODebugContext* dbg,
      const DirFsyncOptions& dir_fsync_options) override;

 private:
  FaultInjectionTestFS* fs_;
  std::string dirname_;
  std::unique_ptr<FSDirectory> dir_;
};

class FaultInjectionTestFS : public FileSystemWrapper {
 public:
  explicit FaultInjectionTestFS(const std::shared_ptr<FileSystem>& base)
      : FileSystemWrapper(base),
        filesystem_active_(true),
        filesystem_writable_(false),
        read_unsynced_data_(true),
        allow_link_open_file_(false),
        thread_local_error_(new ThreadLocalPtr(DeleteThreadLocalErrorContext)),
        enable_write_error_injection_(false),
        enable_metadata_write_error_injection_(false),
        write_error_rand_(0),
        write_error_one_in_(0),
        metadata_write_error_one_in_(0),
        read_error_one_in_(0),
        ingest_data_corruption_before_write_(false),
        checksum_handoff_func_type_(kCRC32c),
        fail_get_file_unique_id_(false) {}
  virtual ~FaultInjectionTestFS() { error_.PermitUncheckedError(); }

  static const char* kClassName() { return "FaultInjectionTestFS"; }
  const char* Name() const override { return kClassName(); }

  IOStatus NewDirectory(const std::string& name, const IOOptions& options,
                        std::unique_ptr<FSDirectory>* result,
                        IODebugContext* dbg) override;

  IOStatus NewWritableFile(const std::string& fname,
                           const FileOptions& file_opts,
                           std::unique_ptr<FSWritableFile>* result,
                           IODebugContext* dbg) override;

  IOStatus ReopenWritableFile(const std::string& fname,
                              const FileOptions& file_opts,
                              std::unique_ptr<FSWritableFile>* result,
                              IODebugContext* dbg) override;

  IOStatus ReuseWritableFile(const std::string& fname,
                             const std::string& old_fname,
                             const FileOptions& file_opts,
                             std::unique_ptr<FSWritableFile>* result,
                             IODebugContext* dbg) override;

  IOStatus NewRandomRWFile(const std::string& fname,
                           const FileOptions& file_opts,
                           std::unique_ptr<FSRandomRWFile>* result,
                           IODebugContext* dbg) override;

  IOStatus NewRandomAccessFile(const std::string& fname,
                               const FileOptions& file_opts,
                               std::unique_ptr<FSRandomAccessFile>* result,
                               IODebugContext* dbg) override;

  IOStatus NewSequentialFile(const std::string& f, const FileOptions& file_opts,
                             std::unique_ptr<FSSequentialFile>* r,
                             IODebugContext* dbg) override;

  IOStatus DeleteFile(const std::string& f, const IOOptions& options,
                      IODebugContext* dbg) override;

  IOStatus GetFileSize(const std::string& f, const IOOptions& options,
                       uint64_t* file_size, IODebugContext* dbg) override;

  IOStatus RenameFile(const std::string& s, const std::string& t,
                      const IOOptions& options, IODebugContext* dbg) override;

  IOStatus LinkFile(const std::string& src, const std::string& target,
                    const IOOptions& options, IODebugContext* dbg) override;

  // Undef to eliminate clash on Windows
#undef GetFreeSpace
  IOStatus GetFreeSpace(const std::string& path, const IOOptions& options,
                        uint64_t* disk_free, IODebugContext* dbg) override {
    IOStatus io_s;
    if (!IsFilesystemActive() &&
        error_.subcode() == IOStatus::SubCode::kNoSpace) {
      *disk_free = 0;
    } else {
      io_s = target()->GetFreeSpace(path, options, disk_free, dbg);
    }
    return io_s;
  }

  IOStatus Poll(std::vector<void*>& io_handles,
                size_t min_completions) override;

  IOStatus AbortIO(std::vector<void*>& io_handles) override;

  void WritableFileClosed(const FSFileState& state);

  void WritableFileSynced(const FSFileState& state);

  void WritableFileAppended(const FSFileState& state);

  IOStatus DropUnsyncedFileData();

  IOStatus DropRandomUnsyncedFileData(Random* rnd);

  IOStatus DeleteFilesCreatedAfterLastDirSync(const IOOptions& options,
                                              IODebugContext* dbg);

  void ResetState();

  void UntrackFile(const std::string& f);

  void SyncDir(const std::string& dirname) {
    MutexLock l(&mutex_);
    dir_to_new_files_since_last_sync_.erase(dirname);
  }

  // Setting the filesystem to inactive is the test equivalent of simulating
  // a system reset. Setting it to inactive freezes the saved filesystem
  // state so that it stops being recorded. It can then be reset back to the
  // state at the time of the "reset".
  bool IsFilesystemActive() {
    MutexLock l(&mutex_);
    return filesystem_active_;
  }

  // Setting filesystem_writable_ makes NewWritableFile, ReopenWritableFile,
  // and NewRandomRWFile bypass FaultInjectionTestFS and go directly to the
  // target FS.
  bool IsFilesystemDirectWritable() {
    MutexLock l(&mutex_);
    return filesystem_writable_;
  }
  bool ShouldUseDiretWritable(const std::string& file_name) {
    MutexLock l(&mutex_);
    if (filesystem_writable_) {
      return true;
    }
    FileType file_type = kTempFile;
    uint64_t file_number = 0;
    if (!TryParseFileName(file_name, &file_number, &file_type)) {
      return false;
    }
    return direct_writable_types_.find(file_type) !=
           direct_writable_types_.end();
  }

  void SetFilesystemActiveNoLock(
      bool active, IOStatus error = IOStatus::Corruption("Not active")) {
    error.PermitUncheckedError();
    filesystem_active_ = active;
    if (!active) {
      error_ = error;
    }
  }

  void SetFilesystemActive(
      bool active, IOStatus error = IOStatus::Corruption("Not active")) {
    MutexLock l(&mutex_);
    error.PermitUncheckedError();
    SetFilesystemActiveNoLock(active, error);
  }
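
  // Example of simulating a crash in a test (a hedged sketch; exact ordering
  // varies across tests):
  //
  //   fs->SetFilesystemActive(false);         // freeze state; I/O now fails
  //   ASSERT_OK(fs->DropUnsyncedFileData());  // lose data not yet synced,
  //                                           // as in a power loss
  //   fs->ResetState();
  //   fs->SetFilesystemActive(true);          // resume normal operation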
  void SetFilesystemDirectWritable(bool writable) {
    MutexLock l(&mutex_);
    filesystem_writable_ = writable;
  }

  // In places (e.g. GetSortedWals()) RocksDB relies on querying the file size
  // or even reading the contents of files currently open for writing, and,
  // as in POSIX semantics, expects to see the flushed size and contents
  // regardless of what has been synced. FaultInjectionTestFS historically
  // did not emulate this behavior, only showing synced data from such read
  // operations. (Different from FaultInjectionTestEnv--sigh.) Calling this
  // function with false restores the historical behavior for testing
  // stability, but use of these semantics must be phased out, as they are
  // inconsistent with expected FileSystem semantics. In other words, this
  // functionality is DEPRECATED. Intended to be set after construction and
  // then left unchanged (not thread safe).
  void SetReadUnsyncedData(bool read_unsynced_data) {
    read_unsynced_data_ = read_unsynced_data;
  }
  bool ReadUnsyncedData() const { return read_unsynced_data_; }

  // FaultInjectionTestFS normally includes a hygiene check for FileSystem
  // implementations that only support LinkFile() on closed files (not open
  // for write). Setting this to true bypasses the check.
  void SetAllowLinkOpenFile(bool allow_link_open_file = true) {
    allow_link_open_file_ = allow_link_open_file;
  }

  void AssertNoOpenFile() { assert(open_managed_files_.empty()); }

  IOStatus GetError() { return error_; }

  void SetFileSystemIOError(IOStatus io_error) {
    MutexLock l(&mutex_);
    io_error.PermitUncheckedError();
    error_ = io_error;
  }

  // To simulate data corruption before the data is written to the FS
  void IngestDataCorruptionBeforeWrite() {
    MutexLock l(&mutex_);
    ingest_data_corruption_before_write_ = true;
  }

  void NoDataCorruptionBeforeWrite() {
    MutexLock l(&mutex_);
    ingest_data_corruption_before_write_ = false;
  }

  bool ShouldDataCorruptionBeforeWrite() {
    MutexLock l(&mutex_);
    return ingest_data_corruption_before_write_;
  }

  void SetChecksumHandoffFuncType(const ChecksumType& func_type) {
    MutexLock l(&mutex_);
    checksum_handoff_func_type_ = func_type;
  }

  const ChecksumType& GetChecksumHandoffFuncType() {
    MutexLock l(&mutex_);
    return checksum_handoff_func_type_;
  }

  void SetFailGetUniqueId(bool flag) {
    MutexLock l(&mutex_);
    fail_get_file_unique_id_ = flag;
  }

  bool ShouldFailGetUniqueId() {
    MutexLock l(&mutex_);
    return fail_get_file_unique_id_;
  }

  // Specify the type of operation, so we can inject the right type of error
  enum ErrorOperation : char {
    kRead = 0,
    kMultiReadSingleReq = 1,
    kMultiRead = 2,
    kOpen,
  };

  // Set thread-local parameters for error injection. The first argument,
  // seed, is the seed for the random number generator; one_in determines
  // the probability of injecting an error (i.e., an error is injected with
  // 1/one_in probability).
  void SetThreadLocalReadErrorContext(uint32_t seed, int one_in,
                                      bool retryable) {
    struct ErrorContext* ctx =
        static_cast<struct ErrorContext*>(thread_local_error_->Get());
    if (ctx == nullptr) {
      ctx = new ErrorContext(seed);
      thread_local_error_->Reset(ctx);
    }
    ctx->one_in = one_in;
    ctx->count = 0;
    ctx->retryable = retryable;
  }
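
  // Example of per-thread read error injection (a hedged sketch):
  //
  //   fs->SetThreadLocalReadErrorContext(/*seed=*/42, /*one_in=*/10,
  //                                      /*retryable=*/false);
  //   fs->EnableErrorInjection();  // applies to this thread's context
  //   // ... issue reads; roughly 1 in 10 gets an injected fault ...
  //   int injected = fs->GetAndResetErrorCount();
  //   fs->DisableErrorInjection();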

  static void DeleteThreadLocalErrorContext(void* p) {
    ErrorContext* ctx = static_cast<ErrorContext*>(p);
    delete ctx;
  }

  // This sets the parameters for write error injection.
  // seed is the seed for the random number generator, and one_in determines
  // the probability of injecting an error (i.e., an error is injected with
  // 1/one_in probability). For write errors, we can specify the error we
  // want to inject. types decides the file types we want to inject the
  // error into (e.g., WAL files, SST files); it is empty by default.
  void SetRandomWriteError(uint32_t seed, int one_in, IOStatus error,
                           bool inject_for_all_file_types,
                           const std::vector<FileType>& types) {
    MutexLock l(&mutex_);
    Random tmp_rand(seed);
    error.PermitUncheckedError();
    error_ = error;
    write_error_rand_ = tmp_rand;
    write_error_one_in_ = one_in;
    inject_for_all_file_types_ = inject_for_all_file_types;
    write_error_allowed_types_ = types;
  }
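
  // Example: inject write errors only into WAL files (a hedged sketch):
  //
  //   fs->SetRandomWriteError(/*seed=*/123, /*one_in=*/100,
  //                           IOStatus::IOError("injected"),
  //                           /*inject_for_all_file_types=*/false,
  //                           {kWalFile});
  //   fs->EnableWriteErrorInjection();
  //   // ... WAL writes now fail with ~1/100 probability ...
  //   fs->DisableWriteErrorInjection();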

  void SetDirectWritableTypes(const std::set<FileType>& types) {
    MutexLock l(&mutex_);
    direct_writable_types_ = types;
  }
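
  // For instance (hedged), exempting WAL files from fault injection so only
  // other file types see injected write errors:
  //
  //   fs->SetDirectWritableTypes({kWalFile});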

  void SetRandomMetadataWriteError(int one_in) {
    MutexLock l(&mutex_);
    metadata_write_error_one_in_ = one_in;
  }

  // If the value is not 0, it is enabled. Otherwise, it is disabled.
  void SetRandomReadError(int one_in) { read_error_one_in_ = one_in; }

  bool ShouldInjectRandomReadError() {
    auto one_in = read_error_one_in();
    return one_in > 0 && Random::GetTLSInstance()->OneIn(one_in);
  }

  // Inject a write error with randomized parameters and the predefined
  // error type. Only the allowed file types will have the write error
  // injected.
  IOStatus InjectWriteError(const std::string& file_name);

  // Inject an error into metadata operations.
  IOStatus InjectMetadataWriteError();

  // Inject an error. For a READ operation, a status of IOError(), a
  // corruption in the contents of scratch, or truncation of the slice
  // are the types of error, with equal probability. For OPEN,
  // it's always an IOError.
  // fault_injected returns whether a fault was injected. It is needed
  // because some faults are injected with an IOStatus that is OK.
  IOStatus InjectThreadSpecificReadError(ErrorOperation op, Slice* slice,
                                         bool direct_io, char* scratch,
                                         bool need_count_increase,
                                         bool* fault_injected);

  // Get the count of how many times we injected since the previous call
  int GetAndResetErrorCount() {
    ErrorContext* ctx = static_cast<ErrorContext*>(thread_local_error_->Get());
    int count = 0;
    if (ctx != nullptr) {
      count = ctx->count;
      ctx->count = 0;
    }
    return count;
  }

  void EnableErrorInjection() {
    ErrorContext* ctx = static_cast<ErrorContext*>(thread_local_error_->Get());
    if (ctx) {
      ctx->enable_error_injection = true;
    }
  }

  void EnableWriteErrorInjection() {
    MutexLock l(&mutex_);
    enable_write_error_injection_ = true;
  }

  void EnableMetadataWriteErrorInjection() {
    MutexLock l(&mutex_);
    enable_metadata_write_error_injection_ = true;
  }

  void DisableWriteErrorInjection() {
    MutexLock l(&mutex_);
    enable_write_error_injection_ = false;
  }

  void DisableErrorInjection() {
    ErrorContext* ctx = static_cast<ErrorContext*>(thread_local_error_->Get());
    if (ctx) {
      ctx->enable_error_injection = false;
    }
  }

  void DisableMetadataWriteErrorInjection() {
    MutexLock l(&mutex_);
    enable_metadata_write_error_injection_ = false;
  }

  int read_error_one_in() const { return read_error_one_in_.load(); }

  int write_error_one_in() const { return write_error_one_in_; }

  // We capture a backtrace every time a fault is injected, for debugging
  // purposes. This call prints the backtrace to stderr and frees the
  // saved callstack.
  void PrintFaultBacktrace();

  void AddUnsyncedToRead(const std::string& fname, size_t offset, size_t n,
                         Slice* result, char* scratch);

 private:
  port::Mutex mutex_;
  std::map<std::string, FSFileState> db_file_state_;
  std::set<std::string> open_managed_files_;
  // directory -> (file name -> file contents to recover)
  // When data is recovered from an unsynced parent directory, files whose
  // recovery contents are empty are deleted; those with non-empty contents
  // are restored to those contents.
  std::unordered_map<std::string, std::map<std::string, std::string>>
      dir_to_new_files_since_last_sync_;
  bool filesystem_active_;    // Record flushes, syncs, writes
  bool filesystem_writable_;  // Bypass FaultInjectionTestFS and go directly
                              // to underlying FS for writable files
  bool read_unsynced_data_;   // See SetReadUnsyncedData()
  bool allow_link_open_file_;  // See SetAllowLinkOpenFile()
  IOStatus error_;

  enum ErrorType : int {
    kErrorTypeStatus = 0,
    kErrorTypeCorruption,
    kErrorTypeTruncated,
    kErrorTypeMax
  };

  struct ErrorContext {
    Random rand;
    int one_in;
    int count;
    bool enable_error_injection;
    void* callstack;
    std::string message;
    int frames;
    ErrorType type;
    bool retryable;

    explicit ErrorContext(uint32_t seed)
        : rand(seed),
          enable_error_injection(false),
          callstack(nullptr),
          frames(0),
          retryable(false) {}
    ~ErrorContext() {
      if (callstack) {
        free(callstack);
      }
    }
  };

  std::unique_ptr<ThreadLocalPtr> thread_local_error_;
  bool enable_write_error_injection_;
  bool enable_metadata_write_error_injection_;
  Random write_error_rand_;
  int write_error_one_in_;
  int metadata_write_error_one_in_;
  std::atomic<int> read_error_one_in_;
  bool inject_for_all_file_types_;
  std::vector<FileType> write_error_allowed_types_;
  // File types for which fault injection is skipped and writes go directly
  // to the target FS.
  std::set<FileType> direct_writable_types_;
  bool ingest_data_corruption_before_write_;
  ChecksumType checksum_handoff_func_type_;
  bool fail_get_file_unique_id_;

  // Extract the file number and type from a file name. Return false if they
  // cannot be parsed.
  bool TryParseFileName(const std::string& file_name, uint64_t* number,
                        FileType* type);
};

}  // namespace ROCKSDB_NAMESPACE