mirror of https://github.com/facebook/rocksdb.git
synced 2024-11-30 22:41:48 +00:00
f97e33454f
Summary:
A recent crash test failure shows that auto recovery from a WAL write failure can cause CFs to be inconsistent. A unit test repro is in P1569398553. The following is an example sequence of events (a minimal API sketch of steps 0-2 is included after the list):
```
0. manual_wal_flush is true. There are multiple CFs in a DB.
1. Submit a write batch with updates to multiple CFs.
2. A FlushWAL or a memtable switch tries to write the buffered WAL data. Fail this write so that the buffered WAL data is dropped: 4b1d595306/file/writable_file_writer.cc (L624)
The error needs to be retryable to start background auto recovery.
3. One CF successfully flushes its memtable during auto recovery.
4. Crash the process.
5. Reopen the DB. One CF will have the update as a result of the successful flush. Other CFs will miss all the updates in the write batch since the WAL does not have them.
```
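For reference, a minimal sketch of steps 0-2 against the public API (the DB path and CF names are illustrative, and the actual repro in P1569398553 additionally relies on fault injection to make the buffered WAL write fail with a retryable error):
```
#include <cassert>
#include <string>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/write_batch.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.create_missing_column_families = true;
  // Step 0: WAL writes are buffered until FlushWAL() or a memtable/WAL switch
  // writes them out.
  options.manual_wal_flush = true;

  std::vector<rocksdb::ColumnFamilyDescriptor> cf_descs = {
      {rocksdb::kDefaultColumnFamilyName, rocksdb::ColumnFamilyOptions()},
      {"cf1", rocksdb::ColumnFamilyOptions()}};
  std::vector<rocksdb::ColumnFamilyHandle*> handles;
  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/wal_repro_db", cf_descs, &handles, &db);
  assert(s.ok());

  // Step 1: one write batch updating multiple CFs.
  rocksdb::WriteBatch batch;
  s = batch.Put(handles[0], "k0", "v0");
  assert(s.ok());
  s = batch.Put(handles[1], "k1", "v1");
  assert(s.ok());
  s = db->Write(rocksdb::WriteOptions(), &batch);
  assert(s.ok());

  // Step 2: write the buffered WAL data. In the crash test this write is
  // failed with a retryable IOError, which drops the buffered WAL data and
  // kicks off background auto recovery.
  s = db->FlushWAL(/*sync=*/false);

  for (auto* h : handles) {
    delete h;
  }
  delete db;
  return s.ok() ? 0 : 1;
}
```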
This can happen if a user configures manual_wal_flush, uses more than one CF, and can hit retryable errors for WAL writes. This PR is a short-term fix that upgrades WAL-related errors to fatal severity so that they do not trigger auto recovery.
A long-term fix may be to not drop buffered WAL data by checking how much data was actually written, or to require atomically flushing all column families during error recovery from this kind of error.
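A hedged sketch of how an application could observe the new behavior through the listener API (the class name is hypothetical): after this change, a WAL write failure under manual_wal_flush should surface in OnBackgroundError with fatal severity, and OnErrorRecoveryBegin should not fire for it since fatal errors do not start auto recovery.
```
#include <cstdio>
#include <memory>

#include "rocksdb/listener.h"
#include "rocksdb/options.h"
#include "rocksdb/status.h"

// Hypothetical listener: logs background error severity and whether RocksDB
// is about to attempt auto recovery.
class WalErrorObserver : public rocksdb::EventListener {
 public:
  void OnBackgroundError(rocksdb::BackgroundErrorReason reason,
                         rocksdb::Status* bg_error) override {
    // With this PR, WAL write failures under manual_wal_flush arrive here with
    // Severity::kFatalError instead of a recoverable severity.
    std::fprintf(
        stderr, "bg error (reason %d): %s, fatal=%d\n",
        static_cast<int>(reason), bg_error->ToString().c_str(),
        bg_error->severity() >= rocksdb::Status::Severity::kFatalError);
  }

  void OnErrorRecoveryBegin(rocksdb::BackgroundErrorReason /*reason*/,
                            rocksdb::Status /*bg_error*/,
                            bool* /*auto_recovery*/) override {
    // Not expected for the WAL failure case above: fatal errors do not
    // trigger auto recovery.
    std::fprintf(stderr, "auto recovery is starting\n");
  }
};

// Registration (options is the rocksdb::Options used to open the DB):
//   options.listeners.emplace_back(std::make_shared<WalErrorObserver>());
```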
Pull Request resolved: https://github.com/facebook/rocksdb/pull/12995
Test Plan:
Added a unit test to check the error severity and whether recovery is triggered. A crash test repro command that fails within a few runs before this PR:
```
python3 ./tools/db_crashtest.py blackbox --interval=60 --metadata_write_fault_one_in=1000 --column_families=10 --exclude_wal_from_write_fault_injection=0 --manual_wal_flush_one_in=1000 --WAL_size_limit_MB=10240 --WAL_ttl_seconds=0 --acquire_snapshot_one_in=10000 --adaptive_readahead=1 --adm_policy=1 --advise_random_on_open=1 --allow_data_in_errors=True --allow_fallocate=1 --async_io=0 --auto_readahead_size=0 --avoid_flush_during_recovery=1 --avoid_flush_during_shutdown=1 --avoid_unnecessary_blocking_io=0 --backup_max_size=104857600 --backup_one_in=0 --batch_protection_bytes_per_key=0 --bgerror_resume_retry_interval=100 --block_align=1 --block_protection_bytes_per_key=0 --block_size=16384 --bloom_before_level=2147483647 --bottommost_compression_type=none --bottommost_file_compaction_delay=0 --bytes_per_sync=0 --cache_index_and_filter_blocks=1 --cache_index_and_filter_blocks_with_high_priority=1 --cache_size=33554432 --cache_type=auto_hyper_clock_cache --charge_compression_dictionary_building_buffer=0 --charge_file_metadata=1 --charge_filter_construction=1 --charge_table_reader=0 --check_multiget_consistency=0 --check_multiget_entity_consistency=0 --checkpoint_one_in=0 --checksum_type=kxxHash64 --clear_column_family_one_in=0 --compact_files_one_in=0 --compact_range_one_in=0 --compaction_pri=1 --compaction_readahead_size=1048576 --compaction_ttl=0 --compress_format_version=1 --compressed_secondary_cache_size=8388608 --compression_checksum=0 --compression_max_dict_buffer_bytes=0 --compression_max_dict_bytes=0 --compression_parallel_threads=4 --compression_type=none --compression_use_zstd_dict_trainer=1 --compression_zstd_max_train_bytes=0 --continuous_verification_interval=0 --daily_offpeak_time_utc= --data_block_index_type=0 --db_write_buffer_size=0 --decouple_partitioned_filters=1 --default_temperature=kCold --default_write_temperature=kWarm --delete_obsolete_files_period_micros=30000000 --delpercent=4 --delrangepercent=1 --destroy_db_initially=0 --detect_filter_construct_corruption=0 --disable_file_deletions_one_in=1000000 --disable_manual_compaction_one_in=1000000 --disable_wal=0 --dump_malloc_stats=1 --enable_checksum_handoff=1 --enable_compaction_filter=0 --enable_custom_split_merge=0 --enable_do_not_compress_roles=0 --enable_index_compression=0 --enable_memtable_insert_with_hint_prefix_extractor=0 --enable_pipelined_write=1 --enable_sst_partitioner_factory=0 --enable_thread_tracking=1 --enable_write_thread_adaptive_yield=1 --error_recovery_with_no_fault_injection=1 --fail_if_options_file_error=1 --fifo_allow_compaction=1 --file_checksum_impl=big --fill_cache=1 --flush_one_in=1000000 --format_version=6 --get_all_column_family_metadata_one_in=1000000 --get_current_wal_file_one_in=0 --get_live_files_apis_one_in=10000 --get_properties_of_all_tables_one_in=1000000 --get_property_one_in=100000 --get_sorted_wal_files_one_in=0 --hard_pending_compaction_bytes_limit=274877906944 --index_block_restart_interval=4 --index_shortening=1 --index_type=0 --ingest_external_file_one_in=0 --initial_auto_readahead_size=16384 --inplace_update_support=0 --iterpercent=10 --key_len_percent_dist=1,30,69 --key_may_exist_one_in=100000 --last_level_temperature=kWarm --level_compaction_dynamic_level_bytes=0 --lock_wal_one_in=10000 --log_file_time_to_roll=0 --log_readahead_size=0 --long_running_snapshots=0 --lowest_used_cache_tier=2 --manifest_preallocation_size=5120 --mark_for_compaction_one_file_in=10 --max_auto_readahead_size=0 --max_background_compactions=20 --max_bytes_for_level_base=10485760 --max_key=100000 
--max_key_len=3 --max_log_file_size=0 --max_manifest_file_size=1073741824 --max_sequential_skip_in_iterations=16 --max_total_wal_size=0 --max_write_batch_group_size_bytes=16777216 --max_write_buffer_number=10 --max_write_buffer_size_to_maintain=2097152 --memtable_insert_hint_per_batch=1 --memtable_max_range_deletions=0 --memtable_prefix_bloom_size_ratio=0.001 --memtable_protection_bytes_per_key=2 --memtable_whole_key_filtering=0 --memtablerep=skip_list --metadata_charge_policy=1 --metadata_read_fault_one_in=0 --min_write_buffer_number_to_merge=1 --mmap_read=1 --mock_direct_io=False --nooverwritepercent=1 --num_file_reads_for_auto_readahead=2 --open_files=100 --open_metadata_read_fault_one_in=0 --open_metadata_write_fault_one_in=0 --open_read_fault_one_in=0 --open_write_fault_one_in=0 --optimize_filters_for_hits=0 --optimize_filters_for_memory=0 --optimize_multiget_for_io=0 --paranoid_file_checks=1 --paranoid_memory_checks=0 --partition_filters=0 --partition_pinning=2 --pause_background_one_in=10000 --periodic_compaction_seconds=0 --prefix_size=8 --prefixpercent=5 --prepopulate_block_cache=0 --preserve_internal_time_seconds=0 --progress_reports=0 --promote_l0_one_in=0 --read_amp_bytes_per_bit=0 --read_fault_one_in=0 --readahead_size=524288 --readpercent=45 --recycle_log_file_num=0 --reopen=0 --report_bg_io_stats=0 --reset_stats_one_in=10000 --sample_for_compression=5 --secondary_cache_fault_one_in=0 --secondary_cache_uri= --set_options_one_in=10000 --skip_stats_update_on_db_open=1 --snapshot_hold_ops=100000 --soft_pending_compaction_bytes_limit=1048576 --sqfc_name=bar --sqfc_version=1 --sst_file_manager_bytes_per_sec=0 --sst_file_manager_bytes_per_truncate=0 --stats_dump_period_sec=600 --stats_history_buffer_size=1048576 --strict_bytes_per_sync=1 --subcompactions=2 --sync=0 --sync_fault_injection=1 --table_cache_numshardbits=6 --target_file_size_base=524288 --target_file_size_multiplier=2 --test_batches_snapshots=0 --top_level_index_pinning=3 --uncache_aggressiveness=8 --universal_max_read_amp=-1 --unpartitioned_pinning=2 --use_adaptive_mutex=1 --use_adaptive_mutex_lru=0 --use_attribute_group=1 --use_delta_encoding=0 --use_direct_io_for_flush_and_compaction=0 --use_direct_reads=0 --use_full_merge_v1=0 --use_get_entity=0 --use_merge=1 --use_multi_cf_iterator=1 --use_multi_get_entity=0 --use_multiget=0 --use_put_entity_one_in=1 --use_sqfc_for_range_queries=0 --use_timed_put_one_in=0 --use_write_buffer_manager=0 --user_timestamp_size=0 --value_size_mult=32 --verification_only=0 --verify_checksum=1 --verify_checksum_one_in=1000000 --verify_compression=1 --verify_db_one_in=100000 --verify_file_checksums_one_in=1000000 --verify_iterator_with_expected_state_one_in=5 --verify_sst_unique_id_in_manifest=1 --wal_bytes_per_sync=0 --wal_compression=none --write_buffer_size=4194304 --write_dbid_to_manifest=0 --write_fault_one_in=50 --writepercent=35 --ops_per_thread=100000 --preserve_unverified_changes=1
```
Reviewed By: hx235
Differential Revision: D62888510
Pulled By: cbi42
fbshipit-source-id: 308bdbbb8d897cc8eba950155cd0e37cf7eb76fe
832 lines
35 KiB
C++
// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
#include "db/error_handler.h"

#include "db/db_impl/db_impl.h"
#include "db/event_helpers.h"
#include "file/sst_file_manager_impl.h"
#include "logging/logging.h"
#include "port/lang.h"

namespace ROCKSDB_NAMESPACE {

// Maps to help decide the severity of an error based on the
// BackgroundErrorReason, Code, SubCode and whether db_options.paranoid_checks
// is set or not. There are 3 maps, going from most specific to least specific
// (i.e from all 4 fields in a tuple to only the BackgroundErrorReason and
// paranoid_checks). The less specific map serves as a catch all in case we miss
// a specific error code or subcode.
std::map<std::tuple<BackgroundErrorReason, Status::Code, Status::SubCode, bool>,
         Status::Severity>
    ErrorSeverityMap = {
        // Errors during BG compaction
        {std::make_tuple(BackgroundErrorReason::kCompaction,
                         Status::Code::kIOError, Status::SubCode::kNoSpace,
                         true),
         Status::Severity::kSoftError},
        {std::make_tuple(BackgroundErrorReason::kCompaction,
                         Status::Code::kIOError, Status::SubCode::kNoSpace,
                         false),
         Status::Severity::kNoError},
        {std::make_tuple(BackgroundErrorReason::kCompaction,
                         Status::Code::kIOError, Status::SubCode::kSpaceLimit,
                         true),
         Status::Severity::kHardError},
        {std::make_tuple(BackgroundErrorReason::kCompaction,
                         Status::Code::kIOError, Status::SubCode::kIOFenced,
                         true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kCompaction,
                         Status::Code::kIOError, Status::SubCode::kIOFenced,
                         false),
         Status::Severity::kFatalError},
        // Errors during BG flush
        {std::make_tuple(BackgroundErrorReason::kFlush, Status::Code::kIOError,
                         Status::SubCode::kNoSpace, true),
         Status::Severity::kHardError},
        {std::make_tuple(BackgroundErrorReason::kFlush, Status::Code::kIOError,
                         Status::SubCode::kNoSpace, false),
         Status::Severity::kNoError},
        {std::make_tuple(BackgroundErrorReason::kFlush, Status::Code::kIOError,
                         Status::SubCode::kSpaceLimit, true),
         Status::Severity::kHardError},
        {std::make_tuple(BackgroundErrorReason::kFlush, Status::Code::kIOError,
                         Status::SubCode::kIOFenced, true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kFlush, Status::Code::kIOError,
                         Status::SubCode::kIOFenced, false),
         Status::Severity::kFatalError},
        // Errors during Write
        {std::make_tuple(BackgroundErrorReason::kWriteCallback,
                         Status::Code::kIOError, Status::SubCode::kNoSpace,
                         true),
         Status::Severity::kHardError},
        {std::make_tuple(BackgroundErrorReason::kWriteCallback,
                         Status::Code::kIOError, Status::SubCode::kNoSpace,
                         false),
         Status::Severity::kHardError},
        {std::make_tuple(BackgroundErrorReason::kWriteCallback,
                         Status::Code::kIOError, Status::SubCode::kIOFenced,
                         true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kWriteCallback,
                         Status::Code::kIOError, Status::SubCode::kIOFenced,
                         false),
         Status::Severity::kFatalError},
        // Errors during MANIFEST write
        {std::make_tuple(BackgroundErrorReason::kManifestWrite,
                         Status::Code::kIOError, Status::SubCode::kNoSpace,
                         true),
         Status::Severity::kHardError},
        {std::make_tuple(BackgroundErrorReason::kManifestWrite,
                         Status::Code::kIOError, Status::SubCode::kNoSpace,
                         false),
         Status::Severity::kHardError},
        {std::make_tuple(BackgroundErrorReason::kManifestWrite,
                         Status::Code::kIOError, Status::SubCode::kIOFenced,
                         true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kManifestWrite,
                         Status::Code::kIOError, Status::SubCode::kIOFenced,
                         false),
         Status::Severity::kFatalError},
        // Errors during BG flush with WAL disabled
        {std::make_tuple(BackgroundErrorReason::kFlushNoWAL,
                         Status::Code::kIOError, Status::SubCode::kNoSpace,
                         true),
         Status::Severity::kHardError},
        {std::make_tuple(BackgroundErrorReason::kFlushNoWAL,
                         Status::Code::kIOError, Status::SubCode::kNoSpace,
                         false),
         Status::Severity::kNoError},
        {std::make_tuple(BackgroundErrorReason::kFlushNoWAL,
                         Status::Code::kIOError, Status::SubCode::kSpaceLimit,
                         true),
         Status::Severity::kHardError},
        {std::make_tuple(BackgroundErrorReason::kFlushNoWAL,
                         Status::Code::kIOError, Status::SubCode::kIOFenced,
                         true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kFlushNoWAL,
                         Status::Code::kIOError, Status::SubCode::kIOFenced,
                         false),
         Status::Severity::kFatalError},
        // Errors during MANIFEST write when WAL is disabled
        {std::make_tuple(BackgroundErrorReason::kManifestWriteNoWAL,
                         Status::Code::kIOError, Status::SubCode::kNoSpace,
                         true),
         Status::Severity::kHardError},
        {std::make_tuple(BackgroundErrorReason::kManifestWriteNoWAL,
                         Status::Code::kIOError, Status::SubCode::kNoSpace,
                         false),
         Status::Severity::kHardError},
        {std::make_tuple(BackgroundErrorReason::kManifestWriteNoWAL,
                         Status::Code::kIOError, Status::SubCode::kIOFenced,
                         true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kManifestWriteNoWAL,
                         Status::Code::kIOError, Status::SubCode::kIOFenced,
                         false),
         Status::Severity::kFatalError},

};

std::map<std::tuple<BackgroundErrorReason, Status::Code, bool>,
         Status::Severity>
    DefaultErrorSeverityMap = {
        // Errors during BG compaction
        {std::make_tuple(BackgroundErrorReason::kCompaction,
                         Status::Code::kCorruption, true),
         Status::Severity::kUnrecoverableError},
        {std::make_tuple(BackgroundErrorReason::kCompaction,
                         Status::Code::kCorruption, false),
         Status::Severity::kNoError},
        {std::make_tuple(BackgroundErrorReason::kCompaction,
                         Status::Code::kIOError, true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kCompaction,
                         Status::Code::kIOError, false),
         Status::Severity::kNoError},
        // Errors during BG flush
        {std::make_tuple(BackgroundErrorReason::kFlush,
                         Status::Code::kCorruption, true),
         Status::Severity::kUnrecoverableError},
        {std::make_tuple(BackgroundErrorReason::kFlush,
                         Status::Code::kCorruption, false),
         Status::Severity::kNoError},
        {std::make_tuple(BackgroundErrorReason::kFlush, Status::Code::kIOError,
                         true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kFlush, Status::Code::kIOError,
                         false),
         Status::Severity::kNoError},
        // Errors during Write
        {std::make_tuple(BackgroundErrorReason::kWriteCallback,
                         Status::Code::kCorruption, true),
         Status::Severity::kUnrecoverableError},
        {std::make_tuple(BackgroundErrorReason::kWriteCallback,
                         Status::Code::kCorruption, false),
         Status::Severity::kNoError},
        {std::make_tuple(BackgroundErrorReason::kWriteCallback,
                         Status::Code::kIOError, true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kWriteCallback,
                         Status::Code::kIOError, false),
         Status::Severity::kNoError},
        {std::make_tuple(BackgroundErrorReason::kManifestWrite,
                         Status::Code::kIOError, true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kManifestWrite,
                         Status::Code::kIOError, false),
         Status::Severity::kFatalError},
        // Errors during BG flush with WAL disabled
        {std::make_tuple(BackgroundErrorReason::kFlushNoWAL,
                         Status::Code::kCorruption, true),
         Status::Severity::kUnrecoverableError},
        {std::make_tuple(BackgroundErrorReason::kFlushNoWAL,
                         Status::Code::kCorruption, false),
         Status::Severity::kNoError},
        {std::make_tuple(BackgroundErrorReason::kFlushNoWAL,
                         Status::Code::kIOError, true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kFlushNoWAL,
                         Status::Code::kIOError, false),
         Status::Severity::kNoError},
        {std::make_tuple(BackgroundErrorReason::kManifestWriteNoWAL,
                         Status::Code::kIOError, true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kManifestWriteNoWAL,
                         Status::Code::kIOError, false),
         Status::Severity::kFatalError},
};

std::map<std::tuple<BackgroundErrorReason, bool>, Status::Severity>
    DefaultReasonMap = {
        // Errors during BG compaction
        {std::make_tuple(BackgroundErrorReason::kCompaction, true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kCompaction, false),
         Status::Severity::kNoError},
        // Errors during BG flush
        {std::make_tuple(BackgroundErrorReason::kFlush, true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kFlush, false),
         Status::Severity::kNoError},
        // Errors during Write
        {std::make_tuple(BackgroundErrorReason::kWriteCallback, true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kWriteCallback, false),
         Status::Severity::kFatalError},
        // Errors during Memtable update
        {std::make_tuple(BackgroundErrorReason::kMemTable, true),
         Status::Severity::kFatalError},
        {std::make_tuple(BackgroundErrorReason::kMemTable, false),
         Status::Severity::kFatalError},
};

void ErrorHandler::CancelErrorRecovery() {
  db_mutex_->AssertHeld();

  // We'll release the lock before calling sfm, so make sure no new
  // recovery gets scheduled at that point
  auto_recovery_ = false;
  SstFileManagerImpl* sfm =
      static_cast<SstFileManagerImpl*>(db_options_.sst_file_manager.get());
  if (sfm) {
    // This may or may not cancel a pending recovery
    db_mutex_->Unlock();
    bool cancelled = sfm->CancelErrorRecovery(this);
    db_mutex_->Lock();
    if (cancelled) {
      recovery_in_prog_ = false;
    }
  }

  // If auto recovery is also running to resume from the retryable error,
  // we should wait and end the auto recovery.
  EndAutoRecovery();
}

// This is the main function for looking at an error during a background
// operation and deciding the severity and error recovery strategy. The high
// level algorithm is as follows -
// 1. Classify the severity of the error based on the ErrorSeverityMap,
//    DefaultErrorSeverityMap and DefaultReasonMap defined earlier
// 2. Call a Status code specific override function to adjust the severity
//    if needed. The reason for this is our ability to recover may depend on
//    the exact options enabled in DBOptions
// 3. Determine if auto recovery is possible. A listener notification callback
//    is called, which can disable the auto recovery even if we decide it's
//    feasible
// 4. For Status::NoSpace() errors, rely on SstFileManagerImpl to control
//    the actual recovery. If no sst file manager is specified in DBOptions,
//    a default one is allocated during DB::Open(), so there will always be
//    one.
// This can also get called as part of a recovery operation. In that case, we
// also track the error separately in recovery_error_ so we can tell in the
// end whether recovery succeeded or not
void ErrorHandler::HandleKnownErrors(const Status& bg_err,
                                     BackgroundErrorReason reason) {
  db_mutex_->AssertHeld();
  if (bg_err.ok()) {
    return;
  }

  ROCKS_LOG_INFO(db_options_.info_log,
                 "ErrorHandler: Set regular background error\n");

  bool paranoid = db_options_.paranoid_checks;
  Status::Severity sev = Status::Severity::kFatalError;
  Status new_bg_err;
  DBRecoverContext context;
  bool found = false;

  {
    auto entry = ErrorSeverityMap.find(
        std::make_tuple(reason, bg_err.code(), bg_err.subcode(), paranoid));
    if (entry != ErrorSeverityMap.end()) {
      sev = entry->second;
      found = true;
    }
  }

  if (!found) {
    auto entry = DefaultErrorSeverityMap.find(
        std::make_tuple(reason, bg_err.code(), paranoid));
    if (entry != DefaultErrorSeverityMap.end()) {
      sev = entry->second;
      found = true;
    }
  }

  if (!found) {
    auto entry = DefaultReasonMap.find(std::make_tuple(reason, paranoid));
    if (entry != DefaultReasonMap.end()) {
      sev = entry->second;
    }
  }

  new_bg_err = Status(bg_err, sev);

  // Check if recovery is currently in progress. If it is, we will save this
  // error so we can check it at the end to see if recovery succeeded or not
  if (recovery_in_prog_ && recovery_error_.ok()) {
    recovery_error_ = status_to_io_status(Status(new_bg_err));
  }

  bool auto_recovery = auto_recovery_;
  if (new_bg_err.severity() >= Status::Severity::kFatalError && auto_recovery) {
    auto_recovery = false;
  }

  // Allow some error specific overrides
  if (new_bg_err.subcode() == IOStatus::SubCode::kNoSpace ||
      new_bg_err.subcode() == IOStatus::SubCode::kSpaceLimit) {
    new_bg_err = OverrideNoSpaceError(new_bg_err, &auto_recovery);
  }

  if (!new_bg_err.ok()) {
    Status s = new_bg_err;
    EventHelpers::NotifyOnBackgroundError(db_options_.listeners, reason, &s,
                                          db_mutex_, &auto_recovery);
    if (!s.ok() && (s.severity() > bg_error_.severity())) {
      bg_error_ = s;
    } else {
      // This error is less severe than previously encountered error. Don't
      // take any further action
      return;
    }
  }

  recover_context_ = context;
  if (auto_recovery) {
    recovery_in_prog_ = true;

    // Kick-off error specific recovery
    if (new_bg_err.subcode() == IOStatus::SubCode::kNoSpace ||
        new_bg_err.subcode() == IOStatus::SubCode::kSpaceLimit) {
      RecoverFromNoSpace();
    }
  }
  if (bg_error_.severity() >= Status::Severity::kHardError) {
    is_db_stopped_.store(true, std::memory_order_release);
  }
}

// This is the main function for looking at an IO-related error during
// background operations. The main logic is:
// A file scope IO error is treated as a retryable IO error in the write path.
// In RocksDB, if a file has a write IO error at file scope, RocksDB never
// writes to the same file again. RocksDB will create a new file and rewrite
// the whole content. Thus, it is retryable.
// There are three main categories of error handling:
// 1) if the error is caused by data loss, the error is mapped to
//    unrecoverable error. Application/user must take action to handle
//    this situation (the file scope case is excluded).
// 2) if the error is a retryable IO error (i.e., it is a file scope IO error,
//    or its retryable flag is set and it is not a data loss error), auto
//    resume (DBImpl::ResumeImpl) may be called and the auto resume can be
//    controlled by resume count and resume interval options. There are three
//    sub-cases:
//    a) if the error happens during compaction, it is mapped to a soft error.
//       The compaction thread will reschedule a new compaction. This doesn't
//       call auto resume.
//    b) if the error happens during flush and the WAL is empty, it is mapped
//       to a soft error. Note that it includes the case that an IO error
//       happens in SST or manifest write during flush. Auto resume will be
//       called.
//    c) all other errors are mapped to hard error. Auto resume will be called.
// 3) for other cases, HandleKnownErrors(const Status& bg_err,
//    BackgroundErrorReason reason) will be called to handle other error cases
//    such as delegating to SstFileManager to handle no space error.
void ErrorHandler::SetBGError(const Status& bg_status,
                              BackgroundErrorReason reason, bool wal_related) {
  db_mutex_->AssertHeld();
  Status tmp_status = bg_status;
  IOStatus bg_io_err = status_to_io_status(std::move(tmp_status));

  if (bg_io_err.ok()) {
    return;
  }
  ROCKS_LOG_WARN(db_options_.info_log, "Background IO error %s, reason %d",
                 bg_io_err.ToString().c_str(), static_cast<int>(reason));

  RecordStats({ERROR_HANDLER_BG_ERROR_COUNT, ERROR_HANDLER_BG_IO_ERROR_COUNT},
              {} /* int_histograms */);

  Status new_bg_io_err = bg_io_err;
  DBRecoverContext context;
  if (bg_io_err.GetScope() != IOStatus::IOErrorScope::kIOErrorScopeFile &&
      bg_io_err.GetDataLoss()) {
    // First, data loss (non file scope) is treated as unrecoverable error. So
    // it can directly overwrite any existing bg_error_.
    bool auto_recovery = false;
    Status bg_err(new_bg_io_err, Status::Severity::kUnrecoverableError);
    CheckAndSetRecoveryAndBGError(bg_err);
    ROCKS_LOG_INFO(
        db_options_.info_log,
        "ErrorHandler: Set background IO error as unrecoverable error\n");
    EventHelpers::NotifyOnBackgroundError(db_options_.listeners, reason,
                                          &bg_err, db_mutex_, &auto_recovery);
    recover_context_ = context;
    return;
  }
  if (wal_related) {
    assert(reason == BackgroundErrorReason::kWriteCallback ||
           reason == BackgroundErrorReason::kMemTable ||
           reason == BackgroundErrorReason::kFlush);
  }
  if (db_options_.manual_wal_flush && wal_related && bg_io_err.IsIOError()) {
    // With manual_wal_flush, a WAL write failure can drop buffered WAL writes.
    // Memtables and WAL then become inconsistent. A successful memtable flush
    // on one CF can cause CFs to be inconsistent upon restart. Before we fix
    // the bug in auto recovery from WAL write failures that can flush one CF
    // at a time, we set the error severity to fatal to disallow auto recovery.
    // TODO: remove parameter `wal_related` once we can automatically recover
    // from WAL write failures.
    bool auto_recovery = false;
    Status bg_err(new_bg_io_err, Status::Severity::kFatalError);
    CheckAndSetRecoveryAndBGError(bg_err);
    ROCKS_LOG_WARN(db_options_.info_log,
                   "ErrorHandler: A potentially WAL error happened, set "
                   "background IO error as fatal error\n");
    EventHelpers::NotifyOnBackgroundError(db_options_.listeners, reason,
                                          &bg_err, db_mutex_, &auto_recovery);
    recover_context_ = context;
    return;
  }

  if (bg_io_err.subcode() != IOStatus::SubCode::kNoSpace &&
      (bg_io_err.GetScope() == IOStatus::IOErrorScope::kIOErrorScopeFile ||
       bg_io_err.GetRetryable())) {
    // Second, check if the error is a retryable IO error (a file scope IO
    // error is also treated as a retryable IO error in the RocksDB write
    // path). If it is a retryable error and its severity is higher than
    // bg_error_, overwrite bg_error_ with the new error. At the current stage,
    // a retryable IO error from compaction is treated as a soft error. In
    // other cases, the retryable IO error is treated as a hard error. Note
    // that all NoSpace errors should be handled by
    // SstFileManager::StartErrorRecovery(); therefore, whether they are
    // retryable or file scope, this logic will be bypassed.

    RecordStats({ERROR_HANDLER_BG_RETRYABLE_IO_ERROR_COUNT},
                {} /* int_histograms */);
    ROCKS_LOG_INFO(db_options_.info_log,
                   "ErrorHandler: Set background retryable IO error\n");
    if (BackgroundErrorReason::kCompaction == reason) {
      // We map the retryable IO error during compaction to a soft error since
      // compaction can reschedule by itself. We will not set the BG error in
      // this case.
      // TODO: a better way to set or clean the retryable IO error which
      // happens during compaction SST file write.
      RecordStats({ERROR_HANDLER_AUTORESUME_COUNT}, {} /* int_histograms */);
      ROCKS_LOG_INFO(
          db_options_.info_log,
          "ErrorHandler: Compaction will schedule by itself to resume\n");
      bool auto_recovery = false;
      EventHelpers::NotifyOnBackgroundError(db_options_.listeners, reason,
                                            &new_bg_io_err, db_mutex_,
                                            &auto_recovery);
      // Not used in this code path.
      new_bg_io_err.PermitUncheckedError();
      return;
    }

    Status::Severity severity;
    if (BackgroundErrorReason::kFlushNoWAL == reason ||
        BackgroundErrorReason::kManifestWriteNoWAL == reason) {
      // When the BG retryable IO error reason is flush without WAL,
      // we map it to a soft error. At the same time, all the background work
      // should be stopped except the BG work from recovery. Therefore, we
      // set soft_error_no_bg_work_ to true. At the same time, since the DB
      // continues to receive writes when the BG error is a soft error, to
      // avoid too many small memtables being generated during auto resume,
      // the flush reason is set to kErrorRecoveryRetryFlush.
      severity = Status::Severity::kSoftError;
      soft_error_no_bg_work_ = true;
      context.flush_reason = FlushReason::kErrorRecoveryRetryFlush;
    } else {
      severity = Status::Severity::kHardError;
    }
    Status bg_err(new_bg_io_err, severity);
    CheckAndSetRecoveryAndBGError(bg_err);
    recover_context_ = context;
    bool auto_recovery = db_options_.max_bgerror_resume_count > 0;
    EventHelpers::NotifyOnBackgroundError(db_options_.listeners, reason,
                                          &new_bg_io_err, db_mutex_,
                                          &auto_recovery);
    StartRecoverFromRetryableBGIOError(bg_io_err);
    return;
  }
  HandleKnownErrors(new_bg_io_err, reason);
}

void ErrorHandler::AddFilesToQuarantine(
    autovector<const autovector<uint64_t>*> files_to_quarantine) {
  db_mutex_->AssertHeld();
  std::ostringstream quarantine_files_oss;
  bool is_first_one = true;
  for (const auto* files : files_to_quarantine) {
    assert(files);
    for (uint64_t file_number : *files) {
      files_to_quarantine_.push_back(file_number);
      quarantine_files_oss << (is_first_one ? "" : ", ") << file_number;
      is_first_one = false;
    }
  }
  ROCKS_LOG_INFO(db_options_.info_log,
                 "ErrorHandler: added file numbers %s to quarantine.\n",
                 quarantine_files_oss.str().c_str());
}

void ErrorHandler::ClearFilesToQuarantine() {
  db_mutex_->AssertHeld();
  files_to_quarantine_.clear();
  ROCKS_LOG_INFO(db_options_.info_log,
                 "ErrorHandler: cleared files in quarantine.\n");
}

Status ErrorHandler::OverrideNoSpaceError(const Status& bg_error,
                                          bool* auto_recovery) {
  if (bg_error.severity() >= Status::Severity::kFatalError) {
    return bg_error;
  }

  if (db_options_.sst_file_manager.get() == nullptr) {
    // We rely on SFM to poll for enough disk space and recover
    *auto_recovery = false;
    return bg_error;
  }

  if (db_options_.allow_2pc &&
      (bg_error.severity() <= Status::Severity::kSoftError)) {
    // Don't know how to recover, as the contents of the current WAL file may
    // be inconsistent, and it may be needed for 2PC. If 2PC is not enabled,
    // we can just flush the memtable and discard the log
    *auto_recovery = false;
    return Status(bg_error, Status::Severity::kFatalError);
  }

  {
    uint64_t free_space;
    if (db_options_.env->GetFreeSpace(db_options_.db_paths[0].path,
                                      &free_space) == Status::NotSupported()) {
      *auto_recovery = false;
    }
  }

  return bg_error;
}

void ErrorHandler::RecoverFromNoSpace() {
  SstFileManagerImpl* sfm =
      static_cast<SstFileManagerImpl*>(db_options_.sst_file_manager.get());

  // Inform SFM of the error, so it can kick-off the recovery
  if (sfm) {
    sfm->StartErrorRecovery(this, bg_error_);
  }
}

Status ErrorHandler::ClearBGError() {
  db_mutex_->AssertHeld();

  // Signal that recovery succeeded
  if (recovery_error_.ok()) {
    assert(files_to_quarantine_.empty());
    Status old_bg_error = bg_error_;
    // old_bg_error is only for notifying listeners, so may not be checked
    old_bg_error.PermitUncheckedError();
    // Clear and check the recovery IO and BG error
    is_db_stopped_.store(false, std::memory_order_release);
    bg_error_ = Status::OK();
    recovery_error_ = IOStatus::OK();
    bg_error_.PermitUncheckedError();
    recovery_error_.PermitUncheckedError();
    recovery_in_prog_ = false;
    soft_error_no_bg_work_ = false;
    EventHelpers::NotifyOnErrorRecoveryEnd(db_options_.listeners, old_bg_error,
                                           bg_error_, db_mutex_);
  }
  return recovery_error_;
}

Status ErrorHandler::RecoverFromBGError(bool is_manual) {
  InstrumentedMutexLock l(db_mutex_);
  bool no_bg_work_original_flag = soft_error_no_bg_work_;
  if (is_manual) {
    // If it's a manual recovery and there's a background recovery in progress
    // return busy status
    if (recovery_in_prog_) {
      return Status::Busy("Recovery already in progress");
    }
    recovery_in_prog_ = true;

    // In manual resume, we allow the bg work to run. If it is an auto resume,
    // the bg work should follow this tag.
    soft_error_no_bg_work_ = false;

    // In manual resume, if the bg error is a soft error and also requires
    // no bg work, the error must be recovered by calling flush with
    // flush reason: kErrorRecoveryRetryFlush. In other cases, the flush
    // reason is set to kErrorRecovery.
    if (no_bg_work_original_flag) {
      recover_context_.flush_reason = FlushReason::kErrorRecoveryRetryFlush;
    } else {
      recover_context_.flush_reason = FlushReason::kErrorRecovery;
    }
  }

  if (bg_error_.severity() == Status::Severity::kSoftError &&
      recover_context_.flush_reason == FlushReason::kErrorRecovery) {
    // Simply clear the background error and return
    recovery_error_ = IOStatus::OK();
    return ClearBGError();
  }

  // Reset recovery_error_. We will use this to record any errors that happen
  // during the recovery process. While recovering, the only operations that
  // can generate background errors should be the flush operations
  recovery_error_ = IOStatus::OK();
  recovery_error_.PermitUncheckedError();
  Status s = db_->ResumeImpl(recover_context_);
  if (s.ok()) {
    soft_error_no_bg_work_ = false;
  } else {
    soft_error_no_bg_work_ = no_bg_work_original_flag;
  }

  // For manual recover, shutdown, and fatal error cases, set
  // recovery_in_prog_ to false. For automatic background recovery, leave it
  // as is regardless of success or failure as it will be retried
  if (is_manual || s.IsShutdownInProgress() ||
      bg_error_.severity() >= Status::Severity::kFatalError) {
    recovery_in_prog_ = false;
  }
  return s;
}

void ErrorHandler::StartRecoverFromRetryableBGIOError(
    const IOStatus& io_error) {
  db_mutex_->AssertHeld();
  if (bg_error_.ok() || io_error.ok()) {
    return;
  }
  if (db_options_.max_bgerror_resume_count <= 0 || recovery_in_prog_) {
    // Auto resume BG error is not enabled
    return;
  }
  if (end_recovery_) {
    // Can temporarily release db mutex
    EventHelpers::NotifyOnErrorRecoveryEnd(db_options_.listeners, bg_error_,
                                           Status::ShutdownInProgress(),
                                           db_mutex_);
    db_mutex_->AssertHeld();
    return;
  }
  RecordStats({ERROR_HANDLER_AUTORESUME_COUNT}, {} /* int_histograms */);
  ROCKS_LOG_INFO(
      db_options_.info_log,
      "ErrorHandler: Call StartRecoverFromRetryableBGIOError to resume\n");
  // Needs to be set in the same lock hold as setting BG error, otherwise
  // intervening writes could see a BG error without a recovery and bail out.
  recovery_in_prog_ = true;

  if (recovery_thread_) {
    // Ensure only one thread can execute the join().
    std::unique_ptr<port::Thread> old_recovery_thread(
        std::move(recovery_thread_));
    // In this case, if recovery_in_prog_ is false, the current thread should
    // wait for the previous recovery thread to finish and create a new thread
    // to recover from the bg error.
    db_mutex_->Unlock();
    TEST_SYNC_POINT(
        "StartRecoverFromRetryableBGIOError:BeforeWaitingForOtherThread");
    old_recovery_thread->join();
    TEST_SYNC_POINT(
        "StartRecoverFromRetryableBGIOError:AfterWaitingForOtherThread");
    db_mutex_->Lock();
  }

  recovery_thread_.reset(
      new port::Thread(&ErrorHandler::RecoverFromRetryableBGIOError, this));
}

// Automatic recovery from a retryable BG IO error. Must be called after the db
// mutex is released.
void ErrorHandler::RecoverFromRetryableBGIOError() {
  assert(recovery_in_prog_);
  TEST_SYNC_POINT("RecoverFromRetryableBGIOError:BeforeStart");
  TEST_SYNC_POINT("RecoverFromRetryableBGIOError:BeforeStart2");
  InstrumentedMutexLock l(db_mutex_);
  if (end_recovery_) {
    EventHelpers::NotifyOnErrorRecoveryEnd(db_options_.listeners, bg_error_,
                                           Status::ShutdownInProgress(),
                                           db_mutex_);

    recovery_in_prog_ = false;
    return;
  }
  DBRecoverContext context = recover_context_;
  context.flush_after_recovery = true;
  int resume_count = db_options_.max_bgerror_resume_count;
  uint64_t wait_interval = db_options_.bgerror_resume_retry_interval;
  uint64_t retry_count = 0;
  // Recover from the retryable error. Create a separate thread to do it.
  while (resume_count > 0) {
    if (end_recovery_) {
      EventHelpers::NotifyOnErrorRecoveryEnd(db_options_.listeners, bg_error_,
                                             Status::ShutdownInProgress(),
                                             db_mutex_);
      recovery_in_prog_ = false;
      return;
    }
    TEST_SYNC_POINT("RecoverFromRetryableBGIOError:BeforeResume0");
    TEST_SYNC_POINT("RecoverFromRetryableBGIOError:BeforeResume1");
    recovery_error_ = IOStatus::OK();
    retry_count++;
    Status s = db_->ResumeImpl(context);
    RecordStats({ERROR_HANDLER_AUTORESUME_RETRY_TOTAL_COUNT},
                {} /* int_histograms */);
    if (s.IsShutdownInProgress() ||
        bg_error_.severity() >= Status::Severity::kFatalError) {
      // If DB shutdown is in progress or the error severity is higher than
      // Hard Error, stop auto resume and return.
      recovery_in_prog_ = false;
      RecordStats({} /* ticker_types */,
                  {{ERROR_HANDLER_AUTORESUME_RETRY_COUNT, retry_count}});
      EventHelpers::NotifyOnErrorRecoveryEnd(db_options_.listeners, bg_error_,
                                             bg_error_, db_mutex_);
      return;
    }
    if (!recovery_error_.ok() &&
        recovery_error_.severity() <= Status::Severity::kHardError &&
        recovery_error_.GetRetryable()) {
      // If a new BG IO error happens during auto recovery and it is retryable
      // and its severity is Hard Error or lower, the auto resume sleeps for
      // a period of time and redoes auto resume if it is allowed.
      TEST_SYNC_POINT("RecoverFromRetryableBGIOError:BeforeWait0");
      TEST_SYNC_POINT("RecoverFromRetryableBGIOError:BeforeWait1");
      int64_t wait_until = db_options_.clock->NowMicros() + wait_interval;
      cv_.TimedWait(wait_until);
    } else {
      // There are three possibilities: 1) recovery_error_ is set during resume
      // and the error is not retryable, 2) recovery is successful, 3) another
      // error happens during resume and cannot be resumed here.
      if (recovery_error_.ok() && s.ok()) {
        // Recovered from the retryable IO error and no other BG errors. Clean
        // the bg_error and notify user.
        TEST_SYNC_POINT("RecoverFromRetryableBGIOError:RecoverSuccess");
        RecordStats({ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT},
                    {{ERROR_HANDLER_AUTORESUME_RETRY_COUNT, retry_count}});
        return;
      } else {
        // In this case: 1) recovery_error_ is more serious or not retryable,
        // or 2) another error happens. The auto recovery stops.
        recovery_in_prog_ = false;
        RecordStats({} /* ticker_types */,
                    {{ERROR_HANDLER_AUTORESUME_RETRY_COUNT, retry_count}});
        EventHelpers::NotifyOnErrorRecoveryEnd(
            db_options_.listeners, bg_error_,
            !recovery_error_.ok() ? recovery_error_ : s, db_mutex_);
        return;
      }
    }
    resume_count--;
  }
  recovery_in_prog_ = false;
  EventHelpers::NotifyOnErrorRecoveryEnd(
      db_options_.listeners, bg_error_,
      Status::Aborted("Exceeded resume retry count"), db_mutex_);
  TEST_SYNC_POINT("RecoverFromRetryableBGIOError:LoopOut");
  RecordStats({} /* ticker_types */,
              {{ERROR_HANDLER_AUTORESUME_RETRY_COUNT, retry_count}});
}

void ErrorHandler::CheckAndSetRecoveryAndBGError(const Status& bg_err) {
  if (recovery_in_prog_ && recovery_error_.ok()) {
    recovery_error_ = status_to_io_status(Status(bg_err));
  }
  if (bg_err.severity() > bg_error_.severity()) {
    bg_error_ = bg_err;
  }
  if (bg_error_.severity() >= Status::Severity::kHardError) {
    is_db_stopped_.store(true, std::memory_order_release);
  }
}

void ErrorHandler::EndAutoRecovery() {
  db_mutex_->AssertHeld();
  if (!end_recovery_) {
    end_recovery_ = true;
  }
  if (recovery_thread_) {
    // Ensure only one thread can execute the join().
    std::unique_ptr<port::Thread> old_recovery_thread(
        std::move(recovery_thread_));
    db_mutex_->Unlock();
    cv_.SignalAll();
    old_recovery_thread->join();
    db_mutex_->Lock();
  }
  TEST_SYNC_POINT("PostEndAutoRecovery");
}

void ErrorHandler::RecordStats(
    const std::vector<Tickers>& ticker_types,
    const std::vector<std::tuple<Histograms, uint64_t>>& int_histograms) {
  if (bg_error_stats_ == nullptr) {
    return;
  }
  for (const auto& ticker_type : ticker_types) {
    RecordTick(bg_error_stats_.get(), ticker_type);
  }

  for (const auto& hist : int_histograms) {
    RecordInHistogram(bg_error_stats_.get(), std::get<0>(hist),
                      std::get<1>(hist));
  }
}

}  // namespace ROCKSDB_NAMESPACE