// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#pragma once
|
|
|
|
|
|
|
|
|
|
|
|
#include <string>
#include <vector>

#include "db/db_impl/db_impl.h"
#include "logging/logging.h"
|
2019-03-26 23:41:31 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2019-03-26 23:41:31 +00:00
|
|
|
|
2019-05-24 22:26:02 +00:00
|
|
|
// A wrapper class to hold log reader, log reporter, log status.
|
2019-04-24 19:05:29 +00:00
|
|
|
class LogReaderContainer {
|
|
|
|
public:
|
|
|
|
LogReaderContainer()
|
|
|
|
: reader_(nullptr), reporter_(nullptr), status_(nullptr) {}
|
|
|
|
LogReaderContainer(Env* env, std::shared_ptr<Logger> info_log,
|
|
|
|
std::string fname,
|
|
|
|
std::unique_ptr<SequentialFileReader>&& file_reader,
|
|
|
|
uint64_t log_number) {
|
|
|
|
LogReporter* reporter = new LogReporter();
|
|
|
|
status_ = new Status();
|
|
|
|
reporter->env = env;
|
|
|
|
reporter->info_log = info_log.get();
|
|
|
|
reporter->fname = std::move(fname);
|
|
|
|
reporter->status = status_;
|
|
|
|
reporter_ = reporter;
|
|
|
|
// We intentially make log::Reader do checksumming even if
|
|
|
|
// paranoid_checks==false so that corruptions cause entire commits
|
|
|
|
// to be skipped instead of propagating bad information (like overly
|
|
|
|
// large sequence numbers).
|
|
|
|
reader_ = new log::FragmentBufferedReader(info_log, std::move(file_reader),
|
|
|
|
reporter, true /*checksum*/,
|
|
|
|
log_number);
|
|
|
|
}
|
|
|
|
log::FragmentBufferedReader* reader_;
|
|
|
|
log::Reader::Reporter* reporter_;
|
|
|
|
Status* status_;
|
|
|
|
~LogReaderContainer() {
|
|
|
|
delete reader_;
|
|
|
|
delete reporter_;
|
|
|
|
delete status_;
|
|
|
|
}
|
2022-10-25 20:49:09 +00:00
|
|
|
|
2019-04-24 19:05:29 +00:00
|
|
|
private:
|
|
|
|
struct LogReporter : public log::Reader::Reporter {
|
|
|
|
Env* env;
|
|
|
|
Logger* info_log;
|
|
|
|
std::string fname;
|
|
|
|
Status* status; // nullptr if immutable_db_options_.paranoid_checks==false
|
|
|
|
void Corruption(size_t bytes, const Status& s) override {
|
|
|
|
ROCKS_LOG_WARN(info_log, "%s%s: dropping %d bytes; %s",
|
|
|
|
(this->status == nullptr ? "(ignoring error) " : ""),
|
|
|
|
fname.c_str(), static_cast<int>(bytes),
|
|
|
|
s.ToString().c_str());
|
|
|
|
if (this->status != nullptr && this->status->ok()) {
|
|
|
|
*this->status = s;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
};
|
|
|
|
|
2019-05-24 22:26:02 +00:00
|
|
|
// The secondary instance shares access to the storage as the primary.
|
|
|
|
// The secondary is able to read and replay changes described in both the
|
|
|
|
// MANIFEST and the WAL files without coordination with the primary.
|
|
|
|
// The secondary instance can be opened using `DB::OpenAsSecondary`. After
|
|
|
|
// that, it can call `DBImplSecondary::TryCatchUpWithPrimary` to make best
|
|
|
|
// effort attempts to catch up with the primary.
|
2022-08-29 20:36:23 +00:00
|
|
|
// TODO: Share common structure with CompactedDBImpl and DBImplReadOnly
|
2019-03-26 23:41:31 +00:00
|
|
|
class DBImplSecondary : public DBImpl {
|
|
|
|
public:
|
2021-04-22 20:01:00 +00:00
|
|
|
DBImplSecondary(const DBOptions& options, const std::string& dbname,
|
|
|
|
std::string secondary_path);
|
2019-03-26 23:41:31 +00:00
|
|
|
~DBImplSecondary() override;
|
|
|
|
|
2019-05-24 22:26:02 +00:00
|
|
|
// Recover by replaying MANIFEST and WAL. Also initialize manifest_reader_
|
|
|
|
// and log_readers_ to facilitate future operations.
|
2019-03-26 23:41:31 +00:00
|
|
|
Status Recover(const std::vector<ColumnFamilyDescriptor>& column_families,
|
2020-09-17 22:39:25 +00:00
|
|
|
bool read_only, bool error_if_wal_file_exists,
|
2024-04-19 00:36:33 +00:00
|
|
|
bool error_if_data_exists_in_wals, bool is_retry = false,
|
|
|
|
uint64_t* = nullptr, RecoveryContext* recovery_ctx = nullptr,
|
|
|
|
bool* can_retry = nullptr) override;
|
2019-03-26 23:41:31 +00:00
|
|
|
|
2022-07-05 17:09:44 +00:00
|
|
|
// Can return IOError due to files being deleted by the primary. To avoid
|
|
|
|
// IOError in this case, application can coordinate between primary and
|
|
|
|
// secondaries so that primary will not delete files that are currently being
|
|
|
|
// used by the secondaries. The application can also provide a custom FS/Env
|
|
|
|
// implementation so that files will remain present until all primary and
|
|
|
|
// secondaries indicate that they can be deleted. As a partial hacky
|
|
|
|
// workaround, the secondaries can be opened with `max_open_files=-1` so that
|
|
|
|
// it eagerly keeps all talbe files open and is able to access the contents of
|
|
|
|
// deleted files via prior open fd.
|
2023-09-15 15:30:44 +00:00
|
|
|
using DBImpl::GetImpl;
|
|
|
|
Status GetImpl(const ReadOptions& options, const Slice& key,
|
|
|
|
GetImplOptions& get_impl_options) override;
|
2019-03-26 23:41:31 +00:00
|
|
|
|
|
|
|
using DBImpl::NewIterator;
|
2022-07-05 17:09:44 +00:00
|
|
|
// Operations on the created iterators can return IOError due to files being
|
|
|
|
// deleted by the primary. To avoid IOError in this case, application can
|
|
|
|
// coordinate between primary and secondaries so that primary will not delete
|
|
|
|
// files that are currently being used by the secondaries. The application can
|
|
|
|
// also provide a custom FS/Env implementation so that files will remain
|
|
|
|
// present until all primary and secondaries indicate that they can be
|
|
|
|
// deleted. As a partial hacky workaround, the secondaries can be opened with
|
|
|
|
// `max_open_files=-1` so that it eagerly keeps all talbe files open and is
|
|
|
|
// able to access the contents of deleted files via prior open fd.
|
Group rocksdb.sst.read.micros stat by different user read IOActivity + misc (#11444)
Summary:
**Context/Summary:**
- Similar to https://github.com/facebook/rocksdb/pull/11288 but for user read such as `Get(), MultiGet(), DBIterator::XXX(), Verify(File)Checksum()`.
- For this, I refactored some user-facing `MultiGet` calls in `TransactionBase` and various types of `DB` so that it does not call a user-facing `Get()` but `GetImpl()` for passing the `ReadOptions::io_activity` check (see PR conversation)
- New user read stats breakdown are guarded by `kExceptDetailedTimers` since measurement shows they have 4-5% regression to the upstream/main.
- Misc
- More refactoring: with https://github.com/facebook/rocksdb/pull/11288, we complete passing `ReadOptions/IOOptions` to FS level. So we can now replace the previously [added](https://github.com/facebook/rocksdb/pull/9424) `rate_limiter_priority` parameter in `RandomAccessFileReader`'s `Read/MultiRead/Prefetch()` with `IOOptions::rate_limiter_priority`
- Also, `ReadAsync()` call time is measured in `SST_READ_MICRO` now
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11444
Test Plan:
- CI fake db crash/stress test
- Microbenchmarking
**Build** `make clean && ROCKSDB_NO_FBCODE=1 DEBUG_LEVEL=0 make -jN db_basic_bench`
- google benchmark version: https://github.com/google/benchmark/commit/604f6fd3f4b34a84ec4eb4db81d842fa4db829cd
- db_basic_bench_base: upstream
- db_basic_bench_pr: db_basic_bench_base + this PR
- asyncread_db_basic_bench_base: upstream + [db basic bench patch for IteratorNext](https://github.com/facebook/rocksdb/compare/main...hx235:rocksdb:micro_bench_async_read)
- asyncread_db_basic_bench_pr: asyncread_db_basic_bench_base + this PR
**Test**
Get
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{null_stat|base|pr} --benchmark_filter=DBGet/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/negative_query:0/enable_filter:0/mmap:1/threads:1 --benchmark_repetitions=1000
```
Result
```
Coming soon
```
AsyncRead
```
TEST_TMPDIR=/dev/shm ./asyncread_db_basic_bench_{base|pr} --benchmark_filter=IteratorNext/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/async_io:1/include_detailed_timers:0 --benchmark_repetitions=1000 > syncread_db_basic_bench_{base|pr}.out
```
Result
```
Base:
1956,1956,1968,1977,1979,1986,1988,1988,1988,1990,1991,1991,1993,1993,1993,1993,1994,1996,1997,1997,1997,1998,1999,2001,2001,2002,2004,2007,2007,2008,
PR (2.3% regression, due to measuring `SST_READ_MICRO` that wasn't measured before):
1993,2014,2016,2022,2024,2027,2027,2028,2028,2030,2031,2031,2032,2032,2038,2039,2042,2044,2044,2047,2047,2047,2048,2049,2050,2052,2052,2052,2053,2053,
```
Reviewed By: ajkr
Differential Revision: D45918925
Pulled By: hx235
fbshipit-source-id: 58a54560d9ebeb3a59b6d807639692614dad058a
2023-08-09 00:26:50 +00:00
|
|
|
Iterator* NewIterator(const ReadOptions& _read_options,
|
2019-03-26 23:41:31 +00:00
|
|
|
ColumnFamilyHandle* column_family) override;
|
|
|
|
|
|
|
|
ArenaWrappedDBIter* NewIteratorImpl(const ReadOptions& read_options,
|
Access DBImpl* and CFD* by CFHImpl* in Iterators (#12395)
Summary:
In the current implementation of iterators, `DBImpl*` and `ColumnFamilyData*` are held in `DBIter` and `ArenaWrappedDBIter` for two purposes: tracing and Refresh() API. With the introduction of a new iterator called MultiCfIterator in PR https://github.com/facebook/rocksdb/issues/12153 , which is a cross-column-family iterator that maintains multiple DBIters as child iterators from a consistent database state, we need to make some changes to the existing implementation. The new iterator will still be exposed through the generic Iterator interface with an additional capability to return AttributeGroups (via `attribute_groups()`) which is a list of wide columns grouped by column family. For more information about AttributeGroup, please refer to previous PRs: https://github.com/facebook/rocksdb/issues/11925 #11943, and https://github.com/facebook/rocksdb/issues/11977.
To be able to return AttributeGroup in the default single CF iterator created, access to `ColumnFamilyHandle*` within `DBIter` is necessary. However, this is not currently available in `DBIter`. Since `DBImpl*` and `ColumnFamilyData*` can be easily accessed via `ColumnFamilyHandleImpl*`, we have decided to replace the pointers to `ColumnFamilyData` and `DBImpl` in `DBIter` with a pointer to `ColumnFamilyHandleImpl`.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/12395
Test Plan:
# Summary
In the current implementation of iterators, `DBImpl*` and `ColumnFamilyData*` are held in `DBIter` and `ArenaWrappedDBIter` for two purposes: tracing and Refresh() API. With the introduction of a new iterator called MultiCfIterator in PR #12153 , which is a cross-column-family iterator that maintains multiple DBIters as child iterators from a consistent database state, we need to make some changes to the existing implementation. The new iterator will still be exposed through the generic Iterator interface with an additional capability to return AttributeGroups (via `attribute_groups()`) which is a list of wide columns grouped by column family. For more information about AttributeGroup, please refer to previous PRs: #11925 #11943, and #11977.
To be able to return AttributeGroup in the default single CF iterator created, access to `ColumnFamilyHandle*` within `DBIter` is necessary. However, this is not currently available in `DBIter`. Since `DBImpl*` and `ColumnFamilyData*` can be easily accessed via `ColumnFamilyHandleImpl*`, we have decided to replace the pointers to `ColumnFamilyData` and `DBImpl` in `DBIter` with a pointer to `ColumnFamilyHandleImpl`.
# Test Plan
There should be no behavior changes. Existing tests and CI for the correctness tests.
**Test for Perf Regression**
Build
```
$> make -j64 release
```
Setup
```
$> TEST_TMPDIR=/dev/shm/db_bench ./db_bench -benchmarks="filluniquerandom" -key_size=32 -value_size=512 -num=1000000 -compression_type=none
```
Run
```
TEST_TMPDIR=/dev/shm/db_bench ./db_bench -use_existing_db=1 -benchmarks="newiterator,seekrandom" -cache_size=10485760000
```
Before the change
```
DB path: [/dev/shm/db_bench/dbbench]
newiterator : 0.552 micros/op 1810157 ops/sec 0.552 seconds 1000000 operations;
DB path: [/dev/shm/db_bench/dbbench]
seekrandom : 4.502 micros/op 222143 ops/sec 4.502 seconds 1000000 operations; (0 of 1000000 found)
```
After the change
```
DB path: [/dev/shm/db_bench/dbbench]
newiterator : 0.520 micros/op 1924401 ops/sec 0.520 seconds 1000000 operations;
DB path: [/dev/shm/db_bench/dbbench]
seekrandom : 4.532 micros/op 220657 ops/sec 4.532 seconds 1000000 operations; (0 of 1000000 found)
```
Reviewed By: pdillinger
Differential Revision: D54332713
Pulled By: jaykorean
fbshipit-source-id: b28d897ad519e58b1ca82eb068a6319544a4fae5
2024-03-01 18:28:20 +00:00
|
|
|
ColumnFamilyHandleImpl* cfh,
|
|
|
|
SuperVersion* sv, SequenceNumber snapshot,
|
2021-08-24 22:39:31 +00:00
|
|
|
ReadCallback* read_callback,
|
|
|
|
bool expose_blob_index = false,
|
|
|
|
bool allow_refresh = true);
|
2019-03-26 23:41:31 +00:00
|
|
|
|
Group rocksdb.sst.read.micros stat by different user read IOActivity + misc (#11444)
Summary:
**Context/Summary:**
- Similar to https://github.com/facebook/rocksdb/pull/11288 but for user read such as `Get(), MultiGet(), DBIterator::XXX(), Verify(File)Checksum()`.
- For this, I refactored some user-facing `MultiGet` calls in `TransactionBase` and various types of `DB` so that it does not call a user-facing `Get()` but `GetImpl()` for passing the `ReadOptions::io_activity` check (see PR conversation)
- New user read stats breakdown are guarded by `kExceptDetailedTimers` since measurement shows they have 4-5% regression to the upstream/main.
- Misc
- More refactoring: with https://github.com/facebook/rocksdb/pull/11288, we complete passing `ReadOptions/IOOptions` to FS level. So we can now replace the previously [added](https://github.com/facebook/rocksdb/pull/9424) `rate_limiter_priority` parameter in `RandomAccessFileReader`'s `Read/MultiRead/Prefetch()` with `IOOptions::rate_limiter_priority`
- Also, `ReadAsync()` call time is measured in `SST_READ_MICRO` now
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11444
Test Plan:
- CI fake db crash/stress test
- Microbenchmarking
**Build** `make clean && ROCKSDB_NO_FBCODE=1 DEBUG_LEVEL=0 make -jN db_basic_bench`
- google benchmark version: https://github.com/google/benchmark/commit/604f6fd3f4b34a84ec4eb4db81d842fa4db829cd
- db_basic_bench_base: upstream
- db_basic_bench_pr: db_basic_bench_base + this PR
- asyncread_db_basic_bench_base: upstream + [db basic bench patch for IteratorNext](https://github.com/facebook/rocksdb/compare/main...hx235:rocksdb:micro_bench_async_read)
- asyncread_db_basic_bench_pr: asyncread_db_basic_bench_base + this PR
**Test**
Get
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{null_stat|base|pr} --benchmark_filter=DBGet/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/negative_query:0/enable_filter:0/mmap:1/threads:1 --benchmark_repetitions=1000
```
Result
```
Coming soon
```
AsyncRead
```
TEST_TMPDIR=/dev/shm ./asyncread_db_basic_bench_{base|pr} --benchmark_filter=IteratorNext/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1/async_io:1/include_detailed_timers:0 --benchmark_repetitions=1000 > syncread_db_basic_bench_{base|pr}.out
```
Result
```
Base:
1956,1956,1968,1977,1979,1986,1988,1988,1988,1990,1991,1991,1993,1993,1993,1993,1994,1996,1997,1997,1997,1998,1999,2001,2001,2002,2004,2007,2007,2008,
PR (2.3% regression, due to measuring `SST_READ_MICRO` that wasn't measured before):
1993,2014,2016,2022,2024,2027,2027,2028,2028,2030,2031,2031,2032,2032,2038,2039,2042,2044,2044,2047,2047,2047,2048,2049,2050,2052,2052,2052,2053,2053,
```
Reviewed By: ajkr
Differential Revision: D45918925
Pulled By: hx235
fbshipit-source-id: 58a54560d9ebeb3a59b6d807639692614dad058a
2023-08-09 00:26:50 +00:00
|
|
|
Status NewIterators(const ReadOptions& _read_options,
|
2019-03-26 23:41:31 +00:00
|
|
|
const std::vector<ColumnFamilyHandle*>& column_families,
|
|
|
|
std::vector<Iterator*>* iterators) override;
|
|
|
|
|
|
|
|
using DBImpl::Put;
|
|
|
|
Status Put(const WriteOptions& /*options*/,
|
|
|
|
ColumnFamilyHandle* /*column_family*/, const Slice& /*key*/,
|
|
|
|
const Slice& /*value*/) override {
|
2019-05-18 02:16:51 +00:00
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
2019-03-26 23:41:31 +00:00
|
|
|
}
|
|
|
|
|
2022-06-25 22:30:47 +00:00
|
|
|
using DBImpl::PutEntity;
|
|
|
|
Status PutEntity(const WriteOptions& /* options */,
|
|
|
|
ColumnFamilyHandle* /* column_family */,
|
|
|
|
const Slice& /* key */,
|
|
|
|
const WideColumns& /* columns */) override {
|
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
|
|
|
}
|
2023-11-07 00:52:51 +00:00
|
|
|
Status PutEntity(const WriteOptions& /* options */, const Slice& /* key */,
|
|
|
|
const AttributeGroups& /* attribute_groups */) override {
|
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
|
|
|
}
|
2022-06-25 22:30:47 +00:00
|
|
|
|
2019-03-26 23:41:31 +00:00
|
|
|
using DBImpl::Merge;
|
|
|
|
Status Merge(const WriteOptions& /*options*/,
|
|
|
|
ColumnFamilyHandle* /*column_family*/, const Slice& /*key*/,
|
|
|
|
const Slice& /*value*/) override {
|
2019-05-18 02:16:51 +00:00
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
2019-03-26 23:41:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
using DBImpl::Delete;
|
|
|
|
Status Delete(const WriteOptions& /*options*/,
|
|
|
|
ColumnFamilyHandle* /*column_family*/,
|
|
|
|
const Slice& /*key*/) override {
|
2019-05-18 02:16:51 +00:00
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
2019-03-26 23:41:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
using DBImpl::SingleDelete;
|
|
|
|
Status SingleDelete(const WriteOptions& /*options*/,
|
|
|
|
ColumnFamilyHandle* /*column_family*/,
|
|
|
|
const Slice& /*key*/) override {
|
2019-05-18 02:16:51 +00:00
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
2019-03-26 23:41:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Status Write(const WriteOptions& /*options*/,
|
|
|
|
WriteBatch* /*updates*/) override {
|
2019-05-18 02:16:51 +00:00
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
2019-03-26 23:41:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
using DBImpl::CompactRange;
|
|
|
|
Status CompactRange(const CompactRangeOptions& /*options*/,
|
|
|
|
ColumnFamilyHandle* /*column_family*/,
|
|
|
|
const Slice* /*begin*/, const Slice* /*end*/) override {
|
2019-05-18 02:16:51 +00:00
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
2019-03-26 23:41:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
using DBImpl::CompactFiles;
|
|
|
|
Status CompactFiles(
|
|
|
|
const CompactionOptions& /*compact_options*/,
|
|
|
|
ColumnFamilyHandle* /*column_family*/,
|
|
|
|
const std::vector<std::string>& /*input_file_names*/,
|
|
|
|
const int /*output_level*/, const int /*output_path_id*/ = -1,
|
|
|
|
std::vector<std::string>* const /*output_file_names*/ = nullptr,
|
|
|
|
CompactionJobInfo* /*compaction_job_info*/ = nullptr) override {
|
2019-05-18 02:16:51 +00:00
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
2019-03-26 23:41:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Status DisableFileDeletions() override {
|
2019-05-18 02:16:51 +00:00
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
2019-03-26 23:41:31 +00:00
|
|
|
}
|
|
|
|
|
2024-02-14 02:36:25 +00:00
|
|
|
Status EnableFileDeletions() override {
|
2019-05-18 02:16:51 +00:00
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
2019-03-26 23:41:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Status GetLiveFiles(std::vector<std::string>&,
|
|
|
|
uint64_t* /*manifest_file_size*/,
|
|
|
|
bool /*flush_memtable*/ = true) override {
|
2019-05-18 02:16:51 +00:00
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
2019-03-26 23:41:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
using DBImpl::Flush;
|
|
|
|
Status Flush(const FlushOptions& /*options*/,
|
|
|
|
ColumnFamilyHandle* /*column_family*/) override {
|
2019-05-18 02:16:51 +00:00
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
2019-03-26 23:41:31 +00:00
|
|
|
}
|
|
|
|
|
2019-12-03 01:43:37 +00:00
|
|
|
using DBImpl::SetDBOptions;
|
|
|
|
Status SetDBOptions(const std::unordered_map<std::string, std::string>&
|
|
|
|
/*options_map*/) override {
|
|
|
|
// Currently not supported because changing certain options may cause
|
|
|
|
// flush/compaction.
|
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
|
|
|
}
|
|
|
|
|
|
|
|
using DBImpl::SetOptions;
|
|
|
|
Status SetOptions(
|
|
|
|
ColumnFamilyHandle* /*cfd*/,
|
|
|
|
const std::unordered_map<std::string, std::string>& /*options_map*/)
|
|
|
|
override {
|
|
|
|
// Currently not supported because changing certain options may cause
|
|
|
|
// flush/compaction and/or write to MANIFEST.
|
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
|
|
|
}
|
|
|
|
|
2019-03-26 23:41:31 +00:00
|
|
|
using DBImpl::SyncWAL;
|
|
|
|
Status SyncWAL() override {
|
2019-05-18 02:16:51 +00:00
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
2019-03-26 23:41:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
using DB::IngestExternalFile;
|
|
|
|
Status IngestExternalFile(
|
|
|
|
ColumnFamilyHandle* /*column_family*/,
|
|
|
|
const std::vector<std::string>& /*external_files*/,
|
|
|
|
const IngestExternalFileOptions& /*ingestion_options*/) override {
|
2019-05-18 02:16:51 +00:00
|
|
|
return Status::NotSupported("Not supported operation in secondary mode.");
|
2019-03-26 23:41:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Try to catch up with the primary by reading as much as possible from the
|
|
|
|
// log files until there is nothing more to read or encounters an error. If
|
|
|
|
// the amount of information in the log files to process is huge, this
|
|
|
|
// method can take long time due to all the I/O and CPU costs.
|
|
|
|
Status TryCatchUpWithPrimary() override;
|
|
|
|
|
2019-05-24 22:26:02 +00:00
|
|
|
// Try to find log reader using log_number from log_readers_ map, initialize
|
|
|
|
// if it doesn't exist
|
2019-04-24 19:05:29 +00:00
|
|
|
Status MaybeInitLogReader(uint64_t log_number,
|
|
|
|
log::FragmentBufferedReader** log_reader);
|
|
|
|
|
2019-06-17 22:36:20 +00:00
|
|
|
// Check if all live files exist on file system and that their file sizes
|
|
|
|
// matche to the in-memory records. It is possible that some live files may
|
|
|
|
// have been deleted by the primary. In this case, CheckConsistency() does
|
|
|
|
// not flag the missing file as inconsistency.
|
|
|
|
Status CheckConsistency() override;
|
|
|
|
|
2021-04-22 20:01:00 +00:00
|
|
|
#ifndef NDEBUG
|
2022-04-13 20:28:09 +00:00
|
|
|
Status TEST_CompactWithoutInstallation(const OpenAndCompactOptions& options,
|
|
|
|
ColumnFamilyHandle* cfh,
|
2021-04-22 20:01:00 +00:00
|
|
|
const CompactionServiceInput& input,
|
|
|
|
CompactionServiceResult* result) {
|
2022-04-13 20:28:09 +00:00
|
|
|
return CompactWithoutInstallation(options, cfh, input, result);
|
2021-04-22 20:01:00 +00:00
|
|
|
}
|
|
|
|
#endif // NDEBUG
|
|
|
|
|
2019-05-18 02:16:51 +00:00
|
|
|
protected:
|
2022-08-29 20:36:23 +00:00
|
|
|
Status FlushForGetLiveFiles() override {
|
|
|
|
// No-op for read-only DB
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2019-12-03 01:43:37 +00:00
|
|
|
bool OwnTablesAndLogs() const override {
|
|
|
|
// Currently, the secondary instance does not own the database files. It
|
|
|
|
// simply opens the files of the primary instance and tracks their file
|
|
|
|
// descriptors until they become obsolete. In the future, the secondary may
|
|
|
|
// create links to database files. OwnTablesAndLogs will return true then.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
Basic RocksDB follower implementation (#12540)
Summary:
A basic implementation of RocksDB follower mode, which opens a remote database (referred to as leader) on a distributed file system by tailing its MANIFEST. It leverages the secondary instance mode, but is different in some key ways -
1. It has its own directory with links to the leader's database
2. Periodically refreshes itself
3. (Future) Snapshot support
4. (Future) Garbage collection of obsolete links
5. (Long term) Memtable replication
There are two main classes implementing this functionality - `DBImplFollower` and `OnDemandFileSystem`. The former is derived from `DBImplSecondary`. Similar to `DBImplSecondary`, it implements recovery and catch up through MANIFEST tailing using the `ReactiveVersionSet`, but does not consider logs. In a future PR, we will implement memtable replication, which will eliminate the need to catch up using logs. In addition, the recovery and catch-up tries to avoid directory listing as repeated metadata operations are expensive.
The second main piece is the `OnDemandFileSystem`, which plugs in as an `Env` for the follower instance and creates the illusion of the follower directory as a clone of the leader directory. It creates links to SSTs on first reference. When the follower tails the MANIFEST and attempts to create a new `Version`, it calls `VerifyFileMetadata` to verify the size of the file, and optionally the unique ID of the file. During this process, links are created which prevent the underlying files from getting deallocated even if the leader deletes the files.
TODOs: Deletion of obsolete links, snapshots, robust checking against misconfigurations, better observability etc.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/12540
Reviewed By: jowlyzhang
Differential Revision: D56315718
Pulled By: anand1976
fbshipit-source-id: d19e1aca43a6af4000cb8622a718031b69ebd97b
2024-04-20 02:13:31 +00:00
|
|
|
std::unique_ptr<log::FragmentBufferedReader> manifest_reader_;
|
|
|
|
std::unique_ptr<log::Reader::Reporter> manifest_reporter_;
|
|
|
|
std::unique_ptr<Status> manifest_reader_status_;
|
|
|
|
|
2019-03-26 23:41:31 +00:00
|
|
|
private:
|
|
|
|
friend class DB;
|
|
|
|
|
|
|
|
// No copying allowed
|
|
|
|
DBImplSecondary(const DBImplSecondary&);
|
|
|
|
void operator=(const DBImplSecondary&);
|
|
|
|
|
|
|
|
using DBImpl::Recover;
|
|
|
|
|
2019-05-18 02:16:51 +00:00
|
|
|
Status FindAndRecoverLogFiles(
|
|
|
|
std::unordered_set<ColumnFamilyData*>* cfds_changed,
|
|
|
|
JobContext* job_context);
|
2019-05-08 17:56:38 +00:00
|
|
|
Status FindNewLogNumbers(std::vector<uint64_t>* logs);
|
2019-05-24 22:26:02 +00:00
|
|
|
// After manifest recovery, replay WALs and refresh log_readers_ if necessary
|
|
|
|
// REQUIRES: log_numbers are sorted in ascending order
|
2019-04-24 19:05:29 +00:00
|
|
|
Status RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
|
|
|
|
SequenceNumber* next_sequence,
|
2019-05-18 02:16:51 +00:00
|
|
|
std::unordered_set<ColumnFamilyData*>* cfds_changed,
|
|
|
|
JobContext* job_context);
|
2019-04-24 19:05:29 +00:00
|
|
|
|
2021-04-22 20:01:00 +00:00
|
|
|
// Run compaction without installation, the output files will be placed in the
|
|
|
|
// secondary DB path. The LSM tree won't be changed, the secondary DB is still
|
|
|
|
// in read-only mode.
|
2022-04-13 20:28:09 +00:00
|
|
|
Status CompactWithoutInstallation(const OpenAndCompactOptions& options,
|
|
|
|
ColumnFamilyHandle* cfh,
|
2021-04-22 20:01:00 +00:00
|
|
|
const CompactionServiceInput& input,
|
|
|
|
CompactionServiceResult* result);
|
|
|
|
|
2019-05-18 02:16:51 +00:00
|
|
|
// Cache log readers for each log number, used for continue WAL replay
|
2019-04-24 19:05:29 +00:00
|
|
|
// after recovery
|
|
|
|
std::map<uint64_t, std::unique_ptr<LogReaderContainer>> log_readers_;
|
2019-05-18 02:16:51 +00:00
|
|
|
|
|
|
|
// Current WAL number replayed for each column family.
|
|
|
|
std::unordered_map<ColumnFamilyData*, uint64_t> cfd_to_current_log_;
|
2021-04-22 20:01:00 +00:00
|
|
|
|
|
|
|
const std::string secondary_path_;
|
2019-03-26 23:41:31 +00:00
|
|
|
};
|
2019-04-24 19:05:29 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|