2016-02-09 23:12:00 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-15 23:03:42 +00:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2012-11-06 03:18:49 +00:00
|
|
|
|
2019-05-31 18:52:59 +00:00
|
|
|
#include "db/db_impl/db_impl_readonly.h"
|
2015-07-14 01:10:31 +00:00
|
|
|
|
2020-07-03 02:24:25 +00:00
|
|
|
#include "db/arena_wrapped_db_iter.h"
|
2021-03-23 20:47:56 +00:00
|
|
|
#include "db/db_impl/compacted_db_impl.h"
|
2019-05-31 22:21:36 +00:00
|
|
|
#include "db/db_impl/db_impl.h"
|
2014-09-25 18:14:01 +00:00
|
|
|
#include "db/db_iter.h"
|
2016-11-04 01:40:23 +00:00
|
|
|
#include "db/merge_context.h"
|
2021-09-29 11:01:57 +00:00
|
|
|
#include "logging/logging.h"
|
2017-04-06 02:02:00 +00:00
|
|
|
#include "monitoring/perf_context_imp.h"
|
2020-07-03 02:24:25 +00:00
|
|
|
#include "util/cast_util.h"
|
2012-11-06 03:18:49 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2012-11-06 03:18:49 +00:00
|
|
|
|
2014-11-26 19:37:59 +00:00
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
|
2014-09-09 01:46:52 +00:00
|
|
|
// Construct a read-only DB instance on top of DBImpl.
// NOTE(review): passing /*read_only*/ true is presumably what makes the base
// class skip write-path setup — confirm against the DBImpl constructor.
DBImplReadOnly::DBImplReadOnly(const DBOptions& db_options,
                               const std::string& dbname)
    : DBImpl(db_options, dbname, /*seq_per_batch*/ false,
             /*batch_per_txn*/ true, /*read_only*/ true) {
  ROCKS_LOG_INFO(immutable_db_options_.info_log,
                 "Opening the db in read only mode");
  // Flush so the "read only mode" notice reaches the info log promptly.
  LogFlush(immutable_db_options_.info_log);
}
|
|
|
|
|
2018-04-13 00:55:14 +00:00
|
|
|
// Out-of-line destructor; all cleanup is handled by the DBImpl base class.
DBImplReadOnly::~DBImplReadOnly() = default;
|
2012-11-06 03:18:49 +00:00
|
|
|
|
|
|
|
// Implementations of the DB interface
|
2014-09-09 01:46:52 +00:00
|
|
|
// Point lookup against the read-only view of the DB: consult the (immutable)
// memtable first, then fall through to the SST files of the current version.
// The result is stored into *pinnable_val; the returned Status carries
// NotFound / corruption / merge errors from the lower layers.
Status DBImplReadOnly::Get(const ReadOptions& read_options,
                           ColumnFamilyHandle* column_family, const Slice& key,
                           PinnableSlice* pinnable_val) {
  assert(pinnable_val != nullptr);
  // TODO: stopwatch DB_GET needed?, perf timer needed?
  PERF_TIMER_GUARD(get_snapshot_time);
  Status s;
  // Read at the latest sequence number; a read-only DB does not advance it.
  SequenceNumber snapshot = versions_->LastSequence();
  auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
  auto cfd = cfh->cfd();
  // Double-checked pattern: tracer_ is re-tested under trace_mutex_ so the
  // common untraced path does not take the lock.
  if (tracer_) {
    InstrumentedMutexLock lock(&trace_mutex_);
    if (tracer_) {
      tracer_->Get(column_family, key);
    }
  }
  // NOTE(review): the super version is used without Ref()/Unref() here,
  // presumably safe because a read-only DB never installs a new super
  // version after open — confirm against OpenForReadOnlyWithoutCheck.
  SuperVersion* super_version = cfd->GetSuperVersion();
  MergeContext merge_context;
  SequenceNumber max_covering_tombstone_seq = 0;
  LookupKey lkey(key, snapshot);
  PERF_TIMER_STOP(get_snapshot_time);
  if (super_version->mem->Get(lkey, pinnable_val->GetSelf(),
                              /*timestamp=*/nullptr, &s, &merge_context,
                              &max_covering_tombstone_seq, read_options)) {
    pinnable_val->PinSelf();
    RecordTick(stats_, MEMTABLE_HIT);
  } else {
    // Not found in the memtable: search the SSTs of the current version.
    PERF_TIMER_GUARD(get_from_output_files_time);
    super_version->current->Get(read_options, lkey, pinnable_val,
                                /*timestamp=*/nullptr, &s, &merge_context,
                                &max_covering_tombstone_seq);
    RecordTick(stats_, MEMTABLE_MISS);
  }
  // Read statistics are recorded regardless of hit/miss outcome.
  RecordTick(stats_, NUMBER_KEYS_READ);
  size_t size = pinnable_val->size();
  RecordTick(stats_, BYTES_READ, size);
  RecordInHistogram(stats_, BYTES_PER_READ, size);
  PERF_COUNTER_ADD(get_read_bytes, size);
  return s;
}
|
|
|
|
|
2014-09-08 22:04:34 +00:00
|
|
|
// Create an iterator over a single column family. The iterator reads at the
// caller-supplied snapshot if one is set in read_options, otherwise at the
// latest sequence number. Ownership of the returned iterator passes to the
// caller.
Iterator* DBImplReadOnly::NewIterator(const ReadOptions& read_options,
                                      ColumnFamilyHandle* column_family) {
  auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
  auto cfd = cfh->cfd();
  // Take a reference on the super version for the lifetime of the iterator
  // (released when the arena-wrapped iterator is destroyed).
  SuperVersion* super_version = cfd->GetSuperVersion()->Ref();
  SequenceNumber latest_snapshot = versions_->LastSequence();
  SequenceNumber read_seq =
      read_options.snapshot != nullptr
          ? reinterpret_cast<const SnapshotImpl*>(read_options.snapshot)
                ->number_
          : latest_snapshot;
  ReadCallback* read_callback = nullptr;  // No read callback provided.
  auto db_iter = NewArenaWrappedDbIterator(
      env_, read_options, *cfd->ioptions(), super_version->mutable_cf_options,
      super_version->current, read_seq,
      super_version->mutable_cf_options.max_sequential_skip_in_iterations,
      super_version->version_number, read_callback);
  // The internal iterator is allocated from db_iter's arena, so it shares
  // the wrapper's lifetime.
  auto internal_iter = NewInternalIterator(
      db_iter->GetReadOptions(), cfd, super_version, db_iter->GetArena(),
      db_iter->GetRangeDelAggregator(), read_seq,
      /* allow_unprepared_value */ true);
  db_iter->SetIterUnderDBIter(internal_iter);
  return db_iter;
}
|
|
|
|
|
|
|
|
// Create one iterator per column family, all reading at the same sequence
// number so the set of iterators observes a consistent view. On success,
// *iterators holds one caller-owned iterator per entry in column_families,
// in the same order.
Status DBImplReadOnly::NewIterators(
    const ReadOptions& read_options,
    const std::vector<ColumnFamilyHandle*>& column_families,
    std::vector<Iterator*>* iterators) {
  ReadCallback* read_callback = nullptr;  // No read callback provided.
  if (iterators == nullptr) {
    return Status::InvalidArgument("iterators not allowed to be nullptr");
  }
  iterators->clear();
  iterators->reserve(column_families.size());
  // Resolve the read sequence once so every iterator shares it.
  SequenceNumber latest_snapshot = versions_->LastSequence();
  SequenceNumber read_seq =
      read_options.snapshot != nullptr
          ? reinterpret_cast<const SnapshotImpl*>(read_options.snapshot)
                ->number_
          : latest_snapshot;

  for (auto cfh : column_families) {
    auto* cfd = static_cast_with_check<ColumnFamilyHandleImpl>(cfh)->cfd();
    // Each iterator takes its own reference on its CF's super version,
    // released when that iterator is destroyed.
    auto* sv = cfd->GetSuperVersion()->Ref();
    auto* db_iter = NewArenaWrappedDbIterator(
        env_, read_options, *cfd->ioptions(), sv->mutable_cf_options,
        sv->current, read_seq,
        sv->mutable_cf_options.max_sequential_skip_in_iterations,
        sv->version_number, read_callback);
    // Internal iterator lives in db_iter's arena and shares its lifetime.
    auto* internal_iter = NewInternalIterator(
        db_iter->GetReadOptions(), cfd, sv, db_iter->GetArena(),
        db_iter->GetRangeDelAggregator(), read_seq,
        /* allow_unprepared_value */ true);
    db_iter->SetIterUnderDBIter(internal_iter);
    iterators->push_back(db_iter);
  }

  return Status::OK();
}
|
|
|
|
|
2020-06-04 01:55:25 +00:00
|
|
|
namespace {
|
Make backups openable as read-only DBs (#8142)
Summary:
A current limitation of backups is that you don't know the
exact database state of when the backup was taken. With this new
feature, you can at least inspect the backup's DB state without
restoring it by opening it as a read-only DB.
Rather than add something like OpenAsReadOnlyDB to the BackupEngine API,
which would inhibit opening stackable DB implementations read-only
(if/when their APIs support it), we instead provide a DB name and Env
that can be used to open as a read-only DB.
Possible follow-up work:
* Add a version of GetBackupInfo for a single backup.
* Let CreateNewBackup return the BackupID of the newly-created backup.
Implementation details:
Refactored ChrootFileSystem to split off new base class RemapFileSystem,
which allows more general remapping of files. We use this base class to
implement BackupEngineImpl::RemapSharedFileSystem.
To minimize API impact, I decided to just add these fields `name_for_open`
and `env_for_open` to those set by GetBackupInfo when
include_file_details=true. Creating the RemapSharedFileSystem adds a bit
to the memory consumption, perhaps unnecessarily in some cases, but this
has been mitigated by (a) only initialize the RemapSharedFileSystem
lazily when GetBackupInfo with include_file_details=true is called, and
(b) using the existing `shared_ptr<FileInfo>` objects to hold most of the
mapping data.
To enhance API safety, RemapSharedFileSystem is wrapped by new
ReadOnlyFileSystem which rejects any attempts to write. This uncovered a
couple of places in which DB::OpenForReadOnly would write to the
filesystem, so I fixed these. Added a release note because this affects
logging.
Additional minor refactoring in backupable_db.cc to support the new
functionality.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8142
Test Plan:
new test (run with ASAN and UBSAN), added to stress test and
ran it for a while with amplified backup_one_in
Reviewed By: ajkr
Differential Revision: D27535408
Pulled By: pdillinger
fbshipit-source-id: 04666d310aa0261ef6b2385c43ca793ce1dfd148
2021-04-06 21:36:45 +00:00
|
|
|
// Return OK if dbname exists in the file system or create it if
|
|
|
|
// create_if_missing
|
2020-06-04 01:55:25 +00:00
|
|
|
Status OpenForReadOnlyCheckExistence(const DBOptions& db_options,
|
|
|
|
const std::string& dbname) {
|
|
|
|
Status s;
|
|
|
|
if (!db_options.create_if_missing) {
|
|
|
|
// Attempt to read "CURRENT" file
|
|
|
|
const std::shared_ptr<FileSystem>& fs = db_options.env->GetFileSystem();
|
|
|
|
std::string manifest_path;
|
|
|
|
uint64_t manifest_file_number;
|
|
|
|
s = VersionSet::GetCurrentManifestPath(dbname, fs.get(), &manifest_path,
|
|
|
|
&manifest_file_number);
|
Make backups openable as read-only DBs (#8142)
Summary:
A current limitation of backups is that you don't know the
exact database state of when the backup was taken. With this new
feature, you can at least inspect the backup's DB state without
restoring it by opening it as a read-only DB.
Rather than add something like OpenAsReadOnlyDB to the BackupEngine API,
which would inhibit opening stackable DB implementations read-only
(if/when their APIs support it), we instead provide a DB name and Env
that can be used to open as a read-only DB.
Possible follow-up work:
* Add a version of GetBackupInfo for a single backup.
* Let CreateNewBackup return the BackupID of the newly-created backup.
Implementation details:
Refactored ChrootFileSystem to split off new base class RemapFileSystem,
which allows more general remapping of files. We use this base class to
implement BackupEngineImpl::RemapSharedFileSystem.
To minimize API impact, I decided to just add these fields `name_for_open`
and `env_for_open` to those set by GetBackupInfo when
include_file_details=true. Creating the RemapSharedFileSystem adds a bit
to the memory consumption, perhaps unnecessarily in some cases, but this
has been mitigated by (a) only initialize the RemapSharedFileSystem
lazily when GetBackupInfo with include_file_details=true is called, and
(b) using the existing `shared_ptr<FileInfo>` objects to hold most of the
mapping data.
To enhance API safety, RemapSharedFileSystem is wrapped by new
ReadOnlyFileSystem which rejects any attempts to write. This uncovered a
couple of places in which DB::OpenForReadOnly would write to the
filesystem, so I fixed these. Added a release note because this affects
logging.
Additional minor refactoring in backupable_db.cc to support the new
functionality.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8142
Test Plan:
new test (run with ASAN and UBSAN), added to stress test and
ran it for a while with amplified backup_one_in
Reviewed By: ajkr
Differential Revision: D27535408
Pulled By: pdillinger
fbshipit-source-id: 04666d310aa0261ef6b2385c43ca793ce1dfd148
2021-04-06 21:36:45 +00:00
|
|
|
} else {
|
|
|
|
// Historic behavior that doesn't necessarily make sense
|
|
|
|
s = db_options.env->CreateDirIfMissing(dbname);
|
2020-06-04 01:55:25 +00:00
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
} // namespace
|
|
|
|
|
2012-11-06 03:18:49 +00:00
|
|
|
// Open the DB at `dbname` read-only with a single (default) column family.
// Prefers the lighter CompactedDBImpl when the DB is fully compacted and
// falls back to the generic DBImplReadOnly otherwise.
Status DB::OpenForReadOnly(const Options& options, const std::string& dbname,
                           DB** dbptr, bool /*error_if_wal_file_exists*/) {
  // Refuse to proceed when the DB directory is absent (or create it when
  // create_if_missing asks for that).
  Status status = OpenForReadOnlyCheckExistence(options, dbname);
  if (!status.ok()) {
    return status;
  }

  *dbptr = nullptr;

  // Fast path: a fully-compacted DB can be served by CompactedDBImpl.
  status = CompactedDBImpl::Open(options, dbname, dbptr);
  if (status.ok()) {
    return status;
  }

  // Generic path: open with just the default column family.
  std::vector<ColumnFamilyDescriptor> column_families;
  column_families.emplace_back(kDefaultColumnFamilyName,
                               ColumnFamilyOptions(options));
  std::vector<ColumnFamilyHandle*> handles;

  status = DBImplReadOnly::OpenForReadOnlyWithoutCheck(
      DBOptions(options), dbname, column_families, &handles, dbptr);
  if (status.ok()) {
    assert(handles.size() == 1);
    // Safe to drop: DBImpl keeps its own reference to the default column
    // family for the lifetime of the DB.
    delete handles[0];
  }
  return status;
}
|
|
|
|
|
|
|
|
// Open the DB at `dbname` read-only with an explicit set of column families.
// Performs the existence check first so nothing is touched when the DB
// directory does not exist.
Status DB::OpenForReadOnly(
    const DBOptions& db_options, const std::string& dbname,
    const std::vector<ColumnFamilyDescriptor>& column_families,
    std::vector<ColumnFamilyHandle*>* handles, DB** dbptr,
    bool error_if_wal_file_exists) {
  const Status existence_check =
      OpenForReadOnlyCheckExistence(db_options, dbname);
  if (existence_check.ok()) {
    return DBImplReadOnly::OpenForReadOnlyWithoutCheck(
        db_options, dbname, column_families, handles, dbptr,
        error_if_wal_file_exists);
  }
  return existence_check;
}
|
|
|
|
|
|
|
|
// Core read-only open path: constructs a DBImplReadOnly, recovers state from
// the MANIFEST/WALs under the DB mutex, materializes the requested column
// family handles, and installs one super version per column family. On any
// failure, all handles and the impl are destroyed and *dbptr stays nullptr.
Status DBImplReadOnly::OpenForReadOnlyWithoutCheck(
    const DBOptions& db_options, const std::string& dbname,
    const std::vector<ColumnFamilyDescriptor>& column_families,
    std::vector<ColumnFamilyHandle*>* handles, DB** dbptr,
    bool error_if_wal_file_exists) {
  *dbptr = nullptr;
  handles->clear();

  SuperVersionContext sv_context(/* create_superversion */ true);
  DBImplReadOnly* impl = new DBImplReadOnly(db_options, dbname);
  // Recovery and super-version installation happen under the DB mutex.
  impl->mutex_.Lock();
  Status s = impl->Recover(column_families, true /* read only */,
                           error_if_wal_file_exists);
  if (s.ok()) {
    // set column family handles
    for (auto cf : column_families) {
      auto cfd =
          impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name);
      if (cfd == nullptr) {
        // Requested column family is not present in the recovered DB.
        s = Status::InvalidArgument("Column family not found", cf.name);
        break;
      }
      handles->push_back(new ColumnFamilyHandleImpl(cfd, impl, &impl->mutex_));
    }
  }
  if (s.ok()) {
    // Install a fresh super version for every column family in the DB (not
    // only the requested ones).
    for (auto cfd : *impl->versions_->GetColumnFamilySet()) {
      sv_context.NewSuperVersion();
      cfd->InstallSuperVersion(&sv_context, &impl->mutex_);
    }
  }
  impl->mutex_.Unlock();
  // Clean() must run outside the mutex; it releases any super versions the
  // context still owns.
  sv_context.Clean();
  if (s.ok()) {
    *dbptr = impl;
    for (auto* h : *handles) {
      impl->NewThreadStatusCfInfo(
          static_cast_with_check<ColumnFamilyHandleImpl>(h)->cfd());
    }
  } else {
    // Error path: undo everything created so far.
    for (auto h : *handles) {
      delete h;
    }
    handles->clear();
    delete impl;
  }
  return s;
}
|
|
|
|
|
2018-04-13 00:55:14 +00:00
|
|
|
#else // !ROCKSDB_LITE
|
2014-11-26 19:37:59 +00:00
|
|
|
|
2018-04-13 00:55:14 +00:00
|
|
|
// ROCKSDB_LITE stub: the read-only DB implementation is compiled out.
Status DB::OpenForReadOnly(const Options& /*options*/,
                           const std::string& /*dbname*/, DB** /*dbptr*/,
                           bool /*error_if_wal_file_exists*/) {
  const Status not_supported =
      Status::NotSupported("Not supported in ROCKSDB_LITE.");
  return not_supported;
}
|
|
|
|
|
|
|
|
// ROCKSDB_LITE stub: the read-only DB implementation is compiled out.
Status DB::OpenForReadOnly(
    const DBOptions& /*db_options*/, const std::string& /*dbname*/,
    const std::vector<ColumnFamilyDescriptor>& /*column_families*/,
    std::vector<ColumnFamilyHandle*>* /*handles*/, DB** /*dbptr*/,
    bool /*error_if_wal_file_exists*/) {
  const Status not_supported =
      Status::NotSupported("Not supported in ROCKSDB_LITE.");
  return not_supported;
}
|
|
|
|
#endif // !ROCKSDB_LITE
|
2014-04-09 16:56:17 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|