Mirror of https://github.com/facebook/rocksdb.git, synced 2024-11-27 02:44:18 +00:00
3122cb4358
Summary:
ajkr reminded me that we have a rule of not including per-kv related data in `WriteOptions`. Namely, `WriteOptions` should not include information about "what-to-write", but should just include information about "how-to-write". According to this rule, `WriteOptions::timestamp` (experimental) is clearly a violation. Therefore, this PR removes `WriteOptions::timestamp` for compliance.

After the removal, we need to pass timestamp info via another set of APIs. This PR proposes a set of overloaded functions `Put(write_opts, key, value, ts)`, `Delete(write_opts, key, ts)`, and `SingleDelete(write_opts, key, ts)`. I planned to add `Write(write_opts, batch, ts)` as well, but its complexity made me reconsider doing it in another PR (maybe).

For better checking and earlier error returns, we also add a new set of APIs to `WriteBatch` that take extra `timestamp` information when writing to `WriteBatch`es. This set of APIs is currently not supported in `WriteBatchWithIndex` and is on our TODO list.

Removed `WriteBatch::AssignTimestamps()` and renamed `WriteBatch::AssignTimestamp()` to `WriteBatch::UpdateTimestamps()`, since this method requires that all keys already have space allocated for timestamps, and multiple timestamps can be updated at once.

The constructor of `WriteBatch` now takes a fourth argument, `default_cf_ts_sz`, which is the timestamp size of the default column family. It is used to allocate space when calling APIs that do not specify a column family handle.

Also updated the `DB::Get()`, `DB::MultiGet()`, `DB::NewIterator()`, and `DB::NewIterators()` methods, replacing some assertions about timestamps with returning a Status code.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/8946

Test Plan:
make check
./db_bench -benchmarks=fillseq,fillrandom,readrandom,readseq,deleterandom -user_timestamp_size=8
./db_stress --user_timestamp_size=8 -nooverwritepercent=0 -test_secondary=0 -secondary_catch_up_one_in=0 -continuous_verification_interval=0

Make sure there is no perf regression by running the following:
```
./db_bench_opt -db=/dev/shm/rocksdb -use_existing_db=0 -level0_stop_writes_trigger=256 -level0_slowdown_writes_trigger=256 -level0_file_num_compaction_trigger=256 -disable_wal=1 -duration=10 -benchmarks=fillrandom
```

Before this PR:
```
DB path: [/dev/shm/rocksdb]
fillrandom : 1.831 micros/op 546235 ops/sec; 60.4 MB/s
```

After this PR:
```
DB path: [/dev/shm/rocksdb]
fillrandom : 1.820 micros/op 549404 ops/sec; 60.8 MB/s
```

Reviewed By: ltamasi

Differential Revision: D33721359

Pulled By: riversand963

fbshipit-source-id: c131561534272c120ffb80711d42748d21badf09
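To make the new surface concrete, here is a minimal sketch of the timestamp-aware APIs described above. The overload shapes follow this summary; the exact argument order of `Put()`, the four-argument `WriteBatch` constructor, the comparator helper `BytewiseComparatorWithU64Ts()`, and the `/tmp/ts_demo` path should all be treated as illustrative assumptions that may differ across RocksDB releases.

```cpp
// Sketch only: illustrates the timestamp APIs described in this summary.
// Assumptions (may differ by release): Put() argument order, the
// four-argument WriteBatch constructor, and BytewiseComparatorWithU64Ts().
#include <functional>
#include <string>

#include "rocksdb/comparator.h"
#include "rocksdb/db.h"
#include "rocksdb/write_batch.h"

using namespace ROCKSDB_NAMESPACE;

int main() {
  Options options;
  options.create_if_missing = true;
  // User-defined timestamps require a timestamp-aware comparator; this
  // helper pairs bytewise ordering with an 8-byte (uint64) timestamp.
  options.comparator = BytewiseComparatorWithU64Ts();

  DB* db = nullptr;
  if (!DB::Open(options, "/tmp/ts_demo", &db).ok()) {
    return 1;
  }

  const std::string ts(8, '\0');  // an encoded 8-byte timestamp
  // The timestamp now rides on the operation itself, not on WriteOptions.
  Status s =
      db->Put(WriteOptions(), db->DefaultColumnFamily(), "key", ts, "value");

  // WriteBatch reserves timestamp space up front (default_cf_ts_sz for
  // entries that do not name a column family); UpdateTimestamps() then
  // fills every reserved slot before the batch is written.
  WriteBatch batch(/*reserved_bytes=*/0, /*max_bytes=*/0,
                   /*protection_bytes_per_key=*/0, /*default_cf_ts_sz=*/8);
  s = batch.Put("k1", "v1");
  s = batch.UpdateTimestamps(
      ts, [](uint32_t /*cf_id*/) { return static_cast<size_t>(8); });
  s = db->Write(WriteOptions(), &batch);

  delete db;
  return s.ok() ? 0 : 1;
}
```

The point of the split is that `WriteOptions` stays purely "how-to-write", while the timestamp, being per-key data, travels with each operation or batch entry.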
185 lines
6.4 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#ifndef ROCKSDB_LITE
#include "db/db_impl/compacted_db_impl.h"

#include "db/db_impl/db_impl.h"
#include "db/version_set.h"
#include "logging/logging.h"
#include "table/get_context.h"
#include "util/cast_util.h"
namespace ROCKSDB_NAMESPACE {

extern void MarkKeyMayExist(void* arg);
extern bool SaveValue(void* arg, const ParsedInternalKey& parsed_key,
                      const Slice& v, bool hit_and_return);
CompactedDBImpl::CompactedDBImpl(const DBOptions& options,
                                 const std::string& dbname)
    : DBImpl(options, dbname, /*seq_per_batch*/ false, /*batch_per_txn*/ true,
             /*read_only*/ true),
      cfd_(nullptr),
      version_(nullptr),
      user_comparator_(nullptr) {}

CompactedDBImpl::~CompactedDBImpl() {}
// Binary-search the sorted file list for the first file whose largest user
// key is not less than `key`; only that file can contain `key`.
size_t CompactedDBImpl::FindFile(const Slice& key) {
  size_t right = files_.num_files - 1;
  auto cmp = [&](const FdWithKeyRange& f, const Slice& k) -> bool {
    return user_comparator_->Compare(ExtractUserKey(f.largest_key), k) < 0;
  };
  return static_cast<size_t>(
      std::lower_bound(files_.files, files_.files + right, key, cmp) -
      files_.files);
}
Status CompactedDBImpl::Get(const ReadOptions& options, ColumnFamilyHandle*,
                            const Slice& key, PinnableSlice* value) {
  assert(user_comparator_);
  if (options.timestamp || user_comparator_->timestamp_size()) {
    // TODO: support timestamp
    return Status::NotSupported();
  }
  GetContext get_context(user_comparator_, nullptr, nullptr, nullptr,
                         GetContext::kNotFound, key, value, nullptr, nullptr,
                         nullptr, true, nullptr, nullptr);
  LookupKey lkey(key, kMaxSequenceNumber);
  // The whole DB is a single sorted run, so one table lookup suffices.
  Status s = files_.files[FindFile(key)].fd.table_reader->Get(
      options, lkey.internal_key(), &get_context, nullptr);
  if (!s.ok() && !s.IsNotFound()) {
    return s;
  }
  if (get_context.State() == GetContext::kFound) {
    return Status::OK();
  }
  return Status::NotFound();
}
std::vector<Status> CompactedDBImpl::MultiGet(
    const ReadOptions& options, const std::vector<ColumnFamilyHandle*>&,
    const std::vector<Slice>& keys, std::vector<std::string>* values) {
  assert(user_comparator_);
  if (user_comparator_->timestamp_size() || options.timestamp) {
    // TODO: support timestamp
    return std::vector<Status>(keys.size(), Status::NotSupported());
  }
  autovector<TableReader*, 16> reader_list;
  for (const auto& key : keys) {
    const FdWithKeyRange& f = files_.files[FindFile(key)];
    if (user_comparator_->Compare(key, ExtractUserKey(f.smallest_key)) < 0) {
      reader_list.push_back(nullptr);
    } else {
      LookupKey lkey(key, kMaxSequenceNumber);
      f.fd.table_reader->Prepare(lkey.internal_key());
      reader_list.push_back(f.fd.table_reader);
    }
  }

  std::vector<Status> statuses(keys.size(), Status::NotFound());
  values->resize(keys.size());
  int idx = 0;
  for (auto* r : reader_list) {
    if (r != nullptr) {
      PinnableSlice pinnable_val;
      std::string& value = (*values)[idx];
      GetContext get_context(user_comparator_, nullptr, nullptr, nullptr,
                             GetContext::kNotFound, keys[idx], &pinnable_val,
                             nullptr, nullptr, nullptr, true, nullptr,
                             nullptr);
      LookupKey lkey(keys[idx], kMaxSequenceNumber);
      Status s = r->Get(options, lkey.internal_key(), &get_context, nullptr);
      assert(static_cast<size_t>(idx) < statuses.size());
      if (!s.ok() && !s.IsNotFound()) {
        statuses[idx] = s;
      } else {
        value.assign(pinnable_val.data(), pinnable_val.size());
        if (get_context.State() == GetContext::kFound) {
          statuses[idx] = Status::OK();
        }
      }
    }
    ++idx;
  }
  return statuses;
}
Status CompactedDBImpl::Init(const Options& options) {
  SuperVersionContext sv_context(/* create_superversion */ true);
  mutex_.Lock();
  ColumnFamilyDescriptor cf(kDefaultColumnFamilyName,
                            ColumnFamilyOptions(options));
  Status s = Recover({cf}, true /* read only */, false, true);
  if (s.ok()) {
    cfd_ = static_cast_with_check<ColumnFamilyHandleImpl>(DefaultColumnFamily())
               ->cfd();
    cfd_->InstallSuperVersion(&sv_context, &mutex_);
  }
  mutex_.Unlock();
  sv_context.Clean();
  if (!s.ok()) {
    return s;
  }
  NewThreadStatusCfInfo(cfd_);
  version_ = cfd_->GetSuperVersion()->current;
  user_comparator_ = cfd_->user_comparator();
  auto* vstorage = version_->storage_info();
  if (vstorage->num_non_empty_levels() == 0) {
    return Status::NotSupported("no file exists");
  }
  const LevelFilesBrief& l0 = vstorage->LevelFilesBrief(0);
  // L0 should not have more than one file.
  if (l0.num_files > 1) {
    return Status::NotSupported("L0 contain more than 1 file");
  }
  if (l0.num_files == 1) {
    if (vstorage->num_non_empty_levels() > 1) {
      return Status::NotSupported("Both L0 and other level contain files");
    }
    files_ = l0;
    return Status::OK();
  }

  for (int i = 1; i < vstorage->num_non_empty_levels() - 1; ++i) {
    if (vstorage->LevelFilesBrief(i).num_files > 0) {
      return Status::NotSupported("Other levels also contain files");
    }
  }

  int level = vstorage->num_non_empty_levels() - 1;
  if (vstorage->LevelFilesBrief(level).num_files > 0) {
    files_ = vstorage->LevelFilesBrief(level);
    return Status::OK();
  }
  return Status::NotSupported("no file exists");
}
Status CompactedDBImpl::Open(const Options& options,
                             const std::string& dbname, DB** dbptr) {
  *dbptr = nullptr;

  if (options.max_open_files != -1) {
    return Status::InvalidArgument("require max_open_files = -1");
  }
  if (options.merge_operator.get() != nullptr) {
    return Status::InvalidArgument("merge operator is not supported");
  }
  DBOptions db_options(options);
  std::unique_ptr<CompactedDBImpl> db(new CompactedDBImpl(db_options, dbname));
  Status s = db->Init(options);
  if (s.ok()) {
    db->StartPeriodicWorkScheduler();
    ROCKS_LOG_INFO(db->immutable_db_options_.info_log,
                   "Opened the db as fully compacted mode");
    LogFlush(db->immutable_db_options_.info_log);
    *dbptr = db.release();
  }
  return s;
}

}  // namespace ROCKSDB_NAMESPACE
#endif  // ROCKSDB_LITE
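For reference, a minimal sketch of how this compacted-DB path is typically reached from user code. `DB::OpenForReadOnly()` is a real API; the claim that it tries the compacted implementation first and falls back otherwise is an assumption inferred from the preconditions checked in `CompactedDBImpl::Open()` above, and the `/path/to/compacted_db` path is a placeholder.

```cpp
// Sketch: opening a fully compacted DB read-only. Whether the compacted
// path is taken depends on the preconditions in CompactedDBImpl::Open
// (max_open_files == -1, no merge operator, all data in a single level).
#include <iostream>
#include <string>

#include "rocksdb/db.h"

using namespace ROCKSDB_NAMESPACE;

int main() {
  Options options;
  options.max_open_files = -1;  // required by CompactedDBImpl::Open

  DB* db = nullptr;
  Status s = DB::OpenForReadOnly(options, "/path/to/compacted_db", &db);
  if (!s.ok()) {
    std::cerr << s.ToString() << std::endl;
    return 1;
  }

  // Served by CompactedDBImpl::Get when the compacted path was taken.
  std::string value;
  s = db->Get(ReadOptions(), "some_key", &value);

  delete db;
  return (s.ok() || s.IsNotFound()) ? 0 : 1;
}
```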