2016-02-09 23:12:00 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-15 23:03:42 +00:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2014-09-25 18:14:01 +00:00
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE
|
2015-07-14 01:10:31 +00:00
|
|
|
#include "db/compacted_db_impl.h"
|
2020-07-03 02:24:25 +00:00
|
|
|
|
2019-05-31 18:52:59 +00:00
|
|
|
#include "db/db_impl/db_impl.h"
|
2014-09-25 18:14:01 +00:00
|
|
|
#include "db/version_set.h"
|
2014-09-29 18:09:09 +00:00
|
|
|
#include "table/get_context.h"
|
2020-07-03 02:24:25 +00:00
|
|
|
#include "util/cast_util.h"
|
2014-09-25 18:14:01 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2014-09-25 18:14:01 +00:00
|
|
|
|
|
|
|
extern void MarkKeyMayExist(void* arg);
|
|
|
|
extern bool SaveValue(void* arg, const ParsedInternalKey& parsed_key,
|
2014-09-29 18:09:09 +00:00
|
|
|
const Slice& v, bool hit_and_return);
|
2014-09-25 18:14:01 +00:00
|
|
|
|
|
|
|
// Constructs a read-only DB wrapper for a fully-compacted database.
// The column family, current version and user comparator are resolved
// later, in Init(); until then the cached pointers stay null.
CompactedDBImpl::CompactedDBImpl(const DBOptions& options,
                                 const std::string& dbname)
    : DBImpl(options, dbname),
      cfd_(nullptr),
      version_(nullptr),
      user_comparator_(nullptr) {}
|
|
|
|
|
|
|
|
// Nothing to release here beyond what DBImpl's destructor handles:
// cfd_/version_/user_comparator_ are non-owning caches.
CompactedDBImpl::~CompactedDBImpl() = default;
|
|
|
|
|
2014-09-25 20:34:51 +00:00
|
|
|
// Returns the index of the (single, fully-compacted level's) file whose key
// range may contain `key`.
//
// Binary-searches the first (num_files - 1) files for the first file whose
// largest user key is >= key.  The last file is deliberately excluded from
// the search range: if every searched file's largest key is < key,
// std::lower_bound returns files + right, i.e. the index of the last file,
// which thus acts as the catch-all for keys past all other files' ranges.
// The returned file is therefore only a candidate — the key may still fall
// below the file's smallest key (MultiGet checks this explicitly) or simply
// be absent from the table.
size_t CompactedDBImpl::FindFile(const Slice& key) {
  // Index of the last file; also the exclusive end of the searched range.
  size_t right = files_.num_files - 1;
  // Predicate for lower_bound: true when file f's whole range sorts before k.
  auto cmp = [&](const FdWithKeyRange& f, const Slice& k) -> bool {
    return user_comparator_->Compare(ExtractUserKey(f.largest_key), k) < 0;
  };
  return static_cast<size_t>(std::lower_bound(files_.files,
                      files_.files + right, key, cmp) - files_.files);
}
|
|
|
|
|
2017-03-13 18:44:50 +00:00
|
|
|
// Point lookup in fully-compacted mode: reads directly from the one table
// file whose range may cover `key` — there is no memtable and no multi-level
// search.  The ColumnFamilyHandle argument is ignored (only the default
// column family exists here).
Status CompactedDBImpl::Get(const ReadOptions& options, ColumnFamilyHandle*,
                            const Slice& key, PinnableSlice* value) {
  // Starts in kNotFound; the table reader's Get() updates the state on a
  // hit.  The nullptr arguments disable optional GetContext features
  // (merge/statistics/callbacks — see GetContext's constructor for which
  // parameter is which).
  GetContext get_context(user_comparator_, nullptr, nullptr, nullptr,
                         GetContext::kNotFound, key, value, nullptr, nullptr,
                         nullptr, true, nullptr, nullptr);
  // Look up at kMaxSequenceNumber: the DB is immutable, so every stored
  // entry is visible to the read.
  LookupKey lkey(key, kMaxSequenceNumber);
  files_.files[FindFile(key)].fd.table_reader->Get(options, lkey.internal_key(),
                                                   &get_context, nullptr);
  if (get_context.State() == GetContext::kFound) {
    return Status::OK();
  }
  return Status::NotFound();
}
|
|
|
|
|
2014-09-25 20:34:51 +00:00
|
|
|
// Batched point lookup, done in two passes over `keys`:
//   pass 1 — locate each key's candidate table file and issue a Prepare()
//            hint to the reader (or record nullptr when the key provably
//            cannot be in any file);
//   pass 2 — perform the actual reads and fill `statuses`/`values`.
// The ColumnFamilyHandle vector is ignored (default column family only).
std::vector<Status> CompactedDBImpl::MultiGet(const ReadOptions& options,
    const std::vector<ColumnFamilyHandle*>&,
    const std::vector<Slice>& keys, std::vector<std::string>* values) {
  autovector<TableReader*, 16> reader_list;
  for (const auto& key : keys) {
    const FdWithKeyRange& f = files_.files[FindFile(key)];
    // FindFile only guarantees the key is not past the file's largest key;
    // if it also sorts before the file's smallest key, no file contains it.
    if (user_comparator_->Compare(key, ExtractUserKey(f.smallest_key)) < 0) {
      reader_list.push_back(nullptr);
    } else {
      LookupKey lkey(key, kMaxSequenceNumber);
      f.fd.table_reader->Prepare(lkey.internal_key());
      reader_list.push_back(f.fd.table_reader);
    }
  }
  // Every slot defaults to NotFound; only confirmed hits are flipped to OK.
  std::vector<Status> statuses(keys.size(), Status::NotFound());
  values->resize(keys.size());
  int idx = 0;
  for (auto* r : reader_list) {
    if (r != nullptr) {
      PinnableSlice pinnable_val;
      std::string& value = (*values)[idx];
      // Same argument layout as in Get() above: kNotFound start, optional
      // features disabled via nullptrs.
      GetContext get_context(user_comparator_, nullptr, nullptr, nullptr,
                             GetContext::kNotFound, keys[idx], &pinnable_val,
                             nullptr, nullptr, nullptr, true, nullptr, nullptr);
      LookupKey lkey(keys[idx], kMaxSequenceNumber);
      r->Get(options, lkey.internal_key(), &get_context, nullptr);
      // Copied unconditionally; on a miss pinnable_val is empty, and the
      // status entry stays NotFound below.
      value.assign(pinnable_val.data(), pinnable_val.size());
      if (get_context.State() == GetContext::kFound) {
        statuses[idx] = Status::OK();
      }
    }
    ++idx;
  }
  return statuses;
}
|
|
|
|
|
2014-09-25 18:14:01 +00:00
|
|
|
// Recovers the default column family read-only and verifies the LSM tree is
// in fully-compacted shape: all files must live in exactly one level —
// either a single L0 file, or the bottommost non-empty level with every
// intermediate level empty.  On success, caches the column family data,
// current version, user comparator, and the flat file list (files_) used by
// Get/MultiGet/FindFile.
Status CompactedDBImpl::Init(const Options& options) {
  SuperVersionContext sv_context(/* create_superversion */ true);
  // DB mutex must be held across Recover and InstallSuperVersion.
  mutex_.Lock();
  ColumnFamilyDescriptor cf(kDefaultColumnFamilyName,
                            ColumnFamilyOptions(options));
  Status s = Recover({cf}, true /* read only */, false, true);
  if (s.ok()) {
    cfd_ = static_cast_with_check<ColumnFamilyHandleImpl>(DefaultColumnFamily())
               ->cfd();
    cfd_->InstallSuperVersion(&sv_context, &mutex_);
  }
  mutex_.Unlock();
  // Clean() must run outside the mutex; it frees whatever the install
  // displaced (or the unused superversion if recovery failed).
  sv_context.Clean();
  if (!s.ok()) {
    return s;
  }
  NewThreadStatusCfInfo(cfd_);
  version_ = cfd_->GetSuperVersion()->current;
  user_comparator_ = cfd_->user_comparator();
  auto* vstorage = version_->storage_info();
  if (vstorage->num_non_empty_levels() == 0) {
    return Status::NotSupported("no file exists");
  }
  const LevelFilesBrief& l0 = vstorage->LevelFilesBrief(0);
  // L0 should not have files
  if (l0.num_files > 1) {
    return Status::NotSupported("L0 contain more than 1 file");
  }
  // A lone L0 file is acceptable only if it is the sole file in the DB.
  if (l0.num_files == 1) {
    if (vstorage->num_non_empty_levels() > 1) {
      return Status::NotSupported("Both L0 and other level contain files");
    }
    files_ = l0;
    return Status::OK();
  }

  // No L0 files: every level between L1 and the bottommost non-empty level
  // (exclusive) must be empty for the DB to count as fully compacted.
  for (int i = 1; i < vstorage->num_non_empty_levels() - 1; ++i) {
    if (vstorage->LevelFilesBrief(i).num_files > 0) {
      return Status::NotSupported("Other levels also contain files");
    }
  }

  // All files live in the single bottommost non-empty level.
  int level = vstorage->num_non_empty_levels() - 1;
  if (vstorage->LevelFilesBrief(level).num_files > 0) {
    files_ = vstorage->LevelFilesBrief(level);
    return Status::OK();
  }
  return Status::NotSupported("no file exists");
}
|
|
|
|
|
|
|
|
// Factory: opens `dbname` as a fully-compacted, read-only DB.  On success
// *dbptr receives the new instance (caller owns it); on failure *dbptr is
// left null and the error status is returned.
Status CompactedDBImpl::Open(const Options& options,
                             const std::string& dbname, DB** dbptr) {
  *dbptr = nullptr;

  // Every table file must stay open for the lifetime of the DB, so an
  // unbounded file budget is mandatory.
  if (options.max_open_files != -1) {
    return Status::InvalidArgument("require max_open_files = -1");
  }
  // Merge operands cannot be resolved in this stripped-down read path.
  if (options.merge_operator.get() != nullptr) {
    return Status::InvalidArgument("merge operator is not supported");
  }

  DBOptions base_options(options);
  std::unique_ptr<CompactedDBImpl> compacted_db(
      new CompactedDBImpl(base_options, dbname));
  const Status init_status = compacted_db->Init(options);
  if (!init_status.ok()) {
    return init_status;
  }

  compacted_db->StartTimedTasks();
  ROCKS_LOG_INFO(compacted_db->immutable_db_options_.info_log,
                 "Opened the db as fully compacted mode");
  LogFlush(compacted_db->immutable_db_options_.info_log);
  // Hand ownership to the caller only after everything succeeded.
  *dbptr = compacted_db.release();
  return init_status;
}
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2014-09-25 18:14:01 +00:00
|
|
|
#endif // ROCKSDB_LITE
|