Mirror of https://github.com/facebook/rocksdb.git, synced 2024-11-27 02:44:18 +00:00
Commit 151242ce46
Summary:

**Context:** The existing stat `rocksdb.sst.read.micros` does not distinguish between the compaction and flush cases but aggregates them, which is not very helpful for understanding the IO read behavior of each.

**Summary:**
- Update `StopWatch` and `RandomAccessFileReader` to record `rocksdb.sst.read.micros` and `rocksdb.file.{flush/compaction}.read.micros`
- Fixed the default histogram in `RandomAccessFileReader`
- New field `ReadOptions/IOOptions::io_activity`; pass `ReadOptions` through paths under db open, flush and compaction to where we can prepare `IOOptions` and pass it to `RandomAccessFileReader`
- Use `thread_status_util` for assertions in `DbStressFSWrapper`, for continuous testing that the correct `io_activity` is passed under db open, flush and compaction

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11288

Test Plan:

- **Stress test**
- **Db bench 1: rocksdb.sst.read.micros COUNT ≈ sum of rocksdb.file.read.flush.micros's and rocksdb.file.read.compaction.micros's** (without blob). The counts may not match exactly because `HistogramStat::Add` only guarantees atomicity, not accuracy across threads.

```
./db_bench -db=/dev/shm/testdb/ -statistics=true -benchmarks="fillseq" -key_size=32 -value_size=512 -num=50000 -write_buffer_size=655 -target_file_size_base=655 -disable_auto_compactions=false -compression_type=none -bloom_bits=3 (-use_plain_table=1 -prefix_size=10)
```
```
// BlockBasedTable
rocksdb.sst.read.micros P50 : 2.009374 P95 : 4.968548 P99 : 8.110362 P100 : 43.000000 COUNT : 40456 SUM : 114805
rocksdb.file.read.flush.micros P50 : 1.871841 P95 : 3.872407 P99 : 5.540541 P100 : 43.000000 COUNT : 2250 SUM : 6116
rocksdb.file.read.compaction.micros P50 : 2.023109 P95 : 5.029149 P99 : 8.196910 P100 : 26.000000 COUNT : 38206 SUM : 108689

// PlainTable
Does not apply
```

- **Db bench 2: performance**

**Read**

SETUP: db with 900 files

```
./db_bench -db=/dev/shm/testdb/ -benchmarks="fillseq" -key_size=32 -value_size=512 -num=50000 -write_buffer_size=655 -disable_auto_compactions=true -target_file_size_base=655 -compression_type=none
```

Run till convergence:

```
./db_bench -seed=1678564177044286 -use_existing_db=true -db=/dev/shm/testdb -benchmarks=readrandom[-X60] -statistics=true -num=1000000 -disable_auto_compactions=true -compression_type=none -bloom_bits=3
```

Pre-change: `readrandom [AVG 60 runs] : 21568 (± 248) ops/sec`
Post-change (no regression, -0.3%): `readrandom [AVG 60 runs] : 21486 (± 236) ops/sec`

**Compaction/Flush**

Run till convergence:

```
./db_bench -db=/dev/shm/testdb2/ -seed=1678564177044286 -benchmarks="fillseq[-X60]" -key_size=32 -value_size=512 -num=50000 -write_buffer_size=655 -disable_auto_compactions=false -target_file_size_base=655 -compression_type=none

rocksdb.sst.read.micros COUNT : 33820
rocksdb.sst.read.flush.micros COUNT : 1800
rocksdb.sst.read.compaction.micros COUNT : 32020
```

Pre-change: `fillseq [AVG 46 runs] : 1391 (± 214) ops/sec; 0.7 (± 0.1) MB/sec`
Post-change (no regression, ~-0.4%): `fillseq [AVG 46 runs] : 1385 (± 216) ops/sec; 0.7 (± 0.1) MB/sec`

Reviewed By: ajkr

Differential Revision: D44007011

Pulled By: hx235

fbshipit-source-id: a54c89e4846dfc9a135389edf3f3eedfea257132
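As a companion to the stats above, here is a minimal, hedged sketch of how an application could read the aggregate and per-activity histograms through the public `Statistics` API. The `HistogramType` enum values `FILE_READ_FLUSH_MICROS` and `FILE_READ_COMPACTION_MICROS` are assumed from this PR's description; verify the exact names against `include/rocksdb/statistics.h` in your RocksDB version.

```
#include <cinttypes>
#include <cstdio>

#include "rocksdb/db.h"
#include "rocksdb/statistics.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Enable stats collection; histograms are populated as the DB does IO.
  options.statistics = rocksdb::CreateDBStatistics();

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/read_micros_demo", &db);
  if (!s.ok()) {
    return 1;
  }

  // ... run a write-heavy workload here so flushes and compactions occur ...

  rocksdb::HistogramData agg, flush, compaction;
  options.statistics->histogramData(rocksdb::SST_READ_MICROS, &agg);
  // Assumed enum names for the two new histograms introduced by this PR:
  options.statistics->histogramData(rocksdb::FILE_READ_FLUSH_MICROS, &flush);
  options.statistics->histogramData(rocksdb::FILE_READ_COMPACTION_MICROS,
                                    &compaction);
  // Per the test plan, agg.count should approximately equal flush.count +
  // compaction.count (HistogramStat::Add is atomic but not exact).
  std::printf("sst.read COUNT=%" PRIu64 " flush COUNT=%" PRIu64
              " compaction COUNT=%" PRIu64 "\n",
              agg.count, flush.count, compaction.count);
  delete db;
  return 0;
}
```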
261 lines | 9 KiB | C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "db/db_impl/compacted_db_impl.h"

#include "db/db_impl/db_impl.h"
#include "db/version_set.h"
#include "logging/logging.h"
#include "table/get_context.h"
#include "util/cast_util.h"
namespace ROCKSDB_NAMESPACE {

extern void MarkKeyMayExist(void* arg);
extern bool SaveValue(void* arg, const ParsedInternalKey& parsed_key,
                      const Slice& v, bool hit_and_return);

CompactedDBImpl::CompactedDBImpl(const DBOptions& options,
                                 const std::string& dbname)
    : DBImpl(options, dbname, /*seq_per_batch*/ false, /*batch_per_txn*/ true,
             /*read_only*/ true),
      cfd_(nullptr),
      version_(nullptr),
      user_comparator_(nullptr) {}

CompactedDBImpl::~CompactedDBImpl() {}

size_t CompactedDBImpl::FindFile(const Slice& key) {
  size_t right = files_.num_files - 1;
  auto cmp = [&](const FdWithKeyRange& f, const Slice& k) -> bool {
    return user_comparator_->Compare(ExtractUserKey(f.largest_key), k) < 0;
  };
  return static_cast<size_t>(
      std::lower_bound(files_.files, files_.files + right, key, cmp) -
      files_.files);
}
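// Illustrative aside: FindFile() above is the classic std::lower_bound
// partition-point idiom. files_ is sorted by largest key, and the comparator
// asks "is this file's largest key still strictly below the lookup key?";
// the first file for which that is false is the only candidate. Note that it
// searches only the first num_files - 1 entries, so a key beyond every other
// file's range falls through to the last file. A self-contained sketch of
// the same idiom, using hypothetical stand-in types rather than RocksDB's:
//
//   #include <algorithm>
//   #include <string>
//   #include <vector>
//
//   struct FileMeta {
//     std::string smallest_key;
//     std::string largest_key;
//   };
//
//   // Returns the index of the first file whose largest_key >= key.
//   // Precondition: files is sorted by largest_key.
//   size_t FindFileIndex(const std::vector<FileMeta>& files,
//                        const std::string& key) {
//     auto it = std::lower_bound(files.begin(), files.end(), key,
//                                [](const FileMeta& f, const std::string& k) {
//                                  return f.largest_key < k;
//                                });
//     return static_cast<size_t>(it - files.begin());
//   }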
Status CompactedDBImpl::Get(const ReadOptions& options, ColumnFamilyHandle*,
                            const Slice& key, PinnableSlice* value) {
  return Get(options, /*column_family*/ nullptr, key, value,
             /*timestamp*/ nullptr);
}
Status CompactedDBImpl::Get(const ReadOptions& options, ColumnFamilyHandle*,
                            const Slice& key, PinnableSlice* value,
                            std::string* timestamp) {
  if (options.io_activity != Env::IOActivity::kUnknown) {
    return Status::InvalidArgument(
        "Cannot call Get with `ReadOptions::io_activity` != "
        "`Env::IOActivity::kUnknown`");
  }
  assert(user_comparator_);
  if (options.timestamp) {
    const Status s = FailIfTsMismatchCf(
        DefaultColumnFamily(), *(options.timestamp), /*ts_for_read=*/true);
    if (!s.ok()) {
      return s;
    }
  } else {
    const Status s = FailIfCfHasTs(DefaultColumnFamily());
    if (!s.ok()) {
      return s;
    }
  }

  // Clear the timestamp for returned results so that we can distinguish
  // between a tombstone and a key that has never been written.
  if (timestamp) {
    timestamp->clear();
  }

  GetWithTimestampReadCallback read_cb(kMaxSequenceNumber);
  std::string* ts =
      user_comparator_->timestamp_size() > 0 ? timestamp : nullptr;
  LookupKey lkey(key, kMaxSequenceNumber, options.timestamp);
  GetContext get_context(user_comparator_, nullptr, nullptr, nullptr,
                         GetContext::kNotFound, lkey.user_key(), value,
                         /*columns=*/nullptr, ts, nullptr, nullptr, true,
                         nullptr, nullptr, nullptr, nullptr, &read_cb);

  const FdWithKeyRange& f = files_.files[FindFile(lkey.user_key())];
  if (user_comparator_->CompareWithoutTimestamp(
          key, /*a_has_ts=*/false,
          ExtractUserKeyAndStripTimestamp(f.smallest_key,
                                          user_comparator_->timestamp_size()),
          /*b_has_ts=*/false) < 0) {
    return Status::NotFound();
  }
  Status s = f.fd.table_reader->Get(options, lkey.internal_key(), &get_context,
                                    nullptr);
  if (!s.ok() && !s.IsNotFound()) {
    return s;
  }
  if (get_context.State() == GetContext::kFound) {
    return Status::OK();
  }
  return Status::NotFound();
}
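// Hedged usage sketch: Get() above rejects a ReadOptions whose io_activity
// has been set by the caller, because io_activity is reserved for
// RocksDB-internal tagging of flush/compaction/db-open reads (see the commit
// message for this file). Assuming the enum values from the PR description,
// user code should observe:
//
//   rocksdb::ReadOptions ropts;
//   ropts.io_activity = rocksdb::Env::IOActivity::kCompaction;  // misuse
//   rocksdb::PinnableSlice val;
//   rocksdb::Status s =
//       db->Get(ropts, db->DefaultColumnFamily(), "some_key", &val);
//   assert(s.IsInvalidArgument());  // callers must leave kUnknown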
std::vector<Status> CompactedDBImpl::MultiGet(
    const ReadOptions& options, const std::vector<ColumnFamilyHandle*>&,
    const std::vector<Slice>& keys, std::vector<std::string>* values) {
  return MultiGet(options, keys, values, /*timestamps*/ nullptr);
}
std::vector<Status> CompactedDBImpl::MultiGet(
    const ReadOptions& options, const std::vector<ColumnFamilyHandle*>&,
    const std::vector<Slice>& keys, std::vector<std::string>* values,
    std::vector<std::string>* timestamps) {
  assert(user_comparator_);
  size_t num_keys = keys.size();

  if (options.timestamp) {
    Status s = FailIfTsMismatchCf(DefaultColumnFamily(), *(options.timestamp),
                                  /*ts_for_read=*/true);
    if (!s.ok()) {
      return std::vector<Status>(num_keys, s);
    }
  } else {
    Status s = FailIfCfHasTs(DefaultColumnFamily());
    if (!s.ok()) {
      return std::vector<Status>(num_keys, s);
    }
  }

  // Clear the timestamps for returned results so that we can distinguish
  // between a tombstone and a key that has never been written.
  if (timestamps) {
    for (auto& ts : *timestamps) {
      ts.clear();
    }
  }

  GetWithTimestampReadCallback read_cb(kMaxSequenceNumber);
  autovector<TableReader*, 16> reader_list;
  for (const auto& key : keys) {
    LookupKey lkey(key, kMaxSequenceNumber, options.timestamp);
    const FdWithKeyRange& f = files_.files[FindFile(lkey.user_key())];
    if (user_comparator_->CompareWithoutTimestamp(
            key, /*a_has_ts=*/false,
            ExtractUserKeyAndStripTimestamp(
                f.smallest_key, user_comparator_->timestamp_size()),
            /*b_has_ts=*/false) < 0) {
      reader_list.push_back(nullptr);
    } else {
      f.fd.table_reader->Prepare(lkey.internal_key());
      reader_list.push_back(f.fd.table_reader);
    }
  }
  std::vector<Status> statuses(num_keys, Status::NotFound());
  values->resize(num_keys);
  if (timestamps) {
    timestamps->resize(num_keys);
  }
  int idx = 0;
  for (auto* r : reader_list) {
    if (r != nullptr) {
      PinnableSlice pinnable_val;
      std::string& value = (*values)[idx];
      LookupKey lkey(keys[idx], kMaxSequenceNumber, options.timestamp);
      std::string* timestamp = timestamps ? &(*timestamps)[idx] : nullptr;
      GetContext get_context(
          user_comparator_, nullptr, nullptr, nullptr, GetContext::kNotFound,
          lkey.user_key(), &pinnable_val, /*columns=*/nullptr,
          user_comparator_->timestamp_size() > 0 ? timestamp : nullptr, nullptr,
          nullptr, true, nullptr, nullptr, nullptr, nullptr, &read_cb);
      Status s = r->Get(options, lkey.internal_key(), &get_context, nullptr);
      assert(static_cast<size_t>(idx) < statuses.size());
      if (!s.ok() && !s.IsNotFound()) {
        statuses[idx] = s;
      } else {
        value.assign(pinnable_val.data(), pinnable_val.size());
        if (get_context.State() == GetContext::kFound) {
          statuses[idx] = Status::OK();
        }
      }
    }
    ++idx;
  }
  return statuses;
}
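// Illustrative aside: MultiGet() above is deliberately two-pass. Pass one
// binary-searches the target file for every key and calls
// TableReader::Prepare() on it as a hint (readers may use it to warm indexes
// or issue readahead); pass two performs the actual lookups. From the
// caller's side this is the plain vector-based MultiGet, sketched here as a
// hedged example:
//
//   std::vector<rocksdb::Slice> keys = {"k1", "k2", "k3"};
//   std::vector<std::string> values;
//   std::vector<rocksdb::Status> statuses =
//       db->MultiGet(rocksdb::ReadOptions(), keys, &values);
//   for (size_t i = 0; i < keys.size(); ++i) {
//     if (statuses[i].ok()) {
//       // values[i] holds the value for keys[i]
//     } else if (statuses[i].IsNotFound()) {
//       // keys[i] is absent
//     }
//   }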
Status CompactedDBImpl::Init(const Options& options) {
  SuperVersionContext sv_context(/* create_superversion */ true);
  mutex_.Lock();
  ColumnFamilyDescriptor cf(kDefaultColumnFamilyName,
                            ColumnFamilyOptions(options));
  Status s = Recover({cf}, true /* read only */, false, true);
  if (s.ok()) {
    cfd_ = static_cast_with_check<ColumnFamilyHandleImpl>(DefaultColumnFamily())
               ->cfd();
    cfd_->InstallSuperVersion(&sv_context, &mutex_);
  }
  mutex_.Unlock();
  sv_context.Clean();
  if (!s.ok()) {
    return s;
  }
  NewThreadStatusCfInfo(cfd_);
  version_ = cfd_->GetSuperVersion()->current;
  user_comparator_ = cfd_->user_comparator();
  auto* vstorage = version_->storage_info();
  if (vstorage->num_non_empty_levels() == 0) {
    return Status::NotSupported("no file exists");
  }
  const LevelFilesBrief& l0 = vstorage->LevelFilesBrief(0);
  // L0 should not have more than one file.
  if (l0.num_files > 1) {
    return Status::NotSupported("L0 contain more than 1 file");
  }
  if (l0.num_files == 1) {
    if (vstorage->num_non_empty_levels() > 1) {
      return Status::NotSupported("Both L0 and other level contain files");
    }
    files_ = l0;
    return Status::OK();
  }

  for (int i = 1; i < vstorage->num_non_empty_levels() - 1; ++i) {
    if (vstorage->LevelFilesBrief(i).num_files > 0) {
      return Status::NotSupported("Other levels also contain files");
    }
  }

  int level = vstorage->num_non_empty_levels() - 1;
  if (vstorage->LevelFilesBrief(level).num_files > 0) {
    files_ = vstorage->LevelFilesBrief(level);
    return Status::OK();
  }
  return Status::NotSupported("no file exists");
}
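// Illustrative aside: Init() above accepts only a "fully compacted" LSM
// shape: either a single L0 file and nothing else, or all files in one
// bottommost non-empty level. A hedged sketch of putting an existing DB into
// that shape with a full manual compaction before reopening it through this
// class:
//
//   // Compact the entire key range; afterwards the data normally resides in
//   // a single bottommost level, satisfying the checks above.
//   rocksdb::Status s = db->CompactRange(rocksdb::CompactRangeOptions(),
//                                        /*begin=*/nullptr, /*end=*/nullptr);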
Status CompactedDBImpl::Open(const Options& options, const std::string& dbname,
                             DB** dbptr) {
  *dbptr = nullptr;

  if (options.max_open_files != -1) {
    return Status::InvalidArgument("require max_open_files = -1");
  }
  if (options.merge_operator.get() != nullptr) {
    return Status::InvalidArgument("merge operator is not supported");
  }
  DBOptions db_options(options);
  std::unique_ptr<CompactedDBImpl> db(new CompactedDBImpl(db_options, dbname));
  Status s = db->Init(options);
  if (s.ok()) {
    s = db->StartPeriodicTaskScheduler();
  }
  if (s.ok()) {
    ROCKS_LOG_INFO(db->immutable_db_options_.info_log,
                   "Opened the db as fully compacted mode");
    LogFlush(db->immutable_db_options_.info_log);
    *dbptr = db.release();
  }
  return s;
}
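// Hedged aside: applications do not call CompactedDBImpl::Open() directly;
// DB::OpenForReadOnly() is the public entry point and, on the assumption
// that the dispatch in db_impl_readonly.cc is unchanged, tries the compacted
// path first when the preconditions checked above hold, falling back to the
// ordinary read-only DB otherwise. The caller must opt in explicitly:
//
//   rocksdb::Options options;
//   options.max_open_files = -1;  // required by Open() above
//   // options.merge_operator must be unset, as checked above
//   rocksdb::DB* db = nullptr;
//   rocksdb::Status s =
//       rocksdb::DB::OpenForReadOnly(options, "/path/to/db", &db);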
}  // namespace ROCKSDB_NAMESPACE