mirror of
https://github.com/facebook/rocksdb.git
synced 2024-11-30 04:41:49 +00:00
8f763bdeab
Summary: **Context:** We prefetch the tail part of a SST file (i.e, the blocks after data blocks till the end of the file) during each SST file open in hope to prefetch all the stuff at once ahead of time for later read e.g, footer, meta index, filter/index etc. The existing approach to estimate the tail size to prefetch is through `TailPrefetchStats` heuristics introduced in https://github.com/facebook/rocksdb/pull/4156, which has caused small reads in unlucky case (e.g, small read into the tail buffer during table open in thread 1 under the same BlockBasedTableFactory object can make thread 2's tail prefetching use a small size that it shouldn't) and is hard to debug. Therefore we decide to record the exact tail size and use it directly to prefetch tail of the SST instead of relying heuristics. **Summary:** - Obtain and record in manifest the tail size in `BlockBasedTableBuilder::Finish()` - For backward compatibility, we fall back to TailPrefetchStats and last to simple heuristics that the tail size is a linear portion of the file size - see PR conversation for more. - Make`tail_start_offset` part of the table properties and deduct tail size to record in manifest for external files (e.g, file ingestion, import CF) and db repair (with no access to manifest). Pull Request resolved: https://github.com/facebook/rocksdb/pull/11406 Test Plan: 1. New UT 2. db bench Note: db bench on /tmp/ where direct read is supported is too slow to finish and the default pinning setting in db bench is not helpful to profile # sst read of Get. Therefore I hacked the following to obtain the following comparison. 
``` diff --git a/table/block_based/block_based_table_reader.cc b/table/block_based/block_based_table_reader.cc index bd5669f0f..791484c1f 100644 --- a/table/block_based/block_based_table_reader.cc +++ b/table/block_based/block_based_table_reader.cc @@ -838,7 +838,7 @@ Status BlockBasedTable::PrefetchTail( &tail_prefetch_size); // Try file system prefetch - if (!file->use_direct_io() && !force_direct_prefetch) { + if (false && !file->use_direct_io() && !force_direct_prefetch) { if (!file->Prefetch(prefetch_off, prefetch_len, ro.rate_limiter_priority) .IsNotSupported()) { prefetch_buffer->reset(new FilePrefetchBuffer( diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index ea40f5fa0..39a0ac385 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -4191,6 +4191,8 @@ class Benchmark { std::shared_ptr<TableFactory>(NewCuckooTableFactory(table_options)); } else { BlockBasedTableOptions block_based_options; + block_based_options.metadata_cache_options.partition_pinning = + PinningTier::kAll; block_based_options.checksum = static_cast<ChecksumType>(FLAGS_checksum_type); if (FLAGS_use_hash_search) { ``` Create DB ``` ./db_bench --bloom_bits=3 --use_existing_db=1 --seed=1682546046158958 --partition_index_and_filters=1 --statistics=1 -db=/dev/shm/testdb/ -benchmarks=readrandom -key_size=3200 -value_size=512 -num=1000000 -write_buffer_size=6550000 -disable_auto_compactions=false -target_file_size_base=6550000 -compression_type=none ``` ReadRandom ``` ./db_bench --bloom_bits=3 --use_existing_db=1 --seed=1682546046158958 --partition_index_and_filters=1 --statistics=1 -db=/dev/shm/testdb/ -benchmarks=readrandom -key_size=3200 -value_size=512 -num=1000000 -write_buffer_size=6550000 -disable_auto_compactions=false -target_file_size_base=6550000 -compression_type=none ``` (a) Existing (Use TailPrefetchStats for tail size + use seperate prefetch buffer in PartitionedFilter/IndexReader::CacheDependencies()) ``` rocksdb.table.open.prefetch.tail.hit COUNT : 3395 
rocksdb.sst.read.micros P50 : 5.655570 P95 : 9.931396 P99 : 14.845454 P100 : 585.000000 COUNT : 999905 SUM : 6590614 ``` (b) This PR (Record tail size + use the same tail buffer in PartitionedFilter/IndexReader::CacheDependencies()) ``` rocksdb.table.open.prefetch.tail.hit COUNT : 14257 rocksdb.sst.read.micros P50 : 5.173347 P95 : 9.015017 P99 : 12.912610 P100 : 228.000000 COUNT : 998547 SUM : 5976540 ``` As we can see, we increase the prefetch tail hit count and decrease SST read count with this PR 3. Test backward compatibility by stepping through reading with post-PR code on a db generated pre-PR. Reviewed By: pdillinger Differential Revision: D45413346 Pulled By: hx235 fbshipit-source-id: 7d5e36a60a72477218f79905168d688452a4c064
160 lines
6 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
// (found in the LICENSE.Apache file in the root directory).
|
|
//
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
#include <cinttypes>
|
|
#include <vector>
|
|
|
|
#include "db/column_family.h"
|
|
#include "db/db_impl/db_impl.h"
|
|
#include "db/job_context.h"
|
|
#include "db/version_set.h"
|
|
#include "logging/logging.h"
|
|
#include "rocksdb/status.h"
|
|
#include "util/cast_util.h"
|
|
|
|
namespace ROCKSDB_NAMESPACE {
|
|
|
|
// Suggests (does not force) compaction of the key range [begin, end] in
// `column_family`: every SST file overlapping the range on any non-last
// non-empty level is flagged `marked_for_compaction`, compaction scores are
// recomputed, and the background scheduler is poked. A null `begin`/`end`
// means "unbounded" on that side. Always returns OK.
Status DBImpl::SuggestCompactRange(ColumnFamilyHandle* column_family,
                                   const Slice* begin, const Slice* end) {
  auto* handle = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
  auto* cfd = handle->cfd();

  // Translate the optional user-key bounds into internal-key bounds once,
  // up front; `lower`/`upper` stay null for an open-ended side.
  InternalKey lower_key;
  InternalKey upper_key;
  InternalKey* lower = nullptr;
  InternalKey* upper = nullptr;
  if (begin != nullptr) {
    lower_key.SetMinPossibleForUserKey(*begin);
    lower = &lower_key;
  }
  if (end != nullptr) {
    upper_key.SetMaxPossibleForUserKey(*end);
    upper = &upper_key;
  }

  {
    InstrumentedMutexLock guard(&mutex_);
    auto* vstorage = cfd->current()->storage_info();
    // Skip the deepest non-empty level: files there have nowhere to go.
    const int last_nonempty = vstorage->num_non_empty_levels() - 1;
    for (int level = 0; level < last_nonempty; ++level) {
      std::vector<FileMetaData*> overlapping;
      vstorage->GetOverlappingInputs(level, lower, upper, &overlapping);
      for (FileMetaData* file : overlapping) {
        file->marked_for_compaction = true;
      }
    }
    // Marking files changes what the compaction picker should do, so refresh
    // the per-level scores and make sure a background job gets scheduled.
    vstorage->ComputeCompactionScore(*cfd->ioptions(),
                                     *cfd->GetLatestMutableCFOptions());
    SchedulePendingCompaction(cfd);
    MaybeScheduleFlushOrCompaction();
  }
  return Status::OK();
}
|
|
|
|
// Moves every L0 file of `column_family` to `target_level` via a pure
// manifest edit -- no data is rewritten. Only legal when the L0 files are
// non-overlapping, none is being compacted, and levels 1..target_level are
// empty; otherwise returns InvalidArgument without modifying anything.
Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
  assert(column_family);

  // Promoting "to" L0 (or a negative level) is meaningless.
  if (target_level < 1) {
    ROCKS_LOG_INFO(immutable_db_options_.info_log,
                   "PromoteL0 FAILED. Invalid target level %d\n", target_level);
    return Status::InvalidArgument("Invalid target level");
  }
  // TODO: plumb Env::IOActivity
  const ReadOptions read_options;
  Status status;
  VersionEdit edit;
  // `true` presumably requests flush-memtable-style cleanup tracking -- see
  // JobContext's constructor; verify if touching this.
  JobContext job_context(next_job_id_.fetch_add(1), true);
  {
    // All validation and the version edit happen under the DB mutex so the
    // set of L0 files cannot change underneath us.
    InstrumentedMutexLock l(&mutex_);
    auto* cfd = static_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
    const auto* vstorage = cfd->current()->storage_info();

    if (target_level >= vstorage->num_levels()) {
      ROCKS_LOG_INFO(immutable_db_options_.info_log,
                     "PromoteL0 FAILED. Target level %d does not exist\n",
                     target_level);
      // Every early return inside this scope must Clean() the job context
      // first; nothing has been applied yet.
      job_context.Clean();
      status = Status::InvalidArgument("Target level does not exist");
      return status;
    }

    // Sort L0 files by range.
    const InternalKeyComparator* icmp = &cfd->internal_comparator();
    auto l0_files = vstorage->LevelFiles(0);
    std::sort(l0_files.begin(), l0_files.end(),
              [icmp](FileMetaData* f1, FileMetaData* f2) {
                return icmp->Compare(f1->largest, f2->largest) < 0;
              });

    // Check that no L0 file is being compacted and that they have
    // non-overlapping ranges. (Sorting by largest key above makes the
    // adjacent-pair overlap check below sufficient.)
    for (size_t i = 0; i < l0_files.size(); ++i) {
      auto f = l0_files[i];
      if (f->being_compacted) {
        ROCKS_LOG_INFO(immutable_db_options_.info_log,
                       "PromoteL0 FAILED. File %" PRIu64 " being compacted\n",
                       f->fd.GetNumber());
        job_context.Clean();
        status =
            Status::InvalidArgument("PromoteL0 called during L0 compaction");
        return status;
      }

      if (i == 0) continue;
      auto prev_f = l0_files[i - 1];
      if (icmp->Compare(prev_f->largest, f->smallest) >= 0) {
        ROCKS_LOG_INFO(immutable_db_options_.info_log,
                       "PromoteL0 FAILED. Files %" PRIu64 " and %" PRIu64
                       " have overlapping ranges\n",
                       prev_f->fd.GetNumber(), f->fd.GetNumber());
        job_context.Clean();
        status = Status::InvalidArgument("L0 has overlapping files");
        return status;
      }
    }

    // Check that all levels up to target_level are empty.
    for (int level = 1; level <= target_level; ++level) {
      if (vstorage->NumLevelFiles(level) > 0) {
        ROCKS_LOG_INFO(immutable_db_options_.info_log,
                       "PromoteL0 FAILED. Level %d not empty\n", level);
        job_context.Clean();
        status = Status::InvalidArgument(
            "All levels up to target_level "
            "must be empty");
        return status;
      }
    }

    // Record the move as delete-from-L0 + re-add-at-target_level, carrying
    // over every piece of file metadata unchanged (including tail_size).
    edit.SetColumnFamily(cfd->GetID());
    for (const auto& f : l0_files) {
      edit.DeleteFile(0, f->fd.GetNumber());
      edit.AddFile(target_level, f->fd.GetNumber(), f->fd.GetPathId(),
                   f->fd.GetFileSize(), f->smallest, f->largest,
                   f->fd.smallest_seqno, f->fd.largest_seqno,
                   f->marked_for_compaction, f->temperature,
                   f->oldest_blob_file_number, f->oldest_ancester_time,
                   f->file_creation_time, f->epoch_number, f->file_checksum,
                   f->file_checksum_func_name, f->unique_id,
                   f->compensated_range_deletion_size, f->tail_size);
    }

    // Persist the edit to the MANIFEST; LogAndApply may release and reacquire
    // `mutex_` internally, hence passing &mutex_.
    status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(),
                                    read_options, &edit, &mutex_,
                                    directories_.GetDbDir());
    if (status.ok()) {
      // Publish a new SuperVersion so readers see the promoted layout.
      InstallSuperVersionAndScheduleWork(cfd,
                                         &job_context.superversion_contexts[0],
                                         *cfd->GetLatestMutableCFOptions());
    }
  }  // lock released here
  LogFlush(immutable_db_options_.info_log);
  job_context.Clean();

  return status;
}
|
|
|
|
} // namespace ROCKSDB_NAMESPACE
|