mirror of
https://github.com/facebook/rocksdb.git
synced 2024-11-26 16:30:56 +00:00
cc6f323705
Summary: compensate file sizes in compaction picking so files with range tombstones are preferred, such that they get compacted down earlier as they tend to delete a lot of data. This PR adds a `compensated_range_deletion_size` field in FileMeta that is computed during Flush/Compaction and persisted in MANIFEST. This value is added to `compensated_file_size` which will be used for compaction picking. Currently, for a file in level L, `compensated_range_deletion_size` is set to the estimated bytes deleted by range tombstone of this file in all levels > L. This helps to reduce space amp when data in older levels are covered by range tombstones in level L. Pull Request resolved: https://github.com/facebook/rocksdb/pull/10734 Test Plan: - Added unit tests. - benchmark to check if the above definition `compensated_range_deletion_size` is reducing space amp as intended, without affecting write amp too much. The experiment set up favorable for this optimization: large range tombstone issued infrequently. Command used: ``` ./db_bench -benchmarks=fillrandom,waitforcompaction,stats,levelstats -use_existing_db=false -avoid_flush_during_recovery=true -write_buffer_size=33554432 -level_compaction_dynamic_level_bytes=true -max_background_jobs=8 -max_bytes_for_level_base=134217728 -target_file_size_base=33554432 -writes_per_range_tombstone=500000 -range_tombstone_width=5000000 -num=50000000 -benchmark_write_rate_limit=8388608 -threads=16 -duration=1800 --max_num_range_tombstones=1000000000 ``` In this experiment, each thread wrote 16 range tombstones over the duration of 30 minutes, each range tombstone has width 5M that is the 10% of the key space width. Results shows this PR generates a smaller DB size. 
Compaction stats from this PR: ``` Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ L0 2/0 31.54 MB 0.5 0.0 0.0 0.0 8.4 8.4 0.0 1.0 0.0 63.4 135.56 110.94 544 0.249 0 0 0.0 0.0 L4 3/0 96.55 MB 0.8 18.5 6.7 11.8 18.4 6.6 0.0 2.7 65.3 64.9 290.08 284.03 108 2.686 284M 1957K 0.0 0.0 L5 15/0 404.41 MB 1.0 19.1 7.7 11.4 18.8 7.4 0.3 2.5 66.6 65.7 292.93 285.34 220 1.332 293M 3808K 0.0 0.0 L6 143/0 4.12 GB 0.0 45.0 7.5 37.5 41.6 4.1 0.0 5.5 71.2 65.9 647.00 632.66 251 2.578 739M 47M 0.0 0.0 Sum 163/0 4.64 GB 0.0 82.6 21.9 60.7 87.2 26.5 0.3 10.4 61.9 65.4 1365.58 1312.97 1123 1.216 1318M 52M 0.0 0.0 ``` Compaction stats from main: ``` Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ L0 0/0 0.00 KB 0.0 0.0 0.0 0.0 8.4 8.4 0.0 1.0 0.0 60.5 142.12 115.89 569 0.250 0 0 0.0 0.0 L4 3/0 85.68 MB 1.0 17.7 6.8 10.9 17.6 6.7 0.0 2.6 62.7 62.3 289.05 281.79 112 2.581 272M 2309K 0.0 0.0 L5 11/0 293.73 MB 1.0 18.8 7.5 11.2 18.5 7.2 0.5 2.5 64.9 63.9 296.07 288.50 220 1.346 288M 4365K 0.0 0.0 L6 130/0 3.94 GB 0.0 51.5 7.6 43.9 47.9 3.9 0.0 6.3 67.2 62.4 784.95 765.92 258 3.042 848M 51M 0.0 0.0 Sum 144/0 4.31 GB 0.0 88.0 21.9 66.0 92.3 26.3 0.5 11.0 59.6 62.5 1512.19 1452.09 1159 1.305 1409M 58M 0.0 0.0``` Reviewed By: ajkr Differential Revision: D39834713 Pulled By: cbi42 fbshipit-source-id: fe9341040b8704a8fbb10cad5cf5c43e962c7e6b
160 lines
5.9 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <cinttypes>
|
|
#include <vector>
|
|
|
|
#include "db/column_family.h"
|
|
#include "db/db_impl/db_impl.h"
|
|
#include "db/job_context.h"
|
|
#include "db/version_set.h"
|
|
#include "logging/logging.h"
|
|
#include "rocksdb/status.h"
|
|
#include "util/cast_util.h"
|
|
|
|
namespace ROCKSDB_NAMESPACE {
|
|
|
|
#ifndef ROCKSDB_LITE
|
|
// Marks every SST file overlapping [begin, end] for compaction — in all
// non-empty levels except the last one — then recomputes compaction scores
// and wakes the scheduler so the marked files get picked up. A null bound
// means "unbounded" on that side. Always returns OK.
Status DBImpl::SuggestCompactRange(ColumnFamilyHandle* column_family,
                                   const Slice* begin, const Slice* end) {
  auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
  auto cfd = cfh->cfd();
  // Widen the user-key bounds to internal keys so the overlap query is
  // inclusive of every entry for the boundary user keys.
  InternalKey lower_bound;
  InternalKey upper_bound;
  if (begin != nullptr) {
    lower_bound.SetMinPossibleForUserKey(*begin);
  }
  if (end != nullptr) {
    upper_bound.SetMaxPossibleForUserKey(*end);
  }
  {
    InstrumentedMutexLock guard(&mutex_);
    auto vstorage = cfd->current()->storage_info();
    // The last non-empty level is excluded: files there have nowhere lower
    // to be compacted to.
    const int bottommost = vstorage->num_non_empty_levels() - 1;
    for (int level = 0; level < bottommost; ++level) {
      std::vector<FileMetaData*> overlapping;
      vstorage->GetOverlappingInputs(
          level, begin == nullptr ? nullptr : &lower_bound,
          end == nullptr ? nullptr : &upper_bound, &overlapping);
      for (auto* file : overlapping) {
        file->marked_for_compaction = true;
      }
    }
    // More files are now eligible for compaction, so refresh the scores
    // before asking the scheduler to look again.
    vstorage->ComputeCompactionScore(*cfd->ioptions(),
                                     *cfd->GetLatestMutableCFOptions());
    SchedulePendingCompaction(cfd);
    MaybeScheduleFlushOrCompaction();
  }
  return Status::OK();
}
|
|
|
|
// Moves every L0 file directly to `target_level` by rewriting metadata only
// (a VersionEdit applied to the MANIFEST) — no data is read or rewritten.
// Preconditions checked here, each failure returning InvalidArgument:
//   - target_level >= 1 and exists in this column family;
//   - no L0 file is currently being compacted;
//   - L0 files have pairwise non-overlapping key ranges;
//   - levels 1..target_level are empty.
Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
  assert(column_family);

  if (target_level < 1) {
    ROCKS_LOG_INFO(immutable_db_options_.info_log,
                   "PromoteL0 FAILED. Invalid target level %d\n", target_level);
    return Status::InvalidArgument("Invalid target level");
  }

  Status status;
  VersionEdit edit;
  JobContext job_context(next_job_id_.fetch_add(1), true);
  {
    InstrumentedMutexLock l(&mutex_);
    auto* cfd = static_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
    const auto* vstorage = cfd->current()->storage_info();

    if (target_level >= vstorage->num_levels()) {
      ROCKS_LOG_INFO(immutable_db_options_.info_log,
                     "PromoteL0 FAILED. Target level %d does not exist\n",
                     target_level);
      // Clean() before every early return so the JobContext does not hold
      // superversion resources past this call.
      job_context.Clean();
      status = Status::InvalidArgument("Target level does not exist");
      return status;
    }

    // Sort L0 files by range.
    const InternalKeyComparator* icmp = &cfd->internal_comparator();
    auto l0_files = vstorage->LevelFiles(0);
    std::sort(l0_files.begin(), l0_files.end(),
              [icmp](FileMetaData* f1, FileMetaData* f2) {
                return icmp->Compare(f1->largest, f2->largest) < 0;
              });

    // Check that no L0 file is being compacted and that they have
    // non-overlapping ranges.
    for (size_t i = 0; i < l0_files.size(); ++i) {
      auto f = l0_files[i];
      if (f->being_compacted) {
        ROCKS_LOG_INFO(immutable_db_options_.info_log,
                       "PromoteL0 FAILED. File %" PRIu64 " being compacted\n",
                       f->fd.GetNumber());
        job_context.Clean();
        status =
            Status::InvalidArgument("PromoteL0 called during L0 compaction");
        return status;
      }

      if (i == 0) continue;
      // Files are sorted by largest key, so overlap exists iff the previous
      // file's largest key reaches into this file's range.
      auto prev_f = l0_files[i - 1];
      if (icmp->Compare(prev_f->largest, f->smallest) >= 0) {
        ROCKS_LOG_INFO(immutable_db_options_.info_log,
                       "PromoteL0 FAILED. Files %" PRIu64 " and %" PRIu64
                       " have overlapping ranges\n",
                       prev_f->fd.GetNumber(), f->fd.GetNumber());
        job_context.Clean();
        status = Status::InvalidArgument("L0 has overlapping files");
        return status;
      }
    }

    // Check that all levels up to target_level are empty.
    for (int level = 1; level <= target_level; ++level) {
      if (vstorage->NumLevelFiles(level) > 0) {
        ROCKS_LOG_INFO(immutable_db_options_.info_log,
                       "PromoteL0 FAILED. Level %d not empty\n", level);
        job_context.Clean();
        status = Status::InvalidArgument(
            "All levels up to target_level "
            "must be empty");
        return status;
      }
    }

    // Build the metadata-only move: delete each file from L0 and re-add the
    // same file (same file number, size, keys, seqnos, checksum, etc.) at
    // target_level.
    edit.SetColumnFamily(cfd->GetID());
    for (const auto& f : l0_files) {
      edit.DeleteFile(0, f->fd.GetNumber());
      edit.AddFile(target_level, f->fd.GetNumber(), f->fd.GetPathId(),
                   f->fd.GetFileSize(), f->smallest, f->largest,
                   f->fd.smallest_seqno, f->fd.largest_seqno,
                   f->marked_for_compaction, f->temperature,
                   f->oldest_blob_file_number, f->oldest_ancester_time,
                   f->file_creation_time, f->epoch_number, f->file_checksum,
                   f->file_checksum_func_name, f->unique_id,
                   f->compensated_range_deletion_size);
    }

    // Persist the edit to the MANIFEST; on success install the new
    // superversion and let background work pick up the new shape.
    status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(),
                                    &edit, &mutex_, directories_.GetDbDir());
    if (status.ok()) {
      InstallSuperVersionAndScheduleWork(cfd,
                                         &job_context.superversion_contexts[0],
                                         *cfd->GetLatestMutableCFOptions());
    }
  } // lock released here
  LogFlush(immutable_db_options_.info_log);
  job_context.Clean();

  return status;
}
|
|
#endif // ROCKSDB_LITE
|
|
|
|
} // namespace ROCKSDB_NAMESPACE
|