// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/compaction/compaction_outputs.h"

#include "db/builder.h"

namespace ROCKSDB_NAMESPACE {

void CompactionOutputs::NewBuilder(const TableBuilderOptions& tboptions) {
  builder_.reset(NewTableBuilder(tboptions, file_writer_.get()));
}

Refactor, clean up, fixes, and more testing for SeqnoToTimeMapping (#11905)
Summary:
This change precedes a planned DBImpl change to ensure all sufficiently recent sequence numbers since Open are covered by SeqnoToTimeMapping (a bug fix with existing test work-arounds); that DBImpl change is the intended follow-up.
However, I found enough issues with SeqnoToTimeMapping to warrant this PR first, including some very small fixes in the DB implementation related to the API contract of SeqnoToTimeMapping.
Functional fixes / changes:
* This fixes some mishandling of boundary cases. For example, if the user stops writing to the DB, the last written sequence number would perpetually have its write time updated to "now" and would always be ineligible for migration to the cold tier. Part of the problem was that SeqnoToTimeMapping would return a seqno known to have been written before (immediately or otherwise) the requested time, but compaction_job.cc would include that seqno in the preserve/exclude set. That is fixed (in part) by adding one to the returned seqno in compaction_job.cc.
* That problem was worse because a whole range of seqnos could be perpetually updated with new times in SeqnoToTimeMapping::Append (if there were no writes to the DB). That logic was apparently optimized for GetOldestApproximateTime (now GetProximalTimeBeforeSeqno), which is not used in production, to the detriment of GetOldestSequenceNum (now GetProximalSeqnoBeforeTime), which is used in production. (Perhaps plans changed during development?) Append is now fixed to optimize for the accuracy of GetProximalSeqnoBeforeTime. (Unit tests added and updated.)
* Related: SeqnoToTimeMapping did not have a clear contract about the relationship between seqnos and times, just the idea of a rough correspondence. Now the class description makes it clear that the write time of each recorded seqno comes before or at the associated time, to support getting the best results for GetProximalSeqnoBeforeTime. This also makes it easier to state the contract of each API function clearly.
* Update `DBImpl::RecordSeqnoToTimeMapping()` to follow this ordering when gathering samples.
Some of these changes required an expanded test work-around for the problem (see intended follow-up above) that the DB does not immediately ensure recent seqnos are covered by its mapping. These work-arounds will be removed with that planned work.
An apparent compaction bug is revealed in PrecludeLastLevelTest::RangeDelsCauseFileEndpointsToOverlap, so that test is disabled. Filed as GitHub issue #11909.
Cosmetic / code safety things (not exhaustive):
* Fix some confusing names.
* `seqno_time_mapping` was used inconsistently in places. Now it is consistently `seqno_to_time_mapping`, corresponding to the class name.
* Rename the confusing `GetOldestSequenceNum` -> `GetProximalSeqnoBeforeTime` and `GetOldestApproximateTime` -> `GetProximalTimeBeforeSeqno`. Part of the motivation is that our times and seqnos here have the same underlying type, so we want to be clear about which is expected where, to avoid mixing them up.
* Rename `kUnknownSeqnoTime` to `kUnknownTimeBeforeAll` because the old value would be a bad choice for "unknown" if we ever add ProximalAfterBlah functions.
* Arithmetic on SeqnoTimePair doesn't make sense except for delta encoding, so use better names / APIs with that in mind.
* (OMG) Don't allow direct comparison between SeqnoTimePair and SequenceNumber. (There is no checking that it isn't compared against a time by accident.)
* A field name essentially matching the containing class name is a confusing pattern (`seqno_time_mapping_`).
* Wrap calls to the confusing (but useful) upper_bound and lower_bound functions to have clearer names and more code reuse.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11905
Test Plan: GetOldestSequenceNum (now GetProximalSeqnoBeforeTime) and TruncateOldEntries were lacking unit tests, despite both being used in production (experimental feature). Added those and expanded others.
Reviewed By: jowlyzhang
Differential Revision: D49755592
Pulled By: pdillinger
fbshipit-source-id: f72a3baac74d24b963c77e538bba89a7fc8dce51
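To make the GetProximalSeqnoBeforeTime contract concrete: given pairs sorted by seqno, where each recorded seqno was written at or before its paired time, the query returns the largest recorded seqno whose paired time is at or before the requested time. A minimal sketch over a plain sorted vector (illustrative only, not the class's actual implementation; returning 0 stands in for the kUnknownTimeBeforeAll case):
```cpp
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <utility>
#include <vector>

// {seqno, time}, sorted by seqno. Invariant per the contract above: the
// seqno was written at or before the paired time, so times are
// non-decreasing as seqnos increase.
using SeqnoTime = std::pair<uint64_t, uint64_t>;

// Largest recorded seqno whose paired time is <= t; 0 if no recorded
// time is early enough (the "unknown / before all" case).
uint64_t ProximalSeqnoBeforeTime(const std::vector<SeqnoTime>& m, uint64_t t) {
  // First entry with time > t; every entry before it has time <= t.
  auto it = std::upper_bound(
      m.begin(), m.end(), t,
      [](uint64_t time, const SeqnoTime& p) { return time < p.second; });
  return it == m.begin() ? 0 : std::prev(it)->first;
}
```
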
Status CompactionOutputs::Finish(
    const Status& input_status,
    const SeqnoToTimeMapping& seqno_to_time_mapping) {
  FileMetaData* meta = GetMetaData();
  assert(meta != nullptr);
  Status s = input_status;
  if (s.ok()) {
Fix/cleanup SeqnoToTimeMapping (#12253)
Summary:
The SeqnoToTimeMapping class (RocksDB internal), used by the preserve_internal_time_seconds / preclude_last_level_data_seconds options, was essentially in a prototype state, with some significant flaws that risked biting us some day. This is a big, complicated change because both the implementation and the behavioral requirements of the class needed to be upgraded together. In short, this makes SeqnoToTimeMapping more internally responsible for maintaining good invariants, so that callers don't easily encounter dangerous scenarios.
* Some API functions were confusingly named and structured, so I fully refactored the APIs to use clear naming (e.g. `DecodeFrom` and `CopyFromSeqnoRange`), object states, function preconditions, etc.
* Previously the object could informally be sorted / compacted or not, with limited checking or enforcement of these states. Now there's a well-defined "enforced" state that is consistently checked in debug mode for applicable operations. (I attempted to create a separate "builder" class for unenforced states, but IIRC found that more cumbersome for existing uses than it was worth.)
* Previously, operations would coalesce data in a way that was better for `GetProximalTimeBeforeSeqno` than for `GetProximalSeqnoBeforeTime`, which is odd because the latter is the only one currently used by DB code (what is the seqno cut-off for data definitely older than this given time?). This is now reversed to consistently favor `GetProximalSeqnoBeforeTime`, with that logic concentrated in one place: `SeqnoToTimeMapping::SeqnoTimePair::Merge()`. Unfortunately, a lot of unit test logic was specifically testing the old, suboptimal behavior.
* Previously, the natural behavior of SeqnoToTimeMapping was to THROW AWAY data needed to give reasonable answers to the important `GetProximalSeqnoBeforeTime` queries, because SeqnoToTimeMapping only had a FIFO policy for staying within the entry capacity (except in aggregate+sort+serialize mode). If the DB wasn't extremely careful to avoid gathering too many time mappings, it could lose track of where the seqno cutoff was for cold data (`GetProximalSeqnoBeforeTime()` returning 0), preventing all further data migration to the cold tier until enough time passed for the mappings to catch up with their FIFO purging. (The problem is not so acute because SST files contain relevant snapshots of the mappings, but it would apply to long-lived memtables.)
* Now the SeqnoToTimeMapping class has fully-integrated smarts for keeping a sufficiently complete history, within capacity limits, to give good answers to `GetProximalSeqnoBeforeTime` queries.
* Fixes the old `// FIXME: be smarter about how we erase to avoid data falling off the front prematurely.`
* Fix an apparent bug in how entries are selected for storing into SST files. Previously, only entries within the seqno range of the file were selected, which could easily leave a gap at the beginning of the timeline for the file's data when answering GetProximalXXX queries with reasonable accuracy. This probably led to the same problem discussed above with naively throwing away entries in FIFO order in the old SeqnoToTimeMapping. The updated testing of GetProximalSeqnoBeforeTime in BasicSeqnoToTimeMapping relies on the fixed behavior.
* Fix a potential compaction CPU efficiency/scaling issue in which each compaction output file would iterate over and sort all seqno-to-time mappings from all compaction input files. Now we distill the input file entries to a constant size before processing each compaction output file.
Intended follow-up (me or others):
* Expand some direct testing of SeqnoToTimeMapping APIs. Here I've focused on updating existing tests to make sense.
* There are likely more gaps in the availability of needed SeqnoToTimeMapping data when the DB shuts down and is restarted, at least with the WAL.
* The data tracked in the DB could be kept more accurate and bounded if it used the oldest seqno of unflushed data. This might require some more API refactoring.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/12253
Test Plan: unit tests updated
Reviewed By: jowlyzhang
Differential Revision: D52913733
Pulled By: pdillinger
fbshipit-source-id: 020737fcbbe6212f6701191a6ab86565054c9593
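One way to see why capacity enforcement must be smarter than FIFO: evicting the oldest entries is exactly what pushes `GetProximalSeqnoBeforeTime()` toward returning 0 for old times. A hedged sketch of an invariant-preserving alternative (a hypothetical heuristic for illustration, not RocksDB's actual `Merge()` policy): dropping an interior entry only widens a gap, so answers degrade conservatively instead of vanishing.
```cpp
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

using SeqnoTime = std::pair<uint64_t, uint64_t>;  // {seqno, time}, sorted

// Hypothetical capacity enforcement: dropping entry i only degrades
// GetProximalSeqnoBeforeTime answers for times in
// [m[i].time, m[i+1].time) -- they fall back to the previous entry --
// so evict the entry covering the smallest time span rather than the
// oldest one. The newest entry is always kept.
void EnforceCapacity(std::vector<SeqnoTime>& m, size_t capacity) {
  while (m.size() > capacity && m.size() >= 2) {
    size_t victim = 0;
    uint64_t min_span = UINT64_MAX;
    for (size_t i = 0; i + 1 < m.size(); ++i) {
      uint64_t span = m[i + 1].second - m[i].second;
      if (span < min_span) {
        min_span = span;
        victim = i;
      }
    }
    m.erase(m.begin() + static_cast<std::ptrdiff_t>(victim));
  }
}
```
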
    SeqnoToTimeMapping relevant_mapping;
    relevant_mapping.CopyFromSeqnoRange(
        seqno_to_time_mapping, meta->fd.smallest_seqno, meta->fd.largest_seqno);
    relevant_mapping.SetCapacity(kMaxSeqnoTimePairsPerSST);
    builder_->SetSeqnoTimeTableProperties(relevant_mapping,
                                          meta->oldest_ancester_time);
    s = builder_->Finish();
  } else {
    builder_->Abandon();
  }
  Status io_s = builder_->io_status();
  if (s.ok()) {
    s = io_s;
  } else {
    io_s.PermitUncheckedError();
  }
  const uint64_t current_bytes = builder_->FileSize();
  if (s.ok()) {
    meta->fd.file_size = current_bytes;
Record and use the tail size to prefetch table tail (#11406)
Summary:
**Context:**
We prefetch the tail part of an SST file (i.e., the blocks after the data blocks, up to the end of the file) during each SST file open, in the hope of prefetching everything needed at once ahead of later reads, e.g. footer, meta index, filter/index, etc. The existing approach estimates the tail size to prefetch through the `TailPrefetchStats` heuristics introduced in https://github.com/facebook/rocksdb/pull/4156, which has caused small reads in unlucky cases (e.g., a small read into the tail buffer during table open in thread 1, under the same BlockBasedTableFactory object, can make thread 2's tail prefetching use a small size that it shouldn't) and is hard to debug. Therefore we decided to record the exact tail size and use it directly to prefetch the tail of the SST instead of relying on heuristics.
**Summary:**
- Obtain the tail size in `BlockBasedTableBuilder::Finish()` and record it in the manifest
- For backward compatibility, we fall back to TailPrefetchStats, and last to a simple heuristic that the tail size is a linear portion of the file size - see the PR conversation for more.
- Make `tail_start_offset` part of the table properties and deduce the tail size to record in the manifest for external files (e.g., file ingestion, import CF) and db repair (with no access to the manifest).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11406
Test Plan:
1. New UT
2. db bench
Note: db bench on /tmp/, where direct read is supported, is too slow to finish, and the default pinning setting in db bench is not helpful for profiling the number of SST reads per Get. Therefore I hacked the following to obtain the comparison below.
```
diff --git a/table/block_based/block_based_table_reader.cc b/table/block_based/block_based_table_reader.cc
index bd5669f0f..791484c1f 100644
--- a/table/block_based/block_based_table_reader.cc
+++ b/table/block_based/block_based_table_reader.cc
@@ -838,7 +838,7 @@ Status BlockBasedTable::PrefetchTail(
       &tail_prefetch_size);
   // Try file system prefetch
-  if (!file->use_direct_io() && !force_direct_prefetch) {
+  if (false && !file->use_direct_io() && !force_direct_prefetch) {
     if (!file->Prefetch(prefetch_off, prefetch_len, ro.rate_limiter_priority)
              .IsNotSupported()) {
       prefetch_buffer->reset(new FilePrefetchBuffer(
diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc
index ea40f5fa0..39a0ac385 100644
--- a/tools/db_bench_tool.cc
+++ b/tools/db_bench_tool.cc
@@ -4191,6 +4191,8 @@ class Benchmark {
           std::shared_ptr<TableFactory>(NewCuckooTableFactory(table_options));
     } else {
       BlockBasedTableOptions block_based_options;
+      block_based_options.metadata_cache_options.partition_pinning =
+          PinningTier::kAll;
       block_based_options.checksum =
           static_cast<ChecksumType>(FLAGS_checksum_type);
       if (FLAGS_use_hash_search) {
```
Create DB
```
./db_bench --bloom_bits=3 --use_existing_db=1 --seed=1682546046158958 --partition_index_and_filters=1 --statistics=1 -db=/dev/shm/testdb/ -benchmarks=readrandom -key_size=3200 -value_size=512 -num=1000000 -write_buffer_size=6550000 -disable_auto_compactions=false -target_file_size_base=6550000 -compression_type=none
```
ReadRandom
```
./db_bench --bloom_bits=3 --use_existing_db=1 --seed=1682546046158958 --partition_index_and_filters=1 --statistics=1 -db=/dev/shm/testdb/ -benchmarks=readrandom -key_size=3200 -value_size=512 -num=1000000 -write_buffer_size=6550000 -disable_auto_compactions=false -target_file_size_base=6550000 -compression_type=none
```
(a) Existing (use TailPrefetchStats for tail size + use a separate prefetch buffer in PartitionedFilter/IndexReader::CacheDependencies())
```
rocksdb.table.open.prefetch.tail.hit COUNT : 3395
rocksdb.sst.read.micros P50 : 5.655570 P95 : 9.931396 P99 : 14.845454 P100 : 585.000000 COUNT : 999905 SUM : 6590614
```
(b) This PR (record tail size + use the same tail buffer in PartitionedFilter/IndexReader::CacheDependencies())
```
rocksdb.table.open.prefetch.tail.hit COUNT : 14257
rocksdb.sst.read.micros P50 : 5.173347 P95 : 9.015017 P99 : 12.912610 P100 : 228.000000 COUNT : 998547 SUM : 5976540
```
As we can see, this PR increases the prefetch tail hit count and decreases the SST read count.
3. Test backward compatibility by stepping through reading with post-PR code on a db generated pre-PR.
Reviewed By: pdillinger
Differential Revision: D45413346
Pulled By: hx235
fbshipit-source-id: 7d5e36a60a72477218f79905168d688452a4c064
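For files without a manifest record (e.g. ingestion, import, repair), the message above says the tail size is deduced from the recorded `tail_start_offset` table property. A hedged sketch of that arithmetic (the helper name and the zero-means-unrecorded convention are assumptions for illustration):
```cpp
#include <cstdint>

// Tail = everything from tail_start_offset to the end of the file
// (footer, meta index, filter/index blocks, etc.).
uint64_t DeduceTailSize(uint64_t file_size, uint64_t tail_start_offset) {
  if (tail_start_offset == 0 || tail_start_offset > file_size) {
    return 0;  // property missing or inconsistent: treat as unknown
  }
  return file_size - tail_start_offset;
}
```
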
    meta->tail_size = builder_->GetTailSize();
    meta->marked_for_compaction = builder_->NeedCompact();
    meta->user_defined_timestamps_persisted = static_cast<bool>(
        builder_->GetTableProperties().user_defined_timestamps_persisted);
  }
  current_output().finished = true;
  stats_.bytes_written += current_bytes;
  stats_.num_output_files = outputs_.size();

  return s;
}

IOStatus CompactionOutputs::WriterSyncClose(const Status& input_status,
                                            SystemClock* clock,
                                            Statistics* statistics,
                                            bool use_fsync) {
  IOStatus io_s;
Group SST write in flush, compaction and db open with new stats (#11910)
Summary:
## Context/Summary
Similar to https://github.com/facebook/rocksdb/pull/11288 and https://github.com/facebook/rocksdb/pull/11444, categorizing SST/blob file writes according to different IO activities allows more insight into each activity.
For that, this PR does the following:
- Tag different write IOs by passing down and converting WriteOptions to IOOptions
- Add a new SST_WRITE_MICROS histogram in WritableFileWriter::Append() and break it down into FILE_WRITE_{FLUSH|COMPACTION|DB_OPEN}_MICROS
Some related code refactoring to make the implementation cleaner:
- Blob stats
  - Replace the high-level write measurement with the low-level WritableFileWriter::Append() measurement for BLOB_DB_BLOB_FILE_WRITE_MICROS. This is to make FILE_WRITE_{FLUSH|COMPACTION|DB_OPEN}_MICROS include blob files. As a consequence, this introduces some behavioral changes; see HISTORY and the db bench test plan below for more info.
  - Fix bugs where BLOB_DB_BLOB_FILE_SYNCED/BLOB_DB_BLOB_FILE_BYTES_WRITTEN included files that failed to sync and bytes that failed to write.
  - Refactor the WriteOptions constructor for easier construction with io_activity and rate_limiter_priority
  - Refactor DBImpl::~DBImpl()/BlobDBImpl::Close() to bypass thread op verification
- Build table
  - TableBuilderOptions now includes Read/WriteOptions so BuildTable() does not need to take these two variables
  - Replace the io_priority passed into BuildTable() with TableBuilderOptions::WriteOptions::rate_limiter_priority. Similar for BlobFileBuilder. This parameter is used for dynamically changing file IO priority for flush; see https://github.com/facebook/rocksdb/pull/9988?fbclid=IwAR1DtKel6c-bRJAdesGo0jsbztRtciByNlvokbxkV6h_L-AE9MACzqRTT5s for more
  - Update ThreadStatus::FLUSH_BYTES_WRITTEN to use io_activity to track flush IO in the flush job and db open, instead of io_priority
## Test
### db bench
Flush
```
./db_bench --statistics=1 --benchmarks=fillseq --num=100000 --write_buffer_size=100
rocksdb.sst.write.micros P50 : 1.830863 P95 : 4.094720 P99 : 6.578947 P100 : 26.000000 COUNT : 7875 SUM : 20377
rocksdb.file.write.flush.micros P50 : 1.830863 P95 : 4.094720 P99 : 6.578947 P100 : 26.000000 COUNT : 7875 SUM : 20377
rocksdb.file.write.compaction.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
rocksdb.file.write.db.open.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
```
compaction, db open
```
Setup: ./db_bench --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench
Run: ./db_bench --statistics=1 --benchmarks=compact --db=../db_bench --use_existing_db=1
rocksdb.sst.write.micros P50 : 2.675325 P95 : 9.578788 P99 : 18.780000 P100 : 314.000000 COUNT : 638 SUM : 3279
rocksdb.file.write.flush.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
rocksdb.file.write.compaction.micros P50 : 2.757353 P95 : 9.610687 P99 : 19.316667 P100 : 314.000000 COUNT : 615 SUM : 3213
rocksdb.file.write.db.open.micros P50 : 2.055556 P95 : 3.925000 P99 : 9.000000 P100 : 9.000000 COUNT : 23 SUM : 66
```
blob stats - just to make sure they aren't broken by this PR
```
Integrated Blob DB
Setup: ./db_bench --enable_blob_files=1 --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench
Run: ./db_bench --enable_blob_files=1 --statistics=1 --benchmarks=compact --db=../db_bench --use_existing_db=1
pre-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 7.298246 P95 : 9.771930 P99 : 9.991813 P100 : 16.000000 COUNT : 235 SUM : 1600
rocksdb.blobdb.blob.file.synced COUNT : 1
rocksdb.blobdb.blob.file.bytes.written COUNT : 34842
post-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 2.000000 P95 : 2.829360 P99 : 2.993779 P100 : 9.000000 COUNT : 707 SUM : 1614
- COUNT is higher and values are smaller as it includes header and footer writes
- COUNT is 3X higher because each Append() counts as one post-PR, while pre-PR, 3 Append()s counted as one. See https://github.com/facebook/rocksdb/pull/11910/files#diff-32b811c0a1c000768cfb2532052b44dc0b3bf82253f3eab078e15ff201a0dabfL157-L164
rocksdb.blobdb.blob.file.synced COUNT : 1 (stays the same)
rocksdb.blobdb.blob.file.bytes.written COUNT : 34842 (stays the same)
```
```
Stacked Blob DB
Run: ./db_bench --use_blob_db=1 --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench
pre-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 12.808042 P95 : 19.674497 P99 : 28.539683 P100 : 51.000000 COUNT : 10000 SUM : 140876
rocksdb.blobdb.blob.file.synced COUNT : 8
rocksdb.blobdb.blob.file.bytes.written COUNT : 1043445
post-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 1.657370 P95 : 2.952175 P99 : 3.877519 P100 : 24.000000 COUNT : 30001 SUM : 67924
- COUNT is higher and values are smaller as it includes header and footer writes
- COUNT is 3X higher because each Append() counts as one post-PR, while pre-PR, 3 Append()s counted as one. See https://github.com/facebook/rocksdb/pull/11910/files#diff-32b811c0a1c000768cfb2532052b44dc0b3bf82253f3eab078e15ff201a0dabfL157-L164
rocksdb.blobdb.blob.file.synced COUNT : 8 (stays the same)
rocksdb.blobdb.blob.file.bytes.written COUNT : 1043445 (stays the same)
```
### Rehearsal CI stress test
Triggered 3 full runs of all our CI stress tests.
### Performance
Flush
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_pre_pr --benchmark_filter=ManualFlush/key_num:524288/per_key_size:256 --benchmark_repetitions=1000
-- default: 1 thread is used to run benchmark; enable_statistics = true
Pre-pr: avg 507515519.3 ns
497686074,499444327,500862543,501389862,502994471,503744435,504142123,504224056,505724198,506610393,506837742,506955122,507695561,507929036,508307733,508312691,508999120,509963561,510142147,510698091,510743096,510769317,510957074,511053311,511371367,511409911,511432960,511642385,511691964,511730908,
Post-pr: avg 511971266.5 ns, regressed 0.88%
502744835,506502498,507735420,507929724,508313335,509548582,509994942,510107257,510715603,511046955,511352639,511458478,512117521,512317380,512766303,512972652,513059586,513804934,513808980,514059409,514187369,514389494,514447762,514616464,514622882,514641763,514666265,514716377,514990179,515502408,
```
Compaction
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{pre|post}_pr --benchmark_filter=ManualCompaction/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1 --benchmark_repetitions=1000
-- default: 1 thread is used to run benchmark
Pre-pr: avg 495346098.30 ns
492118301,493203526,494201411,494336607,495269217,495404950,496402598,497012157,497358370,498153846
Post-pr: avg 504528077.20, regressed 1.85%. "ManualCompaction" includes flush, so the isolated regression for compaction should be around 1.85 - 0.88 = 0.97%
502465338,502485945,502541789,502909283,503438601,504143885,506113087,506629423,507160414,507393007
```
Put with WAL (in case passing WriteOptions slows down this path even without collecting SST write stats)
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_pre_pr --benchmark_filter=DBPut/comp_style:0/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:1 --benchmark_repetitions=1000
-- default: 1 thread is used to run benchmark
Pre-pr: avg 3848.10 ns
3814,3838,3839,3848,3854,3854,3854,3860,3860,3860
Post-pr: avg 3874.20 ns, regressed 0.68%
3863,3867,3871,3874,3875,3877,3877,3877,3880,3881
```
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11910
Reviewed By: ajkr
Differential Revision: D49788060
Pulled By: hx235
fbshipit-source-id: 79e73699cda5be3b66461687e5147c2484fc5eff
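The WriterSyncClose() code below applies this tagging for compaction outputs: a WriteOptions carrying Env::IOActivity::kCompaction is converted to IOOptions before Sync()/Close(). Conceptually, the activity tag is what lets the writer bucket the same latency sample into a per-activity histogram in addition to the aggregate SST_WRITE_MICROS; a sketch of that selection (illustrative only, not RocksDB's exact internals):
```cpp
// Illustrative mapping from the IO activity carried in IOOptions to the
// per-activity write histograms named in the summary above.
Histograms PickFileWriteHistogram(Env::IOActivity activity) {
  switch (activity) {
    case Env::IOActivity::kFlush:
      return FILE_WRITE_FLUSH_MICROS;
    case Env::IOActivity::kCompaction:
      return FILE_WRITE_COMPACTION_MICROS;
    case Env::IOActivity::kDBOpen:
      return FILE_WRITE_DB_OPEN_MICROS;
    default:
      return HISTOGRAM_ENUM_MAX;  // no per-activity breakdown recorded
  }
}
```
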
  IOOptions opts;
  io_s = WritableFileWriter::PrepareIOOptions(
      WriteOptions(Env::IOActivity::kCompaction), opts);
  if (input_status.ok() && io_s.ok()) {
    StopWatch sw(clock, statistics, COMPACTION_OUTFILE_SYNC_MICROS);
    io_s = file_writer_->Sync(opts, use_fsync);
  }
  if (input_status.ok() && io_s.ok()) {
    io_s = file_writer_->Close(opts);
  }

  if (input_status.ok() && io_s.ok()) {
    FileMetaData* meta = GetMetaData();
    meta->file_checksum = file_writer_->GetFileChecksum();
    meta->file_checksum_func_name = file_writer_->GetFileChecksumFuncName();
  }

  file_writer_.reset();

  return io_s;
}

bool CompactionOutputs::UpdateFilesToCutForTTLStates(
    const Slice& internal_key) {
  if (!files_to_cut_for_ttl_.empty()) {
    const InternalKeyComparator* icmp =
        &compaction_->column_family_data()->internal_comparator();
    if (cur_files_to_cut_for_ttl_ != -1) {
      // The previous key was inside the range of a file
      if (icmp->Compare(internal_key,
                        files_to_cut_for_ttl_[cur_files_to_cut_for_ttl_]
                            ->largest.Encode()) > 0) {
        next_files_to_cut_for_ttl_ = cur_files_to_cut_for_ttl_ + 1;
        cur_files_to_cut_for_ttl_ = -1;
        return true;
      }
    } else {
      // Look for the key position
      while (next_files_to_cut_for_ttl_ <
             static_cast<int>(files_to_cut_for_ttl_.size())) {
        if (icmp->Compare(internal_key,
                          files_to_cut_for_ttl_[next_files_to_cut_for_ttl_]
                              ->smallest.Encode()) >= 0) {
          if (icmp->Compare(internal_key,
                            files_to_cut_for_ttl_[next_files_to_cut_for_ttl_]
                                ->largest.Encode()) <= 0) {
            // Within the current file
            cur_files_to_cut_for_ttl_ = next_files_to_cut_for_ttl_;
            return true;
          }
          // Beyond the current file
          next_files_to_cut_for_ttl_++;
        } else {
          // Still falls in the gap before the current file
          break;
        }
      }
    }
  }
  return false;
}

Align compaction output file boundaries to the next level ones (#10655)
Summary:
Try to align the compaction output file boundaries to those of the next level
(the grandparent level), to reduce level compaction write-amplification.
In level compaction, there is "wasted" data at the beginning and end of the
output level files. Aligning the file boundaries can avoid such "wasted"
compaction. With this PR, it tries to align the non-bottommost level file
boundaries to those of the next level. It may cut a file when the file size is
large enough (at least 50% of target_file_size) and not too large (at most
2x target_file_size).
db_bench shows about 12.56% compaction reduction:
```
TEST_TMPDIR=/data/dbbench2 ./db_bench --benchmarks=fillrandom,readrandom -max_background_jobs=12 -num=400000000 -target_file_size_base=33554432
# baseline:
Flush(GB): cumulative 25.882, interval 7.216
Cumulative compaction: 285.90 GB write, 162.36 MB/s write, 269.68 GB read, 153.15 MB/s read, 2926.7 seconds
# with this change:
Flush(GB): cumulative 25.882, interval 7.753
Cumulative compaction: 249.97 GB write, 141.96 MB/s write, 233.74 GB read, 132.74 MB/s read, 2534.9 seconds
```
The compaction simulator shows a similar result (14% with 100G random data).
As a side effect, with this PR, the SST file size can exceed the
target_file_size, but is capped at 2x target_file_size. And there will be
smaller files. Here are file size statistics when loading 100GB with the
target file size 32MB:
```
        baseline      this_PR
count   1.656000e+03  1.705000e+03
mean    3.116062e+07  3.028076e+07
std     7.145242e+06  8.046139e+06
```
The feature is enabled by default; to revert to the old behavior, disable it
with `AdvancedColumnFamilyOptions.level_compaction_dynamic_file_size = false`.
Also includes https://github.com/facebook/rocksdb/issues/1963 to cut a file
before a skippable grandparent file. This is for use cases like a user adding
2 or more non-overlapping data ranges at the same time; it can reduce the
overlap of the 2 datasets in the lower levels.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10655
Reviewed By: cbi42
Differential Revision: D39552321
Pulled By: jay-zhuang
fbshipit-source-id: 640d15f159ab0cd973f2426cfc3af266fc8bdde2
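The sizing rule above reduces to a small predicate: cut at an aligned grandparent boundary once the output reaches half the target size, and cut unconditionally before exceeding twice the target. A hedged sketch (hypothetical helper; the real decision in ShouldStopBefore() below also weighs TTL files, grandparent overlap bytes, and other state):
```cpp
#include <cstdint>

// Sketch of the boundary-aligned cut rule described above. Hypothetical
// helper, not the actual CompactionOutputs implementation.
bool ShouldCutAtBoundary(uint64_t current_size, uint64_t target_file_size,
                         bool at_grandparent_boundary) {
  if (current_size >= 2 * target_file_size) {
    return true;  // hard cap: never grow past 2x target_file_size
  }
  // Aligned cut: only once the file is "large enough" (>= 50% of target)
  return at_grandparent_boundary && current_size >= target_file_size / 2;
}
```
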
size_t CompactionOutputs::UpdateGrandparentBoundaryInfo(
    const Slice& internal_key) {
  size_t curr_key_boundary_switched_num = 0;
  const std::vector<FileMetaData*>& grandparents = compaction_->grandparents();

  if (grandparents.empty()) {
    return curr_key_boundary_switched_num;
  }
  const Comparator* ucmp = compaction_->column_family_data()->user_comparator();
  // Move the grandparent_index_ to the file containing the current user_key.
  // If there are multiple files containing the same user_key, make sure the
  // index points to the last file containing the key.
  while (grandparent_index_ < grandparents.size()) {
    if (being_grandparent_gap_) {
      if (sstableKeyCompare(ucmp, internal_key,
                            grandparents[grandparent_index_]->smallest) < 0) {
        break;
      }
      if (seen_key_) {
        curr_key_boundary_switched_num++;
        grandparent_overlapped_bytes_ +=
            grandparents[grandparent_index_]->fd.GetFileSize();
        grandparent_boundary_switched_num_++;
      }
      being_grandparent_gap_ = false;
    } else {
      int cmp_result = sstableKeyCompare(
          ucmp, internal_key, grandparents[grandparent_index_]->largest);
      // If it's the same key, make sure grandparent_index_ is pointing to
      // the last one.
      if (cmp_result < 0 ||
          (cmp_result == 0 &&
           (grandparent_index_ == grandparents.size() - 1 ||
            sstableKeyCompare(ucmp, internal_key,
                              grandparents[grandparent_index_ + 1]->smallest) <
                0))) {
        break;
      }
      if (seen_key_) {
        curr_key_boundary_switched_num++;
        grandparent_boundary_switched_num_++;
      }
      being_grandparent_gap_ = true;
      grandparent_index_++;
    }
  }

  // If the first key is in the middle of a grandparent file, add it to the
  // overlap
  if (!seen_key_ && !being_grandparent_gap_) {
    assert(grandparent_overlapped_bytes_ == 0);
    grandparent_overlapped_bytes_ =
        GetCurrentKeyGrandparentOverlappedBytes(internal_key);
}
|
|
|
|
|
|
|
|
seen_key_ = true;
|
|
|
|
return curr_key_boundary_switched_num;
|
|
|
|
}
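
// Note (added commentary, inferred from the fragment above): each advance of
// grandparent_index_ past a grandparent file bumps both switch counters once
// a key has been seen, so the returned curr_key_boundary_switched_num is the
// number of grandparent file boundaries the current key moved across
// relative to the previous key.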

uint64_t CompactionOutputs::GetCurrentKeyGrandparentOverlappedBytes(
    const Slice& internal_key) const {
  // no overlap with any grandparent file
  if (being_grandparent_gap_) {
    return 0;
  }
  uint64_t overlapped_bytes = 0;

  const std::vector<FileMetaData*>& grandparents = compaction_->grandparents();
  const Comparator* ucmp = compaction_->column_family_data()->user_comparator();
  InternalKey ikey;
  ikey.DecodeFrom(internal_key);
#ifndef NDEBUG
  // make sure grandparent_index_ is pointing to the last file containing
  // the current key.
  int cmp_result =
      sstableKeyCompare(ucmp, ikey, grandparents[grandparent_index_]->largest);
  assert(
      cmp_result < 0 ||
      (cmp_result == 0 &&
       (grandparent_index_ == grandparents.size() - 1 ||
        sstableKeyCompare(
            ucmp, ikey, grandparents[grandparent_index_ + 1]->smallest) < 0)));
  assert(sstableKeyCompare(ucmp, ikey,
                           grandparents[grandparent_index_]->smallest) >= 0);
#endif
  overlapped_bytes += grandparents[grandparent_index_]->fd.GetFileSize();

  // Go backwards to find all overlapped files; one key can overlap multiple
  // files. In the following example, if the current output key is `c`, and
  // one compaction file was cut before `c`, the current `c` can overlap with
  // 3 files:
  //    [a b]  [c...
  // [b, b] [c, c] [c, c] [c, d]
  for (int64_t i = static_cast<int64_t>(grandparent_index_) - 1;
       i >= 0 && sstableKeyCompare(ucmp, ikey, grandparents[i]->largest) == 0;
       i--) {
    overlapped_bytes += grandparents[i]->fd.GetFileSize();
  }

  return overlapped_bytes;
}
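
// Worked example for the scan above (illustrative sizes are assumed): with
// grandparent files [b, b] [c, c] [c, c] [c, d] of sizes 4, 8, 8, and 16 MB,
// and the current key `c`, grandparent_index_ points at [c, d]. The function
// returns 16 + 8 + 8 = 32 MB: the backward scan stops at [b, b] because its
// largest key no longer equals `c`.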

bool CompactionOutputs::ShouldStopBefore(const CompactionIterator& c_iter) {
  assert(c_iter.Valid());
  const Slice& internal_key = c_iter.key();
#ifndef NDEBUG
  bool should_stop = false;
  std::pair<bool*, const Slice> p{&should_stop, internal_key};
  TEST_SYNC_POINT_CALLBACK(
      "CompactionOutputs::ShouldStopBefore::manual_decision", (void*)&p);
  if (should_stop) {
    return true;
  }
#endif  // NDEBUG
  const uint64_t previous_overlapped_bytes = grandparent_overlapped_bytes_;
  const InternalKeyComparator* icmp =
      &compaction_->column_family_data()->internal_comparator();
  size_t num_grandparent_boundaries_crossed = 0;
  bool should_stop_for_ttl = false;
  // Always update grandparent information like overlapped file number, size
  // etc., and TTL states.
  // If compaction_->output_level() == 0, there is no need to update
  // grandparent info, and `grandparents` should be empty.
  if (compaction_->output_level() > 0) {
    num_grandparent_boundaries_crossed =
        UpdateGrandparentBoundaryInfo(internal_key);
    should_stop_for_ttl = UpdateFilesToCutForTTLStates(internal_key);
  }

  if (!HasBuilder()) {
    return false;
  }

  if (should_stop_for_ttl) {
    return true;
  }

  // If there's a user-defined partitioner, check that first
  if (partitioner_ && partitioner_->ShouldPartition(PartitionerRequest(
                          last_key_for_partitioner_, c_iter.user_key(),
                          current_output_file_size_)) == kRequired) {
    return true;
  }

  // files output to Level 0 won't be split
  if (compaction_->output_level() == 0) {
    return false;
  }

  // reached the max file size
  if (current_output_file_size_ >= compaction_->max_output_file_size()) {
    return true;
  }

  // Check if it needs to split for RoundRobin
  // An invalid local_output_split_key indicates that we do not need to split
  if (local_output_split_key_ != nullptr && !is_split_) {
    // Split occurs when the next key is larger than/equal to the cursor
    if (icmp->Compare(internal_key, local_output_split_key_->Encode()) >= 0) {
      is_split_ = true;
      return true;
    }
  }

  // Only check whether the current key is going to cross a grandparent file
  // boundary (either the file beginning or ending).
  if (num_grandparent_boundaries_crossed > 0) {
    // Cut the file before the current key if the size of the current output
    // file plus its overlapped grandparent files is bigger than
    // max_compaction_bytes. This prevents a future compaction from the
    // current output level from exceeding max_compaction_bytes.
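    // For instance (hypothetical numbers): with max_compaction_bytes = 64MB,
    // a current output file of 30MB that overlaps 40MB of grandparent data
    // would seed a 70MB compaction later, so the file is cut here first.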
    if (grandparent_overlapped_bytes_ + current_output_file_size_ >
        compaction_->max_compaction_bytes()) {
      return true;
    }

    // Cut the file if including the key is going to add a skippable file on
    // the grandparent level AND its size is reasonably big (1/8 of target
    // file size). For example, if it's compacting the files L0 + L1:
    //  L0: [1,  21]
    //  L1:    [3,  23]
    //  L2: [2, 4] [11, 15] [22, 24]
    // Without this break, it will output as:
    //  L1: [1,3, 21,23]
    // With this break, it will output as (assuming [11, 15] at L2 is bigger
    // than 1/8 of target size):
    //  L1: [1,3] [21,23]
    // Then for future compactions, [11,15] won't be included.
    // For random datasets (either evenly distributed or skewed), it rarely
    // triggers this condition, but if the user is adding 2 different datasets
    // without any overlap, it is likely to happen.
    // For more details, check PR #1963
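    // Why 2 vs 3 (added commentary, a reading of the constant below rather
    // than from the original source): when the current key lands in a gap,
    // fully skipping a grandparent file means crossing its start and end
    // boundaries (2 switches); when the current key lands inside a
    // grandparent file, one more switch, into that file, is needed (3).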
    const size_t num_skippable_boundaries_crossed =
        being_grandparent_gap_ ? 2 : 3;
    if (compaction_->immutable_options()->compaction_style ==
            kCompactionStyleLevel &&
        num_grandparent_boundaries_crossed >=
            num_skippable_boundaries_crossed &&
        grandparent_overlapped_bytes_ - previous_overlapped_bytes >
            compaction_->target_output_file_size() / 8) {
      return true;
    }

    // Pre-cut the output file if it's reaching a certain size AND it's at the
    // boundary of a grandparent file. It can reduce future compaction sizes;
    // the cost is having smaller files.
    // The pre-cut size threshold is based on how many grandparent boundaries
    // have been seen so far. Basically, if no boundary has been seen at all,
    // it will pre-cut at 50% of the target file size. Every boundary seen
    // increases the threshold by 5%, capped at 90%, at which it will always
    // cut. The idea is that the more boundaries it has seen, the more likely
    // it is to see another boundary (a file-cutting opportunity) before
    // reaching the target file size. Tests show this generates larger files
    // than a static threshold like 75%, with a similar write-amplification
    // improvement.
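    // Worked example (hypothetical numbers): with target_output_file_size =
    // 32MB and 4 boundary switches seen, the threshold below evaluates to
    // (32MB / 100) * (50 + min(4 * 5, 40)) = 70% of the target, i.e. ~22.4MB.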
    if (compaction_->immutable_options()->compaction_style ==
            kCompactionStyleLevel &&
        current_output_file_size_ >=
            ((compaction_->target_output_file_size() + 99) / 100) *
                (50 + std::min(grandparent_boundary_switched_num_ * 5,
                               size_t{40}))) {
      return true;
    }
  }

  return false;
}

Status CompactionOutputs::AddToOutput(
    const CompactionIterator& c_iter,
    const CompactionFileOpenFunc& open_file_func,
    const CompactionFileCloseFunc& close_file_func) {
  Status s;
  bool is_range_del = c_iter.IsDeleteRangeSentinelKey();
  if (is_range_del && compaction_->bottommost_level()) {
    // We don't consider range tombstones for the bottommost level since:
    // 1. there is no grandparent and hence no overlap to consider
    // 2. range tombstones may be dropped at the bottommost level.
    return s;
  }
  const Slice& key = c_iter.key();
  if (ShouldStopBefore(c_iter) && HasBuilder()) {
    s = close_file_func(*this, c_iter.InputStatus(), key);
    if (!s.ok()) {
      return s;
    }
    // reset grandparent information
    grandparent_boundary_switched_num_ = 0;
    grandparent_overlapped_bytes_ =
        GetCurrentKeyGrandparentOverlappedBytes(key);
    if (UNLIKELY(is_range_del)) {
      // lower bound for this new output file; this is needed as the lower
      // bound does not come from the smallest point key in this case.
      range_tombstone_lower_bound_.DecodeFrom(key);
    } else {
      range_tombstone_lower_bound_.Clear();
    }
  }

  // Open output file if necessary
  if (!HasBuilder()) {
    s = open_file_func(*this);
    if (!s.ok()) {
      return s;
    }
  }

  // c_iter may emit range deletion keys, so update `last_key_for_partitioner_`
  // here before returning below when `is_range_del` is true
  if (partitioner_) {
    last_key_for_partitioner_.assign(c_iter.user_key().data_,
                                     c_iter.user_key().size_);
  }

  if (UNLIKELY(is_range_del)) {
    return s;
  }

  assert(builder_ != nullptr);
  const Slice& value = c_iter.value();
  s = current_output().validator.Add(key, value);
  if (!s.ok()) {
    return s;
  }
  builder_->Add(key, value);

  stats_.num_output_records++;
  current_output_file_size_ = builder_->EstimatedFileSize();

  if (blob_garbage_meter_) {
    s = blob_garbage_meter_->ProcessOutFlow(key, value);
  }

  if (!s.ok()) {
    return s;
  }

  const ParsedInternalKey& ikey = c_iter.ikey();
  s = current_output().meta.UpdateBoundaries(key, value, ikey.sequence,
                                             ikey.type);

  return s;
}

namespace {
void SetMaxSeqAndTs(InternalKey& internal_key, const Slice& user_key,
                    const size_t ts_sz) {
  if (ts_sz) {
    static constexpr char kTsMax[] = "\xff\xff\xff\xff\xff\xff\xff\xff\xff";
    if (ts_sz <= strlen(kTsMax)) {
      internal_key = InternalKey(user_key, kMaxSequenceNumber,
                                 kTypeRangeDeletion, Slice(kTsMax, ts_sz));
    } else {
      internal_key =
          InternalKey(user_key, kMaxSequenceNumber, kTypeRangeDeletion,
                      std::string(ts_sz, '\xff'));
    }
  } else {
    internal_key.Set(user_key, kMaxSequenceNumber, kTypeRangeDeletion);
  }
}
}  // namespace
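
// Note on the helper above (added commentary): SetMaxSeqAndTs builds the
// largest possible internal key for `user_key`: kMaxSequenceNumber,
// kTypeRangeDeletion, and, when user-defined timestamps are enabled, a
// timestamp of ts_sz 0xff bytes. For example, with ts_sz == 8 it reuses the
// static kTsMax buffer (strlen(kTsMax) == 9); only for ts_sz > 9 does it
// fall back to allocating a std::string of 0xff bytes.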

Status CompactionOutputs::AddRangeDels(
    const Slice* comp_start_user_key, const Slice* comp_end_user_key,
    CompactionIterationStats& range_del_out_stats, bool bottommost_level,
    const InternalKeyComparator& icmp, SequenceNumber earliest_snapshot,
    const Slice& next_table_min_key, const std::string& full_history_ts_low) {
  // The following example does not happen since
  // CompactionOutputs::ShouldStopBefore() always returns false for the first
  // point key. But we should consider removing this dependency. Suppose for
  // the first compaction output file,
  // - next_table_min_key.user_key == comp_start_user_key
  // - no point key is in the output file
  // - there is a range tombstone @seqno to be added that covers
  //   comp_start_user_key
  // Then meta.smallest will be set to comp_start_user_key@seqno
  // and meta.largest will be set to comp_start_user_key@kMaxSequenceNumber,
  // which violates the assumption that meta.smallest should be <=
  // meta.largest.
  assert(HasRangeDel());
  FileMetaData& meta = current_output().meta;
  const Comparator* ucmp = icmp.user_comparator();
  InternalKey lower_bound_buf, upper_bound_buf;
  Slice lower_bound_guard, upper_bound_guard;
  std::string smallest_user_key;
  const Slice *lower_bound, *upper_bound;

  // We first determine the internal key lower_bound and upper_bound for
  // this output file. All and only range tombstones that overlap with
  // [lower_bound, upper_bound] should be added to this file. File
  // boundaries (meta.smallest/largest) should be updated accordingly when
  // extended by range tombstones.
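  // Illustration (added, with hypothetical keys): if this subcompaction
  // covers [k1, k9], a tombstone [k3, k7) exists, and the output is cut into
  // two files at k5, then the first file's range extends to
  // k5@kMaxSequenceNumber and the second file picks up coverage from k5
  // onward, so the tombstone loses no coverage and the two files do not
  // overlap in internal key space.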
  size_t output_size = outputs_.size();
  if (output_size == 1) {
    // This is the first file in the subcompaction.
    //
    // When outputting a range tombstone that spans a subcompaction boundary,
    // the files on either side of that boundary need to include that
    // boundary's user key. Otherwise, the spanning range tombstone would lose
    // coverage.
    //
    // To achieve this while preventing files from overlapping in internal key
    // (an LSM invariant violation), we allow the earlier file to include the
    // boundary user key up to `kMaxSequenceNumber,kTypeRangeDeletion`. The
    // later file can begin at the boundary user key at the newest key version
    // it contains. At this point that version number is unknown since we have
    // not processed the range tombstones yet, so permit any version. The same
    // story applies to timestamp, and a non-nullptr `comp_start_user_key`
    // should have `kMaxTs` here, which similarly permits any timestamp.
    if (comp_start_user_key) {
      lower_bound_buf.Set(*comp_start_user_key, kMaxSequenceNumber,
                          kTypeRangeDeletion);
      lower_bound_guard = lower_bound_buf.Encode();
      lower_bound = &lower_bound_guard;
    } else {
      lower_bound = nullptr;
    }
  } else {
    // For subsequent output tables, only include range tombstones from min
    // key onwards since the previous file was extended to contain range
    // tombstones falling before min key.
    if (range_tombstone_lower_bound_.size() > 0) {
      assert(meta.smallest.size() == 0 ||
             icmp.Compare(range_tombstone_lower_bound_, meta.smallest) < 0);
      lower_bound_guard = range_tombstone_lower_bound_.Encode();
    } else {
      assert(meta.smallest.size() > 0);
      lower_bound_guard = meta.smallest.Encode();
    }
    lower_bound = &lower_bound_guard;
  }

  const size_t ts_sz = ucmp->timestamp_size();
  if (next_table_min_key.empty()) {
    // Last file of the subcompaction.
    if (comp_end_user_key) {
      upper_bound_buf.Set(*comp_end_user_key, kMaxSequenceNumber,
                          kTypeRangeDeletion);
      upper_bound_guard = upper_bound_buf.Encode();
      upper_bound = &upper_bound_guard;
    } else {
      upper_bound = nullptr;
    }
  } else {
    // There is another file coming whose coverage will begin at
    // `next_table_min_key`. The current file needs to extend range tombstone
    // coverage through its own keys (through `meta.largest`) and through user
    // keys preceding `next_table_min_key`'s user key.
    ParsedInternalKey next_table_min_key_parsed;
    ParseInternalKey(next_table_min_key, &next_table_min_key_parsed,
                     false /* log_err_key */)
        .PermitUncheckedError();
    assert(next_table_min_key_parsed.sequence < kMaxSequenceNumber);
    assert(meta.largest.size() == 0 ||
           icmp.Compare(meta.largest.Encode(), next_table_min_key) < 0);
    assert(!lower_bound ||
           icmp.Compare(*lower_bound, next_table_min_key) <= 0);
    if (meta.largest.size() > 0 &&
        ucmp->EqualWithoutTimestamp(meta.largest.user_key(),
                                    next_table_min_key_parsed.user_key)) {
      // Caution: this assumes meta.largest.Encode() lives longer than
      // upper_bound, which is only true if meta.largest is never updated.
      // This just happens to be the case here since meta.largest serves
      // as the upper_bound.
      upper_bound_guard = meta.largest.Encode();
    } else {
      SetMaxSeqAndTs(upper_bound_buf, next_table_min_key_parsed.user_key,
                     ts_sz);
      upper_bound_guard = upper_bound_buf.Encode();
    }
    upper_bound = &upper_bound_guard;
  }
  if (lower_bound && upper_bound &&
      icmp.Compare(*lower_bound, *upper_bound) > 0) {
    assert(meta.smallest.size() == 0 &&
           ucmp->EqualWithoutTimestamp(ExtractUserKey(*lower_bound),
                                       ExtractUserKey(*upper_bound)));
    // This can only happen when lower_bound has the same user key as
    // next_table_min_key and there is no point key in the current
    // compaction output file.
    return Status::OK();
  }
  // The end key of the subcompaction must be bigger than or equal to the
  // upper bound. If the end of the subcompaction is null or the upper bound
  // is null, this file is the last file in the compaction, so there will be
  // no overlap between this file and others.
  assert(comp_end_user_key == nullptr || upper_bound == nullptr ||
         ucmp->CompareWithoutTimestamp(ExtractUserKey(*upper_bound),
                                       *comp_end_user_key) <= 0);
  auto it = range_del_agg_->NewIterator(lower_bound, upper_bound);
  Slice last_tombstone_start_user_key{};
  bool reached_lower_bound = false;
  const ReadOptions read_options(Env::IOActivity::kCompaction);
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    auto tombstone = it->Tombstone();
    auto kv = tombstone.Serialize();
    InternalKey tombstone_end = tombstone.SerializeEndKey();
    // TODO: the underlying iterator should support clamping the bounds.
    // tombstone_end.Encode() is of the form user_key@kMaxSeqno; if it is
    // equal to lower_bound, there is no need to include such a range
    // tombstone.
    if (!reached_lower_bound && lower_bound &&
        icmp.Compare(tombstone_end.Encode(), *lower_bound) <= 0) {
      continue;
    }
    assert(!lower_bound ||
           icmp.Compare(*lower_bound, tombstone_end.Encode()) <= 0);
    reached_lower_bound = true;

    // Garbage collection for range tombstones.
    // If user-defined timestamp is enabled, range tombstones are dropped if
    // they are at bottommost_level, below full_history_ts_low and not visible
    // in any snapshot. trim_ts_ is passed to the constructor for
    // range_del_agg_, and range_del_agg_ internally drops tombstones above
    // trim_ts_.
    bool consider_drop =
        tombstone.seq_ <= earliest_snapshot &&
        (ts_sz == 0 ||
         (!full_history_ts_low.empty() &&
          ucmp->CompareTimestamp(tombstone.ts_, full_history_ts_low) < 0));
    if (consider_drop && bottommost_level) {
      // TODO(andrewkr): tombstones that span multiple output files are
      // counted for each compaction output file, so there is a lot of
      // double counting.
      range_del_out_stats.num_range_del_drop_obsolete++;
      range_del_out_stats.num_record_drop_obsolete++;
      continue;
    }

    assert(lower_bound == nullptr ||
           ucmp->CompareWithoutTimestamp(ExtractUserKey(*lower_bound),
                                         kv.second) < 0);
    InternalKey tombstone_start = kv.first;
    if (lower_bound &&
        ucmp->CompareWithoutTimestamp(tombstone_start.user_key(),
                                      ExtractUserKey(*lower_bound)) < 0) {
      // This just updates the non-timestamp portion of `tombstone_start`'s
      // user key. Ideally there would be a simpler API usage.
      ParsedInternalKey tombstone_start_parsed;
      ParseInternalKey(tombstone_start.Encode(), &tombstone_start_parsed,
                       false /* log_err_key */)
          .PermitUncheckedError();
      // The timestamp should come from where the sequence number comes from,
      // which is the tombstone in this case.
      std::string ts =
          tombstone_start_parsed.GetTimestamp(ucmp->timestamp_size())
              .ToString();
      tombstone_start_parsed.user_key = ExtractUserKey(*lower_bound);
      tombstone_start.SetFrom(tombstone_start_parsed, ts);
    }
    if (upper_bound != nullptr &&
        icmp.Compare(*upper_bound, tombstone_start.Encode()) < 0) {
      break;
    }
    if (lower_bound &&
        icmp.Compare(tombstone_start.Encode(), *lower_bound) < 0) {
      tombstone_start.DecodeFrom(*lower_bound);
    }
    if (upper_bound &&
        icmp.Compare(*upper_bound, tombstone_end.Encode()) < 0) {
      tombstone_end.DecodeFrom(*upper_bound);
    }
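    // At this point the tombstone has been clamped to the file's bounds. For
    // example (added, hypothetical keys): a tombstone [a, z) in a file
    // bounded by lower_bound = c and upper_bound = f is recorded below as
    // [c, f].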
    if (consider_drop && compaction_->KeyRangeNotExistsBeyondOutputLevel(
                             tombstone_start.user_key(),
                             tombstone_end.user_key(), &level_ptrs_)) {
      range_del_out_stats.num_range_del_drop_obsolete++;
      range_del_out_stats.num_record_drop_obsolete++;
      continue;
    }
    // Here we show that *only* range tombstones that overlap with
    // [lower_bound, upper_bound] are added to the current file, sanity
    // checking the invariants that should hold:
    // - [tombstone_start, tombstone_end] overlaps with [lower_bound,
    //   upper_bound]
    // - meta.smallest <= meta.largest
    // Corresponding assertions are made; the proof is broken if any of them
    // fails.
    // TODO: show that *all* range tombstones that overlap with
    //  [lower_bound, upper_bound] are added.
    // TODO: prove an invariant that the boundaries are correctly updated.
    //
    // Note that `tombstone_start` is updated in the if condition above; we
    // use tombstone_start to refer to its initial value, i.e.,
    // it->Tombstone().first, and use tombstone_start* to refer to its value
    // after the update.
    //
    // To show [lower_bound, upper_bound] overlaps with [tombstone_start,
    // tombstone_end]:
    // lower_bound <= upper_bound from the if condition right after all
    // bounds are initialized. We assume each tombstone fragment has
    // start_key.user_key < end_key.user_key, so
    // tombstone_start < tombstone_end by
    // FragmentedTombstoneIterator::Tombstone(). So these two ranges are both
    // non-empty. The flag `reached_lower_bound` and the if logic before it
    // ensure lower_bound <= tombstone_end. tombstone_start is only updated
    // if it has a smaller user_key than lower_bound's user_key, so
    // tombstone_start <= tombstone_start*. The above if condition implies
    // tombstone_start* <= upper_bound. So we have
    // tombstone_start <= upper_bound and lower_bound <= tombstone_end,
    // and the two ranges overlap.
    //
    // To show meta.smallest <= meta.largest:
    // From the implementation of UpdateBoundariesForRange(), it suffices to
    // prove that when it is first called in this function, its parameters
    // satisfy `start <= end`, where start = max(tombstone_start*, lower_bound)
    // and end = min(tombstone_end, upper_bound). From the above proof we have
    // lower_bound <= tombstone_end and lower_bound <= upper_bound, so
    // lower_bound <= min(tombstone_end, upper_bound). We only need to show
    // that tombstone_start* <= min(tombstone_end, upper_bound).
    // Note that tombstone_start*.user_key = max(tombstone_start.user_key,
    // lower_bound.user_key). We assume tombstone_end always has
    // kMaxSequenceNumber and lower_bound.seqno < kMaxSequenceNumber.
    // Since lower_bound <= tombstone_end and lower_bound.seqno <
    // tombstone_end.seqno (in absolute number order, not internal key order),
    // lower_bound.user_key < tombstone_end.user_key.
    // Since lower_bound.user_key < tombstone_end.user_key and
    // tombstone_start.user_key < tombstone_end.user_key, tombstone_start* <
    // tombstone_end. Combined with tombstone_start* <= upper_bound from the
    // above proof, tombstone_start* <= min(tombstone_end, upper_bound), so
    // start <= end holds.

    // Range tombstone is not supported by output validator yet.
    builder_->Add(kv.first.Encode(), kv.second);
    assert(icmp.Compare(tombstone_start, tombstone_end) <= 0);
    meta.UpdateBoundariesForRange(tombstone_start, tombstone_end,
                                  tombstone.seq_, icmp);
    if (!bottommost_level) {
      bool start_user_key_changed =
          last_tombstone_start_user_key.empty() ||
          ucmp->CompareWithoutTimestamp(last_tombstone_start_user_key,
                                        it->start_key()) < 0;
      last_tombstone_start_user_key = it->start_key();
      if (start_user_key_changed) {
        // If tombstone_start >= tombstone_end, then either no key range is
        // covered, or they have the same user key. If they have the same
        // user key, then the internal key range should only be within this
        // level, and no keys from older levels are covered.
        if (ucmp->CompareWithoutTimestamp(tombstone_start.user_key(),
                                          tombstone_end.user_key()) < 0) {
          SizeApproximationOptions approx_opts;
          approx_opts.files_size_error_margin = 0.1;
          auto approximate_covered_size =
              compaction_->input_version()->version_set()->ApproximateSize(
                  approx_opts, read_options, compaction_->input_version(),
                  tombstone_start.Encode(), tombstone_end.Encode(),
                  compaction_->output_level() + 1 /* start_level */,
                  -1 /* end_level */, kCompaction);
          meta.compensated_range_deletion_size += approximate_covered_size;
        }
      }
    }
  }
  return Status::OK();
}

void CompactionOutputs::FillFilesToCutForTtl() {
  if (compaction_->immutable_options()->compaction_style !=
          kCompactionStyleLevel ||
      compaction_->immutable_options()->compaction_pri !=
          kMinOverlappingRatio ||
      compaction_->mutable_cf_options()->ttl == 0 ||
      compaction_->num_input_levels() < 2 || compaction_->bottommost_level()) {
    return;
  }

  // We define a new file as one whose oldest ancestor time is younger than
  // 1/4 of the TTL, and an old one as older than 1/2 of the TTL.
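  // For example (added, hypothetical setting): with ttl = 30 days,
  // old_age_thres below is now - 15 days, so an output-level file whose
  // oldest ancestor time predates that, and whose size exceeds
  // target_file_size_base / 2, becomes a cut point.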
  int64_t temp_current_time;
  auto get_time_status =
      compaction_->immutable_options()->clock->GetCurrentTime(
          &temp_current_time);
  if (!get_time_status.ok()) {
    return;
  }

  auto current_time = static_cast<uint64_t>(temp_current_time);
  if (current_time < compaction_->mutable_cf_options()->ttl) {
    return;
  }

  uint64_t old_age_thres =
      current_time - compaction_->mutable_cf_options()->ttl / 2;
  const std::vector<FileMetaData*>& olevel =
      *(compaction_->inputs(compaction_->num_input_levels() - 1));
  for (FileMetaData* file : olevel) {
    // Worth filtering out by start and end?
    uint64_t oldest_ancester_time = file->TryGetOldestAncesterTime();
    // We pick old files only if they are not too small, to prevent a flood
    // of small files.
    if (oldest_ancester_time < old_age_thres &&
        file->fd.GetFileSize() >
            compaction_->mutable_cf_options()->target_file_size_base / 2) {
      files_to_cut_for_ttl_.push_back(file);
    }
  }
}

CompactionOutputs::CompactionOutputs(const Compaction* compaction,
                                     const bool is_penultimate_level)
    : compaction_(compaction), is_penultimate_level_(is_penultimate_level) {
  partitioner_ = compaction->output_level() == 0
                     ? nullptr
                     : compaction->CreateSstPartitioner();

  if (compaction->output_level() != 0) {
    FillFilesToCutForTtl();
  }

  level_ptrs_ = std::vector<size_t>(compaction_->number_levels(), 0);
}

}  // namespace ROCKSDB_NAMESPACE