// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// The representation of a DBImpl consists of a set of Versions. The
// newest version is called "current". Older versions may be kept
// around to provide a consistent view to live iterators.
//
// Each Version keeps track of a set of table files per level, as well as a
// set of blob files. The entire set of versions is maintained in a
// VersionSet.
//
// Version, VersionSet are thread-compatible, but require external
// synchronization on all accesses.
#pragma once

#include <atomic>
#include <deque>
#include <limits>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

#include "cache/cache_helpers.h"
#include "db/blob/blob_file_meta.h"
#include "db/column_family.h"
#include "db/compaction/compaction.h"
#include "db/compaction/compaction_picker.h"
#include "db/dbformat.h"
#include "db/file_indexer.h"
#include "db/log_reader.h"
#include "db/range_del_aggregator.h"
#include "db/read_callback.h"
#include "db/table_cache.h"
#include "db/version_builder.h"
#include "db/version_edit.h"
#include "db/write_controller.h"
#include "env/file_system_tracer.h"
#include "monitoring/instrumented_mutex.h"
#include "options/db_options.h"
#include "port/port.h"
#include "rocksdb/env.h"
#include "rocksdb/file_checksum.h"
#include "table/get_context.h"
#include "table/multiget_context.h"
#include "trace_replay/block_cache_tracer.h"

namespace ROCKSDB_NAMESPACE {
namespace log {
class Writer;
}

class BlobIndex;
class Compaction;
class LogBuffer;
class LookupKey;
class MemTable;
class Version;
class VersionSet;
class WriteBufferManager;
class MergeContext;
class ColumnFamilySet;
class MergeIteratorBuilder;
class SystemClock;
class ManifestTailer;

// VersionEdit is always supposed to be valid and it is used to point at
// entries in Manifest. Ideally it should not be used as a container to
// carry around a few of its fields as function params because it can cause
// readers to think it's a valid entry from Manifest. To avoid that confusion,
// VersionEditParams is introduced to simply carry around multiple VersionEdit
// params. It need not point to a valid record in Manifest.
using VersionEditParams = VersionEdit;
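//
// Usage sketch (illustrative only, not part of this header): a
// VersionEditParams is just a VersionEdit used as a parameter bag, e.g. to
// hand a couple of fields to a helper without implying a complete MANIFEST
// record. The variable names below are hypothetical.
//
//   VersionEditParams params;
//   params.SetLogNumber(new_log_number);       // only the fields of interest
//   params.SetPrevLogNumber(prev_log_number);  // everything else stays unset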
// Return the smallest index i such that file_level.files[i]->largest >= key.
// Return file_level.num_files if there is no such file.
// REQUIRES: "file_level.files" contains a sorted list of
// non-overlapping files.
extern int FindFile(const InternalKeyComparator& icmp,
                    const LevelFilesBrief& file_level, const Slice& key);
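//
// Usage sketch (illustrative only; `icmp`, `brief` and `target` are assumed to
// be an InternalKeyComparator, a sorted LevelFilesBrief and an internal key
// owned by the caller):
//
//   int idx = FindFile(icmp, brief, target);
//   if (idx < static_cast<int>(brief.num_files)) {
//     // brief.files[idx] is the leftmost file whose largest key >= target.
//   }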
// Returns true iff some file in "files" overlaps the user key range
// [*smallest,*largest].
// smallest==nullptr represents a key smaller than all keys in the DB.
// largest==nullptr represents a key larger than all keys in the DB.
// REQUIRES: If disjoint_sorted_files, file_level.files[]
// contains disjoint ranges in sorted order.
extern bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
                                  bool disjoint_sorted_files,
                                  const LevelFilesBrief& file_level,
                                  const Slice* smallest_user_key,
                                  const Slice* largest_user_key);
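//
// Usage sketch (illustrative only): passing nullptr for a bound makes the
// range unbounded on that side, so the call below asks whether any file in
// `brief` could contain a key >= `start_user_key` (both names hypothetical):
//
//   bool overlap = SomeFileOverlapsRange(icmp, /*disjoint_sorted_files=*/true,
//                                        brief, &start_user_key, nullptr);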
// Generate LevelFilesBrief (a flat array of FdWithKeyRange) from a vector of
// FileMetaData*.
// Copies the smallest_key and largest_key data to sequential memory.
// arena: Arena used to allocate the memory
extern void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level,
                                      const std::vector<FileMetaData*>& files,
                                      Arena* arena);
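//
// Usage sketch (illustrative only; `level_files` is a hypothetical
// std::vector<FileMetaData*> for one level): builds a flat, arena-backed brief
// whose key slices stay valid for as long as `arena` lives:
//
//   Arena arena;
//   LevelFilesBrief brief;
//   DoGenerateLevelFilesBrief(&brief, level_files, &arena);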
// Information about the storage associated with each Version, including the
// number of levels of the LSM tree, file information at each level, files
// marked for compaction, blob files, etc.
class VersionStorageInfo {
 public:
  VersionStorageInfo(const InternalKeyComparator* internal_comparator,
                     const Comparator* user_comparator, int num_levels,
                     CompactionStyle compaction_style,
                     VersionStorageInfo* src_vstorage,
                     bool _force_consistency_checks);
  // No copying allowed
  VersionStorageInfo(const VersionStorageInfo&) = delete;
  void operator=(const VersionStorageInfo&) = delete;
  ~VersionStorageInfo();
  void Reserve(int level, size_t size) { files_[level].reserve(size); }

  void AddFile(int level, FileMetaData* f);

  void AddBlobFile(std::shared_ptr<BlobFileMetaData> blob_file_meta);

  void SetFinalized();
  // Update num_non_empty_levels_.
  void UpdateNumNonEmptyLevels();

  void GenerateFileIndexer() {
    file_indexer_.UpdateIndex(&arena_, num_non_empty_levels_, files_);
  }

  // Update the accumulated stats from a file-meta.
  void UpdateAccumulatedStats(FileMetaData* file_meta);

  // Decrease the current stats from a to-be-deleted file-meta.
  void RemoveCurrentStats(FileMetaData* file_meta);

  void ComputeCompensatedSizes();
  // Updates internal structures that keep track of compaction scores.
  // We use compaction scores to figure out which compaction to do next.
  // REQUIRES: db_mutex held!!
  // TODO find a better way to pass compaction_options_fifo.
  void ComputeCompactionScore(const ImmutableOptions& immutable_options,
                              const MutableCFOptions& mutable_cf_options);

  // Estimate est_comp_needed_bytes_
  void EstimateCompactionBytesNeeded(
      const MutableCFOptions& mutable_cf_options);
  // This computes files_marked_for_compaction_ and is called by
  // ComputeCompactionScore()
  void ComputeFilesMarkedForCompaction();

  // This computes ttl_expired_files_ and is called by
  // ComputeCompactionScore()
  void ComputeExpiredTtlFiles(const ImmutableOptions& ioptions,
                              const uint64_t ttl);

  // This computes files_marked_for_periodic_compaction_ and is called by
  // ComputeCompactionScore()
  void ComputeFilesMarkedForPeriodicCompaction(
      const ImmutableOptions& ioptions,
      const uint64_t periodic_compaction_seconds);

  // This computes bottommost_files_marked_for_compaction_ and is called by
  // ComputeCompactionScore() or UpdateOldestSnapshot().
  //
  // Among bottommost files (assumes they've already been computed), marks the
  // ones that have keys that would be eliminated if recompacted, according to
  // the seqnum of the oldest existing snapshot. Must be called every time
  // the oldest snapshot changes as that is when bottom-level files can become
  // eligible for compaction.
  //
  // REQUIRES: DB mutex held
  void ComputeBottommostFilesMarkedForCompaction();
  // Generate level_files_brief_ from files_
  void GenerateLevelFilesBrief();

  // Sort all files for this version based on their file size and
  // record results in files_by_compaction_pri_. The largest files are listed
  // first.
  void UpdateFilesByCompactionPri(CompactionPri compaction_pri);

  void GenerateLevel0NonOverlapping();
  bool level0_non_overlapping() const {
    return level0_non_overlapping_;
  }
  // Check whether each file in this version is bottommost (i.e., nothing in
  // its key-range could possibly exist in an older file/level).
  // REQUIRES: This version has not been saved
  void GenerateBottommostFiles();

  // Updates the oldest snapshot and related internal state, like the
  // bottommost files marked for compaction.
  // REQUIRES: DB mutex held
  void UpdateOldestSnapshot(SequenceNumber oldest_snapshot_seqnum);
  int MaxInputLevel() const;
  int MaxOutputLevel(bool allow_ingest_behind) const;

  // Return the level number that has the idx'th highest score
  int CompactionScoreLevel(int idx) const { return compaction_level_[idx]; }

  // Return the idx'th highest score
  double CompactionScore(int idx) const { return compaction_score_[idx]; }
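  //
  // Usage sketch (illustrative only; assumes the DB mutex is held and
  // ComputeCompactionScore() has run): scores are kept in decreasing order,
  // so index 0 describes the most urgent level (`vstorage` is a hypothetical
  // VersionStorageInfo*):
  //
  //   if (vstorage->CompactionScore(0) >= 1) {
  //     int level = vstorage->CompactionScoreLevel(0);
  //     // `level` is the strongest candidate for the next automatic
  //     // compaction.
  //   }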
  void GetOverlappingInputs(
      int level, const InternalKey* begin,  // nullptr means before all keys
      const InternalKey* end,               // nullptr means after all keys
      std::vector<FileMetaData*>* inputs,
      int hint_index = -1,        // index of overlap file
      int* file_index = nullptr,  // return index of overlap file
      bool expand_range = true,   // if set, returns files which overlap the
                                  // range and overlap each other. If false,
                                  // then just files intersecting the range
      InternalKey** next_smallest = nullptr)  // if non-null, returns the
      const;  // smallest key of next file not included
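  //
  // Usage sketch (illustrative only; `begin_key` and `end_key` are
  // hypothetical InternalKeys owned by the caller): collect the level-2 files
  // whose ranges intersect [begin_key, end_key]:
  //
  //   std::vector<FileMetaData*> inputs;
  //   vstorage->GetOverlappingInputs(/*level=*/2, &begin_key, &end_key,
  //                                  &inputs);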
  void GetCleanInputsWithinInterval(
      int level, const InternalKey* begin,  // nullptr means before all keys
      const InternalKey* end,               // nullptr means after all keys
      std::vector<FileMetaData*>* inputs,
      int hint_index = -1,        // index of overlap file
      int* file_index = nullptr)  // return index of overlap file
      const;
  void GetOverlappingInputsRangeBinarySearch(
      int level,                 // level > 0
      const InternalKey* begin,  // nullptr means before all keys
      const InternalKey* end,    // nullptr means after all keys
      std::vector<FileMetaData*>* inputs,
      int hint_index,                // index of overlap file
      int* file_index,               // return index of overlap file
      bool within_interval = false,  // if set, force the inputs within interval
      InternalKey** next_smallest = nullptr)  // if non-null, returns the
      const;  // smallest key of next file not included
  // Returns true iff some file in the specified level overlaps
  // some part of [*smallest_user_key,*largest_user_key].
  // smallest_user_key==NULL represents a key smaller than all keys in the DB.
  // largest_user_key==NULL represents a key larger than all keys in the DB.
  bool OverlapInLevel(int level, const Slice* smallest_user_key,
                      const Slice* largest_user_key);
|
|
|
|
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help of gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 03:14:32 +00:00
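The "stacking" idea described above can be sketched as follows; this is a simplified illustration with hypothetical types and a string-append stand-in for the user's MergeOperator, not the actual RocksDB merge path:
```
#include <string>
#include <vector>

// Simplified sketch of the operand "stacking" described above; the types and
// the string-append merge are illustrative only, not RocksDB internals.
struct HistoryEntry {
  enum Kind { kPut, kDelete, kMerge } kind;
  std::string value;  // merge operand for kMerge, base value for kPut
};

// Scan the key's history newest-to-oldest, stacking merge operands until a
// Put, Delete, or end-of-history is found, then apply the stacked operands
// in order on top of the base/sentinel value.
std::string ResolveMerge(const std::vector<HistoryEntry>& newest_first) {
  std::vector<std::string> operands;
  std::string base;  // empty base stands in for Delete / end-of-history
  for (const auto& entry : newest_first) {
    if (entry.kind == HistoryEntry::kMerge) {
      operands.push_back(entry.value);
      continue;
    }
    if (entry.kind == HistoryEntry::kPut) {
      base = entry.value;
    }
    break;  // Put or Delete terminates the scan
  }
  for (auto it = operands.rbegin(); it != operands.rend(); ++it) {
    base += *it;  // stand-in for a user-defined MergeOperator (append here)
  }
  return base;
}
```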
|
|
|
// Returns true iff the first or last file in inputs contains
|
|
|
|
// an overlapping user key to the file "just outside" of it (i.e.
|
|
|
|
// just after the last file, or just before the first file)
|
|
|
|
// REQUIRES: "*inputs" is a sorted list of non-overlapping files
|
|
|
|
bool HasOverlappingUserKey(const std::vector<FileMetaData*>* inputs,
|
|
|
|
int level);
|
|
|
|
|
2014-11-04 01:45:55 +00:00
|
|
|
int num_levels() const { return num_levels_; }
|
2014-01-16 00:15:43 +00:00
|
|
|
|
2014-10-28 17:03:13 +00:00
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
|
2014-11-04 01:45:55 +00:00
|
|
|
int num_non_empty_levels() const {
|
2014-10-28 17:03:13 +00:00
|
|
|
assert(finalized_);
|
|
|
|
return num_non_empty_levels_;
|
|
|
|
}
|
|
|
|
|
2015-03-30 21:04:21 +00:00
|
|
|
// REQUIRES: This version has been finalized.
|
|
|
|
// (CalculateBaseBytes() is called)
|
|
|
|
// This may or may not return the number of level files. It is kept to preserve
|
|
|
|
// backward-compatible behavior in universal compaction.
|
|
|
|
int l0_delay_trigger_count() const { return l0_delay_trigger_count_; }
|
|
|
|
|
|
|
|
void set_l0_delay_trigger_count(int v) { l0_delay_trigger_count_ = v; }
|
|
|
|
|
2014-10-28 16:59:56 +00:00
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
|
|
|
|
int NumLevelFiles(int level) const {
|
|
|
|
assert(finalized_);
|
2014-11-11 21:47:22 +00:00
|
|
|
return static_cast<int>(files_[level].size());
|
2014-10-28 16:59:56 +00:00
|
|
|
}
|
2011-06-22 02:36:45 +00:00
|
|
|
|
2014-01-16 00:18:04 +00:00
|
|
|
// Return the combined file size of all files at the specified level.
|
2014-10-01 23:19:16 +00:00
|
|
|
uint64_t NumLevelBytes(int level) const;
|
2014-01-16 00:18:04 +00:00
|
|
|
|
2014-10-27 22:49:46 +00:00
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
|
|
|
|
const std::vector<FileMetaData*>& LevelFiles(int level) const {
|
|
|
|
return files_[level];
|
|
|
|
}
|
|
|
|
|
2020-05-28 17:00:19 +00:00
|
|
|
class FileLocation {
|
|
|
|
public:
|
|
|
|
FileLocation() = default;
|
|
|
|
FileLocation(int level, size_t position)
|
|
|
|
: level_(level), position_(position) {}
|
|
|
|
|
|
|
|
int GetLevel() const { return level_; }
|
|
|
|
size_t GetPosition() const { return position_; }
|
|
|
|
|
|
|
|
bool IsValid() const { return level_ >= 0; }
|
|
|
|
|
|
|
|
bool operator==(const FileLocation& rhs) const {
|
|
|
|
return level_ == rhs.level_ && position_ == rhs.position_;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool operator!=(const FileLocation& rhs) const { return !(*this == rhs); }
|
|
|
|
|
|
|
|
static FileLocation Invalid() { return FileLocation(); }
|
|
|
|
|
|
|
|
private:
|
|
|
|
int level_ = -1;
|
|
|
|
size_t position_ = 0;
|
|
|
|
};
|
|
|
|
|
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
|
|
|
|
FileLocation GetFileLocation(uint64_t file_number) const {
|
|
|
|
const auto it = file_locations_.find(file_number);
|
|
|
|
|
|
|
|
if (it == file_locations_.end()) {
|
|
|
|
return FileLocation::Invalid();
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(it->second.GetLevel() < num_levels_);
|
|
|
|
assert(it->second.GetPosition() < files_[it->second.GetLevel()].size());
|
|
|
|
assert(files_[it->second.GetLevel()][it->second.GetPosition()]);
|
|
|
|
assert(files_[it->second.GetLevel()][it->second.GetPosition()]
|
|
|
|
->fd.GetNumber() == file_number);
|
|
|
|
|
|
|
|
return it->second;
|
|
|
|
}
|
|
|
|
|
2020-06-08 22:59:25 +00:00
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
|
|
|
|
FileMetaData* GetFileMetaDataByNumber(uint64_t file_number) const {
|
|
|
|
auto location = GetFileLocation(file_number);
|
|
|
|
|
|
|
|
if (!location.IsValid()) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
return files_[location.GetLevel()][location.GetPosition()];
|
|
|
|
}
|
|
|
|
|
Add blob files to VersionStorageInfo/VersionBuilder (#6597)
Summary:
The patch adds a couple of classes to represent metadata about
blob files: `SharedBlobFileMetaData` contains the information elements
that are immutable (once the blob file is closed), e.g. blob file number,
total number and size of blob files, checksum method/value, while
`BlobFileMetaData` contains attributes that can vary across versions like
the amount of garbage in the file. There is a single `SharedBlobFileMetaData`
for each blob file, which is jointly owned by the `BlobFileMetaData` objects
that point to it; `BlobFileMetaData` objects, in turn, are owned by `Version`s
and can also be shared if the (immutable _and_ mutable) state of the blob file
is the same in two versions.
In addition, the patch adds the blob file metadata to `VersionStorageInfo`, and extends
`VersionBuilder` so that it can apply blob file related `VersionEdit`s (i.e. those
containing `BlobFileAddition`s and/or `BlobFileGarbage`), and save blob file metadata
to a new `VersionStorageInfo`. Consistency checks are also extended to ensure
that table files point to blob files that are part of the `Version`, and that all blob files
that are part of any given `Version` have at least some _non_-garbage data in them.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6597
Test Plan: `make check`
Reviewed By: riversand963
Differential Revision: D20656803
Pulled By: ltamasi
fbshipit-source-id: f1f74d135045b3b42d0146f03ee576ef0a4bfd80
2020-03-27 01:48:55 +00:00
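As a rough mental model of the ownership structure described above, here is a hypothetical miniature; the names below are illustrative, not the actual RocksDB classes:
```
#include <cstdint>
#include <memory>

// Hypothetical miniature of the described ownership: one immutable shared
// record per blob file, jointly owned by the per-version mutable views.
struct SharedBlobMeta {
  uint64_t blob_file_number;
  uint64_t total_blob_bytes;  // fixed once the blob file is closed
};

struct VersionBlobMeta {
  std::shared_ptr<SharedBlobMeta> shared;  // shared across versions
  uint64_t garbage_blob_bytes;             // may differ between versions
};
```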
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
|
|
|
|
using BlobFiles = std::map<uint64_t, std::shared_ptr<BlobFileMetaData>>;
|
|
|
|
const BlobFiles& GetBlobFiles() const { return blob_files_; }
|
|
|
|
|
2021-03-04 08:42:11 +00:00
|
|
|
uint64_t GetTotalBlobFileSize() const {
|
|
|
|
uint64_t total_blob_bytes = 0;
|
|
|
|
|
|
|
|
for (const auto& pair : blob_files_) {
|
|
|
|
const auto& meta = pair.second;
|
|
|
|
assert(meta);
|
|
|
|
|
2021-09-13 17:46:07 +00:00
|
|
|
total_blob_bytes += meta->GetBlobFileSize();
|
2021-03-04 08:42:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return total_blob_bytes;
|
|
|
|
}
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
const ROCKSDB_NAMESPACE::LevelFilesBrief& LevelFilesBrief(int level) const {
|
2014-11-12 22:19:33 +00:00
|
|
|
assert(level < static_cast<int>(level_files_brief_.size()));
|
2014-10-27 22:49:46 +00:00
|
|
|
return level_files_brief_[level];
|
|
|
|
}
|
|
|
|
|
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
|
2015-09-22 00:16:31 +00:00
|
|
|
const std::vector<int>& FilesByCompactionPri(int level) const {
|
2014-10-27 22:49:46 +00:00
|
|
|
assert(finalized_);
|
2015-09-22 00:16:31 +00:00
|
|
|
return files_by_compaction_pri_[level];
|
2014-10-27 22:49:46 +00:00
|
|
|
}
|
|
|
|
|
Add experimental API MarkForCompaction()
Summary:
Some Mongo+Rocks datasets in Parse's environment are not doing compactions very frequently. During the quiet period (with no IO), we'd like to schedule compactions so that our reads become faster. Also, aggressively compacting during quiet periods helps when write bursts happen. In addition, we also want to compact files that contain deleted key ranges (like old oplog keys).
All of this is currently not possible with CompactRange() because it's single-threaded and blocks all other compactions from happening. Running CompactRange() also risks blocking writes, because we generate too many Level 0 files before the compaction is over. Stopping writes is very dangerous because writers hold transaction locks. We tried running manual compaction once on Mongo+Rocks and everything fell apart.
MarkForCompaction() solves all of those problems. It is a very light-weight manual compaction. It is lower priority than automatic compactions, which means it shouldn't interfere with the background process keeping the LSM tree clean. However, if no automatic compactions need to be run (or we have extra background threads available), we will start compacting files that are marked for compaction.
Test Plan: added a new unit test
Reviewers: yhchiang, rven, MarkCallaghan, sdong
Reviewed By: sdong
Subscribers: yoshinorim, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D37083
2015-04-17 23:44:45 +00:00
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
|
|
|
|
// REQUIRES: DB mutex held during access
|
|
|
|
const autovector<std::pair<int, FileMetaData*>>& FilesMarkedForCompaction()
|
|
|
|
const {
|
|
|
|
assert(finalized_);
|
|
|
|
return files_marked_for_compaction_;
|
|
|
|
}
|
|
|
|
|
2018-04-03 04:57:28 +00:00
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
|
|
|
|
// REQUIRES: DB mutex held during access
|
|
|
|
const autovector<std::pair<int, FileMetaData*>>& ExpiredTtlFiles() const {
|
|
|
|
assert(finalized_);
|
|
|
|
return expired_ttl_files_;
|
|
|
|
}
|
|
|
|
|
Periodic Compactions (#5166)
Summary:
Introducing Periodic Compactions.
This feature allows all the files in a CF to be periodically compacted. It can help proactively catch any corruption that creeps into the DB, since every file is periodically re-compacted, and it also helps clean up data older than a certain threshold.
- Introduced a new option `periodic_compaction_time` to control how long a file can live without being compacted in a CF.
- This works across all levels.
- The files are put in the same level after going through the compaction. (Related files in the same level are picked up as `ExpandInputstoCleanCut` is used).
- Compaction filters, if any, are invoked as usual.
- A new table property, `file_creation_time`, is introduced to implement this feature. This property is set to the time at which the SST file was created (and that time is given by the underlying Env/OS).
This feature can be enabled on its own, or in conjunction with `ttl`. It is possible to set a different time threshold for the bottom level when used in conjunction with ttl. Since `ttl` works only on levels 0 through the second-to-last level, you could set `ttl` to, say, 1 day, and `periodic_compaction_time` to, say, 7 days. Since `ttl < periodic_compaction_time`, files in all levels except the bottom one keep getting picked up based on ttl, and almost never based on periodic_compaction_time. The files in the bottom level get picked up for compaction based on `periodic_compaction_time`.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5166
Differential Revision: D14884441
Pulled By: sagar0
fbshipit-source-id: 408426cbacb409c06386a98632dcf90bfa1bda47
2019-04-11 02:24:25 +00:00
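A minimal configuration sketch of the ttl + periodic compaction combination described above; note the commit calls the option `periodic_compaction_time`, while the field names below follow later RocksDB releases, so treat them as an assumption to verify against your version:
```
#include <rocksdb/options.h>

// Sketch only: 1-day ttl for the upper levels, 7-day periodic compaction
// for every level (field names assumed from later RocksDB releases).
rocksdb::Options MakePeriodicCompactionOptions() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.ttl = 24 * 60 * 60;                              // 1 day
  options.periodic_compaction_seconds = 7 * 24 * 60 * 60;  // 7 days
  return options;
}
```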
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
|
|
|
|
// REQUIRES: DB mutex held during access
|
|
|
|
const autovector<std::pair<int, FileMetaData*>>&
|
|
|
|
FilesMarkedForPeriodicCompaction() const {
|
|
|
|
assert(finalized_);
|
|
|
|
return files_marked_for_periodic_compaction_;
|
|
|
|
}
|
|
|
|
|
2019-10-31 18:16:33 +00:00
|
|
|
void TEST_AddFileMarkedForPeriodicCompaction(int level, FileMetaData* f) {
|
|
|
|
files_marked_for_periodic_compaction_.emplace_back(level, f);
|
|
|
|
}
|
|
|
|
|
2017-10-25 23:24:29 +00:00
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
|
|
|
|
// REQUIRES: DB mutex held during access
|
|
|
|
const autovector<std::pair<int, FileMetaData*>>&
|
|
|
|
BottommostFilesMarkedForCompaction() const {
|
|
|
|
assert(finalized_);
|
|
|
|
return bottommost_files_marked_for_compaction_;
|
|
|
|
}
|
|
|
|
|
options.level_compaction_dynamic_level_bytes to allow RocksDB to pick size bases of levels dynamically.
Summary:
With a fixed max_bytes_for_level_base, the ratio between the sizes of the largest level and the second-largest level can range from 0 to the multiplier. This makes the LSM tree frequently irregular and unpredictable. It can also cause poor space amplification in some cases.
In this improvement (proposed by Igor Kabiljo), we introduce a parameter option.level_compaction_use_dynamic_max_bytes. When it is turned on, RocksDB is free to pick a level base in the range (options.max_bytes_for_level_base/options.max_bytes_for_level_multiplier, options.max_bytes_for_level_base] so that real level ratios stay close to options.max_bytes_for_level_multiplier.
Test Plan: New unit tests and pass tests suites including valgrind.
Reviewers: MarkCallaghan, rven, yhchiang, igor, ikabiljo
Reviewed By: ikabiljo
Subscribers: yoshinorim, ikabiljo, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D31437
2015-02-05 19:44:17 +00:00
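The gist of the dynamic sizing can be sketched as follows; this is an illustrative simplification, not `CalculateBaseBytes()` itself (the real logic also picks `base_level_` and enforces a minimum base size):
```
#include <cstdint>
#include <vector>

// Illustrative sketch: anchor the last level's target at its actual size and
// derive each smaller level by dividing by the multiplier, so the real
// level-to-level ratios stay close to max_bytes_for_level_multiplier.
std::vector<uint64_t> DynamicLevelTargets(uint64_t bottom_level_bytes,
                                          int num_levels, double multiplier) {
  std::vector<uint64_t> targets(static_cast<size_t>(num_levels), 0);
  double target = static_cast<double>(bottom_level_bytes);
  for (int level = num_levels - 1; level >= 1; --level) {
    targets[static_cast<size_t>(level)] = static_cast<uint64_t>(target);
    target /= multiplier;  // each higher level is `multiplier` times smaller
  }
  return targets;
}
```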
|
|
|
int base_level() const { return base_level_; }
|
2018-10-22 17:18:51 +00:00
|
|
|
double level_multiplier() const { return level_multiplier_; }
|
options.level_compaction_dynamic_level_bytes to allow RocksDB to pick size bases of levels dynamically.
Summary:
With a fixed max_bytes_for_level_base, the ratio between the sizes of the largest level and the second-largest level can range from 0 to the multiplier. This makes the LSM tree frequently irregular and unpredictable. It can also cause poor space amplification in some cases.
In this improvement (proposed by Igor Kabiljo), we introduce a parameter option.level_compaction_use_dynamic_max_bytes. When it is turned on, RocksDB is free to pick a level base in the range (options.max_bytes_for_level_base/options.max_bytes_for_level_multiplier, options.max_bytes_for_level_base] so that real level ratios stay close to options.max_bytes_for_level_multiplier.
Test Plan: New unit tests and pass tests suites including valgrind.
Reviewers: MarkCallaghan, rven, yhchiang, igor, ikabiljo
Reviewed By: ikabiljo
Subscribers: yoshinorim, ikabiljo, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D31437
2015-02-05 19:44:17 +00:00
|
|
|
|
2014-10-27 22:49:46 +00:00
|
|
|
// REQUIRES: lock is held
|
2015-09-22 00:16:31 +00:00
|
|
|
// Set the index that is used to offset into files_by_compaction_pri_ to find
|
2014-10-27 22:49:46 +00:00
|
|
|
// the next compaction candidate file.
|
|
|
|
void SetNextCompactionIndex(int level, int index) {
|
|
|
|
next_file_to_compact_by_size_[level] = index;
|
|
|
|
}
|
|
|
|
|
|
|
|
// REQUIRES: lock is held
|
|
|
|
int NextCompactionIndex(int level) const {
|
|
|
|
return next_file_to_compact_by_size_[level];
|
|
|
|
}
|
|
|
|
|
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
|
2014-11-04 01:45:55 +00:00
|
|
|
const FileIndexer& file_indexer() const {
|
2014-10-27 22:49:46 +00:00
|
|
|
assert(finalized_);
|
|
|
|
return file_indexer_;
|
|
|
|
}
|
|
|
|
|
2015-09-22 00:16:31 +00:00
|
|
|
// Only the first few entries of files_by_compaction_pri_ are sorted.
|
2014-10-27 22:49:46 +00:00
|
|
|
// There is no need to sort all the files because it is likely
|
|
|
|
// that on a running system, we need to look at only the first
|
|
|
|
// few largest files because a new version is created every few
|
|
|
|
// seconds/minutes (because of concurrent compactions).
|
|
|
|
static const size_t kNumberFilesToSort = 50;
|
|
|
|
|
2014-01-16 00:18:04 +00:00
|
|
|
// Return a human-readable short (single-line) summary of the number
|
|
|
|
// of files per level. Uses *scratch as backing store.
|
|
|
|
struct LevelSummaryStorage {
|
2014-09-23 20:43:03 +00:00
|
|
|
char buffer[1000];
|
2014-01-16 00:18:04 +00:00
|
|
|
};
|
|
|
|
struct FileSummaryStorage {
|
2014-09-23 20:43:03 +00:00
|
|
|
char buffer[3000];
|
2014-01-16 00:18:04 +00:00
|
|
|
};
|
|
|
|
const char* LevelSummary(LevelSummaryStorage* scratch) const;
|
|
|
|
// Return a human-readable short (single-line) summary of files
|
|
|
|
// in a specified level. Uses *scratch as backing store.
|
|
|
|
const char* LevelFileSummary(FileSummaryStorage* scratch, int level) const;
|
|
|
|
|
|
|
|
// Return the maximum overlapping data (in bytes) at next level for any
|
|
|
|
// file at a level >= 1.
|
2021-07-08 17:07:37 +00:00
|
|
|
uint64_t MaxNextLevelOverlappingBytes();
|
2014-01-16 00:18:04 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Return a human readable string that describes this version's contents.
|
2012-12-16 02:28:36 +00:00
|
|
|
std::string DebugString(bool hex = false) const;
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2014-06-24 22:37:06 +00:00
|
|
|
uint64_t GetAverageValueSize() const {
|
2014-10-17 21:58:30 +00:00
|
|
|
if (accumulated_num_non_deletions_ == 0) {
|
2014-06-24 22:37:06 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2014-10-17 21:58:30 +00:00
|
|
|
assert(accumulated_raw_key_size_ + accumulated_raw_value_size_ > 0);
|
|
|
|
assert(accumulated_file_size_ > 0);
|
2014-10-27 22:49:46 +00:00
|
|
|
return accumulated_raw_value_size_ / accumulated_num_non_deletions_ *
|
2014-10-17 21:58:30 +00:00
|
|
|
accumulated_file_size_ /
|
|
|
|
(accumulated_raw_key_size_ + accumulated_raw_value_size_);
|
2014-06-24 22:37:06 +00:00
|
|
|
}
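As a worked example of the estimate above (with made-up numbers), the raw average value size is scaled by the on-disk-to-raw size ratio, so compression is reflected in the result:
```
#include <cstdint>

// Same operation order as GetAverageValueSize(), with illustrative numbers:
// (1000 / 10) * 600 / (200 + 1000) = 100 * 600 / 1200 = 50 bytes per value.
uint64_t ExampleAverageValueSize() {
  const uint64_t raw_key_bytes = 200;     // accumulated_raw_key_size_
  const uint64_t raw_value_bytes = 1000;  // accumulated_raw_value_size_
  const uint64_t non_deletions = 10;      // accumulated_num_non_deletions_
  const uint64_t file_bytes = 600;        // accumulated_file_size_ (on disk)
  return raw_value_bytes / non_deletions * file_bytes /
         (raw_key_bytes + raw_value_bytes);
}
```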
|
|
|
|
|
2014-10-31 15:48:19 +00:00
|
|
|
uint64_t GetEstimatedActiveKeys() const;
|
2014-10-27 22:49:46 +00:00
|
|
|
|
2016-04-21 01:46:54 +00:00
|
|
|
double GetEstimatedCompressionRatioAtLevel(int level) const;
|
|
|
|
|
2015-09-22 00:16:31 +00:00
|
|
|
// re-initializes the index that is used to offset into
|
|
|
|
// files_by_compaction_pri_
|
2014-10-27 22:49:46 +00:00
|
|
|
// to find the next compaction candidate file.
|
|
|
|
void ResetNextCompactionIndex(int level) {
|
|
|
|
next_file_to_compact_by_size_[level] = 0;
|
|
|
|
}
|
|
|
|
|
2014-10-31 15:48:19 +00:00
|
|
|
const InternalKeyComparator* InternalComparator() {
|
|
|
|
return internal_comparator_;
|
|
|
|
}
|
|
|
|
|
options.level_compaction_dynamic_level_bytes to allow RocksDB to pick size bases of levels dynamically.
Summary:
With a fixed max_bytes_for_level_base, the ratio between the sizes of the largest level and the second-largest level can range from 0 to the multiplier. This makes the LSM tree frequently irregular and unpredictable. It can also cause poor space amplification in some cases.
In this improvement (proposed by Igor Kabiljo), we introduce a parameter option.level_compaction_use_dynamic_max_bytes. When it is turned on, RocksDB is free to pick a level base in the range (options.max_bytes_for_level_base/options.max_bytes_for_level_multiplier, options.max_bytes_for_level_base] so that real level ratios stay close to options.max_bytes_for_level_multiplier.
Test Plan: New unit tests and pass tests suites including valgrind.
Reviewers: MarkCallaghan, rven, yhchiang, igor, ikabiljo
Reviewed By: ikabiljo
Subscribers: yoshinorim, ikabiljo, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D31437
2015-02-05 19:44:17 +00:00
|
|
|
// Returns maximum total bytes of data on a given level.
|
|
|
|
uint64_t MaxBytesForLevel(int level) const;
|
|
|
|
|
|
|
|
// Must be called after any change to MutableCFOptions.
|
2021-05-05 20:59:21 +00:00
|
|
|
void CalculateBaseBytes(const ImmutableOptions& ioptions,
|
options.level_compaction_dynamic_level_bytes to allow RocksDB to pick size bases of levels dynamically.
Summary:
With a fixed max_bytes_for_level_base, the ratio between the sizes of the largest level and the second-largest level can range from 0 to the multiplier. This makes the LSM tree frequently irregular and unpredictable. It can also cause poor space amplification in some cases.
In this improvement (proposed by Igor Kabiljo), we introduce a parameter option.level_compaction_use_dynamic_max_bytes. When it is turned on, RocksDB is free to pick a level base in the range (options.max_bytes_for_level_base/options.max_bytes_for_level_multiplier, options.max_bytes_for_level_base] so that real level ratios stay close to options.max_bytes_for_level_multiplier.
Test Plan: New unit tests and pass tests suites including valgrind.
Reviewers: MarkCallaghan, rven, yhchiang, igor, ikabiljo
Reviewed By: ikabiljo
Subscribers: yoshinorim, ikabiljo, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D31437
2015-02-05 19:44:17 +00:00
|
|
|
const MutableCFOptions& options);
|
|
|
|
|
2015-07-22 04:33:20 +00:00
|
|
|
// Returns an estimate of the amount of live data in bytes.
|
|
|
|
uint64_t EstimateLiveDataSize() const;
|
|
|
|
|
2015-08-14 04:42:20 +00:00
|
|
|
uint64_t estimated_compaction_needed_bytes() const {
|
|
|
|
return estimated_compaction_needed_bytes_;
|
|
|
|
}
|
|
|
|
|
When slowdown is triggered, reduce the write rate
Summary: It's usually hard for users to set a value of options.delayed_write_rate. With this diff, after the slowdown condition triggers, we greedily reduce the write rate if estimated pending compaction bytes increase, and increase the write rate if estimated pending compaction bytes drop.
Test Plan:
Add a unit test
Test with db_bench setting:
TEST_TMPDIR=/dev/shm/ ./db_bench --benchmarks=fillrandom -num=10000000 --soft_pending_compaction_bytes_limit=1000000000 --hard_pending_compaction_bytes_limit=3000000000 --delayed_write_rate=100000000
and make sure that without the commit a write stop happens, but with the commit it does not.
Reviewers: igor, anthony, rven, yhchiang, kradhakrishnan, IslamAbdelRahman
Reviewed By: IslamAbdelRahman
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D52131
2015-12-18 01:07:44 +00:00
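A simplified sketch of the greedy adjustment described above; the halving/25% factors and the floor are made up for illustration, and the real logic lives in RocksDB's write controller rather than a free function like this:
```
#include <algorithm>
#include <cstdint>

// Sketch only: slow writers down while estimated pending compaction bytes
// keep growing, and speed them back up once the debt starts shrinking.
uint64_t AdjustDelayedWriteRate(uint64_t current_rate,
                                uint64_t prev_pending_bytes,
                                uint64_t curr_pending_bytes) {
  constexpr uint64_t kMinRate = 1024;  // arbitrary floor for this sketch
  if (curr_pending_bytes > prev_pending_bytes) {
    return std::max(kMinRate, current_rate / 2);
  }
  return current_rate + current_rate / 4;
}
```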
|
|
|
void TEST_set_estimated_compaction_needed_bytes(uint64_t v) {
|
|
|
|
estimated_compaction_needed_bytes_ = v;
|
|
|
|
}
|
|
|
|
|
2016-10-08 00:21:45 +00:00
|
|
|
bool force_consistency_checks() const { return force_consistency_checks_; }
|
|
|
|
|
2019-03-26 02:14:04 +00:00
|
|
|
SequenceNumber bottommost_files_mark_threshold() const {
|
|
|
|
return bottommost_files_mark_threshold_;
|
|
|
|
}
|
|
|
|
|
2017-10-25 23:24:29 +00:00
|
|
|
// Returns whether any key in [`smallest_user_key`, `largest_user_key`] could appear in
|
|
|
|
// an older L0 file than `last_l0_idx` or in a greater level than `last_level`
|
|
|
|
//
|
|
|
|
// @param last_level Level after which we check for overlap
|
|
|
|
// @param last_l0_idx If `last_level == 0`, index of L0 file after which we
|
|
|
|
// check for overlap; otherwise, must be -1
|
2018-10-23 15:12:54 +00:00
|
|
|
bool RangeMightExistAfterSortedRun(const Slice& smallest_user_key,
|
|
|
|
const Slice& largest_user_key,
|
|
|
|
int last_level, int last_l0_idx);
|
2017-10-25 23:24:29 +00:00
|
|
|
|
2014-10-27 22:49:46 +00:00
|
|
|
private:
|
|
|
|
const InternalKeyComparator* internal_comparator_;
|
|
|
|
const Comparator* user_comparator_;
|
|
|
|
int num_levels_; // Number of levels
|
|
|
|
int num_non_empty_levels_; // Number of levels. Any level larger than it
|
|
|
|
// is guaranteed to be empty.
|
options.level_compaction_dynamic_level_bytes to allow RocksDB to pick size bases of levels dynamically.
Summary:
With a fixed max_bytes_for_level_base, the ratio between the sizes of the largest level and the second-largest level can range from 0 to the multiplier. This makes the LSM tree frequently irregular and unpredictable. It can also cause poor space amplification in some cases.
In this improvement (proposed by Igor Kabiljo), we introduce a parameter option.level_compaction_use_dynamic_max_bytes. When it is turned on, RocksDB is free to pick a level base in the range (options.max_bytes_for_level_base/options.max_bytes_for_level_multiplier, options.max_bytes_for_level_base] so that real level ratios stay close to options.max_bytes_for_level_multiplier.
Test Plan: New unit tests and pass tests suites including valgrind.
Reviewers: MarkCallaghan, rven, yhchiang, igor, ikabiljo
Reviewed By: ikabiljo
Subscribers: yoshinorim, ikabiljo, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D31437
2015-02-05 19:44:17 +00:00
|
|
|
// Per-level max bytes
|
|
|
|
std::vector<uint64_t> level_max_bytes_;
|
|
|
|
|
2014-10-27 22:49:46 +00:00
|
|
|
// Brief metadata of the files per level
|
2020-02-20 20:07:53 +00:00
|
|
|
autovector<ROCKSDB_NAMESPACE::LevelFilesBrief> level_files_brief_;
|
2014-10-27 22:49:46 +00:00
|
|
|
FileIndexer file_indexer_;
|
|
|
|
Arena arena_; // Used to allocate space for level_files_brief_
|
|
|
|
|
|
|
|
CompactionStyle compaction_style_;
|
|
|
|
|
|
|
|
// List of files per level, files in each level are arranged
|
|
|
|
// in increasing order of keys
|
|
|
|
std::vector<FileMetaData*>* files_;
|
|
|
|
|
2020-05-28 17:00:19 +00:00
|
|
|
// Map of all table files in version. Maps file number to (level, position on
|
|
|
|
// level).
|
|
|
|
using FileLocations = std::unordered_map<uint64_t, FileLocation>;
|
|
|
|
FileLocations file_locations_;
|
|
|
|
|
Add blob files to VersionStorageInfo/VersionBuilder (#6597)
Summary:
The patch adds a couple of classes to represent metadata about
blob files: `SharedBlobFileMetaData` contains the information elements
that are immutable (once the blob file is closed), e.g. blob file number,
total number and size of blob files, checksum method/value, while
`BlobFileMetaData` contains attributes that can vary across versions like
the amount of garbage in the file. There is a single `SharedBlobFileMetaData`
for each blob file, which is jointly owned by the `BlobFileMetaData` objects
that point to it; `BlobFileMetaData` objects, in turn, are owned by `Version`s
and can also be shared if the (immutable _and_ mutable) state of the blob file
is the same in two versions.
In addition, the patch adds the blob file metadata to `VersionStorageInfo`, and extends
`VersionBuilder` so that it can apply blob file related `VersionEdit`s (i.e. those
containing `BlobFileAddition`s and/or `BlobFileGarbage`), and save blob file metadata
to a new `VersionStorageInfo`. Consistency checks are also extended to ensure
that table files point to blob files that are part of the `Version`, and that all blob files
that are part of any given `Version` have at least some _non_-garbage data in them.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6597
Test Plan: `make check`
Reviewed By: riversand963
Differential Revision: D20656803
Pulled By: ltamasi
fbshipit-source-id: f1f74d135045b3b42d0146f03ee576ef0a4bfd80
2020-03-27 01:48:55 +00:00
|
|
|
// Map of blob files in version by number.
|
|
|
|
BlobFiles blob_files_;
|
|
|
|
|
options.level_compaction_dynamic_level_bytes to allow RocksDB to pick size bases of levels dynamically.
Summary:
With a fixed max_bytes_for_level_base, the ratio between the sizes of the largest level and the second-largest level can range from 0 to the multiplier. This makes the LSM tree frequently irregular and unpredictable. It can also cause poor space amplification in some cases.
In this improvement (proposed by Igor Kabiljo), we introduce a parameter option.level_compaction_use_dynamic_max_bytes. When it is turned on, RocksDB is free to pick a level base in the range (options.max_bytes_for_level_base/options.max_bytes_for_level_multiplier, options.max_bytes_for_level_base] so that real level ratios stay close to options.max_bytes_for_level_multiplier.
Test Plan: New unit tests and pass tests suites including valgrind.
Reviewers: MarkCallaghan, rven, yhchiang, igor, ikabiljo
Reviewed By: ikabiljo
Subscribers: yoshinorim, ikabiljo, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D31437
2015-02-05 19:44:17 +00:00
|
|
|
// Level that L0 data should be compacted to. All levels < base_level_ should
|
2015-03-30 21:04:21 +00:00
|
|
|
// be empty. -1 if it is not level-compaction so it's not applicable.
|
options.level_compaction_dynamic_level_bytes to allow RocksDB to pick size bases of levels dynamically.
Summary:
With a fixed max_bytes_for_level_base, the ratio between the sizes of the largest level and the second-largest level can range from 0 to the multiplier. This makes the LSM tree frequently irregular and unpredictable. It can also cause poor space amplification in some cases.
In this improvement (proposed by Igor Kabiljo), we introduce a parameter option.level_compaction_use_dynamic_max_bytes. When it is turned on, RocksDB is free to pick a level base in the range (options.max_bytes_for_level_base/options.max_bytes_for_level_multiplier, options.max_bytes_for_level_base] so that real level ratios stay close to options.max_bytes_for_level_multiplier.
Test Plan: New unit tests and pass tests suites including valgrind.
Reviewers: MarkCallaghan, rven, yhchiang, igor, ikabiljo
Reviewed By: ikabiljo
Subscribers: yoshinorim, ikabiljo, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D31437
2015-02-05 19:44:17 +00:00
|
|
|
int base_level_;
|
|
|
|
|
2018-10-22 17:18:51 +00:00
|
|
|
double level_multiplier_;
|
|
|
|
|
2014-10-27 22:49:46 +00:00
|
|
|
// A list for the same set of files that are stored in files_,
|
|
|
|
// but files in each level are now sorted based on file
|
|
|
|
// size. The file with the largest size is at the front.
|
|
|
|
// This vector stores the index of the file from files_.
|
2015-09-22 00:16:31 +00:00
|
|
|
std::vector<std::vector<int>> files_by_compaction_pri_;
|
2014-10-27 22:49:46 +00:00
|
|
|
|
Allowing L0 -> L1 trivial move on sorted data
Summary:
This diff updates the logic of how we do trivial move; trivial move can now run on any number of files in the input level as long as they are not overlapping.
The conditions for trivial move have been updated
Introduced conditions:
- Trivial move cannot happen if we have a compaction filter (except if the compaction is not manual)
- Input level files cannot be overlapping
Removed conditions:
- Trivial move only runs when the compaction is not manual
- The input level can contain only 1 file
More context on what tests failed because of Trivial move
```
DBTest.CompactionsGenerateMultipleFiles
This test expects compaction on a file in L0 to generate multiple files in L1; it will fail with trivial move because we end up with one file in L1
```
```
DBTest.NoSpaceCompactRange
This test expects compaction to fail when we force the environment to report running out of space; this is not valid in the trivial move situation
because trivial move does not need any extra space and does not check for it
```
```
DBTest.DropWrites
Similar to DBTest.NoSpaceCompactRange
```
```
DBTest.DeleteObsoleteFilesPendingOutputs
This test expects that a file in L2 is deleted after it's moved to L3; this is not valid with trivial move because although the file was moved, it is now used by L3
```
```
CuckooTableDBTest.CompactionIntoMultipleFiles
Same as DBTest.CompactionsGenerateMultipleFiles
```
This diff is based on a work by @sdong https://reviews.facebook.net/D34149
Test Plan: make -j64 check
Reviewers: rven, sdong, igor
Reviewed By: igor
Subscribers: yhchiang, ott, march, dhruba, sdong
Differential Revision: https://reviews.facebook.net/D34797
2015-06-04 23:51:25 +00:00
|
|
|
// If true, files in L0 have keys with non-overlapping ranges
|
|
|
|
bool level0_non_overlapping_;
|
|
|
|
|
2015-09-22 00:16:31 +00:00
|
|
|
// An index into files_by_compaction_pri_ that specifies the first
|
2014-10-27 22:49:46 +00:00
|
|
|
// file that is not yet compacted
|
|
|
|
std::vector<int> next_file_to_compact_by_size_;
|
|
|
|
|
2015-09-22 00:16:31 +00:00
|
|
|
// Only the first few entries of files_by_compaction_pri_ are sorted.
|
2014-10-27 22:49:46 +00:00
|
|
|
// There is no need to sort all the files because it is likely
|
|
|
|
// that on a running system, we need to look at only the first
|
|
|
|
// few largest files because a new version is created every few
|
|
|
|
// seconds/minutes (because of concurrent compactions).
|
|
|
|
static const size_t number_of_files_to_sort_ = 50;
|
|
|
|
|
Add experimental API MarkForCompaction()
Summary:
Some Mongo+Rocks datasets in Parse's environment are not doing compactions very frequently. During the quiet period (with no IO), we'd like to schedule compactions so that our reads become faster. Also, aggressively compacting during quiet periods helps when write bursts happen. In addition, we also want to compact files that contain deleted key ranges (like old oplog keys).
All of this is currently not possible with CompactRange() because it's single-threaded and blocks all other compactions from happening. Running CompactRange() also risks blocking writes, because we generate too many Level 0 files before the compaction is over. Stopping writes is very dangerous because writers hold transaction locks. We tried running manual compaction once on Mongo+Rocks and everything fell apart.
MarkForCompaction() solves all of those problems. It is a very light-weight manual compaction. It is lower priority than automatic compactions, which means it shouldn't interfere with the background process keeping the LSM tree clean. However, if no automatic compactions need to be run (or we have extra background threads available), we will start compacting files that are marked for compaction.
Test Plan: added a new unit test
Reviewers: yhchiang, rven, MarkCallaghan, sdong
Reviewed By: sdong
Subscribers: yoshinorim, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D37083
2015-04-17 23:44:45 +00:00
|
|
|
// This vector contains the list of files marked for compaction and not
|
|
|
|
// currently being compacted. It is protected by DB mutex. It is calculated in
|
|
|
|
// ComputeCompactionScore()
|
|
|
|
autovector<std::pair<int, FileMetaData*>> files_marked_for_compaction_;
|
|
|
|
|
2018-04-03 04:57:28 +00:00
|
|
|
autovector<std::pair<int, FileMetaData*>> expired_ttl_files_;
|
|
|
|
|
Periodic Compactions (#5166)
Summary:
Introducing Periodic Compactions.
This feature allows all the files in a CF to be periodically compacted. It can help proactively catch any corruption that creeps into the DB, since every file is periodically re-compacted, and it also helps clean up data older than a certain threshold.
- Introduced a new option `periodic_compaction_time` to control how long a file can live without being compacted in a CF.
- This works across all levels.
- The files are put in the same level after going through the compaction. (Related files in the same level are picked up as `ExpandInputstoCleanCut` is used).
- Compaction filters, if any, are invoked as usual.
- A new table property, `file_creation_time`, is introduced to implement this feature. This property is set to the time at which the SST file was created (and that time is given by the underlying Env/OS).
This feature can be enabled on its own, or in conjunction with `ttl`. It is possible to set a different time threshold for the bottom level when used in conjunction with ttl. Since `ttl` works only on levels 0 through the second-to-last level, you could set `ttl` to, say, 1 day, and `periodic_compaction_time` to, say, 7 days. Since `ttl < periodic_compaction_time`, files in all levels except the bottom one keep getting picked up based on ttl, and almost never based on periodic_compaction_time. The files in the bottom level get picked up for compaction based on `periodic_compaction_time`.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5166
Differential Revision: D14884441
Pulled By: sagar0
fbshipit-source-id: 408426cbacb409c06386a98632dcf90bfa1bda47
2019-04-11 02:24:25 +00:00
|
|
|
autovector<std::pair<int, FileMetaData*>>
|
|
|
|
files_marked_for_periodic_compaction_;
|
|
|
|
|
2017-10-25 23:24:29 +00:00
|
|
|
// These files are considered bottommost because none of their keys can exist
|
|
|
|
// at lower levels. They are not necessarily all in the same level. The marked
|
|
|
|
// ones are eligible for compaction because they contain duplicate key
|
|
|
|
// versions that are no longer protected by snapshot. These variables are
|
|
|
|
// protected by DB mutex and are calculated in `GenerateBottommostFiles()` and
|
|
|
|
// `ComputeBottommostFilesMarkedForCompaction()`.
|
|
|
|
autovector<std::pair<int, FileMetaData*>> bottommost_files_;
|
|
|
|
autovector<std::pair<int, FileMetaData*>>
|
|
|
|
bottommost_files_marked_for_compaction_;
|
|
|
|
|
|
|
|
// Threshold for needing to mark another bottommost file. Maintain it so we
|
|
|
|
// can quickly check when releasing a snapshot whether more bottommost files
|
|
|
|
// became eligible for compaction. It's defined as the min of the max nonzero
|
|
|
|
// seqnums of unmarked bottommost files.
|
|
|
|
SequenceNumber bottommost_files_mark_threshold_ = kMaxSequenceNumber;
|
|
|
|
|
|
|
|
// Monotonically increases as we release old snapshots. Zero indicates no
|
|
|
|
// snapshots have been released yet. When no snapshots remain we set it to the
|
|
|
|
// current seqnum, which needs to be protected as a snapshot can still be
|
|
|
|
// created that references it.
|
|
|
|
SequenceNumber oldest_snapshot_seqnum_ = 0;
|
|
|
|
|
2014-10-27 22:49:46 +00:00
|
|
|
// Level that should be compacted next and its compaction score.
|
|
|
|
// Score < 1 means compaction is not strictly needed. These fields
|
|
|
|
// are initialized by Finalize().
|
|
|
|
// The most critical level to be compacted is listed first
|
|
|
|
// These are used to pick the best compaction level
|
|
|
|
std::vector<double> compaction_score_;
|
|
|
|
std::vector<int> compaction_level_;
|
2015-03-30 21:04:21 +00:00
|
|
|
int l0_delay_trigger_count_ = 0; // Count used to trigger slow down and stop
|
|
|
|
// based on the number of L0 files.
|
2014-10-27 22:49:46 +00:00
|
|
|
|
|
|
|
// the following are the sampled temporary stats.
|
|
|
|
// the current accumulated size of sampled files.
|
|
|
|
uint64_t accumulated_file_size_;
|
|
|
|
// the current accumulated size of all raw keys based on the sampled files.
|
|
|
|
uint64_t accumulated_raw_key_size_;
|
|
|
|
// the current accumulated size of all raw values based on the sampled files.
|
|
|
|
uint64_t accumulated_raw_value_size_;
|
|
|
|
// total number of non-deletion entries
|
|
|
|
uint64_t accumulated_num_non_deletions_;
|
|
|
|
// total number of deletion entries
|
|
|
|
uint64_t accumulated_num_deletions_;
|
2015-12-07 18:51:08 +00:00
|
|
|
// current number of non_deletion entries
|
|
|
|
uint64_t current_num_non_deletions_;
|
2018-03-08 18:18:34 +00:00
|
|
|
// current number of deletion entries
|
2015-12-07 18:51:08 +00:00
|
|
|
uint64_t current_num_deletions_;
|
|
|
|
// current number of file samples
|
|
|
|
uint64_t current_num_samples_;
|
2015-08-14 04:42:20 +00:00
|
|
|
// Estimated bytes needed to be compacted until all levels' size is down to
|
|
|
|
// target sizes.
|
|
|
|
uint64_t estimated_compaction_needed_bytes_;
|
2014-10-27 22:49:46 +00:00
|
|
|
|
|
|
|
bool finalized_;
|
|
|
|
|
2016-10-08 00:21:45 +00:00
|
|
|
// If set to true, we will run consistency checks even if RocksDB
|
|
|
|
// is compiled in release mode
|
|
|
|
bool force_consistency_checks_;
|
|
|
|
|
2014-10-27 22:49:46 +00:00
|
|
|
friend class Version;
|
|
|
|
friend class VersionSet;
|
|
|
|
};
|
|
|
|
|
Introduce a new MultiGet batching implementation (#5011)
Summary:
This PR introduces a new MultiGet() API, with the underlying implementation grouping keys based on SST file and batching lookups in a file. The reason for the new API is twofold - the definition allows callers to allocate storage for status and values on stack instead of std::vector, as well as return values as PinnableSlices in order to avoid copying, and it keeps the original MultiGet() implementation intact while we experiment with batching.
Batching is useful when there is some spatial locality to the keys being queried, as well as with larger batch sizes. The main benefits are due to -
1. Fewer function calls, especially to BlockBasedTableReader::MultiGet() and FullFilterBlockReader::KeysMayMatch()
2. Bloom filter cachelines can be prefetched, hiding the cache miss latency
The next step is to optimize the binary searches in the level_storage_info, index blocks and data blocks, since we could reduce the number of key comparisons if the keys are relatively close to each other. The batching optimizations also need to be extended to other formats, such as PlainTable and filter formats. This also needs to be added to db_stress.
Benchmark results from db_bench for various batch size/locality of reference combinations are given below. Locality was simulated by offsetting the keys in a batch by a stride length. Each SST file is about 8.6MB uncompressed and key/value size is 16/100 uncompressed. To focus on the cpu benefit of batching, the runs were single threaded and bound to the same cpu to eliminate interference from other system events. The results show a 10-25% improvement in micros/op from smaller to larger batch sizes (4 - 32).
Batch Sizes (micros/op):     1     | 2     | 4     | 8     | 16    | 32
Random pattern (Stride length 0)
Get                          4.158 | 4.109 | 4.026 | 4.05  | 4.1   | 4.074
MultiGet (no batching)       4.438 | 4.302 | 4.165 | 4.122 | 4.096 | 4.075
MultiGet (w/ batching)       4.461 | 4.256 | 4.277 | 4.11  | 4.182 | 4.14
Good locality (Stride length 16)
Get                          4.048 | 3.659 | 3.248 | 2.99  | 2.84  | 2.753
MultiGet (no batching)       4.429 | 3.728 | 3.406 | 3.053 | 2.911 | 2.781
MultiGet (w/ batching)       4.452 | 3.45  | 2.833 | 2.451 | 2.233 | 2.135
Good locality (Stride length 256)
Get                          4.066 | 3.786 | 3.581 | 3.447 | 3.415 | 3.232
MultiGet (no batching)       4.406 | 4.005 | 3.644 | 3.49  | 3.381 | 3.268
MultiGet (w/ batching)       4.393 | 3.649 | 3.186 | 2.882 | 2.676 | 2.62
Medium locality (Stride length 4096)
Get                          4.012 | 3.922 | 3.768 | 3.61  | 3.582 | 3.555
MultiGet (no batching)       4.364 | 4.057 | 3.791 | 3.65  | 3.57  | 3.465
MultiGet (w/ batching)       4.479 | 3.758 | 3.316 | 3.077 | 2.959 | 2.891
dbbench command used (on a DB with 4 levels, 12 million keys)-
TEST_TMPDIR=/dev/shm numactl -C 10 ./db_bench.tmp -use_existing_db=true -benchmarks="readseq,multireadrandom" -write_buffer_size=4194304 -target_file_size_base=4194304 -max_bytes_for_level_base=16777216 -num=12000000 -reads=12000000 -duration=90 -threads=1 -compression_type=none -cache_size=4194304000 -batch_size=32 -disable_auto_compactions=true -bloom_bits=10 -cache_index_and_filter_blocks=true -pin_l0_filter_and_index_blocks_in_cache=true -multiread_batched=true -multiread_stride=4
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5011
Differential Revision: D14348703
Pulled By: anand1976
fbshipit-source-id: 774406dab3776d979c809522a67bedac6c17f84b
2019-04-11 21:24:09 +00:00
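A usage sketch of the array-based MultiGet described above; the exact overload signature is assumed from the commit message, so check it against your RocksDB version before relying on it:
```
#include <rocksdb/db.h>

#include <array>

// Sketch: look up three keys in one call; PinnableSlice avoids value copies,
// and values/statuses can live on the stack instead of std::vector.
void BatchedLookup(rocksdb::DB* db) {
  constexpr size_t kNumKeys = 3;
  std::array<rocksdb::Slice, kNumKeys> keys = {
      rocksdb::Slice("k1"), rocksdb::Slice("k2"), rocksdb::Slice("k3")};
  std::array<rocksdb::PinnableSlice, kNumKeys> values;
  std::array<rocksdb::Status, kNumKeys> statuses;
  db->MultiGet(rocksdb::ReadOptions(), db->DefaultColumnFamily(), kNumKeys,
               keys.data(), values.data(), statuses.data());
}
```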
|
|
|
using MultiGetRange = MultiGetContext::Range;
|
Add blob files to VersionStorageInfo/VersionBuilder (#6597)
Summary:
The patch adds a couple of classes to represent metadata about
blob files: `SharedBlobFileMetaData` contains the information elements
that are immutable (once the blob file is closed), e.g. blob file number,
total number and size of blob files, checksum method/value, while
`BlobFileMetaData` contains attributes that can vary across versions like
the amount of garbage in the file. There is a single `SharedBlobFileMetaData`
for each blob file, which is jointly owned by the `BlobFileMetaData` objects
that point to it; `BlobFileMetaData` objects, in turn, are owned by `Version`s
and can also be shared if the (immutable _and_ mutable) state of the blob file
is the same in two versions.
In addition, the patch adds the blob file metadata to `VersionStorageInfo`, and extends
`VersionBuilder` so that it can apply blob file related `VersionEdit`s (i.e. those
containing `BlobFileAddition`s and/or `BlobFileGarbage`), and save blob file metadata
to a new `VersionStorageInfo`. Consistency checks are also extended to ensure
that table files point to blob files that are part of the `Version`, and that all blob files
that are part of any given `Version` have at least some _non_-garbage data in them.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6597
Test Plan: `make check`
Reviewed By: riversand963
Differential Revision: D20656803
Pulled By: ltamasi
fbshipit-source-id: f1f74d135045b3b42d0146f03ee576ef0a4bfd80
2020-03-27 01:48:55 +00:00
|
|
|
// A column family's version consists of the table and blob files owned by
|
|
|
|
// the column family at a certain point in time.
|
2014-10-27 22:49:46 +00:00
|
|
|
class Version {
|
|
|
|
public:
|
|
|
|
// Append to *iters a sequence of iterators that will
|
|
|
|
// yield the contents of this Version when merged together.
|
2020-08-03 22:21:56 +00:00
|
|
|
// @param read_options Must outlive any iterator built by
|
|
|
|
// `merger_iter_builder`.
|
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo).
|
|
|
|
void AddIterators(const ReadOptions& read_options,
|
|
|
|
const FileOptions& soptions,
|
2016-11-04 18:53:38 +00:00
|
|
|
MergeIteratorBuilder* merger_iter_builder,
|
Properly report IO errors when IndexType::kBinarySearchWithFirstKey is used (#6621)
Summary:
Context: Index type `kBinarySearchWithFirstKey` added the ability for sst file iterator to sometimes report a key from index without reading the corresponding data block. This is useful when sst blocks are cut at some meaningful boundaries (e.g. one block per key prefix), and many seeks land between blocks (e.g. for each prefix, the ranges of keys in different sst files are nearly disjoint, so a typical seek needs to read a data block from only one file even if all files have the prefix). But this added a new error condition, which rocksdb code was really not equipped to deal with: `InternalIterator::value()` may fail with an IO error or Status::Incomplete, but it's just a method returning a Slice, with no way to report error instead. Before this PR, this type of error wasn't handled at all (an empty slice was returned), and kBinarySearchWithFirstKey implementation was considered a prototype.
Now that we (LogDevice) have experimented with kBinarySearchWithFirstKey for a while and confirmed that it's really useful, this PR is adding the missing error handling.
It's a pretty inconvenient situation implementation-wise. The error needs to be reported from InternalIterator when trying to access value. But there are ~700 call sites of `InternalIterator::value()`, most of which either can't hit the error condition (because the iterator is reading from memtable or from index or something) or wouldn't benefit from the deferred loading of the value (e.g. compaction iterator that reads all values anyway). Adding error handling to all these call sites would needlessly bloat the code. So instead I made the deferred value loading optional: only the call sites that may use deferred loading have to call the new method `PrepareValue()` before calling `value()`. The feature is enabled with a new bool argument `allow_unprepared_value` to a bunch of methods that create iterators (it wouldn't make sense to put it in ReadOptions because it's completely internal to iterators, with virtually no user-visible effect). Lmk if you have better ideas.
Note that the deferred value loading only happens for *internal* iterators. The user-visible iterator (DBIter) always prepares the value before returning from Seek/Next/etc. We could go further and add an API to defer that value loading too, but that's most likely not useful for LogDevice, so it doesn't seem worth the complexity for now.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6621
Test Plan: make -j5 check . Will also deploy to some logdevice test clusters and look at stats.
Reviewed By: siying
Differential Revision: D20786930
Pulled By: al13n321
fbshipit-source-id: 6da77d918bad3780522e918f17f4d5513d3e99ee
2020-04-16 00:37:23 +00:00
|
|
|
RangeDelAggregator* range_del_agg,
|
|
|
|
bool allow_unprepared_value);
|
2014-10-27 22:49:46 +00:00
|
|
|
|
2020-08-03 22:21:56 +00:00
|
|
|
// @param read_options Must outlive any iterator built by
|
|
|
|
// `merger_iter_builder`.
|
|
|
|
void AddIteratorsForLevel(const ReadOptions& read_options,
|
|
|
|
const FileOptions& soptions,
|
2016-10-21 00:05:32 +00:00
|
|
|
MergeIteratorBuilder* merger_iter_builder,
|
Properly report IO errors when IndexType::kBinarySearchWithFirstKey is used (#6621)
Summary:
Context: Index type `kBinarySearchWithFirstKey` added the ability for sst file iterator to sometimes report a key from index without reading the corresponding data block. This is useful when sst blocks are cut at some meaningful boundaries (e.g. one block per key prefix), and many seeks land between blocks (e.g. for each prefix, the ranges of keys in different sst files are nearly disjoint, so a typical seek needs to read a data block from only one file even if all files have the prefix). But this added a new error condition, which rocksdb code was really not equipped to deal with: `InternalIterator::value()` may fail with an IO error or Status::Incomplete, but it's just a method returning a Slice, with no way to report error instead. Before this PR, this type of error wasn't handled at all (an empty slice was returned), and kBinarySearchWithFirstKey implementation was considered a prototype.
Now that we (LogDevice) have experimented with kBinarySearchWithFirstKey for a while and confirmed that it's really useful, this PR is adding the missing error handling.
It's a pretty inconvenient situation implementation-wise. The error needs to be reported from InternalIterator when trying to access value. But there are ~700 call sites of `InternalIterator::value()`, most of which either can't hit the error condition (because the iterator is reading from memtable or from index or something) or wouldn't benefit from the deferred loading of the value (e.g. compaction iterator that reads all values anyway). Adding error handling to all these call sites would needlessly bloat the code. So instead I made the deferred value loading optional: only the call sites that may use deferred loading have to call the new method `PrepareValue()` before calling `value()`. The feature is enabled with a new bool argument `allow_unprepared_value` to a bunch of methods that create iterators (it wouldn't make sense to put it in ReadOptions because it's completely internal to iterators, with virtually no user-visible effect). Lmk if you have better ideas.
Note that the deferred value loading only happens for *internal* iterators. The user-visible iterator (DBIter) always prepares the value before returning from Seek/Next/etc. We could go further and add an API to defer that value loading too, but that's most likely not useful for LogDevice, so it doesn't seem worth the complexity for now.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6621
Test Plan: make -j5 check . Will also deploy to some logdevice test clusters and look at stats.
Reviewed By: siying
Differential Revision: D20786930
Pulled By: al13n321
fbshipit-source-id: 6da77d918bad3780522e918f17f4d5513d3e99ee
2020-04-16 00:37:23 +00:00
|
|
|
int level, RangeDelAggregator* range_del_agg,
|
|
|
|
bool allow_unprepared_value);
|
2016-10-21 00:05:32 +00:00
|
|
|
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether it's retryable or not, the scope (i.e. fault domain) of the error, etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
2019-12-13 22:47:08 +00:00
|
|
|
Status OverlapWithLevelIterator(const ReadOptions&, const FileOptions&,
|
2018-03-16 17:27:39 +00:00
|
|
|
const Slice& smallest_user_key,
|
|
|
|
const Slice& largest_user_key,
|
|
|
|
int level, bool* overlap);
|
|
|
|
|
New API to get all merge operands for a Key (#5604)
Summary:
This is a new API added to db.h to allow for fetching all merge operands associated with a Key. The main motivation for this API is to support use cases where doing a full online merge is not necessary as it is performance sensitive. Example use-cases:
1. Update subset of columns and read subset of columns -
Imagine a SQL table where a row is encoded as a K/V pair (as is done in MyRocks). If there are many columns and users only updated one of them, we can use the merge operator to reduce write amplification. And if users only read one or two columns in the read query, this feature can avoid a full merge of the whole row and save some CPU.
2. Updating very few attributes in a value which is a JSON-like document -
Updating one attribute can be done efficiently using merge operator, while reading back one attribute can be done more efficiently if we don't need to do a full merge.
----------------------------------------------------------------------------------------------------
API :
Status GetMergeOperands(
const ReadOptions& options, ColumnFamilyHandle* column_family,
const Slice& key, PinnableSlice* merge_operands,
GetMergeOperandsOptions* get_merge_operands_options,
int* number_of_operands)
Example usage :
int size = 100;
int number_of_operands = 0;
std::vector<PinnableSlice> values(size);
GetMergeOperandsOptions merge_operands_info;
db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(), "k1", values.data(), merge_operands_info, &number_of_operands);
Description :
Returns all the merge operands corresponding to the key. If the number of merge operands in DB is greater than merge_operands_options.expected_max_number_of_operands no merge operands are returned and status is Incomplete. Merge operands returned are in the order of insertion.
merge_operands -> Points to an array of at least merge_operands_options.expected_max_number_of_operands entries; the caller is responsible for allocating it. If the status returned is Incomplete, then number_of_operands will contain the total number of merge operands found in the DB for the key.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5604
Test Plan:
Added unit test and perf test in db_bench that can be run using the command:
./db_bench -benchmarks=getmergeoperands --merge_operator=sortlist
Differential Revision: D16657366
Pulled By: vjnadimpalli
fbshipit-source-id: 0faadd752351745224ee12d4ae9ef3cb529951bf
2019-08-06 21:22:34 +00:00
|
|
|
// Look up the value for key, or get all merge operands for key.
|
|
|
|
// If do_merge = true (default), then look up the value for key.
|
|
|
|
// Behavior if do_merge = true:
|
|
|
|
// If found, store it in *value and
|
|
|
|
// return OK. Else return a non-OK status.
|
|
|
|
// Uses *operands to store merge_operator operations to apply later.
|
Use SST files for Transaction conflict detection
Summary:
Currently, transactions can fail even if there is no actual write conflict. This is due to relying on only the memtables to check for write-conflicts. Users have to tune memtable settings to try to avoid this, but it's hard to figure out exactly how to tune these settings.
With this diff, TransactionDB will use both memtables and SST files to determine if there are any write conflicts. This relies on the fact that BlockBasedTable stores sequence numbers for all writes that happen after any open snapshot. Also, D50295 is needed to prevent SingleDelete from disappearing writes (the TODOs in this test code will be fixed once the other diff is approved and merged).
Note that Optimistic transactions will still rely on tuning memtable settings as we do not want to read from SST while on the write thread. Also, memtable settings can still be used to reduce how often TransactionDB needs to read SST files.
Test Plan: unit tests, db bench
Reviewers: rven, yhchiang, kradhakrishnan, IslamAbdelRahman, sdong
Reviewed By: sdong
Subscribers: dhruba, leveldb, yoshinorim
Differential Revision: https://reviews.facebook.net/D50475
2015-10-15 23:37:15 +00:00
|
|
|
//
|
2019-08-06 21:22:34 +00:00
|
|
|
// If the ReadOptions.read_tier is set to do a read-only fetch, then
|
|
|
|
// *value_found will be set to false if it cannot be determined whether
|
|
|
|
// this value exists without doing IO.
|
2015-10-15 23:37:15 +00:00
|
|
|
//
|
2019-08-06 21:22:34 +00:00
|
|
|
// If the key is Deleted, *status will be set to NotFound and
|
2015-10-15 23:37:15 +00:00
|
|
|
// *key_exists will be set to true.
|
2019-08-06 21:22:34 +00:00
|
|
|
// If no key was found, *status will be set to NotFound and
|
2015-10-15 23:37:15 +00:00
|
|
|
// *key_exists will be set to false.
|
2019-08-06 21:22:34 +00:00
|
|
|
// If seq is non-null, *seq will be set to the sequence number found
|
|
|
|
// for the key if a key was found.
|
|
|
|
// Behavior if do_merge = false
|
|
|
|
// If the key has any merge operands then store them in
|
|
|
|
// merge_context.operands_list and don't merge the operands
|
2014-10-27 22:49:46 +00:00
|
|
|
// REQUIRES: lock is not held
|
2017-03-13 18:44:50 +00:00
|
|
|
void Get(const ReadOptions&, const LookupKey& key, PinnableSlice* value,
|
2020-03-02 23:58:32 +00:00
|
|
|
std::string* timestamp, Status* status, MergeContext* merge_context,
|
Use only "local" range tombstones during Get (#4449)
Summary:
Previously, range tombstones were accumulated from every level, which
was necessary if a range tombstone in a higher level covered a key in a lower
level. However, RangeDelAggregator::AddTombstones's complexity is based on
the number of tombstones that are currently stored in it, which is wasteful in
the Get case, where we only need to know the highest sequence number of range
tombstones that cover the key from higher levels, and compute the highest covering
sequence number at the current level. This change introduces this optimization, and
removes the use of RangeDelAggregator from the Get path.
In the benchmark results, the following command was used to initialize the database:
```
./db_bench -db=/dev/shm/5k-rts -use_existing_db=false -benchmarks=filluniquerandom -write_buffer_size=1048576 -compression_type=lz4 -target_file_size_base=1048576 -max_bytes_for_level_base=4194304 -value_size=112 -key_size=16 -block_size=4096 -level_compaction_dynamic_level_bytes=true -num=5000000 -max_background_jobs=12 -benchmark_write_rate_limit=20971520 -range_tombstone_width=100 -writes_per_range_tombstone=100 -max_num_range_tombstones=50000 -bloom_bits=8
```
...and the following command was used to measure read throughput:
```
./db_bench -db=/dev/shm/5k-rts/ -use_existing_db=true -benchmarks=readrandom -disable_auto_compactions=true -num=5000000 -reads=100000 -threads=32
```
The filluniquerandom command was only run once, and the resulting database was used
to measure read performance before and after the PR. Both binaries were compiled with
`DEBUG_LEVEL=0`.
Readrandom results before PR:
```
readrandom : 4.544 micros/op 220090 ops/sec; 16.9 MB/s (63103 of 100000 found)
```
Readrandom results after PR:
```
readrandom : 11.147 micros/op 89707 ops/sec; 6.9 MB/s (63103 of 100000 found)
```
So it's actually slower right now, but this PR paves the way for future optimizations (see #4493).
----
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4449
Differential Revision: D10370575
Pulled By: abhimadan
fbshipit-source-id: 9a2e152be1ef36969055c0e9eb4beb0d96c11f4d
2018-10-24 19:29:29 +00:00
|
|
|
SequenceNumber* max_covering_tombstone_seq,
|
|
|
|
bool* value_found = nullptr, bool* key_exists = nullptr,
|
|
|
|
SequenceNumber* seq = nullptr, ReadCallback* callback = nullptr,
|
2019-08-06 21:22:34 +00:00
|
|
|
bool* is_blob = nullptr, bool do_merge = true);
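Below is a hedged sketch (not actual RocksDB code) of an in-tree caller of the Get() declaration above; it assumes compilation inside the RocksDB source tree and that user-defined timestamps are disabled, so nullptr is passed for the timestamp:
```cpp
#include "db/lookup_key.h"
#include "db/merge_context.h"
#include "db/version_set.h"

namespace ROCKSDB_NAMESPACE {
// Looks up user_key in *v as of snapshot_seq and reports the result in *value.
Status GetFromVersion(Version* v, const Slice& user_key,
                      SequenceNumber snapshot_seq, PinnableSlice* value) {
  LookupKey lkey(user_key, snapshot_seq);
  Status s;
  MergeContext merge_context;
  SequenceNumber max_covering_tombstone_seq = 0;
  // do_merge defaults to true, so any merge operands are merged into *value;
  // passing do_merge = false would collect them in merge_context instead.
  v->Get(ReadOptions(), lkey, value, /*timestamp=*/nullptr, &s, &merge_context,
         &max_covering_tombstone_seq);
  return s;
}
}  // namespace ROCKSDB_NAMESPACE
```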
|
2014-10-27 22:49:46 +00:00
|
|
|
|
Introduce a new MultiGet batching implementation (#5011)
Summary:
This PR introduces a new MultiGet() API, with the underlying implementation grouping keys based on SST file and batching lookups in a file. The reason for the new API is twofold: the definition allows callers to allocate storage for statuses and values on the stack instead of in std::vector and to receive values as PinnableSlices to avoid copying, and it keeps the original MultiGet() implementation intact while we experiment with batching.
Batching is useful when there is some spatial locality to the keys being queried, as well as with larger batch sizes. The main benefits are due to -
1. Fewer function calls, especially to BlockBasedTableReader::MultiGet() and FullFilterBlockReader::KeysMayMatch()
2. Bloom filter cachelines can be prefetched, hiding the cache miss latency
The next step is to optimize the binary searches in the level_storage_info, index blocks and data blocks, since we could reduce the number of key comparisons if the keys are relatively close to each other. The batching optimizations also need to be extended to other formats, such as PlainTable and filter formats. This also needs to be added to db_stress.
Benchmark results from db_bench for various batch size/locality of reference combinations are given below. Locality was simulated by offsetting the keys in a batch by a stride length. Each SST file is about 8.6MB uncompressed and key/value size is 16/100 uncompressed. To focus on the cpu benefit of batching, the runs were single threaded and bound to the same cpu to eliminate interference from other system events. The results show a 10-25% improvement in micros/op from smaller to larger batch sizes (4 - 32).
Batch Sizes
1 | 2 | 4 | 8 | 16 | 32
Random pattern (Stride length 0)
4.158 | 4.109 | 4.026 | 4.05 | 4.1 | 4.074 - Get
4.438 | 4.302 | 4.165 | 4.122 | 4.096 | 4.075 - MultiGet (no batching)
4.461 | 4.256 | 4.277 | 4.11 | 4.182 | 4.14 - MultiGet (w/ batching)
Good locality (Stride length 16)
4.048 | 3.659 | 3.248 | 2.99 | 2.84 | 2.753
4.429 | 3.728 | 3.406 | 3.053 | 2.911 | 2.781
4.452 | 3.45 | 2.833 | 2.451 | 2.233 | 2.135
Good locality (Stride length 256)
4.066 | 3.786 | 3.581 | 3.447 | 3.415 | 3.232
4.406 | 4.005 | 3.644 | 3.49 | 3.381 | 3.268
4.393 | 3.649 | 3.186 | 2.882 | 2.676 | 2.62
Medium locality (Stride length 4096)
4.012 | 3.922 | 3.768 | 3.61 | 3.582 | 3.555
4.364 | 4.057 | 3.791 | 3.65 | 3.57 | 3.465
4.479 | 3.758 | 3.316 | 3.077 | 2.959 | 2.891
db_bench command used (on a DB with 4 levels, 12 million keys) -
TEST_TMPDIR=/dev/shm numactl -C 10 ./db_bench.tmp -use_existing_db=true -benchmarks="readseq,multireadrandom" -write_buffer_size=4194304 -target_file_size_base=4194304 -max_bytes_for_level_base=16777216 -num=12000000 -reads=12000000 -duration=90 -threads=1 -compression_type=none -cache_size=4194304000 -batch_size=32 -disable_auto_compactions=true -bloom_bits=10 -cache_index_and_filter_blocks=true -pin_l0_filter_and_index_blocks_in_cache=true -multiread_batched=true -multiread_stride=4
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5011
Differential Revision: D14348703
Pulled By: anand1976
fbshipit-source-id: 774406dab3776d979c809522a67bedac6c17f84b
2019-04-11 21:24:09 +00:00
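A minimal sketch (not taken from the PR) of the batched public MultiGet() overload this change introduces; the database path and keys are made up:
```cpp
#include <array>
#include "rocksdb/db.h"

int main() {
  rocksdb::DB* db = nullptr;
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::DB::Open(options, "/tmp/multiget_demo", &db);

  // Result storage lives on the stack; values come back as PinnableSlices.
  std::array<rocksdb::Slice, 2> keys{{"k0", "k1"}};
  std::array<rocksdb::PinnableSlice, 2> values;
  std::array<rocksdb::Status, 2> statuses;
  db->MultiGet(rocksdb::ReadOptions(), db->DefaultColumnFamily(), keys.size(),
               keys.data(), values.data(), statuses.data());

  delete db;
  return 0;
}
```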
|
|
|
void MultiGet(const ReadOptions&, MultiGetRange* range,
|
2020-12-14 21:47:17 +00:00
|
|
|
ReadCallback* callback = nullptr);
|
2019-04-11 21:24:09 +00:00
|
|
|
|
2020-12-05 05:28:26 +00:00
|
|
|
// Interprets blob_index_slice as a blob reference, and (assuming the
|
|
|
|
// corresponding blob file is part of this Version) retrieves the blob and
|
|
|
|
// saves it in *value.
|
|
|
|
// REQUIRES: blob_index_slice stores an encoded blob reference
|
Integrated blob garbage collection: relocate blobs (#7694)
Summary:
The patch adds basic garbage collection support to the integrated BlobDB
implementation. Valid blobs residing in the oldest blob files are relocated
as they are encountered during compaction. The threshold that determines
which blob files qualify is computed based on the configuration option
`blob_garbage_collection_age_cutoff`, which was introduced in https://github.com/facebook/rocksdb/issues/7661 .
Once a blob is retrieved for the purposes of relocation, it passes through the
same logic that extracts large values to blob files in general. This means that
if, for instance, the size threshold for key-value separation (`min_blob_size`)
got changed or writing blob files got disabled altogether, it is possible for the
value to be moved back into the LSM tree. In particular, one way to re-inline
all blob values if needed would be to perform a full manual compaction with
`enable_blob_files` set to `false`, `enable_blob_garbage_collection` set to
`true`, and `blob_garbage_collection_age_cutoff` set to `1.0`.
Some TODOs that I plan to address in separate PRs:
1) We'll have to measure the amount of new garbage in each blob file and log
`BlobFileGarbage` entries as part of the compaction job's `VersionEdit`.
(For the time being, blob files are cleaned up solely based on the
`oldest_blob_file_number` relationships.)
2) When compression is used for blobs, the compression type hasn't changed,
and the blob still qualifies for being written to a blob file, we can simply copy
the compressed blob to the new file instead of going through decompression
and compression.
3) We need to update the formula for computing write amplification to account
for the amount of data read from blob files as part of GC.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7694
Test Plan: `make check`
Reviewed By: riversand963
Differential Revision: D25069663
Pulled By: ltamasi
fbshipit-source-id: bdfa8feb09afcf5bca3b4eba2ba72ce2f15cd06a
2020-11-24 05:07:01 +00:00
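A hedged sketch of the "re-inline all blob values" recipe from the summary above, assuming the three blob options are dynamically changeable via SetOptions() (otherwise they would be set in the column family options before reopening the DB):
```cpp
#include "rocksdb/db.h"

rocksdb::Status ReinlineAllBlobs(rocksdb::DB* db) {
  rocksdb::Status s = db->SetOptions(
      {{"enable_blob_files", "false"},
       {"enable_blob_garbage_collection", "true"},
       {"blob_garbage_collection_age_cutoff", "1.0"}});
  if (!s.ok()) {
    return s;
  }
  // A full-range manual compaction then moves every blob value back into the
  // LSM tree, since new blob file writing is disabled.
  rocksdb::CompactRangeOptions cro;
  return db->CompactRange(cro, nullptr, nullptr);
}
```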
|
|
|
Status GetBlob(const ReadOptions& read_options, const Slice& user_key,
|
2021-03-04 08:42:11 +00:00
|
|
|
const Slice& blob_index_slice, PinnableSlice* value,
|
|
|
|
uint64_t* bytes_read) const;
|
2020-11-24 05:07:01 +00:00
|
|
|
|
2020-12-05 05:28:26 +00:00
|
|
|
// Retrieves a blob using a blob reference and saves it in *value,
|
|
|
|
// assuming the corresponding blob file is part of this Version.
|
2020-11-24 05:07:01 +00:00
|
|
|
Status GetBlob(const ReadOptions& read_options, const Slice& user_key,
|
2021-03-04 08:42:11 +00:00
|
|
|
const BlobIndex& blob_index, PinnableSlice* value,
|
|
|
|
uint64_t* bytes_read) const;
|
2020-11-24 05:07:01 +00:00
|
|
|
|
2021-09-25 05:13:19 +00:00
|
|
|
using BlobReadRequest =
|
|
|
|
std::pair<BlobIndex, std::reference_wrapper<const KeyContext>>;
|
|
|
|
using BlobReadRequests = std::vector<BlobReadRequest>;
|
|
|
|
void MultiGetBlob(const ReadOptions& read_options, MultiGetRange& range,
|
|
|
|
std::unordered_map<uint64_t, BlobReadRequests>& blob_rqs);
|
2021-09-18 01:43:32 +00:00
|
|
|
|
2015-02-05 00:04:51 +00:00
|
|
|
// Loads some stats information from files. Call without mutex held. It needs
|
|
|
|
// to be called before applying the version to the version set.
|
2015-08-04 20:48:16 +00:00
|
|
|
void PrepareApply(const MutableCFOptions& mutable_cf_options,
|
|
|
|
bool update_stats);
|
2014-10-27 22:49:46 +00:00
|
|
|
|
|
|
|
// Reference count management (so Versions do not disappear out from
|
|
|
|
// under live iterators)
|
|
|
|
void Ref();
|
|
|
|
// Decrease reference count. Delete the object if no reference left
|
|
|
|
// and return true. Otherwise, return false.
|
|
|
|
bool Unref();
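Illustrative only (a sketch, not code from this file): the typical ref pattern, where Ref() keeps a Version alive for a long-lived read and Unref() deletes it once the last reference is dropped; callers synchronize externally as noted at the top of this header:
```cpp
#include "db/version_set.h"

namespace ROCKSDB_NAMESPACE {
void ReadWithRef(Version* v) {
  v->Ref();  // keep this Version alive even if it stops being "current"
  // ... read from *v (e.g. via a live iterator) ...
  if (v->Unref()) {
    // Last reference just went away; Unref() has already deleted *v.
  }
}
}  // namespace ROCKSDB_NAMESPACE
```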
|
|
|
|
|
2020-05-04 22:05:34 +00:00
|
|
|
// Add all files listed in the current version to *live_table_files and
|
|
|
|
// *live_blob_files.
|
|
|
|
void AddLiveFiles(std::vector<uint64_t>* live_table_files,
|
|
|
|
std::vector<uint64_t>* live_blob_files) const;
|
2014-10-27 22:49:46 +00:00
|
|
|
|
|
|
|
// Return a human readable string that describes this version's contents.
|
2017-06-12 13:58:25 +00:00
|
|
|
std::string DebugString(bool hex = false, bool print_stats = false) const;
|
2014-10-27 22:49:46 +00:00
|
|
|
|
2018-03-08 18:18:34 +00:00
|
|
|
// Returns the version number of this version
|
2014-10-27 22:49:46 +00:00
|
|
|
uint64_t GetVersionNumber() const { return version_number_; }
|
|
|
|
|
2014-06-24 22:37:06 +00:00
|
|
|
// REQUIRES: lock is held
|
|
|
|
// On success, "tp" will contains the table properties of the file
|
|
|
|
// specified in "file_meta". If the file name of "file_meta" is
|
2018-03-08 18:18:34 +00:00
|
|
|
// known ahead, passing it by a non-null "fname" can save a
|
2014-06-24 22:37:06 +00:00
|
|
|
// file-name conversion.
|
|
|
|
Status GetTableProperties(std::shared_ptr<const TableProperties>* tp,
|
|
|
|
const FileMetaData* file_meta,
|
2015-10-13 21:24:45 +00:00
|
|
|
const std::string* fname = nullptr) const;
|
2014-06-24 22:37:06 +00:00
|
|
|
|
2014-02-14 00:28:21 +00:00
|
|
|
// REQUIRES: lock is held
|
|
|
|
// On success, *props will be populated with all SSTables' table properties.
|
|
|
|
// The keys of `props` are the sst file names, the values of `props` are the
|
2018-11-09 19:17:34 +00:00
|
|
|
// tables' properties, represented as std::shared_ptr.
|
2014-02-14 00:28:21 +00:00
|
|
|
Status GetPropertiesOfAllTables(TablePropertiesCollection* props);
|
2015-08-25 19:03:54 +00:00
|
|
|
Status GetPropertiesOfAllTables(TablePropertiesCollection* props, int level);
|
2015-10-19 17:34:55 +00:00
|
|
|
Status GetPropertiesOfTablesInRange(const Range* range, std::size_t n,
|
2015-10-13 21:24:45 +00:00
|
|
|
TablePropertiesCollection* props) const;
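For callers outside this class, a hedged sketch of consuming a TablePropertiesCollection (a map from SST file name to std::shared_ptr<const TableProperties>) through the public DB::GetPropertiesOfAllTables(); the helper function name is made up:
```cpp
#include <iostream>
#include "rocksdb/db.h"

void DumpTableProperties(rocksdb::DB* db) {
  rocksdb::TablePropertiesCollection props;
  rocksdb::Status s =
      db->GetPropertiesOfAllTables(db->DefaultColumnFamily(), &props);
  if (!s.ok()) {
    return;
  }
  for (const auto& name_and_props : props) {
    // Key: SST file name; value: shared_ptr to that table's properties.
    std::cout << name_and_props.first << ": "
              << name_and_props.second->num_entries << " entries\n";
  }
}
```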
|
2015-08-25 19:03:54 +00:00
|
|
|
|
2019-08-15 23:59:42 +00:00
|
|
|
// Print summary of range delete tombstones in SST files into out_str,
|
|
|
|
// with maximum max_entries_to_print entries printed out.
|
|
|
|
Status TablesRangeTombstoneSummary(int max_entries_to_print,
|
|
|
|
std::string* out_str);
|
|
|
|
|
2015-08-25 19:03:54 +00:00
|
|
|
// REQUIRES: lock is held
|
2018-03-08 18:18:34 +00:00
|
|
|
// On success, "tp" will contains the aggregated table property among
|
2015-08-25 19:03:54 +00:00
|
|
|
// the table properties of all sst files in this version.
|
|
|
|
Status GetAggregatedTableProperties(
|
|
|
|
std::shared_ptr<const TableProperties>* tp, int level = -1);
|
|
|
|
|
2014-10-27 22:49:46 +00:00
|
|
|
uint64_t GetEstimatedActiveKeys() {
|
2014-10-31 15:48:19 +00:00
|
|
|
return storage_info_.GetEstimatedActiveKeys();
|
2014-10-27 22:49:46 +00:00
|
|
|
}
|
2014-07-28 21:50:16 +00:00
|
|
|
|
2014-08-05 18:27:34 +00:00
|
|
|
size_t GetMemoryUsageByTableReaders();
|
|
|
|
|
2014-10-28 16:59:56 +00:00
|
|
|
ColumnFamilyData* cfd() const { return cfd_; }
|
|
|
|
|
2021-03-10 18:58:07 +00:00
|
|
|
// Return the next Version in the linked list.
|
|
|
|
Version* Next() const { return next_; }
|
2014-10-28 17:04:38 +00:00
|
|
|
|
2017-01-20 07:03:45 +00:00
|
|
|
int TEST_refs() const { return refs_; }
|
|
|
|
|
2014-10-31 15:48:19 +00:00
|
|
|
VersionStorageInfo* storage_info() { return &storage_info_; }
|
|
|
|
|
|
|
|
VersionSet* version_set() { return vset_; }
|
2014-10-28 17:08:41 +00:00
|
|
|
|
CompactFiles, EventListener and GetDatabaseMetaData
Summary:
This diff adds three sets of APIs to RocksDB.
= GetColumnFamilyMetaData =
* These APIs allow users to obtain the current state of a RocksDB instance on one column family.
* See GetColumnFamilyMetaData in include/rocksdb/db.h
= EventListener =
* A virtual class that allows users to implement a set of
call-back functions which will be called when specific
events of a RocksDB instance happens.
* To register EventListener, simply insert an EventListener to ColumnFamilyOptions::listeners
= CompactFiles =
* CompactFiles API inputs a set of file numbers and an output level, and RocksDB
will try to compact those files into the specified level.
= Example =
* Example code can be found in example/compact_files_example.cc, which implements
a simple external compactor using EventListener, GetColumnFamilyMetaData, and
CompactFiles API.
Test Plan:
listener_test
compactor_test
example/compact_files_example
export ROCKSDB_TESTS=CompactFiles
db_test
export ROCKSDB_TESTS=MetaData
db_test
Reviewers: ljin, igor, rven, sdong
Reviewed By: sdong
Subscribers: MarkCallaghan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D24705
2014-11-07 22:45:18 +00:00
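A hedged sketch of the EventListener hook-up described above; the listener class name and body are invented for illustration:
```cpp
#include <iostream>
#include <memory>
#include "rocksdb/db.h"
#include "rocksdb/listener.h"

class CompactionLogger : public rocksdb::EventListener {
 public:
  void OnCompactionCompleted(rocksdb::DB* /*db*/,
                             const rocksdb::CompactionJobInfo& info) override {
    std::cout << "compaction finished, output level " << info.output_level
              << "\n";
  }
};

rocksdb::Options MakeOptionsWithListener() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.listeners.emplace_back(std::make_shared<CompactionLogger>());
  return options;
}
```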
|
|
|
void GetColumnFamilyMetaData(ColumnFamilyMetaData* cf_meta);
|
|
|
|
|
2018-03-02 01:50:54 +00:00
|
|
|
uint64_t GetSstFilesSize();
|
|
|
|
|
2019-10-25 18:52:24 +00:00
|
|
|
// Retrieves the file_creation_time of the oldest file in the DB.
|
|
|
|
// Prerequisite for this API is max_open_files = -1
|
|
|
|
void GetCreationTimeOfOldestFile(uint64_t* creation_time);
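A short hedged sketch using the public counterpart, DB::GetCreationTimeOfOldestFile(); the wrapper name is made up, and the max_open_files = -1 prerequisite above still applies:
```cpp
#include <cstdint>
#include "rocksdb/db.h"

rocksdb::Status OldestFileCreationTime(rocksdb::DB* db,
                                       uint64_t* creation_time) {
  // Requires the DB to have been opened with max_open_files = -1.
  return db->GetCreationTimeOfOldestFile(creation_time);
}
```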
|
|
|
|
|
2019-07-23 22:30:59 +00:00
|
|
|
const MutableCFOptions& GetMutableCFOptions() { return mutable_cf_options_; }
|
2018-05-21 21:33:55 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
private:
|
2015-03-03 18:59:36 +00:00
|
|
|
Env* env_;
|
2021-03-15 11:32:24 +00:00
|
|
|
SystemClock* clock_;
|
2021-01-26 06:07:26 +00:00
|
|
|
|
2019-03-26 23:41:31 +00:00
|
|
|
friend class ReactiveVersionSet;
|
2011-03-18 22:37:00 +00:00
|
|
|
friend class VersionSet;
|
2020-03-21 02:17:54 +00:00
|
|
|
friend class VersionEditHandler;
|
|
|
|
friend class VersionEditHandlerPointInTime;
|
2014-10-27 22:49:46 +00:00
|
|
|
|
2014-10-31 15:48:19 +00:00
|
|
|
const InternalKeyComparator* internal_comparator() const {
|
|
|
|
return storage_info_.internal_comparator_;
|
2014-10-27 22:49:46 +00:00
|
|
|
}
|
2014-10-31 15:48:19 +00:00
|
|
|
const Comparator* user_comparator() const {
|
|
|
|
return storage_info_.user_comparator_;
|
2014-10-27 22:49:46 +00:00
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
Skip bottom-level filter block caching when hit-optimized
Summary:
When Get() or NewIterator() trigger file loads, skip caching the filter block if
(1) optimize_filters_for_hits is set and (2) the file is on the bottommost
level. Also skip checking filters under the same conditions, which means that
for a preloaded file or a file that was trivially-moved to the bottom level, its
filter block will eventually expire from the cache.
- added parameters/instance variables in various places in order to propagate the config ("skip_filters") from version_set to block_based_table_reader
- in BlockBasedTable::Rep, this optimization prevents filter from being loaded when the file is opened simply by setting filter_policy = nullptr
- in BlockBasedTable::Get/BlockBasedTable::NewIterator, this optimization prevents filter from being used (even if it was loaded already) by setting filter = nullptr
Test Plan:
updated unit test:
$ ./db_test --gtest_filter=DBTest.OptimizeFiltersForHits
will also run 'make check'
Reviewers: sdong, igor, paultuckfield, anthony, rven, kradhakrishnan, IslamAbdelRahman, yhchiang
Reviewed By: yhchiang
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D51633
2015-12-23 18:15:07 +00:00
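A minimal sketch of enabling the optimization this change describes; only optimize_filters_for_hits is the relevant knob, the rest is generic setup:
```cpp
#include "rocksdb/db.h"
#include "rocksdb/options.h"

rocksdb::Options MakeHitOptimizedOptions() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Skip caching and checking filter blocks for files on the bottommost level.
  options.optimize_filters_for_hits = true;
  return options;
}
```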
|
|
|
// Returns true if the filter blocks in the specified level will not be
|
|
|
|
// checked during read operations. In certain cases (trivial move or preload),
|
|
|
|
// the filter block may already be cached, but we still do not access it, so
|
|
|
|
// that it eventually expires from the cache.
|
2016-02-01 22:58:46 +00:00
|
|
|
bool IsFilterSkipped(int level, bool is_file_last_in_level = false);
|
2015-12-23 18:15:07 +00:00
|
|
|
|
2014-10-17 21:58:30 +00:00
|
|
|
// The helper function of UpdateAccumulatedStats, which may fill the missing
|
2018-03-08 18:18:34 +00:00
|
|
|
// fields of file_meta from its associated TableProperties.
|
2014-06-24 22:37:06 +00:00
|
|
|
// Returns true if it does initialize FileMetaData.
|
|
|
|
bool MaybeInitializeFileMetaData(FileMetaData* file_meta);
|
|
|
|
|
2014-10-17 21:58:30 +00:00
|
|
|
// Update the accumulated stats associated with the current version.
|
|
|
|
// This accumulated stats will be used in compaction.
|
2015-08-04 20:48:16 +00:00
|
|
|
void UpdateAccumulatedStats(bool update_stats);
|
2014-06-24 22:37:06 +00:00
|
|
|
|
2014-01-16 00:23:36 +00:00
|
|
|
// Sort all files for this version based on their file size and
|
2015-09-22 00:16:31 +00:00
|
|
|
// record results in files_by_compaction_pri_. The largest files are listed
|
|
|
|
// first.
|
|
|
|
void UpdateFilesByCompactionPri();
|
2014-01-16 00:23:36 +00:00
|
|
|
|
2014-01-31 23:30:27 +00:00
|
|
|
ColumnFamilyData* cfd_; // ColumnFamilyData to which this Version belongs
|
2014-10-27 22:49:46 +00:00
|
|
|
Logger* info_log_;
|
|
|
|
Statistics* db_statistics_;
|
2014-04-17 21:07:05 +00:00
|
|
|
TableCache* table_cache_;
|
2020-10-15 20:02:44 +00:00
|
|
|
BlobFileCache* blob_file_cache_;
|
2014-04-17 21:07:05 +00:00
|
|
|
const MergeOperator* merge_operator_;
|
create compressed_levels_ in Version, allocate its space using arena. Make Version::Get, Version::FindFile faster
Summary:
Define CompressedFileMetaData that just contains fd, smallest_slice, largest_slice. Create compressed_levels_ in Version, the space is allocated using arena
This increases file metadata locality and speeds up "Get" and "FindFile".
Benchmarks with in-memory tmpfs show about a 4% improvement under "random read" and a 2% improvement under "read while writing".
benchmark command:
./db_bench --db=/mnt/db/rocksdb --num_levels=6 --key_size=20 --prefix_size=20 --keys_per_prefix=0 --value_size=100 --block_size=4096 --cache_size=17179869184 --cache_numshardbits=6 --compression_type=none --compression_ratio=1 --min_level_to_compress=-1 --disable_seek_compaction=1 --hard_rate_limit=2 --write_buffer_size=134217728 --max_write_buffer_number=2 --level0_file_num_compaction_trigger=8 --target_file_size_base=33554432 --max_bytes_for_level_base=1073741824 --disable_wal=0 --sync=0 --disable_data_sync=1 --verify_checksum=1 --delete_obsolete_files_period_micros=314572800 --max_grandparent_overlap_factor=10 --max_background_compactions=4 --max_background_flushes=0 --level0_slowdown_writes_trigger=16 --level0_stop_writes_trigger=24 --statistics=0 --stats_per_interval=0 --stats_interval=1048576 --histogram=0 --use_plain_table=1 --open_files=-1 --mmap_read=1 --mmap_write=0 --memtablerep=prefix_hash --bloom_bits=10 --bloom_locality=1 --perf_level=0 --benchmarks=readwhilewriting,readwhilewriting,readwhilewriting --use_existing_db=1 --num=52428800 --threads=1 —writes_per_second=81920
Read Random:
From 1.8363 ms/op, improve to 1.7587 ms/op.
Read while writing:
From 2.985 ms/op, improve to 2.924 ms/op.
Test Plan:
make all check
Reviewers: ljin, haobo, yhchiang, sdong
Reviewed By: sdong
Subscribers: dhruba, igor
Differential Revision: https://reviews.facebook.net/D19419
2014-07-10 05:14:39 +00:00
|
|
|
|
2014-10-31 15:48:19 +00:00
|
|
|
VersionStorageInfo storage_info_;
|
2011-03-18 22:37:00 +00:00
|
|
|
VersionSet* vset_; // VersionSet to which this Version belongs
|
|
|
|
Version* next_; // Next version in linked list
|
2011-05-21 02:17:43 +00:00
|
|
|
Version* prev_; // Previous version in linked list
|
2011-03-18 22:37:00 +00:00
|
|
|
int refs_; // Number of live refs to this version
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations and OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether it is retryable or not, the scope (i.e., fault domain) of the error, etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy, etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
2019-12-13 22:47:08 +00:00
|
|
|
const FileOptions file_options_;
|
2018-05-21 21:33:55 +00:00
|
|
|
const MutableCFOptions mutable_cf_options_;
|
2020-06-09 23:49:07 +00:00
|
|
|
// Cached value to avoid recomputing it on every read.
|
|
|
|
const size_t max_file_size_for_l0_meta_pin_;
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-10-19 21:00:53 +00:00
|
|
|
// A version number that uniquely represents this version. This is
|
|
|
|
// used for debugging and logging purposes only.
|
|
|
|
uint64_t version_number_;
|
2020-09-08 17:49:01 +00:00
|
|
|
std::shared_ptr<IOTracer> io_tracer_;
|
2012-10-19 21:00:53 +00:00
|
|
|
|
2019-12-13 22:47:08 +00:00
|
|
|
Version(ColumnFamilyData* cfd, VersionSet* vset, const FileOptions& file_opt,
|
2020-09-08 17:49:01 +00:00
|
|
|
MutableCFOptions mutable_cf_options,
|
|
|
|
const std::shared_ptr<IOTracer>& io_tracer,
|
|
|
|
uint64_t version_number = 0);
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2014-10-28 16:59:56 +00:00
|
|
|
~Version();
|
2012-11-01 05:01:57 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// No copying allowed
|
2019-09-12 01:07:12 +00:00
|
|
|
Version(const Version&) = delete;
|
|
|
|
void operator=(const Version&) = delete;
|
2011-03-18 22:37:00 +00:00
|
|
|
};
|
|
|
|
|
2018-04-06 02:49:06 +00:00
|
|
|
struct ObsoleteFileInfo {
|
|
|
|
FileMetaData* metadata;
|
|
|
|
std::string path;
|
|
|
|
|
|
|
|
ObsoleteFileInfo() noexcept : metadata(nullptr) {}
|
|
|
|
ObsoleteFileInfo(FileMetaData* f, const std::string& file_path)
|
|
|
|
: metadata(f), path(file_path) {}
|
|
|
|
|
|
|
|
ObsoleteFileInfo(const ObsoleteFileInfo&) = delete;
|
|
|
|
ObsoleteFileInfo& operator=(const ObsoleteFileInfo&) = delete;
|
|
|
|
|
|
|
|
ObsoleteFileInfo(ObsoleteFileInfo&& rhs) noexcept :
|
|
|
|
ObsoleteFileInfo() {
|
|
|
|
*this = std::move(rhs);
|
|
|
|
}
|
|
|
|
|
|
|
|
ObsoleteFileInfo& operator=(ObsoleteFileInfo&& rhs) noexcept {
|
|
|
|
path = std::move(rhs.path);
|
|
|
|
metadata = rhs.metadata;
|
|
|
|
rhs.metadata = nullptr;
|
|
|
|
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
|
|
|
void DeleteMetadata() {
|
|
|
|
delete metadata;
|
|
|
|
metadata = nullptr;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2020-04-30 18:23:32 +00:00
|
|
|
class ObsoleteBlobFileInfo {
|
|
|
|
public:
|
|
|
|
ObsoleteBlobFileInfo(uint64_t blob_file_number, std::string path)
|
|
|
|
: blob_file_number_(blob_file_number), path_(std::move(path)) {}
|
|
|
|
|
|
|
|
uint64_t GetBlobFileNumber() const { return blob_file_number_; }
|
|
|
|
const std::string& GetPath() const { return path_; }
|
|
|
|
|
|
|
|
private:
|
|
|
|
uint64_t blob_file_number_;
|
|
|
|
std::string path_;
|
|
|
|
};
|
|
|
|
|
2018-08-20 21:54:03 +00:00
|
|
|
class BaseReferencedVersionBuilder;
|
|
|
|
|
2019-06-04 17:51:22 +00:00
|
|
|
class AtomicGroupReadBuffer {
|
|
|
|
public:
|
2021-03-10 18:58:07 +00:00
|
|
|
AtomicGroupReadBuffer() = default;
|
2019-06-04 17:51:22 +00:00
|
|
|
Status AddEdit(VersionEdit* edit);
|
|
|
|
void Clear();
|
|
|
|
bool IsFull() const;
|
|
|
|
bool IsEmpty() const;
|
|
|
|
|
|
|
|
uint64_t TEST_read_edits_in_atomic_group() const {
|
|
|
|
return read_edits_in_atomic_group_;
|
|
|
|
}
|
|
|
|
std::vector<VersionEdit>& replay_buffer() { return replay_buffer_; }
|
|
|
|
|
|
|
|
private:
|
|
|
|
uint64_t read_edits_in_atomic_group_ = 0;
|
|
|
|
std::vector<VersionEdit> replay_buffer_;
|
|
|
|
};
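The following is an illustrative sketch, not code from this header, of how such a buffer might be driven while replaying a MANIFEST; ReadNextEdit, ApplyGroup, and ApplySingle are hypothetical stand-ins for the real replay machinery.
```
// Sketch: buffer edits that belong to an atomic group and apply them together.
Status ReplayWithAtomicGroups(AtomicGroupReadBuffer& buffer) {
  VersionEdit edit;
  while (ReadNextEdit(&edit)) {            // hypothetical: decode the next edit
    Status s = buffer.AddEdit(&edit);
    if (!s.ok()) {
      return s;                            // e.g. a corrupted atomic group
    }
    if (buffer.IsFull()) {
      ApplyGroup(buffer.replay_buffer());  // whole group read; apply as a unit
      buffer.Clear();
    } else if (buffer.IsEmpty()) {
      ApplySingle(edit);                   // edit was not part of an atomic group
    }
  }
  return Status::OK();
}
```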
|
|
|
|
|
2019-05-30 23:09:45 +00:00
|
|
|
// VersionSet is the collection of versions of all the column families of the
|
|
|
|
// database. Each database owns one VersionSet. A VersionSet has access to all
|
|
|
|
// column families via ColumnFamilySet, i.e., the set of column families.
|
2011-03-18 22:37:00 +00:00
|
|
|
class VersionSet {
|
|
|
|
public:
|
2016-09-23 23:34:04 +00:00
|
|
|
VersionSet(const std::string& dbname, const ImmutableDBOptions* db_options,
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations and OS-related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether it's retryable or not, the scope (i.e., fault domain) of the error, etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy, etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two classes: PosixEnv and PosixFileSystem. PosixEnv is defined as a subclass of CompositeEnvWrapper, and threading/time functions are overridden with Posix-specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return codes to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
2019-12-13 22:47:08 +00:00
|
|
|
const FileOptions& file_options, Cache* table_cache,
|
2016-06-21 01:01:03 +00:00
|
|
|
WriteBufferManager* write_buffer_manager,
|
2019-06-13 22:39:52 +00:00
|
|
|
WriteController* write_controller,
|
2020-08-13 00:28:10 +00:00
|
|
|
BlockCacheTracer* const block_cache_tracer,
|
2021-06-10 18:01:44 +00:00
|
|
|
const std::shared_ptr<IOTracer>& io_tracer,
|
|
|
|
const std::string& db_session_id);
|
2019-09-12 01:07:12 +00:00
|
|
|
// No copying allowed
|
|
|
|
VersionSet(const VersionSet&) = delete;
|
|
|
|
void operator=(const VersionSet&) = delete;
|
|
|
|
|
2019-03-26 23:41:31 +00:00
|
|
|
virtual ~VersionSet();
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2020-10-24 05:48:00 +00:00
|
|
|
Status LogAndApplyToDefaultColumnFamily(
|
|
|
|
VersionEdit* edit, InstrumentedMutex* mu,
|
|
|
|
FSDirectory* db_directory = nullptr, bool new_descriptor_log = false,
|
|
|
|
const ColumnFamilyOptions* column_family_options = nullptr) {
|
|
|
|
ColumnFamilyData* default_cf = GetColumnFamilySet()->GetDefault();
|
|
|
|
const MutableCFOptions* cf_options =
|
|
|
|
default_cf->GetLatestMutableCFOptions();
|
|
|
|
return LogAndApply(default_cf, *cf_options, edit, mu, db_directory,
|
|
|
|
new_descriptor_log, column_family_options);
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Apply *edit to the current version to form a new descriptor that
|
|
|
|
// is both saved to persistent state and installed as the new
|
2011-09-01 19:08:02 +00:00
|
|
|
// current version. Will release *mu while actually writing to the file.
|
2014-02-28 22:05:11 +00:00
|
|
|
// column_family_options has to be set if edit is column family add
|
2011-09-01 19:08:02 +00:00
|
|
|
// REQUIRES: *mu is held on entry.
|
|
|
|
// REQUIRES: no other thread concurrently calls LogAndApply()
|
2014-10-27 22:49:46 +00:00
|
|
|
Status LogAndApply(
|
|
|
|
ColumnFamilyData* column_family_data,
|
|
|
|
const MutableCFOptions& mutable_cf_options, VersionEdit* edit,
|
2020-03-03 00:14:00 +00:00
|
|
|
InstrumentedMutex* mu, FSDirectory* db_directory = nullptr,
|
2014-10-27 22:49:46 +00:00
|
|
|
bool new_descriptor_log = false,
|
2016-07-06 01:09:59 +00:00
|
|
|
const ColumnFamilyOptions* column_family_options = nullptr) {
|
2018-10-16 02:59:20 +00:00
|
|
|
autovector<ColumnFamilyData*> cfds;
|
|
|
|
cfds.emplace_back(column_family_data);
|
|
|
|
autovector<const MutableCFOptions*> mutable_cf_options_list;
|
|
|
|
mutable_cf_options_list.emplace_back(&mutable_cf_options);
|
|
|
|
autovector<autovector<VersionEdit*>> edit_lists;
|
|
|
|
autovector<VersionEdit*> edit_list;
|
|
|
|
edit_list.emplace_back(edit);
|
|
|
|
edit_lists.emplace_back(edit_list);
|
2018-06-28 19:16:10 +00:00
|
|
|
return LogAndApply(cfds, mutable_cf_options_list, edit_lists, mu,
|
2016-07-06 01:09:59 +00:00
|
|
|
db_directory, new_descriptor_log, column_family_options);
|
|
|
|
}
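For orientation, a hedged sketch of the calling convention spelled out in the comment above; `mutex_`, `versions_`, `cfd`, and `db_directory_` stand in for members a DBImpl-style caller would already have and are not defined here.
```
// Sketch: apply a single edit to one column family under the DB mutex.
VersionEdit edit;
edit.SetColumnFamily(cfd->GetID());
// ... record table/blob file additions and deletions on `edit` ...

mutex_.Lock();  // REQUIRES: *mu is held on entry; released while the MANIFEST is written
Status s = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(), &edit,
                                  &mutex_, db_directory_);
mutex_.Unlock();
```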
|
|
|
|
// The batch version. If edit_list.size() > 1, caller must ensure that
|
|
|
|
// no edit in the list is a column family add or drop
|
|
|
|
Status LogAndApply(
|
|
|
|
ColumnFamilyData* column_family_data,
|
|
|
|
const MutableCFOptions& mutable_cf_options,
|
|
|
|
const autovector<VersionEdit*>& edit_list, InstrumentedMutex* mu,
|
2020-03-03 00:14:00 +00:00
|
|
|
FSDirectory* db_directory = nullptr, bool new_descriptor_log = false,
|
2020-10-27 01:20:43 +00:00
|
|
|
const ColumnFamilyOptions* column_family_options = nullptr,
|
|
|
|
const std::function<void(const Status&)>& manifest_wcb = {}) {
|
2018-10-16 02:59:20 +00:00
|
|
|
autovector<ColumnFamilyData*> cfds;
|
|
|
|
cfds.emplace_back(column_family_data);
|
|
|
|
autovector<const MutableCFOptions*> mutable_cf_options_list;
|
|
|
|
mutable_cf_options_list.emplace_back(&mutable_cf_options);
|
|
|
|
autovector<autovector<VersionEdit*>> edit_lists;
|
|
|
|
edit_lists.emplace_back(edit_list);
|
2018-06-28 19:16:10 +00:00
|
|
|
return LogAndApply(cfds, mutable_cf_options_list, edit_lists, mu,
|
2020-10-27 01:20:43 +00:00
|
|
|
db_directory, new_descriptor_log, column_family_options,
|
|
|
|
{manifest_wcb});
|
2018-06-28 19:16:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// The across-multi-cf batch version. If edit_lists contains more than
|
|
|
|
// one version edit, the caller must ensure that no edit in the list is a column
|
|
|
|
// family manipulation.
|
2019-03-26 23:41:31 +00:00
|
|
|
virtual Status LogAndApply(
|
2018-10-16 02:59:20 +00:00
|
|
|
const autovector<ColumnFamilyData*>& cfds,
|
|
|
|
const autovector<const MutableCFOptions*>& mutable_cf_options_list,
|
|
|
|
const autovector<autovector<VersionEdit*>>& edit_lists,
|
2020-03-03 00:14:00 +00:00
|
|
|
InstrumentedMutex* mu, FSDirectory* db_directory = nullptr,
|
2018-10-16 02:59:20 +00:00
|
|
|
bool new_descriptor_log = false,
|
2020-10-27 01:20:43 +00:00
|
|
|
const ColumnFamilyOptions* new_cf_options = nullptr,
|
|
|
|
const std::vector<std::function<void(const Status&)>>& manifest_wcbs =
|
|
|
|
{});
|
2014-01-10 23:12:34 +00:00
|
|
|
|
Introduce a new storage specific Env API (#5761)
2019-12-13 22:47:08 +00:00
|
|
|
static Status GetCurrentManifestPath(const std::string& dbname,
|
|
|
|
FileSystem* fs,
|
2019-05-22 16:17:39 +00:00
|
|
|
std::string* manifest_filename,
|
|
|
|
uint64_t* manifest_file_number);
|
2021-07-16 00:48:17 +00:00
|
|
|
void WakeUpWaitingManifestWriters();
|
2019-03-26 23:41:31 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Recover the last saved descriptor from persistent storage.
|
2014-04-09 16:56:17 +00:00
|
|
|
// If read_only == true, Recover() will not complain if some column families
|
|
|
|
// are not opened
|
|
|
|
Status Recover(const std::vector<ColumnFamilyDescriptor>& column_families,
|
2019-09-03 15:50:47 +00:00
|
|
|
bool read_only = false, std::string* db_id = nullptr);
|
2014-01-22 19:44:53 +00:00
|
|
|
|
2020-03-21 02:17:54 +00:00
|
|
|
Status TryRecover(const std::vector<ColumnFamilyDescriptor>& column_families,
|
2020-07-10 20:39:47 +00:00
|
|
|
bool read_only,
|
|
|
|
const std::vector<std::string>& files_in_dbname,
|
|
|
|
std::string* db_id, bool* has_missing_table_file);
|
2020-03-21 02:17:54 +00:00
|
|
|
|
|
|
|
// Try to recover the version set to the most recent consistent state
|
|
|
|
// recorded in the specified manifest.
|
|
|
|
Status TryRecoverFromOneManifest(
|
|
|
|
const std::string& manifest_path,
|
|
|
|
const std::vector<ColumnFamilyDescriptor>& column_families,
|
|
|
|
bool read_only, std::string* db_id, bool* has_missing_table_file);
|
|
|
|
|
2014-01-22 19:44:53 +00:00
|
|
|
// Reads a manifest file and returns a list of column families in
|
|
|
|
// column_families.
|
|
|
|
static Status ListColumnFamilies(std::vector<std::string>* column_families,
|
Introduce a new storage specific Env API (#5761)
2019-12-13 22:47:08 +00:00
|
|
|
const std::string& dbname, FileSystem* fs);
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2014-04-15 20:39:26 +00:00
|
|
|
#ifndef ROCKSDB_LITE
|
2012-10-31 18:47:18 +00:00
|
|
|
// Try to reduce the number of levels. This call is valid only when
|
|
|
|
// at most one level in the range from the new max level to the old
|
|
|
|
// max level contains files.
|
Make VersionSet::ReduceNumberOfLevels() static
Summary:
A lot of our code implicitly assumes number_levels to be static. ReduceNumberOfLevels() breaks that assumption. For example, after calling ReduceNumberOfLevels(), DBImpl::NumberLevels() will be different from VersionSet::NumberLevels(). This is dangerous. Thankfully, it's not in public headers and is only used from the LDB cmd tool. The LDB tool only uses it statically, i.e., it never calls it on a running DB instance. With this diff, we make it explicitly static. This way, we can assume number_levels to be immutable and not break the assumption that a lot of our code relies upon. The LDB tool can still use the method.
Also, I removed the method from a separate file since it breaks filename completion. version_se<TAB> now completes to "version_set." instead of "version_set" (without the dot). I don't see a big reason that the function should be in a different file.
Test Plan: reduce_levels_test
Reviewers: dhruba, haobo, kailiu, sdong
Reviewed By: kailiu
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15303
2014-01-24 22:57:04 +00:00
|
|
|
// The call is static, since number of levels is immutable during
|
|
|
|
// the lifetime of a RocksDB instance. It reduces number of levels
|
|
|
|
// in a DB by applying changes to manifest.
|
2012-10-31 18:47:18 +00:00
|
|
|
// For example, a db currently has 7 levels [0-6], and a call to
|
|
|
|
// reduce to 5 [0-4] can only be executed when at most one level
|
|
|
|
// among [4-6] contains files.
|
Make VersionSet::ReduceNumberOfLevels() static
2014-01-24 22:57:04 +00:00
|
|
|
static Status ReduceNumberOfLevels(const std::string& dbname,
|
|
|
|
const Options* options,
|
Introduce a new storage specific Env API (#5761)
2019-12-13 22:47:08 +00:00
|
|
|
const FileOptions& file_options,
|
Make VersionSet::ReduceNumberOfLevels() static
2014-01-24 22:57:04 +00:00
|
|
|
int new_levels);
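A minimal sketch of how a tool such as ldb might call this on a closed DB. Since this is an internal header, the snippet is illustrative rather than a supported public API, and the path is a hypothetical placeholder.
```
// Sketch: shrink a 7-level DB to 5 levels; valid only if at most one of
// levels 4-6 currently contains files (see the comment above).
rocksdb::Status ShrinkLevels() {
  rocksdb::Options options;
  options.num_levels = 7;  // the DB's current number of levels
  rocksdb::FileOptions file_options;
  return rocksdb::VersionSet::ReduceNumberOfLevels(
      "/path/to/db", &options, file_options, /*new_levels=*/5);
}
```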
|
2012-10-31 18:47:18 +00:00
|
|
|
|
2020-02-10 23:42:46 +00:00
|
|
|
// Get the checksum information of all live files
|
|
|
|
Status GetLiveFilesChecksumInfo(FileChecksumList* checksum_list);
|
|
|
|
|
2014-04-15 20:39:26 +00:00
|
|
|
// Print the contents of the manifest (for debugging)
|
|
|
|
Status DumpManifest(Options& options, std::string& manifestFileName,
|
Added JSON manifest dump option to ldb command
Summary:
Added a new flag --json to the ldb manifest_dump command
that prints out the version edits as JSON objects for easier
reading and parsing of information.
Test Plan:
**Sample usage: **
```
./ldb manifest_dump --json --path=path/to/manifest/file
```
**Sample output:**
```
{"EditNumber": 0, "Comparator": "leveldb.BytewiseComparator", "ColumnFamily": 0}
{"EditNumber": 1, "LogNumber": 0, "ColumnFamily": 0}
{"EditNumber": 2, "LogNumber": 4, "PrevLogNumber": 0, "NextFileNumber": 7, "LastSeq": 35356, "AddedFiles": [{"Level": 0, "FileNumber": 5, "FileSize": 1949284, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
{"EditNumber": 13, "PrevLogNumber": 0, "NextFileNumber": 36, "LastSeq": 290994, "DeletedFiles": [{"Level": 0, "FileNumber": 17}, {"Level": 0, "FileNumber": 20}, {"Level": 0, "FileNumber": 22}, {"Level": 0, "FileNumber": 24}, {"Level": 1, "FileNumber": 13}, {"Level": 1, "FileNumber": 14}, {"Level": 1, "FileNumber": 15}, {"Level": 1, "FileNumber": 18}], "AddedFiles": [{"Level": 1, "FileNumber": 25, "FileSize": 2114340, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 26, "FileSize": 2115213, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 27, "FileSize": 2114807, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 30, "FileSize": 2115271, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 31, "FileSize": 2115165, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 32, "FileSize": 2114683, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 35, "FileSize": 1757512, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
```
Reviewers: sdong, anthony, yhchiang, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D41727
2015-07-17 17:07:40 +00:00
|
|
|
bool verbose, bool hex = false, bool json = false);
|
2014-04-15 20:39:26 +00:00
|
|
|
|
|
|
|
#endif // ROCKSDB_LITE
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Return the current manifest file number
|
2014-11-04 01:45:55 +00:00
|
|
|
uint64_t manifest_file_number() const { return manifest_file_number_; }
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2016-06-10 02:03:10 +00:00
|
|
|
uint64_t options_file_number() const { return options_file_number_; }
|
|
|
|
|
2014-11-04 01:45:55 +00:00
|
|
|
uint64_t pending_manifest_file_number() const {
|
2014-03-18 04:50:15 +00:00
|
|
|
return pending_manifest_file_number_;
|
|
|
|
}
|
|
|
|
|
2014-11-07 23:44:12 +00:00
|
|
|
uint64_t current_next_file_number() const { return next_file_number_.load(); }
|
2014-11-07 19:50:34 +00:00
|
|
|
|
Skip deleted WALs during recovery
Summary:
This patch records the min log number to keep in the manifest while flushing SST files, so that WALs older than that number can be ignored during recovery. This is to avoid scenarios where there is a gap in the WAL files fed to the recovery procedure. The gap could happen, for example, due to out-of-order WAL deletion. Such a gap could cause problems in 2PC recovery, where the prepare and commit entries are placed into two separate WALs; a gap in the WALs could result in not processing the WAL with the commit entry and hence break the 2PC recovery logic.
Before the commit, for the 2PC case, we determined which log number to keep in FindObsoleteFiles(). We looked at the earliest logs with outstanding prepare entries, or prepare entries whose respective commit or abort are in memtable. With the commit, the same calculation is done while we apply the SST flush. Just before installing the flush file, we precompute the earliest log file to keep after the flush finishes using the same logic (but skipping the memtables just flushed), and record this information in the manifest entry for this new flushed SST file. This pre-computed value is also remembered in memory, and will later be used to determine whether a log file can be deleted. This value is unlikely to change until the next flush because the commit entry will stay in memtable. (In WritePrepared, we could have removed the older log files as soon as all prepared entries are committed. It's not yet done anyway. Even if we do it, the only thing we lose with this new approach is earlier log deletion between two flushes, which is not guaranteed to happen anyway because the obsolete file clean-up function is only executed after flush or compaction.)
This min log number to keep is stored in the manifest using the safely-ignore customized field of the AddFile entry, in order to guarantee that a DB generated using a newer release can be opened by previous releases no older than 4.2.
Closes https://github.com/facebook/rocksdb/pull/3765
Differential Revision: D7747618
Pulled By: siying
fbshipit-source-id: d00c92105b4f83852e9754a1b70d6b64cb590729
2018-05-03 22:35:11 +00:00
|
|
|
uint64_t min_log_number_to_keep_2pc() const {
|
|
|
|
return min_log_number_to_keep_2pc_.load();
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Allocate and return a new file number
|
2014-11-11 14:58:47 +00:00
|
|
|
uint64_t NewFileNumber() { return next_file_number_.fetch_add(1); }
|
2012-08-27 06:45:35 +00:00
|
|
|
|
2018-07-27 21:02:07 +00:00
|
|
|
// Reserve n consecutive new file numbers and return the first one
|
|
|
|
uint64_t FetchAddFileNumber(uint64_t n) {
|
|
|
|
return next_file_number_.fetch_add(n);
|
|
|
|
}
|
|
|
|
|
2011-04-12 19:38:58 +00:00
|
|
|
// Return the last sequence number.
|
Fix TSAN build and re-enable the tests (#7386)
Summary:
Resolve TSAN build warnings and re-enable disabled TSAN tests.
Not sure if it's a compiler issue or TSAN check issue. Switching from
conditional operator to if-else mitigated the problem.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7386
Test Plan:
run TSAN check 10 times in circleci.
```
WARNING: ThreadSanitizer: data race (pid=27735)
Atomic write of size 8 at 0x7b54000005e8 by thread T32:
#0 __tsan_atomic64_store <null> (db_test+0x4cee95)
https://github.com/facebook/rocksdb/issues/1 std::__atomic_base<unsigned long>::store(unsigned long, std::memory_order) /usr/bin/../lib/gcc/x86_64-linux-gnu/5.4.0/../../../../include/c++/5.4.0/bits/atomic_base.h:374:2 (db_test+0x78460e)
https://github.com/facebook/rocksdb/issues/2 rocksdb::VersionSet::SetLastSequence(unsigned long) /home/circleci/project/./db/version_set.h:1058:20 (db_test+0x78460e)
...
Previous read of size 8 at 0x7b54000005e8 by thread T31:
#0 bool rocksdb::DBImpl::MultiCFSnapshot<std::unordered_map<unsigned int, rocksdb::DBImpl::MultiGetColumnFamilyData, std::hash<unsigned int>, std::equal_to<unsigned int>, std::allocator<std::pair<unsigned int const, rocksdb::DBImpl::MultiGetColumnFamilyData> > > >(rocksdb::ReadOptions const&, rocksdb::ReadCallback*, std::function<rocksdb::DBImpl::MultiGetColumnFamilyData* (std::unordered_map<unsigned int, rocksdb::DBImpl::MultiGetColumnFamilyData, std::hash<unsigned int>, std::equal_to<unsigned int>, std::allocator<std::pair<unsigned int const, rocksdb::DBImpl::MultiGetColumnFamilyData> > >::iterator&)>&, std::unordered_map<unsigned int, rocksdb::DBImpl::MultiGetColumnFamilyData, std::hash<unsigned int>, std::equal_to<unsigned int>, std::allocator<std::pair<unsigned int const, rocksdb::DBImpl::MultiGetColumnFamilyData> > >*, unsigned long*) /home/circleci/project/db/db_impl/db_impl.cc (db_test+0x715087)
```
Reviewed By: ltamasi
Differential Revision: D23725226
Pulled By: jay-zhuang
fbshipit-source-id: a6d662a5ea68111246cd32ec95f3411a25f76bc6
2020-09-25 21:44:58 +00:00
|
|
|
uint64_t LastSequence() const {
|
2013-12-20 17:57:58 +00:00
|
|
|
return last_sequence_.load(std::memory_order_acquire);
|
|
|
|
}
|
2011-04-12 19:38:58 +00:00
|
|
|
|
2017-06-24 21:06:43 +00:00
|
|
|
// Note: memory_order_acquire must be sufficient.
|
2017-11-11 01:18:01 +00:00
|
|
|
uint64_t LastAllocatedSequence() const {
|
|
|
|
return last_allocated_sequence_.load(std::memory_order_seq_cst);
|
2017-06-24 21:06:43 +00:00
|
|
|
}
|
|
|
|
|
2017-12-01 07:39:56 +00:00
|
|
|
// Note: memory_order_acquire must be sufficient.
|
Fix TSAN build and re-enable the tests (#7386)
2020-09-25 21:44:58 +00:00
|
|
|
uint64_t LastPublishedSequence() const {
|
2017-12-01 07:39:56 +00:00
|
|
|
return last_published_sequence_.load(std::memory_order_seq_cst);
|
|
|
|
}
|
|
|
|
|
2011-04-12 19:38:58 +00:00
|
|
|
// Set the last sequence number to s.
|
Fix TSAN build and re-enable the tests (#7386)
2020-09-25 21:44:58 +00:00
|
|
|
void SetLastSequence(uint64_t s) {
|
2011-04-12 19:38:58 +00:00
|
|
|
assert(s >= last_sequence_);
|
2018-03-08 18:18:34 +00:00
|
|
|
// Last visible sequence must always be less than or equal to the last written seq
|
2017-11-11 01:18:01 +00:00
|
|
|
assert(!db_options_->two_write_queues || s <= last_allocated_sequence_);
|
2013-12-20 17:57:58 +00:00
|
|
|
last_sequence_.store(s, std::memory_order_release);
|
2011-04-12 19:38:58 +00:00
|
|
|
}
|
|
|
|
|
2017-12-01 07:39:56 +00:00
|
|
|
// Note: memory_order_release must be sufficient
|
Fix TSAN build and re-enable the tests (#7386)
2020-09-25 21:44:58 +00:00
|
|
|
void SetLastPublishedSequence(uint64_t s) {
|
2017-12-01 07:39:56 +00:00
|
|
|
assert(s >= last_published_sequence_);
|
|
|
|
last_published_sequence_.store(s, std::memory_order_seq_cst);
|
|
|
|
}
|
|
|
|
|
2017-06-24 21:06:43 +00:00
|
|
|
// Note: memory_order_release must be sufficient
|
2017-11-11 01:18:01 +00:00
|
|
|
void SetLastAllocatedSequence(uint64_t s) {
|
|
|
|
assert(s >= last_allocated_sequence_);
|
|
|
|
last_allocated_sequence_.store(s, std::memory_order_seq_cst);
|
2017-06-24 21:06:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Note: memory_order_release must be sufficient
|
2017-11-11 01:18:01 +00:00
|
|
|
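// Reserves `s` consecutive sequence numbers and returns the previous last
// allocated value, i.e. the newly allocated sequences are [ret + 1, ret + s].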
uint64_t FetchAddLastAllocatedSequence(uint64_t s) {
|
|
|
|
return last_allocated_sequence_.fetch_add(s, std::memory_order_seq_cst);
|
2017-06-24 21:06:43 +00:00
|
|
|
}
|
|
|
|
|
2011-09-01 19:08:02 +00:00
|
|
|
// Mark the specified file number as used.
|
2017-10-10 20:07:00 +00:00
|
|
|
// REQUIRES: this is only called during single-threaded recovery or repair.
|
|
|
|
void MarkFileNumberUsed(uint64_t number);
|
2011-09-01 19:08:02 +00:00
|
|
|
|
Skip deleted WALs during recovery
2018-05-03 22:35:11 +00:00
|
|
|
// Record the specified number as the minimum log number to keep (2PC mode)
|
|
|
|
// REQUIRES: this is only called during single-threaded recovery or repair, or
|
|
|
|
// from ::LogAndApply where the global mutex is held.
|
|
|
|
void MarkMinLogNumberToKeep2PC(uint64_t number);
|
|
|
|
|
2011-04-12 19:38:58 +00:00
|
|
|
// Return the log file number for the log file that is currently
|
|
|
|
// being compacted, or zero if there is no such log file.
|
2014-11-04 01:45:55 +00:00
|
|
|
uint64_t prev_log_number() const { return prev_log_number_; }
|
2011-04-12 19:38:58 +00:00
|
|
|
|
Skip deleted WALs during recovery
2018-05-03 22:35:11 +00:00
|
|
|
// Returns the minimum log number which still has data not flushed to any SST
|
|
|
|
// file.
|
|
|
|
// In non-2PC mode, all the log numbers smaller than this number can be safely
|
|
|
|
// deleted.
|
|
|
|
uint64_t MinLogNumberWithUnflushedData() const {
|
|
|
|
return PreComputeMinLogNumberWithUnflushedData(nullptr);
|
|
|
|
}
|
|
|
|
// Returns the minimum log number which still has data not flushed to any SST
|
Track WAL obsoletion when updating empty CF's log number (#7781)
Summary:
In the write path, there is an optimization: when a new WAL is created during SwitchMemtable, we update the internal log number of the empty column families to the new WAL. `FindObsoleteFiles` marks a WAL as obsolete if the WAL's log number is less than `VersionSet::MinLogNumberWithUnflushedData`. After updating the empty column families' internal log number, `VersionSet::MinLogNumberWithUnflushedData` might change, so some WALs might become obsolete to be purged from disk.
For example, consider there are 3 column families: 0, 1, 2:
1. initially, all the column families' log number is 1;
2. write some data to cf0, and flush cf0, but the flush is pending;
3. now a new WAL 2 is created;
4. write data to cf1 and WAL 2, now cf0's log number is 1, cf1's log number is 2, cf2's log number is 2 (because cf1 and cf2 are empty, so their log numbers will be set to the highest log number);
5. now cf0's flush hasn't finished, flush cf1, a new WAL 3 is created, and cf1's flush finishes, now cf0's log number is 1, cf1's log number is 3, cf2's log number is 3, since WAL 1 still contains data for the unflushed cf0, no WAL can be deleted from disk;
6. now cf0's flush finishes, cf0's log number is 2 (because when cf0 was switching memtable, WAL 3 does not exist yet), cf1's log number is 3, cf2's log number is 3, so WAL 1 can be purged from disk now, but WAL 2 still cannot because `MinLogNumberToKeep()` is 2;
7. write data to cf2 and WAL 3, because cf0 is empty, its log number is updated to 3, so now cf0's log number is 3, cf1's log number is 3, cf2's log number is 3;
8. now if the background threads want to purge obsolete files from disk, WAL 2 can be purged because `MinLogNumberToKeep()` is 3. But there are only two flush results written to MANIFEST: the first is for flushing cf1, and the `MinLogNumberToKeep` is 1, the second is for flushing cf0, and the `MinLogNumberToKeep` is 2. So without this PR, if the DB crashes at this point and try to recover, `WalSet` will still expect WAL 2 to exist.
When WAL tracking is enabled, we assume WALs will only become obsolete after a flush result is written to MANIFEST in `MemtableList::TryInstallMemtableFlushResults` (or its atomic flush counterpart). The above situation breaks this assumption.
This PR tracks WAL obsoletion if necessary before updating the empty column families' log numbers.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7781
Test Plan:
watch existing tests and stress tests to pass.
`make -j48 blackbox_crash_test` on devserver
Reviewed By: ltamasi
Differential Revision: D25631695
Pulled By: cheng-chang
fbshipit-source-id: ca7fff967bdb42204b84226063d909893bc0a4ec
2020-12-19 05:33:20 +00:00
|
|
|
// file.
|
|
|
|
// Empty column families' log number is considered to be
|
|
|
|
// new_log_number_for_empty_cf.
|
|
|
|
uint64_t PreComputeMinLogNumberWithUnflushedData(
|
|
|
|
uint64_t new_log_number_for_empty_cf) const {
|
|
|
|
uint64_t min_log_num = port::kMaxUint64;
|
|
|
|
for (auto cfd : *column_family_set_) {
|
|
|
|
// It's safe to ignore dropped column families here:
|
|
|
|
// cfd->IsDropped() becomes true after the drop is persisted in MANIFEST.
|
|
|
|
uint64_t num =
|
|
|
|
cfd->IsEmpty() ? new_log_number_for_empty_cf : cfd->GetLogNumber();
|
|
|
|
if (min_log_num > num && !cfd->IsDropped()) {
|
|
|
|
min_log_num = num;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return min_log_num;
|
|
|
|
}
|
|
|
|
// Returns the minimum log number which still has data not flushed to any SST
|
Skip deleted WALs during recovery
2018-05-03 22:35:11 +00:00
|
|
|
// file, except data from `cfd_to_skip`.
|
|
|
|
uint64_t PreComputeMinLogNumberWithUnflushedData(
|
|
|
|
const ColumnFamilyData* cfd_to_skip) const {
|
Track WAL obsoletion when updating empty CF's log number (#7781)
2020-12-19 05:33:20 +00:00
|
|
|
uint64_t min_log_num = port::kMaxUint64;
|
2014-01-28 19:05:04 +00:00
|
|
|
for (auto cfd : *column_family_set_) {
|
Skip deleted WALs during recovery
Summary:
This patch record min log number to keep to the manifest while flushing SST files to ignore them and any WAL older than them during recovery. This is to avoid scenarios when we have a gap between the WAL files are fed to the recovery procedure. The gap could happen by for example out-of-order WAL deletion. Such gap could cause problems in 2PC recovery where the prepared and commit entry are placed into two separate WAL and gap in the WALs could result into not processing the WAL with the commit entry and hence breaking the 2PC recovery logic.
Before the commit, for 2PC case, we determined which log number to keep in FindObsoleteFiles(). We looked at the earliest logs with outstanding prepare entries, or prepare entries whose respective commit or abort are in memtable. With the commit, the same calculation is done while we apply the SST flush. Just before installing the flush file, we precompute the earliest log file to keep after the flush finishes using the same logic (but skipping the memtables just flushed), record this information to the manifest entry for this new flushed SST file. This pre-computed value is also remembered in memory, and will later be used to determine whether a log file can be deleted. This value is unlikely to change until next flush because the commit entry will stay in memtable. (In WritePrepared, we could have removed the older log files as soon as all prepared entries are committed. It's not yet done anyway. Even if we do it, the only thing we loss with this new approach is earlier log deletion between two flushes, which does not guarantee to happen anyway because the obsolete file clean-up function is only executed after flush or compaction)
This min log number to keep is stored in the manifest using the safely-ignore customized field of AddFile entry, in order to guarantee that the DB generated using newer release can be opened by previous releases no older than 4.2.
Closes https://github.com/facebook/rocksdb/pull/3765
Differential Revision: D7747618
Pulled By: siying
fbshipit-source-id: d00c92105b4f83852e9754a1b70d6b64cb590729
2018-05-03 22:35:11 +00:00
|
|
|
if (cfd == cfd_to_skip) {
|
|
|
|
continue;
|
|
|
|
}
|
2015-07-02 21:27:00 +00:00
|
|
|
// It's safe to ignore dropped column families here:
|
|
|
|
// cfd->IsDropped() becomes true after the drop is persisted in MANIFEST.
|
|
|
|
if (min_log_num > cfd->GetLogNumber() && !cfd->IsDropped()) {
|
2014-01-29 21:28:50 +00:00
|
|
|
min_log_num = cfd->GetLogNumber();
|
2014-01-28 19:05:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return min_log_num;
|
|
|
|
}
|
2020-11-17 23:54:49 +00:00
|
|
|
// Returns the minimum log number which still has data not flushed to any SST
|
|
|
|
// file, except data from `cfds_to_skip`.
|
|
|
|
uint64_t PreComputeMinLogNumberWithUnflushedData(
|
|
|
|
const std::unordered_set<const ColumnFamilyData*>& cfds_to_skip) const {
|
|
|
|
uint64_t min_log_num = port::kMaxUint64;
|
|
|
|
for (auto cfd : *column_family_set_) {
|
|
|
|
if (cfds_to_skip.count(cfd)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
// It's safe to ignore dropped column families here:
|
|
|
|
// cfd->IsDropped() becomes true after the drop is persisted in MANIFEST.
|
|
|
|
if (min_log_num > cfd->GetLogNumber() && !cfd->IsDropped()) {
|
|
|
|
min_log_num = cfd->GetLogNumber();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return min_log_num;
|
|
|
|
}
|
2014-01-28 19:05:04 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Create an iterator that reads over the compaction inputs for "*c".
|
|
|
|
// The caller should delete the iterator when no longer needed.
|
2020-08-03 22:21:56 +00:00
|
|
|
// @param read_options Must outlive the returned iterator.
|
2017-11-17 01:46:43 +00:00
|
|
|
InternalIterator* MakeInputIterator(
|
2020-08-03 22:21:56 +00:00
|
|
|
const ReadOptions& read_options, const Compaction* c,
|
|
|
|
RangeDelAggregator* range_del_agg,
|
Introduce a new storage specific Env API (#5761)
2019-12-13 22:47:08 +00:00
|
|
|
const FileOptions& file_options_compactions);

  // Add all files listed in any live version to *live_table_files and
  // *live_blob_files. Note that these lists may contain duplicates.
  void AddLiveFiles(std::vector<uint64_t>* live_table_files,
                    std::vector<uint64_t>* live_blob_files) const;
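
  // Example (illustrative sketch): since the lists may contain duplicates,
  // callers typically sort and de-duplicate before using them, e.g. when
  // deciding which on-disk files are safe to delete:
  //
  //   std::vector<uint64_t> live_tables, live_blobs;
  //   versions->AddLiveFiles(&live_tables, &live_blobs);
  //   std::sort(live_tables.begin(), live_tables.end());
  //   live_tables.erase(
  //       std::unique(live_tables.begin(), live_tables.end()),
  //       live_tables.end());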

  // Return the approximate size of data to be scanned for range [start, end)
  // in levels [start_level, end_level). If end_level == -1 it will search
  // through all non-empty levels
  uint64_t ApproximateSize(const SizeApproximationOptions& options, Version* v,
                           const Slice& start, const Slice& end,
                           int start_level, int end_level,
                           TableReaderCaller caller);
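
  // Example (hypothetical sketch; the caller name `versions`, the Version
  // pointer `v`, and the key Slices are assumptions):
  //
  //   SizeApproximationOptions size_opts;
  //   uint64_t bytes = versions->ApproximateSize(
  //       size_opts, v, start_key, end_key, /*start_level=*/0,
  //       /*end_level=*/-1, TableReaderCaller::kUserApproximateSize);
  //   // `bytes` estimates the data scanned for [start_key, end_key) across
  //   // all non-empty levels.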

  // Return the size of the current manifest file
  uint64_t manifest_file_size() const { return manifest_file_size_; }

  Status GetMetadataForFile(uint64_t number, int* filelevel,
                            FileMetaData** metadata, ColumnFamilyData** cfd);

  // This function doesn't support leveldb SST filenames
  void GetLiveFilesMetaData(std::vector<LiveFileMetaData> *metadata);

  void AddObsoleteBlobFile(uint64_t blob_file_number, std::string path) {
    assert(table_cache_);

    table_cache_->Erase(GetSlice(&blob_file_number));

    obsolete_blob_files_.emplace_back(blob_file_number, std::move(path));
  }

  void GetObsoleteFiles(std::vector<ObsoleteFileInfo>* files,
                        std::vector<ObsoleteBlobFileInfo>* blob_files,
                        std::vector<std::string>* manifest_filenames,
                        uint64_t min_pending_output);
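
  // Example (illustrative sketch): file deletion code could collect the
  // accumulated obsolete files like this; `min_pending_output` would come
  // from the caller's bookkeeping of in-flight file numbers.
  //
  //   std::vector<ObsoleteFileInfo> table_files;
  //   std::vector<ObsoleteBlobFileInfo> blob_files;
  //   std::vector<std::string> manifests;
  //   versions->GetObsoleteFiles(&table_files, &blob_files, &manifests,
  //                              min_pending_output);
  //   // Files numbered >= min_pending_output are held back for later passes.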

  ColumnFamilySet* GetColumnFamilySet() { return column_family_set_.get(); }
  const FileOptions& file_options() { return file_options_; }
  void ChangeFileOptions(const MutableDBOptions& new_options) {
    file_options_.writable_file_max_buffer_size =
        new_options.writable_file_max_buffer_size;
  }

  const ImmutableDBOptions* db_options() const { return db_options_; }

  static uint64_t GetNumLiveVersions(Version* dummy_versions);

  static uint64_t GetTotalSstFilesSize(Version* dummy_versions);

  static uint64_t GetTotalBlobFileSize(Version* dummy_versions);

  // Get the IO Status returned by written Manifest.
  const IOStatus& io_status() const { return io_status_; }

  // The returned WalSet needs to be accessed with DB mutex held.
  const WalSet& GetWalSet() const { return wals_; }
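
  // Example (hypothetical sketch; `db_mutex` stands in for the DB mutex owned
  // by the caller):
  //
  //   {
  //     InstrumentedMutexLock l(db_mutex);
  //     const WalSet& wals = versions->GetWalSet();
  //     // Inspect the set of alive WALs while the mutex is held; do not keep
  //     // references to it after releasing the mutex.
  //   }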

  void TEST_CreateAndAppendVersion(ColumnFamilyData* cfd) {
    assert(cfd);

    const auto& mutable_cf_options = *cfd->GetLatestMutableCFOptions();
    Version* const version =
        new Version(cfd, this, file_options_, mutable_cf_options, io_tracer_);

    constexpr bool update_stats = false;
    version->PrepareApply(mutable_cf_options, update_stats);
    AppendVersion(cfd, version);
  }

 protected:
  using VersionBuilderMap =
      std::unordered_map<uint32_t,
                         std::unique_ptr<BaseReferencedVersionBuilder>>;

  struct ManifestWriter;

  friend class Version;
  friend class VersionEditHandler;
  friend class VersionEditHandlerPointInTime;
  friend class DumpManifestHandler;
  friend class DBImpl;
  friend class DBImplReadOnly;

  struct LogReporter : public log::Reader::Reporter {
    Status* status;
    virtual void Corruption(size_t /*bytes*/, const Status& s) override {
      if (status->ok()) {
        *status = s;
      }
    }
  };

  void Reset();

  // Returns approximated offset of a key in a file for a given version.
  uint64_t ApproximateOffsetOf(Version* v, const FdWithKeyRange& f,
                               const Slice& key, TableReaderCaller caller);

  // Returns approximated data size between start and end keys in a file
  // for a given version.
  uint64_t ApproximateSize(Version* v, const FdWithKeyRange& f,
                           const Slice& start, const Slice& end,
                           TableReaderCaller caller);

  struct MutableCFState {
    uint64_t log_number;
    std::string full_history_ts_low;

    explicit MutableCFState() = default;
    explicit MutableCFState(uint64_t _log_number, std::string ts_low)
        : log_number(_log_number), full_history_ts_low(std::move(ts_low)) {}
  };

  // Save current contents to *log
  Status WriteCurrentStateToManifest(
      const std::unordered_map<uint32_t, MutableCFState>& curr_state,
      const VersionEdit& wal_additions, log::Writer* log, IOStatus& io_s);
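
  // Example (illustrative sketch of how a caller might assemble the
  // arguments when rolling over to a new MANIFEST; the accessor names used
  // on `cfd` below are assumptions):
  //
  //   std::unordered_map<uint32_t, MutableCFState> curr_state;
  //   for (auto* cfd : *column_family_set_) {
  //     curr_state.emplace(cfd->GetID(),
  //                        MutableCFState(cfd->GetLogNumber(),
  //                                       cfd->GetFullHistoryTsLow()));
  //   }
  //   VersionEdit wal_additions;  // snapshot of the currently alive WALs
  //   IOStatus io_s;
  //   Status s = WriteCurrentStateToManifest(curr_state, wal_additions,
  //                                          descriptor_log_.get(), io_s);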

  void AppendVersion(ColumnFamilyData* column_family_data, Version* v);

  ColumnFamilyData* CreateColumnFamily(const ColumnFamilyOptions& cf_options,
                                       const VersionEdit* edit);

  Status VerifyFileMetadata(const std::string& fpath,
                            const FileMetaData& meta) const;

  // Protected by DB mutex.
  WalSet wals_;

  std::unique_ptr<ColumnFamilySet> column_family_set_;
  Cache* table_cache_;
  Env* const env_;
  FileSystemPtr const fs_;
  SystemClock* const clock_;
  const std::string dbname_;
  std::string db_id_;
  const ImmutableDBOptions* const db_options_;
  std::atomic<uint64_t> next_file_number_;
  // Any WAL number smaller than this should be ignored during recovery,
  // and is qualified for being deleted in 2PC mode. In non-2PC mode, this
  // number is ignored.
  std::atomic<uint64_t> min_log_number_to_keep_2pc_ = {0};
  uint64_t manifest_file_number_;
  uint64_t options_file_number_;
  uint64_t pending_manifest_file_number_;
  // The last seq visible to reads. It normally indicates the last sequence in
  // the memtable, but when using two write queues it could also indicate the
  // last sequence in the WAL visible to reads.
  std::atomic<uint64_t> last_sequence_;
  // The last seq that is already allocated. It is applicable only when we have
  // two write queues. In that case the seq might or might not have appeared in
  // the memtable, but it is expected to appear in the WAL.
  // We have last_sequence_ <= last_allocated_sequence_.
  std::atomic<uint64_t> last_allocated_sequence_;
  // The last allocated sequence that is also published to the readers. This is
  // applicable only when last_seq_same_as_publish_seq_ is not set. Otherwise
  // last_sequence_ also indicates the last published seq.
  // We have last_sequence_ <= last_published_sequence_ <=
  // last_allocated_sequence_.
  std::atomic<uint64_t> last_published_sequence_;
  uint64_t prev_log_number_;  // 0 or backing store for memtable being compacted

  // Opened lazily
  std::unique_ptr<log::Writer> descriptor_log_;

  // Generates an increasing version number for every new version
  uint64_t current_version_number_;

  // Queue of writers to the manifest file
  std::deque<ManifestWriter*> manifest_writers_;

  // Current size of manifest file
  uint64_t manifest_file_size_;

  std::vector<ObsoleteFileInfo> obsolete_files_;
  std::vector<ObsoleteBlobFileInfo> obsolete_blob_files_;
  std::vector<std::string> obsolete_manifests_;

  // env options for all reads and writes except compactions
  FileOptions file_options_;

  BlockCacheTracer* const block_cache_tracer_;

  // Store the IO status when Manifest is written
  IOStatus io_status_;

  std::shared_ptr<IOTracer> io_tracer_;

  std::string db_session_id_;

 private:
  // REQUIRES db mutex at beginning. may release and re-acquire db mutex
  Status ProcessManifestWrites(std::deque<ManifestWriter>& writers,
                               InstrumentedMutex* mu, FSDirectory* db_directory,
                               bool new_descriptor_log,
                               const ColumnFamilyOptions* new_cf_options);

  void LogAndApplyCFHelper(VersionEdit* edit);
  Status LogAndApplyHelper(ColumnFamilyData* cfd, VersionBuilder* b,
                           VersionEdit* edit, InstrumentedMutex* mu);
};

// ReactiveVersionSet represents a collection of versions of the column
// families of the database. Users of ReactiveVersionSet, e.g. DBImplSecondary,
// need to replay the MANIFEST (description log in older terms) in order to
// reconstruct and install versions.
class ReactiveVersionSet : public VersionSet {
 public:
  ReactiveVersionSet(const std::string& dbname,
                     const ImmutableDBOptions* _db_options,
                     const FileOptions& _file_options, Cache* table_cache,
                     WriteBufferManager* write_buffer_manager,
                     WriteController* write_controller,
                     const std::shared_ptr<IOTracer>& io_tracer);

  ~ReactiveVersionSet() override;

  Status ReadAndApply(
      InstrumentedMutex* mu,
      std::unique_ptr<log::FragmentBufferedReader>* manifest_reader,
      Status* manifest_read_status,
      std::unordered_set<ColumnFamilyData*>* cfds_changed);
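
  // Example (hypothetical sketch of the catch-up loop a secondary instance
  // might run; `mu` and `manifest_reader` are assumed to be owned by the
  // caller and initialized via Recover() below):
  //
  //   std::unordered_set<ColumnFamilyData*> cfds_changed;
  //   Status manifest_read_status;
  //   Status s = reactive_versions->ReadAndApply(
  //       mu, &manifest_reader, &manifest_read_status, &cfds_changed);
  //   // On success, new versions have been installed for every column
  //   // family in `cfds_changed`.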

  Status Recover(const std::vector<ColumnFamilyDescriptor>& column_families,
                 std::unique_ptr<log::FragmentBufferedReader>* manifest_reader,
                 std::unique_ptr<log::Reader::Reporter>* manifest_reporter,
                 std::unique_ptr<Status>* manifest_reader_status);

#ifndef NDEBUG
  uint64_t TEST_read_edits_in_atomic_group() const;
#endif  // !NDEBUG

  std::vector<VersionEdit>& replay_buffer();

 protected:
  // REQUIRES db mutex
  Status ApplyOneVersionEditToBuilder(
      VersionEdit& edit, std::unordered_set<ColumnFamilyData*>* cfds_changed,
      VersionEdit* version_edit);

  Status MaybeSwitchManifest(
      log::Reader::Reporter* reporter,
      std::unique_ptr<log::FragmentBufferedReader>* manifest_reader);

 private:
  std::unique_ptr<ManifestTailer> manifest_tailer_;

  using VersionSet::LogAndApply;
  using VersionSet::Recover;

  Status LogAndApply(
      const autovector<ColumnFamilyData*>& /*cfds*/,
      const autovector<const MutableCFOptions*>& /*mutable_cf_options_list*/,
      const autovector<autovector<VersionEdit*>>& /*edit_lists*/,
      InstrumentedMutex* /*mu*/, FSDirectory* /*db_directory*/,
      bool /*new_descriptor_log*/, const ColumnFamilyOptions* /*new_cf_option*/,
      const std::vector<std::function<void(const Status&)>>& /*manifest_wcbs*/)
      override {
    return Status::NotSupported("not supported in reactive mode");
  }

  // No copy allowed
  ReactiveVersionSet(const ReactiveVersionSet&);
  ReactiveVersionSet& operator=(const ReactiveVersionSet&);
};

}  // namespace ROCKSDB_NAMESPACE