// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "table/mock_table.h"

#include "db/dbformat.h"
#include "env/composite_env_wrapper.h"
#include "file/random_access_file_reader.h"
#include "port/port.h"
#include "rocksdb/table_properties.h"
#include "table/get_context.h"
#include "util/coding.h"

namespace ROCKSDB_NAMESPACE {
namespace mock {

KVVector MakeMockFile(std::initializer_list<KVPair> l) { return KVVector(l); }

void SortKVVector(KVVector* kv_vector, const Comparator* ucmp) {
  InternalKeyComparator icmp(ucmp);
  std::sort(kv_vector->begin(), kv_vector->end(),
            [icmp](KVPair a, KVPair b) -> bool {
              return icmp.Compare(a.first, b.first) < 0;
            });
}
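
// Illustrative usage (a sketch, not code exercised in this file): a test can
// assemble a mock SST in memory and register it with the factory. The names
// `ikey1`, `ikey2`, `factory`, and the file name below are placeholders.
//
//   auto contents = MakeMockFile({{ikey1, "v1"}, {ikey2, "v2"}});
//   SortKVVector(&contents, BytewiseComparator());
//   Status s = factory->CreateMockTable(env, "/mock/000001.sst",
//                                       std::move(contents));
//
// Keys stored in a mock file are full internal keys; MockTableReader::Get()
// parses them with ParseInternalKey() before handing them to the GetContext.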

class MockTableReader : public TableReader {
 public:
  explicit MockTableReader(const KVVector& table) : table_(table) {}

  InternalIterator* NewIterator(const ReadOptions&,
                                const SliceTransform* prefix_extractor,
                                Arena* arena, bool skip_filters,
                                TableReaderCaller caller,
                                size_t compaction_readahead_size = 0,
                                bool allow_unprepared_value = false) override;

  Status Get(const ReadOptions& readOptions, const Slice& key,
             GetContext* get_context, const SliceTransform* prefix_extractor,
             bool skip_filters = false) override;

  uint64_t ApproximateOffsetOf(const ReadOptions& /*read_options*/,
                               const Slice& /*key*/,
                               TableReaderCaller /*caller*/) override {
    return 0;
  }

  uint64_t ApproximateSize(const ReadOptions& /*read_options*/,
                           const Slice& /*start*/, const Slice& /*end*/,
                           TableReaderCaller /*caller*/) override {
    return 0;
  }

  size_t ApproximateMemoryUsage() const override { return 0; }

  void SetupForCompaction() override {}

  std::shared_ptr<const TableProperties> GetTableProperties() const override;

  ~MockTableReader() = default;

 private:
  const KVVector& table_;
};
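
// MockTableReader (above) serves reads straight out of the in-memory KVVector;
// offset, size, and memory-usage estimates are stubbed to zero because there
// is no real file behind it.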

class MockTableIterator : public InternalIterator {
 public:
  explicit MockTableIterator(const KVVector& table) : table_(table) {
    itr_ = table_.end();
  }

  bool Valid() const override { return itr_ != table_.end(); }

  void SeekToFirst() override { itr_ = table_.begin(); }

  void SeekToLast() override {
    itr_ = table_.end();
    --itr_;
  }

  void Seek(const Slice& target) override {
    KVPair target_pair(target.ToString(), "");
    InternalKeyComparator icmp(BytewiseComparator());
    itr_ = std::lower_bound(table_.begin(), table_.end(), target_pair,
                            [icmp](KVPair a, KVPair b) -> bool {
                              return icmp.Compare(a.first, b.first) < 0;
                            });
  }

  void SeekForPrev(const Slice& target) override {
    KVPair target_pair(target.ToString(), "");
    InternalKeyComparator icmp(BytewiseComparator());
    itr_ = std::upper_bound(table_.begin(), table_.end(), target_pair,
                            [icmp](KVPair a, KVPair b) -> bool {
                              return icmp.Compare(a.first, b.first) < 0;
                            });
    Prev();
  }

  void Next() override { ++itr_; }

  void Prev() override {
    if (itr_ == table_.begin()) {
      itr_ = table_.end();
    } else {
      --itr_;
    }
  }

  Slice key() const override { return Slice(itr_->first); }

  Slice value() const override { return Slice(itr_->second); }

  Status status() const override { return Status::OK(); }

 private:
  const KVVector& table_;
  KVVector::const_iterator itr_;
};
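
// Note on the iterator above: Seek() uses std::lower_bound and SeekForPrev()
// uses std::upper_bound followed by Prev(), both with an InternalKeyComparator
// over BytewiseComparator(), so the backing KVVector must already be sorted in
// internal-key order (see SortKVVector()).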

class MockTableBuilder : public TableBuilder {
 public:
  MockTableBuilder(uint32_t id, MockTableFileSystem* file_system,
                   MockTableFactory::MockCorruptionMode corrupt_mode =
                       MockTableFactory::kCorruptNone,
                   size_t key_value_size = 1)
      : id_(id),
        file_system_(file_system),
        corrupt_mode_(corrupt_mode),
        key_value_size_(key_value_size) {
    table_ = MakeMockFile({});
  }

  // REQUIRES: Either Finish() or Abandon() has been called.
  ~MockTableBuilder() = default;

  // Add key,value to the table being constructed.
  // REQUIRES: key is after any previously added key according to comparator.
  // REQUIRES: Finish(), Abandon() have not been called
  void Add(const Slice& key, const Slice& value) override {
    if (corrupt_mode_ == MockTableFactory::kCorruptValue) {
      // Corrupt the value
      table_.push_back({key.ToString(), value.ToString() + " "});
      corrupt_mode_ = MockTableFactory::kCorruptNone;
    } else if (corrupt_mode_ == MockTableFactory::kCorruptKey) {
      table_.push_back({key.ToString() + " ", value.ToString()});
      corrupt_mode_ = MockTableFactory::kCorruptNone;
    } else if (corrupt_mode_ == MockTableFactory::kCorruptReorderKey) {
      if (prev_key_.empty()) {
        prev_key_ = key.ToString();
        prev_value_ = value.ToString();
      } else {
        table_.push_back({key.ToString(), value.ToString()});
        table_.push_back({prev_key_, prev_value_});
        corrupt_mode_ = MockTableFactory::kCorruptNone;
      }
    } else {
      table_.push_back({key.ToString(), value.ToString()});
    }
  }

  // Return non-ok iff some error has been detected.
  Status status() const override { return Status::OK(); }

  // Return non-ok iff some error happens during IO.
  IOStatus io_status() const override { return IOStatus::OK(); }

  Status Finish() override {
    MutexLock lock_guard(&file_system_->mutex);
    file_system_->files.insert({id_, table_});
    return Status::OK();
  }

  void Abandon() override {}

  uint64_t NumEntries() const override { return table_.size(); }

  uint64_t FileSize() const override { return table_.size() * key_value_size_; }

  TableProperties GetTableProperties() const override {
    return TableProperties();
  }

  // Get file checksum
  std::string GetFileChecksum() const override { return kUnknownFileChecksum; }

  // Get file checksum function name
  const char* GetFileChecksumFuncName() const override {
    return kUnknownFileChecksumFuncName;
  }

 private:
  uint32_t id_;
  std::string prev_key_;
  std::string prev_value_;
  MockTableFileSystem* file_system_;
  int corrupt_mode_;
  KVVector table_;
  size_t key_value_size_;
};
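
// The builder above injects at most one corruption per file: kCorruptValue and
// kCorruptKey append a trailing space to the value or key of the next Add(),
// and kCorruptReorderKey emits the first two added entries in reversed order;
// the mode then resets to kCorruptNone.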

InternalIterator* MockTableReader::NewIterator(
    const ReadOptions&, const SliceTransform* /* prefix_extractor */,
    Arena* /*arena*/, bool /*skip_filters*/, TableReaderCaller /*caller*/,
    size_t /*compaction_readahead_size*/, bool /* allow_unprepared_value */) {
  return new MockTableIterator(table_);
}
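
// NewIterator() above ignores the read options, prefix extractor, and arena,
// and always returns a fresh heap-allocated MockTableIterator.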

Status MockTableReader::Get(const ReadOptions&, const Slice& key,
                            GetContext* get_context,
                            const SliceTransform* /*prefix_extractor*/,
                            bool /*skip_filters*/) {
  std::unique_ptr<MockTableIterator> iter(new MockTableIterator(table_));
  for (iter->Seek(key); iter->Valid(); iter->Next()) {
    ParsedInternalKey parsed_key;
    Status pik_status =
        ParseInternalKey(iter->key(), &parsed_key, true /* log_err_key */);
    if (!pik_status.ok()) {
      return pik_status;
    }

    bool dont_care __attribute__((__unused__));
    if (!get_context->SaveValue(parsed_key, iter->value(), &dont_care)) {
      break;
    }
  }
  return Status::OK();
}
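
// Get() above scans forward from the first entry at or after `key` and feeds
// each parsed entry to GetContext::SaveValue() until the context indicates no
// more entries are needed.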

std::shared_ptr<const TableProperties> MockTableReader::GetTableProperties()
    const {
  TableProperties* tp = new TableProperties();
  tp->num_entries = table_.size();
  tp->num_range_deletions = 0;
  tp->raw_key_size = 1;
  tp->raw_value_size = 1;

  return std::shared_ptr<const TableProperties>(tp);
}

MockTableFactory::MockTableFactory()
    : next_id_(1), corrupt_mode_(MockTableFactory::kCorruptNone) {}

Status MockTableFactory::NewTableReader(
    const ReadOptions& /*ro*/,
    const TableReaderOptions& /*table_reader_options*/,
    std::unique_ptr<RandomAccessFileReader>&& file, uint64_t /*file_size*/,
    std::unique_ptr<TableReader>* table_reader,
    bool /*prefetch_index_and_filter_in_cache*/) const {
  uint32_t id;
  Status s = GetIDFromFile(file.get(), &id);
  if (!s.ok()) {
    return s;
  }

  MutexLock lock_guard(&file_system_.mutex);

  auto it = file_system_.files.find(id);
  if (it == file_system_.files.end()) {
    return Status::IOError("Mock file not found");
  }

  table_reader->reset(new MockTableReader(it->second));

  return Status::OK();
}
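
// NewTableReader() above resolves the 4-byte ID stored in the physical file to
// its in-memory KVVector; a missing entry is reported as an IOError, mirroring
// a missing SST file.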

TableBuilder* MockTableFactory::NewTableBuilder(
    const TableBuilderOptions& /*table_builder_options*/,
    WritableFileWriter* file) const {
  uint32_t id;
  Status s = GetAndWriteNextID(file, &id);
  assert(s.ok());

  return new MockTableBuilder(id, &file_system_, corrupt_mode_,
                              key_value_size_);
}

Status MockTableFactory::CreateMockTable(Env* env, const std::string& fname,
                                         KVVector file_contents) {
  std::unique_ptr<WritableFileWriter> file_writer;
  Status s = WritableFileWriter::Create(env->GetFileSystem(), fname,
                                        FileOptions(), &file_writer, nullptr);
  if (!s.ok()) {
    return s;
  }
  uint32_t id;
  s = GetAndWriteNextID(file_writer.get(), &id);
  if (s.ok()) {
    file_system_.files.insert({id, std::move(file_contents)});
  }
  return s;
}
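
// Sketch of how a test might plug this factory into a DB (the variable names
// are illustrative; `options.table_factory` is the standard RocksDB option):
//
//   auto mock_factory = std::make_shared<MockTableFactory>();
//   Options options;
//   options.table_factory = mock_factory;
//
// Tables flushed or compacted through these options are then materialized in
// the in-memory MockTableFileSystem rather than as real SST files.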

Status MockTableFactory::GetAndWriteNextID(WritableFileWriter* file,
                                           uint32_t* next_id) const {
  *next_id = next_id_.fetch_add(1);
  char buf[4];
  EncodeFixed32(buf, *next_id);
  return file->Append(IOOptions(), Slice(buf, 4));
}

Status MockTableFactory::GetIDFromFile(RandomAccessFileReader* file,
                                       uint32_t* id) const {
  char buf[4];
  Slice result;
  Status s = file->Read(IOOptions(), 0, 4, &result, buf, nullptr);
  assert(result.size() == 4);
  *id = DecodeFixed32(buf);
  return s;
}
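
// GetAndWriteNextID()/GetIDFromFile() above round-trip a fixed 4-byte table ID
// through the first four bytes of the physical file; nothing else is written,
// since the actual key/value contents live in file_system_.files keyed by that
// ID.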

void MockTableFactory::AssertSingleFile(const KVVector& file_contents) {
  ASSERT_EQ(file_system_.files.size(), 1U);
  ASSERT_EQ(file_contents, file_system_.files.begin()->second);
}

void MockTableFactory::AssertLatestFiles(
    const std::vector<KVVector>& files_contents) {
  ASSERT_GE(file_system_.files.size(), files_contents.size());
  auto it = file_system_.files.rbegin();
  for (auto expect = files_contents.rbegin(); expect != files_contents.rend();
       expect++, it++) {
    ASSERT_TRUE(it != file_system_.files.rend());
    if (*expect != it->second) {
      std::cout << "Wrong content! Content of file, expect:" << std::endl;
      for (const auto& kv : *expect) {
        ParsedInternalKey ikey;
        std::string key, value;
        std::tie(key, value) = kv;
        ASSERT_OK(ParseInternalKey(Slice(key), &ikey, true /* log_err_key */));
        std::cout << ikey.DebugString(true, false) << " -> " << value
                  << std::endl;
      }
      std::cout << "actual:" << std::endl;
      for (const auto& kv : it->second) {
        ParsedInternalKey ikey;
        std::string key, value;
        std::tie(key, value) = kv;
        ASSERT_OK(ParseInternalKey(Slice(key), &ikey, true /* log_err_key */));
        std::cout << ikey.DebugString(true, false) << " -> " << value
                  << std::endl;
      }
      FAIL();
    }
  }
}

}  // namespace mock
}  // namespace ROCKSDB_NAMESPACE