// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <algorithm>
#include <set>

#include "db/db_impl/db_impl.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "file/filename.h"
#include "rocksdb/cache.h"
#include "rocksdb/compaction_filter.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/table.h"
#include "table/meta_blocks.h"
#include "table/plain/plain_table_bloom.h"
#include "table/plain/plain_table_factory.h"
#include "table/plain/plain_table_key_coding.h"
#include "table/plain/plain_table_reader.h"
#include "table/table_builder.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/cast_util.h"
#include "util/hash.h"
#include "util/mutexlock.h"
#include "util/random.h"
#include "util/string_util.h"
#include "utilities/merge_operators.h"

namespace ROCKSDB_NAMESPACE {

class PlainTableKeyDecoderTest : public testing::Test {};

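// Exercises PlainTableFileReader's non-mmap read path, which keeps the two
// most recently used read buffers so that coming back to a nearby offset (as
// binary search tends to do) does not cost another read of the file.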
TEST_F(PlainTableKeyDecoderTest, ReadNonMmap) {
  Random rnd(301);
  const uint32_t kLength = 2222;
  std::string tmp = rnd.RandomString(kLength);
  Slice contents(tmp);
  test::StringSource* string_source =
      new test::StringSource(contents, 0, false);
  std::unique_ptr<FSRandomAccessFile> holder(string_source);
  std::unique_ptr<RandomAccessFileReader> file_reader(
      new RandomAccessFileReader(std::move(holder), "test"));
  std::unique_ptr<PlainTableReaderFileInfo> file_info(
      new PlainTableReaderFileInfo(std::move(file_reader), EnvOptions(),
                                   kLength));

  {
    PlainTableFileReader reader(file_info.get());

    const uint32_t kReadSize = 77;
    for (uint32_t pos = 0; pos < kLength; pos += kReadSize) {
      uint32_t read_size = std::min(kLength - pos, kReadSize);
      Slice out;
      ASSERT_TRUE(reader.Read(pos, read_size, &out));
      ASSERT_EQ(0, out.compare(tmp.substr(pos, read_size)));
    }

    ASSERT_LT(uint32_t(string_source->total_reads()), kLength / kReadSize / 2);
  }
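
  // Random access read patterns, given as (offset, length) pairs, paired with
  // the number of reads of the underlying file each pattern is expected to
  // trigger (num_file_reads below); requests served from one of the two
  // buffers the reader retains are not expected to touch the file again.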
  std::vector<std::vector<std::pair<uint32_t, uint32_t>>> reads = {
      {{600, 30}, {590, 30}, {600, 20}, {600, 40}},
      {{800, 20}, {100, 20}, {500, 20}, {1500, 20}, {100, 20}, {80, 20}},
      {{1000, 20}, {500, 20}, {1000, 50}},
      {{1000, 20}, {500, 20}, {500, 20}},
      {{1000, 20}, {500, 20}, {200, 20}, {500, 20}},
      {{1000, 20}, {500, 20}, {200, 20}, {1000, 50}},
      {{600, 500}, {610, 20}, {100, 20}},
      {{500, 100}, {490, 100}, {550, 50}},
  };

  std::vector<int> num_file_reads = {2, 6, 2, 2, 4, 3, 2, 2};

  for (size_t i = 0; i < reads.size(); i++) {
    string_source->set_total_reads(0);
    PlainTableFileReader reader(file_info.get());
    for (auto p : reads[i]) {
      Slice out;
      ASSERT_TRUE(reader.Read(p.first, p.second, &out));
      ASSERT_EQ(0, out.compare(tmp.substr(p.first, p.second)));
    }
    ASSERT_EQ(num_file_reads[i], string_source->total_reads());
  }
}
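
// Test fixture for PlainTable-backed DBs, parameterized on whether the DB is
// opened with mmap reads enabled (GetParam() feeds allow_mmap_reads).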
class PlainTableDBTest : public testing::Test,
                         public testing::WithParamInterface<bool> {
 protected:
 private:
  std::string dbname_;
  Env* env_;
  DB* db_;

  bool mmap_mode_;
  Options last_options_;

 public:
  PlainTableDBTest() : env_(Env::Default()) {}

  ~PlainTableDBTest() override {
    delete db_;
    EXPECT_OK(DestroyDB(dbname_, Options()));
  }

  void SetUp() override {
    mmap_mode_ = GetParam();
    dbname_ = test::PerThreadDBPath("plain_table_db_test");
    EXPECT_OK(DestroyDB(dbname_, Options()));
    db_ = nullptr;
    Reopen();
  }

  // Return the current option configuration.
  Options CurrentOptions() {
    Options options;
    options.level_compaction_dynamic_level_bytes = false;

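    // Baseline PlainTable configuration for these tests: variable-length user
    // keys, a 2-bits-per-key bloom filter, prefix encoding, and a sparse
    // index kept in memory rather than stored in the file.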
    PlainTableOptions plain_table_options;
    plain_table_options.user_key_len = 0;
    plain_table_options.bloom_bits_per_key = 2;
    plain_table_options.hash_table_ratio = 0.8;
    plain_table_options.index_sparseness = 3;
    plain_table_options.huge_page_tlb_size = 0;
    plain_table_options.encoding_type = kPrefix;
    plain_table_options.full_scan_mode = false;
    plain_table_options.store_index_in_file = false;

    options.table_factory.reset(NewPlainTableFactory(plain_table_options));
    options.memtable_factory.reset(NewHashLinkListRepFactory(4, 0, 3, true));

    options.prefix_extractor.reset(NewFixedPrefixTransform(8));
    options.allow_mmap_reads = mmap_mode_;
    options.allow_concurrent_memtable_write = false;
    options.unordered_write = false;
    return options;
  }

  DBImpl* dbfull() { return static_cast_with_check<DBImpl>(db_); }

  void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); }

  void Close() {
    delete db_;
    db_ = nullptr;
  }

  bool mmap_mode() const { return mmap_mode_; }

  void DestroyAndReopen(Options* options = nullptr) {
    // Destroy using last options
    Destroy(&last_options_);
    ASSERT_OK(TryReopen(options));
  }

  void Destroy(Options* options) {
    delete db_;
    db_ = nullptr;
    ASSERT_OK(DestroyDB(dbname_, *options));
  }

  Status PureReopen(Options* options, DB** db) {
    return DB::Open(*options, dbname_, db);
  }

  Status ReopenForReadOnly(Options* options) {
    delete db_;
    db_ = nullptr;
    return DB::OpenForReadOnly(*options, dbname_, &db_);
  }

  Status TryReopen(Options* options = nullptr) {
    delete db_;
    db_ = nullptr;
    Options opts;
    if (options != nullptr) {
      opts = *options;
    } else {
      opts = CurrentOptions();
      opts.create_if_missing = true;
    }
    last_options_ = opts;

    return DB::Open(opts, dbname_, &db_);
  }

  Status Put(const Slice& k, const Slice& v) {
    return db_->Put(WriteOptions(), k, v);
  }

  Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }

  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
    ReadOptions options;
    options.snapshot = snapshot;
    std::string result;
    Status s = db_->Get(options, k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }

  int NumTableFilesAtLevel(int level) {
    std::string property;
    EXPECT_TRUE(db_->GetProperty(
        "rocksdb.num-files-at-level" + std::to_string(level), &property));
    return atoi(property.c_str());
  }

  // Return spread of files per level
  std::string FilesPerLevel() {
    std::string result;
    size_t last_non_zero_offset = 0;
    for (int level = 0; level < db_->NumberLevels(); level++) {
      int f = NumTableFilesAtLevel(level);
      char buf[100];
      snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
      result += buf;
      if (f > 0) {
        last_non_zero_offset = result.size();
      }
    }
    result.resize(last_non_zero_offset);
    return result;
  }

  std::string IterStatus(Iterator* iter) {
    std::string result;
    if (iter->Valid()) {
      result = iter->key().ToString() + "->" + iter->value().ToString();
    } else {
      result = "(invalid)";
    }
    return result;
  }
};

TEST_P(PlainTableDBTest, Empty) {
  ASSERT_TRUE(dbfull() != nullptr);
  ASSERT_EQ("NOT_FOUND", Get("0000000000000foo"));
}

extern const uint64_t kPlainTableMagicNumber;

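// PlainTableReader subclass that, while being constructed, populates the
// index and verifies the table properties written by the builder: the column
// family id and name and, when the index is stored in the file, the bloom
// version and bloom block count properties.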
class TestPlainTableReader : public PlainTableReader {
 public:
  TestPlainTableReader(
      const EnvOptions& env_options, const InternalKeyComparator& icomparator,
      EncodingType encoding_type, uint64_t file_size, int bloom_bits_per_key,
      double hash_table_ratio, size_t index_sparseness,
      std::unique_ptr<TableProperties>&& props,
      std::unique_ptr<RandomAccessFileReader>&& file,
      const ImmutableOptions& ioptions, const SliceTransform* prefix_extractor,
      bool* expect_bloom_not_match, bool store_index_in_file,
      uint32_t column_family_id, const std::string& column_family_name)
      : PlainTableReader(ioptions, std::move(file), env_options, icomparator,
                         encoding_type, file_size, props.get(),
                         prefix_extractor),
        expect_bloom_not_match_(expect_bloom_not_match) {
    Status s = MmapDataIfNeeded();
    EXPECT_TRUE(s.ok());

    s = PopulateIndex(props.get(), bloom_bits_per_key, hash_table_ratio,
                      index_sparseness, 2 * 1024 * 1024);
    EXPECT_TRUE(s.ok());

    EXPECT_EQ(column_family_id, static_cast<uint32_t>(props->column_family_id));
    EXPECT_EQ(column_family_name, props->column_family_name);
    if (store_index_in_file) {
      auto bloom_version_ptr = props->user_collected_properties.find(
          PlainTablePropertyNames::kBloomVersion);
      EXPECT_TRUE(bloom_version_ptr != props->user_collected_properties.end());
      EXPECT_EQ(bloom_version_ptr->second, std::string("1"));
      if (ioptions.bloom_locality > 0) {
        auto num_blocks_ptr = props->user_collected_properties.find(
            PlainTablePropertyNames::kNumBloomBlocks);
        EXPECT_TRUE(num_blocks_ptr != props->user_collected_properties.end());
      }
    }
    table_properties_ = std::move(props);
  }

  ~TestPlainTableReader() override {}

 private:
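  // Checks the real bloom filter result against the outcome the test expects,
  // toggled through *expect_bloom_not_match_.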
  bool MatchBloom(uint32_t hash) const override {
    bool ret = PlainTableReader::MatchBloom(hash);
    if (*expect_bloom_not_match_) {
      EXPECT_TRUE(!ret);
    } else {
      EXPECT_TRUE(ret);
    }
    return ret;
  }

  bool* expect_bloom_not_match_;
};

extern const uint64_t kPlainTableMagicNumber;
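// Factory that builds TestPlainTableReader instances and, when the index is
// stored in the file, verifies that the bloom and index meta blocks can be
// located in the written table.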
class TestPlainTableFactory : public PlainTableFactory {
 public:
  explicit TestPlainTableFactory(bool* expect_bloom_not_match,
                                 const PlainTableOptions& options,
                                 uint32_t column_family_id,
                                 std::string column_family_name)
      : PlainTableFactory(options),
        bloom_bits_per_key_(options.bloom_bits_per_key),
        hash_table_ratio_(options.hash_table_ratio),
        index_sparseness_(options.index_sparseness),
        store_index_in_file_(options.store_index_in_file),
        expect_bloom_not_match_(expect_bloom_not_match),
        column_family_id_(column_family_id),
        column_family_name_(std::move(column_family_name)) {}

  using PlainTableFactory::NewTableReader;
  Status NewTableReader(
      const ReadOptions& /*ro*/, const TableReaderOptions& table_reader_options,
      std::unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
      std::unique_ptr<TableReader>* table,
      bool /*prefetch_index_and_filter_in_cache*/) const override {
    std::unique_ptr<TableProperties> props;
    const ReadOptions read_options;
    auto s = ReadTableProperties(file.get(), file_size, kPlainTableMagicNumber,
                                 table_reader_options.ioptions, read_options,
                                 &props);
    EXPECT_TRUE(s.ok());

    if (store_index_in_file_) {
      BlockHandle bloom_block_handle;
      s = FindMetaBlockInFile(file.get(), file_size, kPlainTableMagicNumber,
                              table_reader_options.ioptions, read_options,
                              BloomBlockBuilder::kBloomBlock,
                              &bloom_block_handle);
      EXPECT_TRUE(s.ok());

      BlockHandle index_block_handle;
      s = FindMetaBlockInFile(file.get(), file_size, kPlainTableMagicNumber,
                              table_reader_options.ioptions, read_options,
                              PlainTableIndexBuilder::kPlainTableIndexBlock,
                              &index_block_handle);
      EXPECT_TRUE(s.ok());
    }

    auto& user_props = props->user_collected_properties;
    auto encoding_type_prop =
        user_props.find(PlainTablePropertyNames::kEncodingType);
    assert(encoding_type_prop != user_props.end());
    EncodingType encoding_type = static_cast<EncodingType>(
        DecodeFixed32(encoding_type_prop->second.c_str()));

    std::unique_ptr<PlainTableReader> new_reader(new TestPlainTableReader(
        table_reader_options.env_options,
        table_reader_options.internal_comparator, encoding_type, file_size,
        bloom_bits_per_key_, hash_table_ratio_, index_sparseness_,
        std::move(props), std::move(file), table_reader_options.ioptions,
        table_reader_options.prefix_extractor.get(), expect_bloom_not_match_,
        store_index_in_file_, column_family_id_, column_family_name_));

    *table = std::move(new_reader);
    return s;
  }

 private:
  int bloom_bits_per_key_;
  double hash_table_ratio_;
  size_t index_sparseness_;
  bool store_index_in_file_;
  bool* expect_bloom_not_match_;
  const uint32_t column_family_id_;
  const std::string column_family_name_;
};
|
2013-10-29 03:34:02 +00:00
|
|
|
|
2019-10-21 23:51:19 +00:00
|
|
|
TEST_P(PlainTableDBTest, BadOptions1) {
|
|
|
|
// Build with a prefix extractor
|
|
|
|
ASSERT_OK(Put("1000000000000foo", "v1"));
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
2019-10-21 23:51:19 +00:00
|
|
|
|
|
|
|
// Bad attempt to re-open without a prefix extractor
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.prefix_extractor.reset();
|
|
|
|
ASSERT_EQ(
|
|
|
|
"Invalid argument: Prefix extractor is missing when opening a PlainTable "
|
|
|
|
"built using a prefix extractor",
|
2020-02-14 16:15:24 +00:00
|
|
|
TryReopen(&options).ToString());
|
2019-10-21 23:51:19 +00:00
|
|
|
|
|
|
|
// Bad attempt to re-open with different prefix extractor
|
|
|
|
options.prefix_extractor.reset(NewFixedPrefixTransform(6));
|
|
|
|
ASSERT_EQ(
|
|
|
|
"Invalid argument: Prefix extractor given doesn't match the one used to "
|
|
|
|
"build PlainTable",
|
2020-02-14 16:15:24 +00:00
|
|
|
TryReopen(&options).ToString());
|
2019-10-21 23:51:19 +00:00
|
|
|
|
|
|
|
// Correct prefix extractor
|
|
|
|
options.prefix_extractor.reset(NewFixedPrefixTransform(8));
|
|
|
|
Reopen(&options);
|
|
|
|
ASSERT_EQ("v1", Get("1000000000000foo"));
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_P(PlainTableDBTest, BadOptions2) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.prefix_extractor.reset();
|
|
|
|
options.create_if_missing = true;
|
|
|
|
DestroyAndReopen(&options);
|
|
|
|
// Build without a prefix extractor
|
|
|
|
// (apparently works even if hash_table_ratio > 0)
|
|
|
|
ASSERT_OK(Put("1000000000000foo", "v1"));
|
2020-10-13 18:58:12 +00:00
|
|
|
// With no prefix extractor set, this flush attempt will fail and return the
|
|
|
|
// status for this bad attempt.
|
|
|
|
ASSERT_NOK(dbfull()->TEST_FlushMemTable());
|
2019-10-21 23:51:19 +00:00
|
|
|
|
|
|
|
// Bad attempt to re-open with hash_table_ratio > 0 and no prefix extractor
|
|
|
|
Status s = TryReopen(&options);
|
|
|
|
ASSERT_EQ(
|
|
|
|
"Not implemented: PlainTable requires a prefix extractor enable prefix "
|
|
|
|
"hash mode.",
|
|
|
|
s.ToString());
|
|
|
|
|
|
|
|
// OK to open with hash_table_ratio == 0 and no prefix extractor
|
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.hash_table_ratio = 0;
|
|
|
|
options.table_factory.reset(NewPlainTableFactory(plain_table_options));
|
|
|
|
Reopen(&options);
|
|
|
|
ASSERT_EQ("v1", Get("1000000000000foo"));
|
|
|
|
|
|
|
|
// OK to reopen with a prefix_extractor and hash table; the index is built
|
|
|
|
// in memory.
|
|
|
|
options = CurrentOptions();
|
|
|
|
Reopen(&options);
|
|
|
|
ASSERT_EQ("v1", Get("1000000000000foo"));
|
|
|
|
}
|
|
|
|
|
2015-09-16 23:57:43 +00:00
|
|
|
TEST_P(PlainTableDBTest, Flush) {
|
2014-05-04 20:55:53 +00:00
|
|
|
for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024;
|
|
|
|
huge_page_tlb_size += 2 * 1024 * 1024) {
|
2014-06-18 23:36:48 +00:00
|
|
|
for (EncodingType encoding_type : {kPlain, kPrefix}) {
|
2022-11-02 21:34:24 +00:00
|
|
|
for (int bloom = -1; bloom <= 117; bloom += 117) {
|
|
|
|
const int bloom_bits = std::max(bloom, 0);
|
|
|
|
const bool full_scan_mode = bloom < 0;
|
|
|
|
for (int total_order = 0; total_order <= 1; total_order++) {
|
|
|
|
for (int store_index_in_file = 0; store_index_in_file <= 1;
|
|
|
|
++store_index_in_file) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.create_if_missing = true;
|
|
|
|
// Set only one bucket to force bucket conflict.
|
|
|
|
// Test index interval for the same prefix to be 1, 2 and 4
|
|
|
|
if (total_order) {
|
|
|
|
options.prefix_extractor.reset();
|
|
|
|
|
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.user_key_len = 0;
|
|
|
|
plain_table_options.bloom_bits_per_key = bloom_bits;
|
|
|
|
plain_table_options.hash_table_ratio = 0;
|
|
|
|
plain_table_options.index_sparseness = 2;
|
|
|
|
plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
|
|
|
|
plain_table_options.encoding_type = encoding_type;
|
|
|
|
plain_table_options.full_scan_mode = full_scan_mode;
|
|
|
|
plain_table_options.store_index_in_file = store_index_in_file;
|
|
|
|
|
|
|
|
options.table_factory.reset(
|
|
|
|
NewPlainTableFactory(plain_table_options));
|
|
|
|
} else {
|
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.user_key_len = 0;
|
|
|
|
plain_table_options.bloom_bits_per_key = bloom_bits;
|
|
|
|
plain_table_options.hash_table_ratio = 0.75;
|
|
|
|
plain_table_options.index_sparseness = 16;
|
|
|
|
plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
|
|
|
|
plain_table_options.encoding_type = encoding_type;
|
|
|
|
plain_table_options.full_scan_mode = full_scan_mode;
|
|
|
|
plain_table_options.store_index_in_file = store_index_in_file;
|
|
|
|
|
|
|
|
options.table_factory.reset(
|
|
|
|
NewPlainTableFactory(plain_table_options));
|
|
|
|
}
|
|
|
|
DestroyAndReopen(&options);
|
|
|
|
uint64_t int_num;
|
|
|
|
ASSERT_TRUE(dbfull()->GetIntProperty(
|
|
|
|
"rocksdb.estimate-table-readers-mem", &int_num));
|
|
|
|
ASSERT_EQ(int_num, 0U);
|
|
|
|
|
|
|
|
ASSERT_OK(Put("1000000000000foo", "v1"));
|
|
|
|
ASSERT_OK(Put("0000000000000bar", "v2"));
|
|
|
|
ASSERT_OK(Put("1000000000000foo", "v3"));
|
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
|
|
|
|
|
|
|
ASSERT_TRUE(dbfull()->GetIntProperty(
|
|
|
|
"rocksdb.estimate-table-readers-mem", &int_num));
|
|
|
|
ASSERT_GT(int_num, 0U);
|
|
|
|
|
|
|
|
TablePropertiesCollection ptc;
|
|
|
|
ASSERT_OK(reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(
|
|
|
|
&ptc));
|
|
|
|
ASSERT_EQ(1U, ptc.size());
|
|
|
|
auto row = ptc.begin();
|
|
|
|
auto tp = row->second;
|
|
|
|
|
|
|
|
if (full_scan_mode) {
|
|
|
|
// Does not support Get/Seek
|
|
|
|
std::unique_ptr<Iterator> iter(
|
|
|
|
dbfull()->NewIterator(ReadOptions()));
|
|
|
|
iter->SeekToFirst();
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("0000000000000bar", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v2", iter->value().ToString());
|
|
|
|
iter->Next();
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("1000000000000foo", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v3", iter->value().ToString());
|
|
|
|
iter->Next();
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
|
|
|
ASSERT_TRUE(iter->status().ok());
|
2019-10-18 21:43:17 +00:00
|
|
|
} else {
|
2022-11-02 21:34:24 +00:00
|
|
|
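// Sanity-check the plain table index-size properties: the hash table size
// differs between prefix-hash and total order mode, and both sizes read back
// as zero when the index is stored in the file.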
if (!store_index_in_file) {
|
|
|
|
ASSERT_EQ(total_order ? "4" : "12",
|
|
|
|
(tp->user_collected_properties)
|
|
|
|
.at("plain_table_hash_table_size"));
|
|
|
|
ASSERT_EQ("0", (tp->user_collected_properties)
|
|
|
|
.at("plain_table_sub_index_size"));
|
|
|
|
} else {
|
|
|
|
ASSERT_EQ("0", (tp->user_collected_properties)
|
|
|
|
.at("plain_table_hash_table_size"));
|
|
|
|
ASSERT_EQ("0", (tp->user_collected_properties)
|
|
|
|
.at("plain_table_sub_index_size"));
|
|
|
|
}
|
|
|
|
ASSERT_EQ("v3", Get("1000000000000foo"));
|
|
|
|
ASSERT_EQ("v2", Get("0000000000000bar"));
|
2019-10-18 21:43:17 +00:00
|
|
|
}
|
2019-03-01 23:41:55 +00:00
|
|
|
}
|
2014-07-18 23:58:13 +00:00
|
|
|
}
|
2014-02-08 00:25:38 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-16 23:57:43 +00:00
|
|
|
TEST_P(PlainTableDBTest, Flush2) {
|
2014-05-04 20:55:53 +00:00
|
|
|
for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024;
|
|
|
|
huge_page_tlb_size += 2 * 1024 * 1024) {
|
2014-06-18 23:36:48 +00:00
|
|
|
for (EncodingType encoding_type : {kPlain, kPrefix}) {
|
2022-11-02 21:34:24 +00:00
|
|
|
for (int bloom_bits = 0; bloom_bits <= 117; bloom_bits += 117) {
|
|
|
|
for (int total_order = 0; total_order <= 1; total_order++) {
|
|
|
|
for (int store_index_in_file = 0; store_index_in_file <= 1;
|
|
|
|
++store_index_in_file) {
|
|
|
|
if (encoding_type == kPrefix && total_order) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (!bloom_bits && store_index_in_file) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (total_order && store_index_in_file) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
bool expect_bloom_not_match = false;
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.create_if_missing = true;
|
|
|
|
// Set only one bucket to force bucket conflict.
|
|
|
|
// Test index interval for the same prefix to be 1, 2 and 4
|
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
if (total_order) {
|
|
|
|
options.prefix_extractor = nullptr;
|
|
|
|
plain_table_options.hash_table_ratio = 0;
|
|
|
|
plain_table_options.index_sparseness = 2;
|
|
|
|
} else {
|
|
|
|
plain_table_options.hash_table_ratio = 0.75;
|
|
|
|
plain_table_options.index_sparseness = 16;
|
|
|
|
}
|
|
|
|
plain_table_options.user_key_len = kPlainTableVariableLength;
|
|
|
|
plain_table_options.bloom_bits_per_key = bloom_bits;
|
|
|
|
plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
|
|
|
|
plain_table_options.encoding_type = encoding_type;
|
|
|
|
plain_table_options.store_index_in_file = store_index_in_file;
|
|
|
|
options.table_factory.reset(new TestPlainTableFactory(
|
|
|
|
&expect_bloom_not_match, plain_table_options,
|
|
|
|
0 /* column_family_id */, kDefaultColumnFamilyName));
|
|
|
|
|
|
|
|
DestroyAndReopen(&options);
|
|
|
|
ASSERT_OK(Put("0000000000000bar", "b"));
|
|
|
|
ASSERT_OK(Put("1000000000000foo", "v1"));
|
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
|
|
|
|
|
|
|
ASSERT_OK(Put("1000000000000foo", "v2"));
|
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
|
|
|
ASSERT_EQ("v2", Get("1000000000000foo"));
|
|
|
|
|
|
|
|
ASSERT_OK(Put("0000000000000eee", "v3"));
|
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
|
|
|
ASSERT_EQ("v3", Get("0000000000000eee"));
|
|
|
|
|
|
|
|
ASSERT_OK(Delete("0000000000000bar"));
|
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("0000000000000bar"));
|
|
|
|
|
|
|
|
ASSERT_OK(Put("0000000000000eee", "v5"));
|
|
|
|
ASSERT_OK(Put("9000000000000eee", "v5"));
|
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
|
|
|
ASSERT_EQ("v5", Get("0000000000000eee"));
|
|
|
|
|
|
|
|
// Test Bloom Filter
|
|
|
|
if (bloom_bits > 0) {
|
|
|
|
// Neither the key nor its prefix exists, so the Bloom filter should not match.
|
|
|
|
expect_bloom_not_match = true;
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("5_not00000000bar"));
|
|
|
|
// Keys don't exist, but their prefixes do (whole-key Bloom in total order).
|
|
|
|
if (total_order) {
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("1000000000000not"));
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("0000000000000not"));
|
|
|
|
}
|
|
|
|
expect_bloom_not_match = false;
|
|
|
|
}
|
2014-05-04 20:55:53 +00:00
|
|
|
}
|
2014-02-08 00:25:38 +00:00
|
|
|
}
|
|
|
|
}
|
2014-06-18 23:36:48 +00:00
|
|
|
}
|
2014-02-08 00:25:38 +00:00
|
|
|
}
|
2013-10-29 03:34:02 +00:00
|
|
|
}
|
|
|
|
|
2019-01-26 02:06:28 +00:00
|
|
|
TEST_P(PlainTableDBTest, Immortal) {
|
2019-01-26 01:07:00 +00:00
|
|
|
for (EncodingType encoding_type : {kPlain, kPrefix}) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.create_if_missing = true;
|
|
|
|
options.max_open_files = -1;
|
|
|
|
// Set only one bucket to force bucket conflict.
|
|
|
|
// Test index interval for the same prefix to be 1, 2 and 4
|
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.hash_table_ratio = 0.75;
|
|
|
|
plain_table_options.index_sparseness = 16;
|
|
|
|
plain_table_options.user_key_len = kPlainTableVariableLength;
|
|
|
|
plain_table_options.bloom_bits_per_key = 10;
|
|
|
|
plain_table_options.encoding_type = encoding_type;
|
|
|
|
options.table_factory.reset(NewPlainTableFactory(plain_table_options));
|
|
|
|
|
|
|
|
DestroyAndReopen(&options);
|
|
|
|
ASSERT_OK(Put("0000000000000bar", "b"));
|
|
|
|
ASSERT_OK(Put("1000000000000foo", "v1"));
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
2019-01-26 01:07:00 +00:00
|
|
|
|
|
|
|
int copied = 0;
|
2020-02-20 20:07:53 +00:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
|
2019-01-26 01:07:00 +00:00
|
|
|
"GetContext::SaveValue::PinSelf", [&](void* /*arg*/) { copied++; });
|
2020-02-20 20:07:53 +00:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
2019-01-26 01:07:00 +00:00
|
|
|
ASSERT_EQ("b", Get("0000000000000bar"));
|
|
|
|
ASSERT_EQ("v1", Get("1000000000000foo"));
|
|
|
|
ASSERT_EQ(2, copied);
|
|
|
|
copied = 0;
|
|
|
|
|
|
|
|
Close();
|
|
|
|
ASSERT_OK(ReopenForReadOnly(&options));
|
|
|
|
|
|
|
|
ASSERT_EQ("b", Get("0000000000000bar"));
|
|
|
|
ASSERT_EQ("v1", Get("1000000000000foo"));
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("1000000000000bar"));
|
|
|
|
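// After the read-only reopen, an mmap-backed Get can pin the value directly
// from the mapped file, so the PinSelf copy path is not hit; without mmap each
// found value is still copied.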
if (mmap_mode()) {
|
|
|
|
ASSERT_EQ(0, copied);
|
|
|
|
} else {
|
|
|
|
ASSERT_EQ(2, copied);
|
|
|
|
}
|
2020-02-20 20:07:53 +00:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
2019-01-26 01:07:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-16 23:57:43 +00:00
|
|
|
TEST_P(PlainTableDBTest, Iterator) {
|
2014-05-04 20:55:53 +00:00
|
|
|
for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024;
|
|
|
|
huge_page_tlb_size += 2 * 1024 * 1024) {
|
2014-06-18 23:36:48 +00:00
|
|
|
for (EncodingType encoding_type : {kPlain, kPrefix}) {
|
2022-11-02 21:34:24 +00:00
|
|
|
for (int bloom_bits = 0; bloom_bits <= 117; bloom_bits += 117) {
|
|
|
|
for (int total_order = 0; total_order <= 1; total_order++) {
|
|
|
|
if (encoding_type == kPrefix && total_order == 1) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
bool expect_bloom_not_match = false;
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.create_if_missing = true;
|
|
|
|
// Set only one bucket to force bucket conflict.
|
|
|
|
// Test index interval for the same prefix to be 1, 2 and 4
|
|
|
|
if (total_order) {
|
|
|
|
options.prefix_extractor = nullptr;
|
|
|
|
|
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.user_key_len = 16;
|
|
|
|
plain_table_options.bloom_bits_per_key = bloom_bits;
|
|
|
|
plain_table_options.hash_table_ratio = 0;
|
|
|
|
plain_table_options.index_sparseness = 2;
|
|
|
|
plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
|
|
|
|
plain_table_options.encoding_type = encoding_type;
|
|
|
|
|
|
|
|
options.table_factory.reset(new TestPlainTableFactory(
|
|
|
|
&expect_bloom_not_match, plain_table_options,
|
|
|
|
0 /* column_family_id */, kDefaultColumnFamilyName));
|
|
|
|
} else {
|
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.user_key_len = 16;
|
|
|
|
plain_table_options.bloom_bits_per_key = bloom_bits;
|
|
|
|
plain_table_options.hash_table_ratio = 0.75;
|
|
|
|
plain_table_options.index_sparseness = 16;
|
|
|
|
plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
|
|
|
|
plain_table_options.encoding_type = encoding_type;
|
|
|
|
|
|
|
|
options.table_factory.reset(new TestPlainTableFactory(
|
|
|
|
&expect_bloom_not_match, plain_table_options,
|
|
|
|
0 /* column_family_id */, kDefaultColumnFamilyName));
|
|
|
|
}
|
|
|
|
DestroyAndReopen(&options);
|
|
|
|
|
|
|
|
ASSERT_OK(Put("1000000000foo002", "v_2"));
|
|
|
|
ASSERT_OK(Put("0000000000000bar", "random"));
|
|
|
|
ASSERT_OK(Put("1000000000foo001", "v1"));
|
|
|
|
ASSERT_OK(Put("3000000000000bar", "bar_v"));
|
|
|
|
ASSERT_OK(Put("1000000000foo003", "v__3"));
|
|
|
|
ASSERT_OK(Put("1000000000foo004", "v__4"));
|
|
|
|
ASSERT_OK(Put("1000000000foo005", "v__5"));
|
|
|
|
ASSERT_OK(Put("1000000000foo007", "v__7"));
|
|
|
|
ASSERT_OK(Put("1000000000foo008", "v__8"));
|
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
|
|
|
ASSERT_EQ("v1", Get("1000000000foo001"));
|
|
|
|
ASSERT_EQ("v__3", Get("1000000000foo003"));
|
|
|
|
Iterator* iter = dbfull()->NewIterator(ReadOptions());
|
|
|
|
iter->Seek("1000000000foo000");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("1000000000foo001", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v1", iter->value().ToString());
|
|
|
|
|
|
|
|
iter->Next();
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("1000000000foo002", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v_2", iter->value().ToString());
|
|
|
|
|
|
|
|
iter->Next();
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("1000000000foo003", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v__3", iter->value().ToString());
|
|
|
|
|
|
|
|
iter->Next();
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("1000000000foo004", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v__4", iter->value().ToString());
|
|
|
|
|
|
|
|
iter->Seek("3000000000000bar");
|
2014-05-04 20:55:53 +00:00
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("3000000000000bar", iter->key().ToString());
|
2022-11-02 21:34:24 +00:00
|
|
|
ASSERT_EQ("bar_v", iter->value().ToString());
|
2014-05-04 15:37:09 +00:00
|
|
|
|
2022-11-02 21:34:24 +00:00
|
|
|
iter->Seek("1000000000foo000");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("1000000000foo001", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v1", iter->value().ToString());
|
|
|
|
|
|
|
|
iter->Seek("1000000000foo005");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("1000000000foo005", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v__5", iter->value().ToString());
|
|
|
|
|
|
|
|
iter->Seek("1000000000foo006");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("1000000000foo007", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v__7", iter->value().ToString());
|
|
|
|
|
|
|
|
iter->Seek("1000000000foo008");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("1000000000foo008", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v__8", iter->value().ToString());
|
|
|
|
|
|
|
|
if (total_order == 0) {
|
|
|
|
iter->Seek("1000000000foo009");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("3000000000000bar", iter->key().ToString());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test Bloom Filter
|
|
|
|
if (bloom_bits > 0) {
|
|
|
|
if (!total_order) {
|
|
|
|
// Neither the key nor its prefix exists, so the Bloom filter must not match.
|
|
|
|
expect_bloom_not_match = true;
|
|
|
|
iter->Seek("2not000000000bar");
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("2not000000000bar"));
|
|
|
|
expect_bloom_not_match = false;
|
|
|
|
} else {
|
|
|
|
expect_bloom_not_match = true;
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("2not000000000bar"));
|
|
|
|
expect_bloom_not_match = false;
|
|
|
|
}
|
2014-05-04 20:55:53 +00:00
|
|
|
}
|
2022-11-02 21:34:24 +00:00
|
|
|
ASSERT_OK(iter->status());
|
|
|
|
delete iter;
|
2014-05-04 20:55:53 +00:00
|
|
|
}
|
|
|
|
}
|
2014-02-08 00:25:38 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-05 17:03:42 +00:00
|
|
|
namespace {
|
|
|
|
std::string NthKey(size_t n, char filler) {
|
|
|
|
std::string rv(16, filler);
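// Stuff the decimal digits of n into the first four bytes as raw values 0-9
// (not ASCII), so every n below 10000 maps to a distinct 16-byte key.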
|
|
|
|
rv[0] = n % 10;
|
|
|
|
rv[1] = (n / 10) % 10;
|
|
|
|
rv[2] = (n / 100) % 10;
|
|
|
|
rv[3] = (n / 1000) % 10;
|
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
} // anonymous namespace
|
|
|
|
|
|
|
|
TEST_P(PlainTableDBTest, BloomSchema) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.create_if_missing = true;
|
|
|
|
for (int bloom_locality = 0; bloom_locality <= 1; bloom_locality++) {
|
2019-09-20 19:00:55 +00:00
|
|
|
options.bloom_locality = bloom_locality;
|
2019-09-05 17:03:42 +00:00
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.user_key_len = 16;
|
2019-09-20 19:00:55 +00:00
|
|
|
plain_table_options.bloom_bits_per_key = 3; // high FP rate for test
|
2019-09-05 17:03:42 +00:00
|
|
|
plain_table_options.hash_table_ratio = 0.75;
|
|
|
|
plain_table_options.index_sparseness = 16;
|
|
|
|
plain_table_options.huge_page_tlb_size = 0;
|
|
|
|
plain_table_options.encoding_type = kPlain;
|
|
|
|
|
|
|
|
bool expect_bloom_not_match = false;
|
|
|
|
options.table_factory.reset(new TestPlainTableFactory(
|
2019-09-20 19:00:55 +00:00
|
|
|
&expect_bloom_not_match, plain_table_options, 0 /* column_family_id */,
|
|
|
|
kDefaultColumnFamilyName));
|
2019-09-05 17:03:42 +00:00
|
|
|
DestroyAndReopen(&options);
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < 2345; ++i) {
|
|
|
|
ASSERT_OK(Put(NthKey(i, 'y'), "added"));
|
|
|
|
}
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
2019-09-05 17:03:42 +00:00
|
|
|
ASSERT_EQ("added", Get(NthKey(42, 'y')));
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < 32; ++i) {
|
|
|
|
// Known pattern of Bloom filter false positives can detect schema change
|
|
|
|
// with high probability. Known FPs stuffed into bits:
|
2019-09-09 21:49:39 +00:00
|
|
|
uint32_t pattern;
|
|
|
|
if (!bloom_locality) {
|
|
|
|
pattern = 1785868347UL;
|
2019-09-10 21:32:38 +00:00
|
|
|
} else if (CACHE_LINE_SIZE == 64U) {
|
2019-09-09 21:49:39 +00:00
|
|
|
pattern = 2421694657UL;
|
2019-09-10 21:32:38 +00:00
|
|
|
} else if (CACHE_LINE_SIZE == 128U) {
|
2019-09-09 21:49:39 +00:00
|
|
|
pattern = 788710956UL;
|
|
|
|
} else {
|
2019-09-10 21:32:38 +00:00
|
|
|
ASSERT_EQ(CACHE_LINE_SIZE, 256U);
|
2019-09-09 21:49:39 +00:00
|
|
|
pattern = 163905UL;
|
|
|
|
}
|
|
|
|
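// Bit i of the hard-coded pattern says whether lookup key i (filler 'n', never
// inserted) is expected to be a Bloom false positive under this schema; any
// deviation from the pattern indicates an unintended Bloom schema change.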
bool expect_fp = pattern & (1UL << i);
|
2019-09-20 19:00:55 +00:00
|
|
|
// fprintf(stderr, "expect_fp@%u: %d\n", i, (int)expect_fp);
|
2019-09-05 17:03:42 +00:00
|
|
|
expect_bloom_not_match = !expect_fp;
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get(NthKey(i, 'n')));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-10 04:17:14 +00:00
|
|
|
namespace {
|
2014-04-07 23:56:26 +00:00
|
|
|
std::string MakeLongKey(size_t length, char c) {
|
|
|
|
return std::string(length, c);
|
|
|
|
}
|
2022-11-02 21:34:24 +00:00
|
|
|
} // anonymous namespace
|
2014-04-07 23:56:26 +00:00
|
|
|
|
2015-09-16 23:57:43 +00:00
|
|
|
TEST_P(PlainTableDBTest, IteratorLargeKeys) {
|
2014-04-07 23:56:26 +00:00
|
|
|
Options options = CurrentOptions();
|
2014-07-18 07:08:38 +00:00
|
|
|
|
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.user_key_len = 0;
|
|
|
|
plain_table_options.bloom_bits_per_key = 0;
|
|
|
|
plain_table_options.hash_table_ratio = 0;
|
|
|
|
|
|
|
|
options.table_factory.reset(NewPlainTableFactory(plain_table_options));
|
2014-04-07 23:56:26 +00:00
|
|
|
options.create_if_missing = true;
|
|
|
|
options.prefix_extractor.reset();
|
|
|
|
DestroyAndReopen(&options);
|
|
|
|
|
2022-11-02 21:34:24 +00:00
|
|
|
std::string key_list[] = {MakeLongKey(30, '0'), MakeLongKey(16, '1'),
|
|
|
|
MakeLongKey(32, '2'), MakeLongKey(60, '3'),
|
|
|
|
MakeLongKey(90, '4'), MakeLongKey(50, '5'),
|
|
|
|
MakeLongKey(26, '6')};
|
2014-04-07 23:56:26 +00:00
|
|
|
|
|
|
|
for (size_t i = 0; i < 7; i++) {
|
2022-05-06 20:03:58 +00:00
|
|
|
ASSERT_OK(Put(key_list[i], std::to_string(i)));
|
2014-04-07 23:56:26 +00:00
|
|
|
}
|
|
|
|
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
2014-04-07 23:56:26 +00:00
|
|
|
|
2014-04-25 19:21:34 +00:00
|
|
|
Iterator* iter = dbfull()->NewIterator(ReadOptions());
|
2014-04-07 23:56:26 +00:00
|
|
|
iter->Seek(key_list[0]);
|
|
|
|
|
|
|
|
for (size_t i = 0; i < 7; i++) {
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ(key_list[i], iter->key().ToString());
|
2022-05-06 20:03:58 +00:00
|
|
|
ASSERT_EQ(std::to_string(i), iter->value().ToString());
|
2014-04-07 23:56:26 +00:00
|
|
|
iter->Next();
|
|
|
|
}
|
|
|
|
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
|
|
|
|
|
|
|
delete iter;
|
|
|
|
}
|
|
|
|
|
2014-06-18 23:36:48 +00:00
|
|
|
namespace {
|
|
|
|
std::string MakeLongKeyWithPrefix(size_t length, char c) {
|
|
|
|
return "00000000" + std::string(length - 8, c);
|
|
|
|
}
|
2022-11-02 21:34:24 +00:00
|
|
|
} // anonymous namespace
|
2014-06-18 23:36:48 +00:00
|
|
|
|
2015-09-16 23:57:43 +00:00
|
|
|
TEST_P(PlainTableDBTest, IteratorLargeKeysWithPrefix) {
|
2014-06-18 23:36:48 +00:00
|
|
|
Options options = CurrentOptions();
|
2014-07-18 07:08:38 +00:00
|
|
|
|
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.user_key_len = 16;
|
|
|
|
plain_table_options.bloom_bits_per_key = 0;
|
|
|
|
plain_table_options.hash_table_ratio = 0.8;
|
|
|
|
plain_table_options.index_sparseness = 3;
|
|
|
|
plain_table_options.huge_page_tlb_size = 0;
|
|
|
|
plain_table_options.encoding_type = kPrefix;
|
|
|
|
|
|
|
|
options.table_factory.reset(NewPlainTableFactory(plain_table_options));
|
2014-06-18 23:36:48 +00:00
|
|
|
options.create_if_missing = true;
|
|
|
|
DestroyAndReopen(&options);
|
|
|
|
|
|
|
|
std::string key_list[] = {
|
|
|
|
MakeLongKeyWithPrefix(30, '0'), MakeLongKeyWithPrefix(16, '1'),
|
|
|
|
MakeLongKeyWithPrefix(32, '2'), MakeLongKeyWithPrefix(60, '3'),
|
|
|
|
MakeLongKeyWithPrefix(90, '4'), MakeLongKeyWithPrefix(50, '5'),
|
|
|
|
MakeLongKeyWithPrefix(26, '6')};
|
|
|
|
|
|
|
|
for (size_t i = 0; i < 7; i++) {
|
2022-05-06 20:03:58 +00:00
|
|
|
ASSERT_OK(Put(key_list[i], std::to_string(i)));
|
2014-06-18 23:36:48 +00:00
|
|
|
}
|
|
|
|
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
2014-06-18 23:36:48 +00:00
|
|
|
|
|
|
|
Iterator* iter = dbfull()->NewIterator(ReadOptions());
|
|
|
|
iter->Seek(key_list[0]);
|
|
|
|
|
|
|
|
for (size_t i = 0; i < 7; i++) {
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ(key_list[i], iter->key().ToString());
|
2022-05-06 20:03:58 +00:00
|
|
|
ASSERT_EQ(std::to_string(i), iter->value().ToString());
|
2014-06-18 23:36:48 +00:00
|
|
|
iter->Next();
|
|
|
|
}
|
|
|
|
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
|
|
|
|
|
|
|
delete iter;
|
|
|
|
}
|
|
|
|
|
2015-09-16 23:57:43 +00:00
|
|
|
TEST_P(PlainTableDBTest, IteratorReverseSuffixComparator) {
|
2014-02-08 00:25:38 +00:00
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.create_if_missing = true;
|
|
|
|
// Set only one bucket to force bucket conflict.
|
|
|
|
// Test index interval for the same prefix to be 1, 2 and 4
|
2014-10-29 20:49:45 +00:00
|
|
|
test::SimpleSuffixReverseComparator comp;
|
2014-02-08 00:25:38 +00:00
|
|
|
options.comparator = ∁
|
|
|
|
DestroyAndReopen(&options);
|
|
|
|
|
2013-10-29 03:34:02 +00:00
|
|
|
ASSERT_OK(Put("1000000000foo002", "v_2"));
|
|
|
|
ASSERT_OK(Put("0000000000000bar", "random"));
|
|
|
|
ASSERT_OK(Put("1000000000foo001", "v1"));
|
|
|
|
ASSERT_OK(Put("3000000000000bar", "bar_v"));
|
|
|
|
ASSERT_OK(Put("1000000000foo003", "v__3"));
|
|
|
|
ASSERT_OK(Put("1000000000foo004", "v__4"));
|
|
|
|
ASSERT_OK(Put("1000000000foo005", "v__5"));
|
|
|
|
ASSERT_OK(Put("1000000000foo007", "v__7"));
|
|
|
|
ASSERT_OK(Put("1000000000foo008", "v__8"));
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
2013-10-29 03:34:02 +00:00
|
|
|
ASSERT_EQ("v1", Get("1000000000foo001"));
|
|
|
|
ASSERT_EQ("v__3", Get("1000000000foo003"));
|
2014-04-25 19:21:34 +00:00
|
|
|
Iterator* iter = dbfull()->NewIterator(ReadOptions());
|
2014-02-08 00:25:38 +00:00
|
|
|
iter->Seek("1000000000foo009");
|
2013-10-29 03:34:02 +00:00
|
|
|
ASSERT_TRUE(iter->Valid());
|
2014-02-08 00:25:38 +00:00
|
|
|
ASSERT_EQ("1000000000foo008", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v__8", iter->value().ToString());
|
2013-10-29 03:34:02 +00:00
|
|
|
|
|
|
|
iter->Next();
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
2014-02-08 00:25:38 +00:00
|
|
|
ASSERT_EQ("1000000000foo007", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v__7", iter->value().ToString());
|
2013-10-29 03:34:02 +00:00
|
|
|
|
|
|
|
iter->Next();
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
2014-02-08 00:25:38 +00:00
|
|
|
ASSERT_EQ("1000000000foo005", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v__5", iter->value().ToString());
|
2013-10-29 03:34:02 +00:00
|
|
|
|
|
|
|
iter->Next();
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("1000000000foo004", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v__4", iter->value().ToString());
|
|
|
|
|
|
|
|
iter->Seek("3000000000000bar");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("3000000000000bar", iter->key().ToString());
|
|
|
|
ASSERT_EQ("bar_v", iter->value().ToString());
|
|
|
|
|
|
|
|
iter->Seek("1000000000foo005");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("1000000000foo005", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v__5", iter->value().ToString());
|
|
|
|
|
|
|
|
iter->Seek("1000000000foo006");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
2014-02-08 00:25:38 +00:00
|
|
|
ASSERT_EQ("1000000000foo005", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v__5", iter->value().ToString());
|
2013-10-29 03:34:02 +00:00
|
|
|
|
|
|
|
iter->Seek("1000000000foo008");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("1000000000foo008", iter->key().ToString());
|
|
|
|
ASSERT_EQ("v__8", iter->value().ToString());
|
|
|
|
|
2014-02-08 00:25:38 +00:00
|
|
|
iter->Seek("1000000000foo000");
|
2013-10-29 03:34:02 +00:00
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("3000000000000bar", iter->key().ToString());
|
|
|
|
|
|
|
|
delete iter;
|
|
|
|
}
|
|
|
|
|
2015-09-16 23:57:43 +00:00
|
|
|
TEST_P(PlainTableDBTest, HashBucketConflict) {
|
2014-05-04 20:55:53 +00:00
|
|
|
for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024;
|
|
|
|
huge_page_tlb_size += 2 * 1024 * 1024) {
|
|
|
|
for (unsigned char i = 1; i <= 3; i++) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.create_if_missing = true;
|
|
|
|
// Set only one bucket to force bucket conflict.
|
|
|
|
// Test index interval for the same prefix to be 1, 2 and 4
|
2014-07-18 07:08:38 +00:00
|
|
|
|
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.user_key_len = 16;
|
|
|
|
plain_table_options.bloom_bits_per_key = 0;
|
|
|
|
plain_table_options.hash_table_ratio = 0;
|
|
|
|
plain_table_options.index_sparseness = 1 << (i - 1);  // 1, 2 and 4
|
|
|
|
plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
|
|
|
|
|
|
|
|
options.table_factory.reset(NewPlainTableFactory(plain_table_options));
|
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
DestroyAndReopen(&options);
|
|
|
|
ASSERT_OK(Put("5000000000000fo0", "v1"));
|
|
|
|
ASSERT_OK(Put("5000000000000fo1", "v2"));
|
|
|
|
ASSERT_OK(Put("5000000000000fo2", "v"));
|
|
|
|
ASSERT_OK(Put("2000000000000fo0", "v3"));
|
|
|
|
ASSERT_OK(Put("2000000000000fo1", "v4"));
|
|
|
|
ASSERT_OK(Put("2000000000000fo2", "v"));
|
|
|
|
ASSERT_OK(Put("2000000000000fo3", "v"));
|
2014-04-25 22:45:37 +00:00
|
|
|
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
2014-02-08 00:25:38 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
ASSERT_EQ("v1", Get("5000000000000fo0"));
|
|
|
|
ASSERT_EQ("v2", Get("5000000000000fo1"));
|
|
|
|
ASSERT_EQ("v3", Get("2000000000000fo0"));
|
|
|
|
ASSERT_EQ("v4", Get("2000000000000fo1"));
|
2014-04-25 22:45:37 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
ASSERT_EQ("NOT_FOUND", Get("5000000000000bar"));
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("2000000000000bar"));
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("5000000000000fo8"));
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("2000000000000fo8"));
|
2014-04-25 22:45:37 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
ReadOptions ro;
|
|
|
|
Iterator* iter = dbfull()->NewIterator(ro);
|
2014-04-25 22:45:37 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
iter->Seek("5000000000000fo0");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("5000000000000fo0", iter->key().ToString());
|
|
|
|
iter->Next();
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("5000000000000fo1", iter->key().ToString());
|
|
|
|
|
|
|
|
iter->Seek("5000000000000fo1");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("5000000000000fo1", iter->key().ToString());
|
2014-02-08 00:25:38 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
iter->Seek("2000000000000fo0");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("2000000000000fo0", iter->key().ToString());
|
|
|
|
iter->Next();
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("2000000000000fo1", iter->key().ToString());
|
|
|
|
|
|
|
|
iter->Seek("2000000000000fo1");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("2000000000000fo1", iter->key().ToString());
|
|
|
|
|
|
|
|
iter->Seek("2000000000000bar");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("2000000000000fo0", iter->key().ToString());
|
|
|
|
|
|
|
|
iter->Seek("5000000000000bar");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("5000000000000fo0", iter->key().ToString());
|
2014-02-08 00:25:38 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
iter->Seek("2000000000000fo8");
|
|
|
|
ASSERT_TRUE(!iter->Valid() ||
|
|
|
|
options.comparator->Compare(iter->key(), "20000001") > 0);
|
2014-02-08 00:25:38 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
iter->Seek("5000000000000fo8");
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
2014-02-08 00:25:38 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
iter->Seek("1000000000000fo2");
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
2014-02-08 00:25:38 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
iter->Seek("3000000000000fo2");
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
2014-04-25 22:45:37 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
iter->Seek("8000000000000fo2");
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
|
|
|
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(iter->status());
|
2014-05-04 20:55:53 +00:00
|
|
|
delete iter;
|
|
|
|
}
|
2014-02-08 00:25:38 +00:00
|
|
|
}
|
|
|
|
}
|
2013-10-29 03:34:02 +00:00
|
|
|
|
2015-09-16 23:57:43 +00:00
|
|
|
TEST_P(PlainTableDBTest, HashBucketConflictReverseSuffixComparator) {
|
2014-05-04 20:55:53 +00:00
|
|
|
for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024;
|
|
|
|
huge_page_tlb_size += 2 * 1024 * 1024) {
|
|
|
|
for (unsigned char i = 1; i <= 3; i++) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.create_if_missing = true;
|
2014-10-29 20:49:45 +00:00
|
|
|
test::SimpleSuffixReverseComparator comp;
|
2014-05-04 20:55:53 +00:00
|
|
|
options.comparator = ∁
|
|
|
|
// Set only one bucket to force bucket conflict.
|
|
|
|
// Test index interval for the same prefix to be 1, 2 and 4
|
2014-07-18 07:08:38 +00:00
|
|
|
|
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.user_key_len = 16;
|
|
|
|
plain_table_options.bloom_bits_per_key = 0;
|
|
|
|
plain_table_options.hash_table_ratio = 0;
|
|
|
|
plain_table_options.index_sparseness = 1 << (i - 1);  // 1, 2 and 4
|
|
|
|
plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
|
|
|
|
|
|
|
|
options.table_factory.reset(NewPlainTableFactory(plain_table_options));
|
2014-05-04 20:55:53 +00:00
|
|
|
DestroyAndReopen(&options);
|
|
|
|
ASSERT_OK(Put("5000000000000fo0", "v1"));
|
|
|
|
ASSERT_OK(Put("5000000000000fo1", "v2"));
|
|
|
|
ASSERT_OK(Put("5000000000000fo2", "v"));
|
|
|
|
ASSERT_OK(Put("2000000000000fo0", "v3"));
|
|
|
|
ASSERT_OK(Put("2000000000000fo1", "v4"));
|
|
|
|
ASSERT_OK(Put("2000000000000fo2", "v"));
|
|
|
|
ASSERT_OK(Put("2000000000000fo3", "v"));
|
2014-04-25 22:45:37 +00:00
|
|
|
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
2014-04-25 22:45:37 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
ASSERT_EQ("v1", Get("5000000000000fo0"));
|
|
|
|
ASSERT_EQ("v2", Get("5000000000000fo1"));
|
|
|
|
ASSERT_EQ("v3", Get("2000000000000fo0"));
|
|
|
|
ASSERT_EQ("v4", Get("2000000000000fo1"));
|
2014-02-08 00:25:38 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
ASSERT_EQ("NOT_FOUND", Get("5000000000000bar"));
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("2000000000000bar"));
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("5000000000000fo8"));
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("2000000000000fo8"));
|
2014-02-08 00:25:38 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
ReadOptions ro;
|
|
|
|
Iterator* iter = dbfull()->NewIterator(ro);
|
2014-04-25 22:45:37 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
iter->Seek("5000000000000fo1");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("5000000000000fo1", iter->key().ToString());
|
|
|
|
iter->Next();
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("5000000000000fo0", iter->key().ToString());
|
|
|
|
|
|
|
|
iter->Seek("5000000000000fo1");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("5000000000000fo1", iter->key().ToString());
|
|
|
|
|
|
|
|
iter->Seek("2000000000000fo1");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("2000000000000fo1", iter->key().ToString());
|
|
|
|
iter->Next();
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("2000000000000fo0", iter->key().ToString());
|
2014-02-08 00:25:38 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
iter->Seek("2000000000000fo1");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("2000000000000fo1", iter->key().ToString());
|
2014-02-08 00:25:38 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
iter->Seek("2000000000000var");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("2000000000000fo3", iter->key().ToString());
|
|
|
|
|
|
|
|
iter->Seek("5000000000000var");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("5000000000000fo2", iter->key().ToString());
|
2014-02-08 00:25:38 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
std::string seek_key = "2000000000000bar";
|
|
|
|
iter->Seek(seek_key);
|
|
|
|
ASSERT_TRUE(!iter->Valid() ||
|
|
|
|
options.prefix_extractor->Transform(iter->key()) !=
|
|
|
|
options.prefix_extractor->Transform(seek_key));
|
2014-04-25 22:45:37 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
iter->Seek("1000000000000fo2");
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
2014-04-25 22:45:37 +00:00
|
|
|
|
2014-05-04 20:55:53 +00:00
|
|
|
iter->Seek("3000000000000fo2");
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
|
|
|
|
|
|
|
iter->Seek("8000000000000fo2");
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
|
|
|
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(iter->status());
|
2014-05-04 20:55:53 +00:00
|
|
|
delete iter;
|
|
|
|
}
|
2014-02-08 00:25:38 +00:00
|
|
|
}
|
|
|
|
}
|
2013-10-29 03:34:02 +00:00
|
|
|
|
2015-09-16 23:57:43 +00:00
|
|
|
TEST_P(PlainTableDBTest, NonExistingKeyToNonEmptyBucket) {
|
2014-02-08 00:25:38 +00:00
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.create_if_missing = true;
|
|
|
|
// Set only one bucket to force bucket conflict.
|
|
|
|
// Test index interval for the same prefix to be 1, 2 and 4
|
2014-07-18 07:08:38 +00:00
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.user_key_len = 16;
|
|
|
|
plain_table_options.bloom_bits_per_key = 0;
|
|
|
|
plain_table_options.hash_table_ratio = 0;
|
|
|
|
plain_table_options.index_sparseness = 5;
|
|
|
|
|
|
|
|
options.table_factory.reset(NewPlainTableFactory(plain_table_options));
|
2014-02-08 00:25:38 +00:00
|
|
|
DestroyAndReopen(&options);
|
|
|
|
ASSERT_OK(Put("5000000000000fo0", "v1"));
|
|
|
|
ASSERT_OK(Put("5000000000000fo1", "v2"));
|
|
|
|
ASSERT_OK(Put("5000000000000fo2", "v3"));
|
2013-10-29 03:34:02 +00:00
|
|
|
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
2013-10-29 03:34:02 +00:00
|
|
|
|
2014-02-08 00:25:38 +00:00
|
|
|
ASSERT_EQ("v1", Get("5000000000000fo0"));
|
|
|
|
ASSERT_EQ("v2", Get("5000000000000fo1"));
|
|
|
|
ASSERT_EQ("v3", Get("5000000000000fo2"));
|
|
|
|
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("8000000000000bar"));
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("1000000000000bar"));
|
|
|
|
|
2014-04-25 19:21:34 +00:00
|
|
|
Iterator* iter = dbfull()->NewIterator(ReadOptions());
|
2014-02-08 00:25:38 +00:00
|
|
|
|
|
|
|
iter->Seek("5000000000000bar");
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("5000000000000fo0", iter->key().ToString());
|
|
|
|
|
|
|
|
iter->Seek("5000000000000fo8");
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
|
|
|
|
|
|
|
iter->Seek("1000000000000fo2");
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
|
|
|
|
|
|
|
iter->Seek("8000000000000fo2");
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
|
|
|
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(iter->status());
|
2014-02-08 00:25:38 +00:00
|
|
|
delete iter;
|
2013-10-29 03:34:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static std::string Key(int i) {
|
|
|
|
char buf[100];
|
|
|
|
snprintf(buf, sizeof(buf), "key_______%06d", i);
|
|
|
|
return std::string(buf);
|
|
|
|
}
|
|
|
|
|
2015-09-16 23:57:43 +00:00
|
|
|
TEST_P(PlainTableDBTest, CompactionTrigger) {
|
2013-10-29 03:34:02 +00:00
|
|
|
Options options = CurrentOptions();
|
2020-03-11 19:31:06 +00:00
|
|
|
options.write_buffer_size = 120 << 10; // 120KB
|
2013-10-29 03:34:02 +00:00
|
|
|
options.num_levels = 3;
|
|
|
|
options.level0_file_num_compaction_trigger = 3;
|
|
|
|
Reopen(&options);
|
|
|
|
|
|
|
|
Random rnd(301);
|
|
|
|
|
|
|
|
for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
|
2022-11-02 21:34:24 +00:00
|
|
|
num++) {
|
2013-10-29 03:34:02 +00:00
|
|
|
std::vector<std::string> values;
|
2015-08-26 21:19:31 +00:00
|
|
|
// Write 120KB (10 values, each 12K)
|
|
|
|
for (int i = 0; i < 10; i++) {
|
2020-07-09 21:33:42 +00:00
|
|
|
values.push_back(rnd.RandomString(12 << 10));
|
2013-10-29 03:34:02 +00:00
|
|
|
ASSERT_OK(Put(Key(i), values[i]));
|
|
|
|
}
|
2015-08-26 21:19:31 +00:00
|
|
|
ASSERT_OK(Put(Key(999), ""));
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
|
2013-10-29 03:34:02 +00:00
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
|
|
|
|
}
|
|
|
|
|
2022-11-02 21:34:24 +00:00
|
|
|
// Generate one more file in level-0, which should trigger a level-0 compaction
|
2013-10-29 03:34:02 +00:00
|
|
|
std::vector<std::string> values;
|
|
|
|
for (int i = 0; i < 12; i++) {
|
2020-07-09 21:33:42 +00:00
|
|
|
values.push_back(rnd.RandomString(10000));
|
2013-10-29 03:34:02 +00:00
|
|
|
ASSERT_OK(Put(Key(i), values[i]));
|
|
|
|
}
|
2015-08-26 21:19:31 +00:00
|
|
|
ASSERT_OK(Put(Key(999), ""));
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
2013-10-29 03:34:02 +00:00
|
|
|
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(1), 1);
|
|
|
|
}
|
|
|
|
|
2015-09-16 23:57:43 +00:00
|
|
|
TEST_P(PlainTableDBTest, AdaptiveTable) {
|
2014-06-17 03:06:18 +00:00
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.create_if_missing = true;
|
|
|
|
|
|
|
|
options.table_factory.reset(NewPlainTableFactory());
|
|
|
|
DestroyAndReopen(&options);
|
|
|
|
|
|
|
|
ASSERT_OK(Put("1000000000000foo", "v1"));
|
|
|
|
ASSERT_OK(Put("0000000000000bar", "v2"));
|
|
|
|
ASSERT_OK(Put("1000000000000foo", "v3"));
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
2014-06-17 03:06:18 +00:00
|
|
|
|
|
|
|
options.create_if_missing = false;
|
2014-06-18 05:04:37 +00:00
|
|
|
std::shared_ptr<TableFactory> block_based_factory(
|
|
|
|
NewBlockBasedTableFactory());
|
2022-11-02 21:34:24 +00:00
|
|
|
std::shared_ptr<TableFactory> plain_table_factory(NewPlainTableFactory());
|
2020-02-14 16:15:24 +00:00
|
|
|
std::shared_ptr<TableFactory> dummy_factory;
|
2014-06-18 05:04:37 +00:00
|
|
|
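// The adaptive factory writes new SSTs with the first (block-based) factory
// but chooses a reader per file from its on-disk format, so the plain table
// file flushed above stays readable alongside newly written block-based files.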
options.table_factory.reset(NewAdaptiveTableFactory(
|
2020-02-14 16:15:24 +00:00
|
|
|
block_based_factory, block_based_factory, plain_table_factory));
|
2014-06-17 03:06:18 +00:00
|
|
|
Reopen(&options);
|
|
|
|
ASSERT_EQ("v3", Get("1000000000000foo"));
|
|
|
|
ASSERT_EQ("v2", Get("0000000000000bar"));
|
|
|
|
|
|
|
|
ASSERT_OK(Put("2000000000000foo", "v4"));
|
|
|
|
ASSERT_OK(Put("3000000000000bar", "v5"));
|
2020-10-13 18:58:12 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_FlushMemTable());
|
2014-06-17 03:06:18 +00:00
|
|
|
ASSERT_EQ("v4", Get("2000000000000foo"));
|
|
|
|
ASSERT_EQ("v5", Get("3000000000000bar"));
|
|
|
|
|
|
|
|
Reopen(&options);
|
|
|
|
ASSERT_EQ("v3", Get("1000000000000foo"));
|
|
|
|
ASSERT_EQ("v2", Get("0000000000000bar"));
|
|
|
|
ASSERT_EQ("v4", Get("2000000000000foo"));
|
|
|
|
ASSERT_EQ("v5", Get("3000000000000bar"));
|
|
|
|
|
2020-02-14 16:15:24 +00:00
|
|
|
options.paranoid_checks = false;
|
2014-06-17 03:06:18 +00:00
|
|
|
options.table_factory.reset(NewBlockBasedTableFactory());
|
|
|
|
Reopen(&options);
|
|
|
|
ASSERT_NE("v3", Get("1000000000000foo"));
|
|
|
|
|
2020-02-14 16:15:24 +00:00
|
|
|
options.paranoid_checks = false;
|
2014-06-17 03:06:18 +00:00
|
|
|
options.table_factory.reset(NewPlainTableFactory());
|
|
|
|
Reopen(&options);
|
|
|
|
ASSERT_NE("v5", Get("3000000000000bar"));
|
|
|
|
}
|
|
|
|
|
2020-06-03 22:53:09 +00:00
|
|
|
INSTANTIATE_TEST_CASE_P(PlainTableDBTest, PlainTableDBTest, ::testing::Bool());
|
2015-09-16 23:57:43 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2013-10-29 03:34:02 +00:00
|
|
|
|
|
|
|
int main(int argc, char** argv) {
|
2022-10-18 07:35:35 +00:00
|
|
|
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
|
2015-03-17 21:08:00 +00:00
|
|
|
::testing::InitGoogleTest(&argc, argv);
|
|
|
|
return RUN_ALL_TESTS();
|
2013-10-29 03:34:02 +00:00
|
|
|
}
|