// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <stdio.h>

#include <algorithm>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "cache/lru_cache.h"
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "memtable/stl_wrappers.h"
#include "monitoring/statistics.h"
#include "port/port.h"
#include "rocksdb/cache.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/iterator.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/perf_context.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/statistics.h"
#include "rocksdb/write_buffer_manager.h"
#include "table/block.h"
#include "table/block_based_table_builder.h"
#include "table/block_based_table_factory.h"
#include "table/block_based_table_reader.h"
#include "table/block_builder.h"
#include "table/format.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/meta_blocks.h"
#include "table/plain_table_factory.h"
#include "table/scoped_arena_iterator.h"
#include "table/sst_file_writer_collectors.h"
#include "util/compression.h"
#include "util/random.h"
#include "util/string_util.h"
#include "util/sync_point.h"
#include "util/testharness.h"
#include "util/testutil.h"
#include "utilities/merge_operators.h"

namespace rocksdb {

extern const uint64_t kLegacyBlockBasedTableMagicNumber;
extern const uint64_t kLegacyPlainTableMagicNumber;
extern const uint64_t kBlockBasedTableMagicNumber;
extern const uint64_t kPlainTableMagicNumber;

namespace {

// DummyPropertiesCollector used to test BlockBasedTableProperties
class DummyPropertiesCollector : public TablePropertiesCollector {
 public:
  const char* Name() const { return ""; }

  Status Finish(UserCollectedProperties* properties) { return Status::OK(); }

  Status Add(const Slice& user_key, const Slice& value) { return Status::OK(); }

  virtual UserCollectedProperties GetReadableProperties() const {
    return UserCollectedProperties{};
  }
};

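// Factories that hand out DummyPropertiesCollector instances; they differ
// only in the name they report, so tests can tell the two apart.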
class DummyPropertiesCollectorFactory1
    : public TablePropertiesCollectorFactory {
 public:
  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
      TablePropertiesCollectorFactory::Context context) {
    return new DummyPropertiesCollector();
  }
  const char* Name() const { return "DummyPropertiesCollector1"; }
};

class DummyPropertiesCollectorFactory2
    : public TablePropertiesCollectorFactory {
 public:
  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
      TablePropertiesCollectorFactory::Context context) {
    return new DummyPropertiesCollector();
  }
  const char* Name() const { return "DummyPropertiesCollector2"; }
};

// Return reverse of "key".
// Used to test non-lexicographic comparators.
std::string Reverse(const Slice& key) {
  auto rev = key.ToString();
  std::reverse(rev.begin(), rev.end());
  return rev;
}

class ReverseKeyComparator : public Comparator {
 public:
  virtual const char* Name() const override {
    return "rocksdb.ReverseBytewiseComparator";
  }

  virtual int Compare(const Slice& a, const Slice& b) const override {
    return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
  }

  virtual void FindShortestSeparator(std::string* start,
                                     const Slice& limit) const override {
    std::string s = Reverse(*start);
    std::string l = Reverse(limit);
    BytewiseComparator()->FindShortestSeparator(&s, l);
    *start = Reverse(s);
  }

  virtual void FindShortSuccessor(std::string* key) const override {
    std::string s = Reverse(*key);
    BytewiseComparator()->FindShortSuccessor(&s);
    *key = Reverse(s);
  }
};

ReverseKeyComparator reverse_key_comparator;

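// Advance *key to a key that sorts immediately after it under cmp, by
// appending a '\0' in the comparator's byte order.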
void Increment(const Comparator* cmp, std::string* key) {
  if (cmp == BytewiseComparator()) {
    key->push_back('\0');
  } else {
    assert(cmp == &reverse_key_comparator);
    std::string rev = Reverse(*key);
    rev.push_back('\0');
    *key = Reverse(rev);
  }
}

}  // namespace

// Helper class for tests to unify the interface between
// BlockBuilder/TableBuilder and Block/Table.
class Constructor {
 public:
  explicit Constructor(const Comparator* cmp)
      : data_(stl_wrappers::LessOfComparator(cmp)) {}
  virtual ~Constructor() { }

  void Add(const std::string& key, const Slice& value) {
    data_[key] = value.ToString();
  }

  // Finish constructing the data structure with all the keys that have
  // been added so far.  Returns the keys in sorted order in "*keys"
  // and stores the key/value pairs in "*kvmap"
  void Finish(const Options& options, const ImmutableCFOptions& ioptions,
              const BlockBasedTableOptions& table_options,
              const InternalKeyComparator& internal_comparator,
              std::vector<std::string>* keys, stl_wrappers::KVMap* kvmap) {
    last_internal_key_ = &internal_comparator;
    *kvmap = data_;
    keys->clear();
    for (const auto& kv : data_) {
      keys->push_back(kv.first);
    }
    data_.clear();
    Status s = FinishImpl(options, ioptions, table_options,
                          internal_comparator, *kvmap);
    ASSERT_TRUE(s.ok()) << s.ToString();
  }

  // Construct the data structure from the data in "data"
  virtual Status FinishImpl(const Options& options,
                            const ImmutableCFOptions& ioptions,
                            const BlockBasedTableOptions& table_options,
                            const InternalKeyComparator& internal_comparator,
                            const stl_wrappers::KVMap& data) = 0;

  virtual InternalIterator* NewIterator() const = 0;

  virtual const stl_wrappers::KVMap& data() { return data_; }

  virtual bool IsArenaMode() const { return false; }

  virtual DB* db() const { return nullptr; }  // Overridden in DBConstructor

  virtual bool AnywayDeleteIterator() const { return false; }

 protected:
  const InternalKeyComparator* last_internal_key_;

 private:
  stl_wrappers::KVMap data_;
};

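// Constructor backed by a single data block: keys go through a BlockBuilder,
// the finished contents are wrapped in a Block, and reads use
// Block::NewIterator().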
class BlockConstructor: public Constructor {
 public:
  explicit BlockConstructor(const Comparator* cmp)
      : Constructor(cmp),
        comparator_(cmp),
        block_(nullptr) { }
  ~BlockConstructor() {
    delete block_;
  }
  virtual Status FinishImpl(const Options& options,
                            const ImmutableCFOptions& ioptions,
                            const BlockBasedTableOptions& table_options,
                            const InternalKeyComparator& internal_comparator,
                            const stl_wrappers::KVMap& kv_map) override {
    delete block_;
    block_ = nullptr;
    BlockBuilder builder(table_options.block_restart_interval);

    for (const auto kv : kv_map) {
      builder.Add(kv.first, kv.second);
    }
    // Open the block
    data_ = builder.Finish().ToString();
    BlockContents contents;
    contents.data = data_;
    contents.cachable = false;
    block_ = new Block(std::move(contents), kDisableGlobalSequenceNumber);
    return Status::OK();
  }
  virtual InternalIterator* NewIterator() const override {
    return block_->NewIterator(comparator_);
  }

 private:
  const Comparator* comparator_;
  std::string data_;
  Block* block_;

  BlockConstructor();
};

// A helper class that converts internal format keys into user keys
class KeyConvertingIterator : public InternalIterator {
 public:
  explicit KeyConvertingIterator(InternalIterator* iter,
                                 bool arena_mode = false)
      : iter_(iter), arena_mode_(arena_mode) {}
  virtual ~KeyConvertingIterator() {
    if (arena_mode_) {
      iter_->~InternalIterator();
    } else {
      delete iter_;
    }
  }
  virtual bool Valid() const override { return iter_->Valid(); }
  virtual void Seek(const Slice& target) override {
    ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
    std::string encoded;
    AppendInternalKey(&encoded, ikey);
    iter_->Seek(encoded);
  }
  virtual void SeekForPrev(const Slice& target) override {
    ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
    std::string encoded;
    AppendInternalKey(&encoded, ikey);
    iter_->SeekForPrev(encoded);
  }
  virtual void SeekToFirst() override { iter_->SeekToFirst(); }
  virtual void SeekToLast() override { iter_->SeekToLast(); }
  virtual void Next() override { iter_->Next(); }
  virtual void Prev() override { iter_->Prev(); }

  virtual Slice key() const override {
    assert(Valid());
    ParsedInternalKey parsed_key;
    if (!ParseInternalKey(iter_->key(), &parsed_key)) {
      status_ = Status::Corruption("malformed internal key");
      return Slice("corrupted key");
    }
    return parsed_key.user_key;
  }

  virtual Slice value() const override { return iter_->value(); }
  virtual Status status() const override {
    return status_.ok() ? iter_->status() : status_;
  }

 private:
  mutable Status status_;
  InternalIterator* iter_;
  bool arena_mode_;

  // No copying allowed
  KeyConvertingIterator(const KeyConvertingIterator&);
  void operator=(const KeyConvertingIterator&);
};

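// Constructor that builds a whole table file: keys are written through the
// configured TableFactory into an in-memory file (test::StringSink) and read
// back via the factory's TableReader.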
class TableConstructor: public Constructor {
 public:
  explicit TableConstructor(const Comparator* cmp,
                            bool convert_to_internal_key = false)
      : Constructor(cmp),
        convert_to_internal_key_(convert_to_internal_key) {}
  ~TableConstructor() { Reset(); }

  virtual Status FinishImpl(const Options& options,
                            const ImmutableCFOptions& ioptions,
                            const BlockBasedTableOptions& table_options,
                            const InternalKeyComparator& internal_comparator,
                            const stl_wrappers::KVMap& kv_map) override {
    Reset();
    soptions.use_mmap_reads = ioptions.allow_mmap_reads;
    file_writer_.reset(test::GetWritableFileWriter(new test::StringSink()));
    unique_ptr<TableBuilder> builder;
    std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
        int_tbl_prop_collector_factories;
    std::string column_family_name;
    int unknown_level = -1;
    builder.reset(ioptions.table_factory->NewTableBuilder(
        TableBuilderOptions(ioptions, internal_comparator,
                            &int_tbl_prop_collector_factories,
                            options.compression, CompressionOptions(),
                            nullptr /* compression_dict */,
                            false /* skip_filters */, column_family_name,
                            unknown_level),
        TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
        file_writer_.get()));

    for (const auto kv : kv_map) {
      if (convert_to_internal_key_) {
        ParsedInternalKey ikey(kv.first, kMaxSequenceNumber, kTypeValue);
        std::string encoded;
        AppendInternalKey(&encoded, ikey);
        builder->Add(encoded, kv.second);
      } else {
        builder->Add(kv.first, kv.second);
      }
      EXPECT_TRUE(builder->status().ok());
    }
    Status s = builder->Finish();
    file_writer_->Flush();
    EXPECT_TRUE(s.ok()) << s.ToString();

    EXPECT_EQ(GetSink()->contents().size(), builder->FileSize());

    // Open the table
    uniq_id_ = cur_uniq_id_++;
    file_reader_.reset(test::GetRandomAccessFileReader(new test::StringSource(
        GetSink()->contents(), uniq_id_, ioptions.allow_mmap_reads)));
    return ioptions.table_factory->NewTableReader(
        TableReaderOptions(ioptions, soptions, internal_comparator),
        std::move(file_reader_), GetSink()->contents().size(), &table_reader_);
  }

  virtual InternalIterator* NewIterator() const override {
    ReadOptions ro;
    InternalIterator* iter = table_reader_->NewIterator(ro);
    if (convert_to_internal_key_) {
      return new KeyConvertingIterator(iter);
    } else {
      return iter;
    }
  }

  uint64_t ApproximateOffsetOf(const Slice& key) const {
    if (convert_to_internal_key_) {
      InternalKey ikey(key, kMaxSequenceNumber, kTypeValue);
      const Slice skey = ikey.Encode();
      return table_reader_->ApproximateOffsetOf(skey);
    }
    return table_reader_->ApproximateOffsetOf(key);
  }

  virtual Status Reopen(const ImmutableCFOptions& ioptions) {
    file_reader_.reset(test::GetRandomAccessFileReader(new test::StringSource(
        GetSink()->contents(), uniq_id_, ioptions.allow_mmap_reads)));
    return ioptions.table_factory->NewTableReader(
        TableReaderOptions(ioptions, soptions, *last_internal_key_),
        std::move(file_reader_), GetSink()->contents().size(), &table_reader_);
  }

  virtual TableReader* GetTableReader() {
    return table_reader_.get();
  }

  virtual bool AnywayDeleteIterator() const override {
    return convert_to_internal_key_;
  }

  void ResetTableReader() { table_reader_.reset(); }

  bool ConvertToInternalKey() { return convert_to_internal_key_; }

 private:
  void Reset() {
    uniq_id_ = 0;
    table_reader_.reset();
    file_writer_.reset();
    file_reader_.reset();
  }

  test::StringSink* GetSink() {
    return static_cast<test::StringSink*>(file_writer_->writable_file());
  }

  uint64_t uniq_id_;
  unique_ptr<WritableFileWriter> file_writer_;
  unique_ptr<RandomAccessFileReader> file_reader_;
  unique_ptr<TableReader> table_reader_;
  bool convert_to_internal_key_;

  TableConstructor();

  static uint64_t cur_uniq_id_;
  EnvOptions soptions;
};
uint64_t TableConstructor::cur_uniq_id_ = 1;

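// Constructor backed by a MemTable: entries are added with increasing
// sequence numbers and read back through the memtable's arena-allocated
// iterator, wrapped in a KeyConvertingIterator.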
class MemTableConstructor: public Constructor {
 public:
  explicit MemTableConstructor(const Comparator* cmp, WriteBufferManager* wb)
      : Constructor(cmp),
        internal_comparator_(cmp),
        write_buffer_manager_(wb),
        table_factory_(new SkipListFactory) {
    options_.memtable_factory = table_factory_;
    ImmutableCFOptions ioptions(options_);
    memtable_ =
        new MemTable(internal_comparator_, ioptions, MutableCFOptions(options_),
                     wb, kMaxSequenceNumber, 0 /* column_family_id */);
    memtable_->Ref();
  }
  ~MemTableConstructor() {
    delete memtable_->Unref();
  }
  virtual Status FinishImpl(const Options&, const ImmutableCFOptions& ioptions,
                            const BlockBasedTableOptions& table_options,
                            const InternalKeyComparator& internal_comparator,
                            const stl_wrappers::KVMap& kv_map) override {
    delete memtable_->Unref();
    ImmutableCFOptions mem_ioptions(ioptions);
    memtable_ = new MemTable(internal_comparator_, mem_ioptions,
                             MutableCFOptions(options_), write_buffer_manager_,
                             kMaxSequenceNumber, 0 /* column_family_id */);
    memtable_->Ref();
    int seq = 1;
    for (const auto kv : kv_map) {
      memtable_->Add(seq, kTypeValue, kv.first, kv.second);
      seq++;
    }
    return Status::OK();
  }
  virtual InternalIterator* NewIterator() const override {
    return new KeyConvertingIterator(
        memtable_->NewIterator(ReadOptions(), &arena_), true);
  }

  virtual bool AnywayDeleteIterator() const override { return true; }

  virtual bool IsArenaMode() const override { return true; }

 private:
  mutable Arena arena_;
  InternalKeyComparator internal_comparator_;
  Options options_;
  WriteBufferManager* write_buffer_manager_;
  MemTable* memtable_;
  std::shared_ptr<SkipListFactory> table_factory_;
};

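// Adapts a DB-level Iterator to the InternalIterator interface so that
// DBConstructor can expose the same NewIterator() signature as the others.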
class InternalIteratorFromIterator : public InternalIterator {
 public:
  explicit InternalIteratorFromIterator(Iterator* it) : it_(it) {}
  virtual bool Valid() const override { return it_->Valid(); }
  virtual void Seek(const Slice& target) override { it_->Seek(target); }
  virtual void SeekForPrev(const Slice& target) override {
    it_->SeekForPrev(target);
  }
  virtual void SeekToFirst() override { it_->SeekToFirst(); }
  virtual void SeekToLast() override { it_->SeekToLast(); }
  virtual void Next() override { it_->Next(); }
  virtual void Prev() override { it_->Prev(); }
  Slice key() const override { return it_->key(); }
  Slice value() const override { return it_->value(); }
  virtual Status status() const override { return it_->status(); }

 private:
  unique_ptr<Iterator> it_;
};

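// Constructor that stores the key/value pairs in a scratch DB under
// test::TmpDir() (recreated on every Finish) and iterates over them through
// the public DB API.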
class DBConstructor: public Constructor {
 public:
  explicit DBConstructor(const Comparator* cmp)
      : Constructor(cmp),
        comparator_(cmp) {
    db_ = nullptr;
    NewDB();
  }
  ~DBConstructor() {
    delete db_;
  }
  virtual Status FinishImpl(const Options& options,
                            const ImmutableCFOptions& ioptions,
                            const BlockBasedTableOptions& table_options,
                            const InternalKeyComparator& internal_comparator,
                            const stl_wrappers::KVMap& kv_map) override {
    delete db_;
    db_ = nullptr;
    NewDB();
    for (const auto kv : kv_map) {
      WriteBatch batch;
      batch.Put(kv.first, kv.second);
      EXPECT_TRUE(db_->Write(WriteOptions(), &batch).ok());
    }
    return Status::OK();
  }

  virtual InternalIterator* NewIterator() const override {
    return new InternalIteratorFromIterator(db_->NewIterator(ReadOptions()));
  }

  virtual DB* db() const override { return db_; }

 private:
  void NewDB() {
    std::string name = test::TmpDir() + "/table_testdb";

    Options options;
    options.comparator = comparator_;
    Status status = DestroyDB(name, options);
    ASSERT_TRUE(status.ok()) << status.ToString();

    options.create_if_missing = true;
    options.error_if_exists = true;
    options.write_buffer_size = 10000;  // Something small to force merging
    status = DB::Open(options, name, &db_);
    ASSERT_TRUE(status.ok()) << status.ToString();
  }

  const Comparator* comparator_;
  DB* db_;
};

enum TestType {
  BLOCK_BASED_TABLE_TEST,
#ifndef ROCKSDB_LITE
  PLAIN_TABLE_SEMI_FIXED_PREFIX,
  PLAIN_TABLE_FULL_STR_PREFIX,
  PLAIN_TABLE_TOTAL_ORDER,
#endif  // !ROCKSDB_LITE
  BLOCK_TEST,
  MEMTABLE_TEST,
  DB_TEST
};

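// One harness configuration: which constructor to exercise plus the
// comparator direction, restart interval, compression, format version and
// mmap settings to apply.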
struct TestArgs {
  TestType type;
  bool reverse_compare;
  int restart_interval;
  CompressionType compression;
  uint32_t format_version;
  bool use_mmap;
};

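// Enumerate the cross product of test types, comparator directions, restart
// intervals and locally supported compression types. Plain table variants get
// only the first restart interval and no compression, since plain table uses
// neither.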
static std::vector<TestArgs> GenerateArgList() {
  std::vector<TestArgs> test_args;
  std::vector<TestType> test_types = {
      BLOCK_BASED_TABLE_TEST,
#ifndef ROCKSDB_LITE
      PLAIN_TABLE_SEMI_FIXED_PREFIX,
      PLAIN_TABLE_FULL_STR_PREFIX,
      PLAIN_TABLE_TOTAL_ORDER,
#endif  // !ROCKSDB_LITE
      BLOCK_TEST,
      MEMTABLE_TEST, DB_TEST};
  std::vector<bool> reverse_compare_types = {false, true};
  std::vector<int> restart_intervals = {16, 1, 1024};

  // Only add compression if it is supported
  std::vector<std::pair<CompressionType, bool>> compression_types;
  compression_types.emplace_back(kNoCompression, false);
  if (Snappy_Supported()) {
    compression_types.emplace_back(kSnappyCompression, false);
  }
  if (Zlib_Supported()) {
    compression_types.emplace_back(kZlibCompression, false);
    compression_types.emplace_back(kZlibCompression, true);
  }
  if (BZip2_Supported()) {
    compression_types.emplace_back(kBZip2Compression, false);
    compression_types.emplace_back(kBZip2Compression, true);
  }
  if (LZ4_Supported()) {
    compression_types.emplace_back(kLZ4Compression, false);
    compression_types.emplace_back(kLZ4Compression, true);
    compression_types.emplace_back(kLZ4HCCompression, false);
    compression_types.emplace_back(kLZ4HCCompression, true);
  }
  if (XPRESS_Supported()) {
    compression_types.emplace_back(kXpressCompression, false);
    compression_types.emplace_back(kXpressCompression, true);
  }
  if (ZSTD_Supported()) {
    compression_types.emplace_back(kZSTD, false);
    compression_types.emplace_back(kZSTD, true);
  }

  for (auto test_type : test_types) {
    for (auto reverse_compare : reverse_compare_types) {
#ifndef ROCKSDB_LITE
      if (test_type == PLAIN_TABLE_SEMI_FIXED_PREFIX ||
          test_type == PLAIN_TABLE_FULL_STR_PREFIX ||
          test_type == PLAIN_TABLE_TOTAL_ORDER) {
        // Plain table doesn't use restart index or compression.
        TestArgs one_arg;
        one_arg.type = test_type;
        one_arg.reverse_compare = reverse_compare;
        one_arg.restart_interval = restart_intervals[0];
        one_arg.compression = compression_types[0].first;
        one_arg.use_mmap = true;
        test_args.push_back(one_arg);
        one_arg.use_mmap = false;
        test_args.push_back(one_arg);
        continue;
      }
#endif  // !ROCKSDB_LITE

      for (auto restart_interval : restart_intervals) {
        for (auto compression_type : compression_types) {
          TestArgs one_arg;
          one_arg.type = test_type;
          one_arg.reverse_compare = reverse_compare;
          one_arg.restart_interval = restart_interval;
          one_arg.compression = compression_type.first;
          one_arg.format_version = compression_type.second ? 2 : 1;
          one_arg.use_mmap = false;
          test_args.push_back(one_arg);
        }
      }
    }
  }
  return test_args;
}

// In order to make all tests run for plain table format, including
// those operating on empty keys, create a new prefix transformer which
// returns a fixed prefix if the slice is not shorter than the prefix length,
// and the full slice if it is shorter.
class FixedOrLessPrefixTransform : public SliceTransform {
 private:
  const size_t prefix_len_;

 public:
  explicit FixedOrLessPrefixTransform(size_t prefix_len) :
      prefix_len_(prefix_len) {
  }

  virtual const char* Name() const override { return "rocksdb.FixedPrefix"; }

  virtual Slice Transform(const Slice& src) const override {
    assert(InDomain(src));
    if (src.size() < prefix_len_) {
      return src;
    }
    return Slice(src.data(), prefix_len_);
  }

  virtual bool InDomain(const Slice& src) const override { return true; }

  virtual bool InRange(const Slice& dst) const override {
    return (dst.size() <= prefix_len_);
  }
};

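// HarnessTest runs the same scan and seek checks against every Constructor
// flavour: Init() builds the constructor described by a TestArgs, and Test()
// compares its iterator with the in-memory key/value model.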
class HarnessTest : public testing::Test {
|
2011-03-18 22:37:00 +00:00
|
|
|
public:
|
2015-03-17 01:08:59 +00:00
|
|
|
HarnessTest()
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 03:52:32 +00:00
|
|
|
: ioptions_(options_),
|
|
|
|
constructor_(nullptr),
|
|
|
|
write_buffer_(options_.db_write_buffer_size) {}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
void Init(const TestArgs& args) {
|
|
|
|
delete constructor_;
|
2013-03-01 02:04:58 +00:00
|
|
|
constructor_ = nullptr;
|
2013-11-20 06:00:48 +00:00
|
|
|
options_ = Options();
|
2012-06-28 06:41:33 +00:00
|
|
|
options_.compression = args.compression;
|
2011-03-18 22:37:00 +00:00
|
|
|
// Use shorter block size for tests to exercise block boundary
|
|
|
|
// conditions more.
|
|
|
|
if (args.reverse_compare) {
|
|
|
|
options_.comparator = &reverse_key_comparator;
|
|
|
|
}
|
2014-01-27 21:53:22 +00:00
|
|
|
|
|
|
|
internal_comparator_.reset(
|
|
|
|
new test::PlainInternalKeyComparator(options_.comparator));
|
|
|
|
|
2013-12-20 17:35:24 +00:00
|
|
|
support_prev_ = true;
|
|
|
|
only_support_prefix_seek_ = false;
|
2015-09-16 23:57:43 +00:00
|
|
|
options_.allow_mmap_reads = args.use_mmap;
|
2011-03-18 22:37:00 +00:00
|
|
|
switch (args.type) {
|
2013-12-20 17:35:24 +00:00
|
|
|
case BLOCK_BASED_TABLE_TEST:
|
2014-08-25 21:22:05 +00:00
|
|
|
table_options_.flush_block_policy_factory.reset(
|
2014-03-01 00:39:27 +00:00
|
|
|
new FlushBlockBySizePolicyFactory());
|
2014-08-25 21:22:05 +00:00
|
|
|
table_options_.block_size = 256;
|
|
|
|
table_options_.block_restart_interval = args.restart_interval;
|
2016-02-05 18:22:37 +00:00
|
|
|
table_options_.index_block_restart_interval = args.restart_interval;
|
2015-01-15 00:24:24 +00:00
|
|
|
table_options_.format_version = args.format_version;
|
2014-08-25 21:22:05 +00:00
|
|
|
options_.table_factory.reset(
|
|
|
|
new BlockBasedTableFactory(table_options_));
|
2016-08-19 22:10:31 +00:00
|
|
|
constructor_ = new TableConstructor(
|
|
|
|
options_.comparator, true /* convert_to_internal_key_ */);
|
|
|
|
internal_comparator_.reset(
|
|
|
|
new InternalKeyComparator(options_.comparator));
|
2013-12-20 17:35:24 +00:00
|
|
|
break;
|
2015-07-20 18:09:14 +00:00
|
|
|
// Plain table is not supported in ROCKSDB_LITE
|
|
|
|
#ifndef ROCKSDB_LITE
|
2013-12-20 17:35:24 +00:00
|
|
|
case PLAIN_TABLE_SEMI_FIXED_PREFIX:
|
|
|
|
support_prev_ = false;
|
|
|
|
only_support_prefix_seek_ = true;
|
2014-03-10 19:56:46 +00:00
|
|
|
options_.prefix_extractor.reset(new FixedOrLessPrefixTransform(2));
|
2014-02-08 00:25:38 +00:00
|
|
|
options_.table_factory.reset(NewPlainTableFactory());
|
2016-08-19 22:10:31 +00:00
|
|
|
constructor_ = new TableConstructor(
|
|
|
|
options_.comparator, true /* convert_to_internal_key_ */);
|
2014-01-27 21:53:22 +00:00
|
|
|
internal_comparator_.reset(
|
|
|
|
new InternalKeyComparator(options_.comparator));
|
2013-12-20 17:35:24 +00:00
|
|
|
break;
|
|
|
|
case PLAIN_TABLE_FULL_STR_PREFIX:
|
|
|
|
support_prev_ = false;
|
|
|
|
only_support_prefix_seek_ = true;
|
2014-03-10 19:56:46 +00:00
|
|
|
options_.prefix_extractor.reset(NewNoopTransform());
|
2014-02-08 00:25:38 +00:00
|
|
|
options_.table_factory.reset(NewPlainTableFactory());
|
2016-08-19 22:10:31 +00:00
|
|
|
constructor_ = new TableConstructor(
|
|
|
|
options_.comparator, true /* convert_to_internal_key_ */);
|
2014-02-08 00:25:38 +00:00
|
|
|
internal_comparator_.reset(
|
|
|
|
new InternalKeyComparator(options_.comparator));
|
|
|
|
break;
|
|
|
|
case PLAIN_TABLE_TOTAL_ORDER:
|
|
|
|
support_prev_ = false;
|
|
|
|
only_support_prefix_seek_ = false;
|
|
|
|
options_.prefix_extractor = nullptr;
|
2014-07-18 07:08:38 +00:00
|
|
|
|
|
|
|
{
|
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.user_key_len = kPlainTableVariableLength;
|
|
|
|
plain_table_options.bloom_bits_per_key = 0;
|
|
|
|
plain_table_options.hash_table_ratio = 0;
|
|
|
|
|
|
|
|
options_.table_factory.reset(
|
|
|
|
NewPlainTableFactory(plain_table_options));
|
|
|
|
}
|
2016-08-19 22:10:31 +00:00
|
|
|
constructor_ = new TableConstructor(
|
|
|
|
options_.comparator, true /* convert_to_internal_key_ */);
|
2014-01-27 21:53:22 +00:00
|
|
|
internal_comparator_.reset(
|
|
|
|
new InternalKeyComparator(options_.comparator));
|
2011-03-18 22:37:00 +00:00
|
|
|
break;
|
2015-07-20 18:09:14 +00:00
|
|
|
#endif // !ROCKSDB_LITE
|
2011-03-18 22:37:00 +00:00
|
|
|
case BLOCK_TEST:
|
2014-08-25 21:22:05 +00:00
|
|
|
table_options_.block_size = 256;
|
|
|
|
options_.table_factory.reset(
|
|
|
|
new BlockBasedTableFactory(table_options_));
|
2011-03-18 22:37:00 +00:00
|
|
|
constructor_ = new BlockConstructor(options_.comparator);
|
|
|
|
break;
|
|
|
|
case MEMTABLE_TEST:
|
2014-08-25 21:22:05 +00:00
|
|
|
table_options_.block_size = 256;
|
|
|
|
options_.table_factory.reset(
|
|
|
|
new BlockBasedTableFactory(table_options_));
|
2014-12-02 20:09:20 +00:00
|
|
|
constructor_ = new MemTableConstructor(options_.comparator,
|
|
|
|
&write_buffer_);
|
2011-03-18 22:37:00 +00:00
|
|
|
break;
|
|
|
|
case DB_TEST:
|
2014-08-25 21:22:05 +00:00
|
|
|
table_options_.block_size = 256;
|
|
|
|
options_.table_factory.reset(
|
|
|
|
new BlockBasedTableFactory(table_options_));
|
2011-03-18 22:37:00 +00:00
|
|
|
constructor_ = new DBConstructor(options_.comparator);
|
|
|
|
break;
|
|
|
|
}
|
2014-09-04 23:18:36 +00:00
|
|
|
ioptions_ = ImmutableCFOptions(options_);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 03:52:32 +00:00
|
|
|
~HarnessTest() { delete constructor_; }
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
void Add(const std::string& key, const std::string& value) {
|
|
|
|
constructor_->Add(key, value);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Test(Random* rnd) {
|
|
|
|
std::vector<std::string> keys;
|
2015-09-02 20:58:22 +00:00
|
|
|
stl_wrappers::KVMap data;
|
2014-09-04 23:18:36 +00:00
|
|
|
constructor_->Finish(options_, ioptions_, table_options_,
|
|
|
|
*internal_comparator_, &keys, &data);
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
TestForwardScan(keys, data);
|
2013-12-20 17:35:24 +00:00
|
|
|
if (support_prev_) {
|
|
|
|
TestBackwardScan(keys, data);
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
TestRandomAccess(rnd, keys, data);
|
|
|
|
}
  void TestForwardScan(const std::vector<std::string>& keys,
                       const stl_wrappers::KVMap& data) {
    InternalIterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    iter->SeekToFirst();
    for (stl_wrappers::KVMap::const_iterator model_iter = data.begin();
         model_iter != data.end(); ++model_iter) {
      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
      iter->Next();
    }
    ASSERT_TRUE(!iter->Valid());
    if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
      iter->~InternalIterator();
    } else {
      delete iter;
    }
  }

  void TestBackwardScan(const std::vector<std::string>& keys,
                        const stl_wrappers::KVMap& data) {
    InternalIterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    iter->SeekToLast();
    for (stl_wrappers::KVMap::const_reverse_iterator model_iter = data.rbegin();
         model_iter != data.rend(); ++model_iter) {
      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
      iter->Prev();
    }
    ASSERT_TRUE(!iter->Valid());
    if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
      iter->~InternalIterator();
    } else {
      delete iter;
    }
  }
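
  // Performs 200 random iterator operations (Next/Seek/SeekToFirst, plus
  // Prev/SeekToLast when supported), mirrors each one on the model map, and
  // asserts that table and model stay in sync after every step.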
  void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
                        const stl_wrappers::KVMap& data) {
    static const bool kVerbose = false;
    InternalIterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    stl_wrappers::KVMap::const_iterator model_iter = data.begin();
    if (kVerbose) fprintf(stderr, "---\n");
    for (int i = 0; i < 200; i++) {
      const int toss = rnd->Uniform(support_prev_ ? 5 : 3);
      switch (toss) {
        case 0: {
          if (iter->Valid()) {
            if (kVerbose) fprintf(stderr, "Next\n");
            iter->Next();
            ++model_iter;
            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          }
          break;
        }

        case 1: {
          if (kVerbose) fprintf(stderr, "SeekToFirst\n");
          iter->SeekToFirst();
          model_iter = data.begin();
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }

        case 2: {
          std::string key = PickRandomKey(rnd, keys);
          model_iter = data.lower_bound(key);
          if (kVerbose) fprintf(stderr, "Seek '%s'\n",
                                EscapeString(key).c_str());
          iter->Seek(Slice(key));
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }

        case 3: {
          if (iter->Valid()) {
            if (kVerbose) fprintf(stderr, "Prev\n");
            iter->Prev();
            if (model_iter == data.begin()) {
              model_iter = data.end();  // Wrap around to invalid value
            } else {
              --model_iter;
            }
            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          }
          break;
        }

        case 4: {
          if (kVerbose) fprintf(stderr, "SeekToLast\n");
          iter->SeekToLast();
          if (keys.empty()) {
            model_iter = data.end();
          } else {
            std::string last = data.rbegin()->first;
            model_iter = data.lower_bound(last);
          }
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }
      }
    }
    if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
      iter->~InternalIterator();
    } else {
      delete iter;
    }
  }

  std::string ToString(const stl_wrappers::KVMap& data,
                       const stl_wrappers::KVMap::const_iterator& it) {
    if (it == data.end()) {
      return "END";
    } else {
      return "'" + it->first + "->" + it->second + "'";
    }
  }

  std::string ToString(const stl_wrappers::KVMap& data,
                       const stl_wrappers::KVMap::const_reverse_iterator& it) {
    if (it == data.rend()) {
      return "END";
    } else {
      return "'" + it->first + "->" + it->second + "'";
    }
  }

  std::string ToString(const InternalIterator* it) {
    if (!it->Valid()) {
      return "END";
    } else {
      return "'" + it->key().ToString() + "->" + it->value().ToString() + "'";
    }
  }
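
  // Picks a random existing key and, depending on a coin toss, either returns
  // it unchanged, decrements its last byte to get a slightly smaller key, or
  // increments it to get a slightly larger one.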
  std::string PickRandomKey(Random* rnd, const std::vector<std::string>& keys) {
    if (keys.empty()) {
      return "foo";
    } else {
      const int index = rnd->Uniform(static_cast<int>(keys.size()));
      std::string result = keys[index];
      switch (rnd->Uniform(support_prev_ ? 3 : 1)) {
        case 0:
          // Return an existing key
          break;
        case 1: {
          // Attempt to return something smaller than an existing key
          if (result.size() > 0 && result[result.size() - 1] > '\0'
              && (!only_support_prefix_seek_
                  || options_.prefix_extractor->Transform(result).size()
                      < result.size())) {
            result[result.size() - 1]--;
          }
          break;
        }
        case 2: {
          // Return something larger than an existing key
          Increment(options_.comparator, &result);
          break;
        }
      }
      return result;
    }
  }

  // Returns nullptr if not running against a DB
  DB* db() const { return constructor_->db(); }

 private:
  Options options_ = Options();
  ImmutableCFOptions ioptions_;
  BlockBasedTableOptions table_options_ = BlockBasedTableOptions();
  Constructor* constructor_;
  WriteBufferManager write_buffer_;
  bool support_prev_;
  bool only_support_prefix_seek_;
  shared_ptr<InternalKeyComparator> internal_comparator_;
};
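
// Checks that `val` falls within the inclusive range [low, high], printing
// the offending values to stderr when the check fails.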
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  bool result = (val >= low) && (val <= high);
  if (!result) {
    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
            (unsigned long long)(val),
            (unsigned long long)(low),
            (unsigned long long)(high));
  }
  return result;
}

// Tests against all kinds of tables
class TableTest : public testing::Test {
 public:
  const InternalKeyComparator& GetPlainInternalComparator(
      const Comparator* comp) {
    if (!plain_internal_comparator) {
      plain_internal_comparator.reset(
          new test::PlainInternalKeyComparator(comp));
    }
    return *plain_internal_comparator;
  }
  void IndexTest(BlockBasedTableOptions table_options);

 private:
  std::unique_ptr<InternalKeyComparator> plain_internal_comparator;
};

class GeneralTableTest : public TableTest {};
class BlockBasedTableTest : public TableTest {};
class PlainTableTest : public TableTest {};
class TablePropertyTest : public testing::Test {};

// This test serves as the living tutorial for the prefix scan of user collected
// properties.
TEST_F(TablePropertyTest, PrefixScanTest) {
  UserCollectedProperties props{{"num.111.1", "1"},
                                {"num.111.2", "2"},
                                {"num.111.3", "3"},
                                {"num.333.1", "1"},
                                {"num.333.2", "2"},
                                {"num.333.3", "3"},
                                {"num.555.1", "1"},
                                {"num.555.2", "2"},
                                {"num.555.3", "3"}, };

  // prefixes that exist
  for (const std::string& prefix : {"num.111", "num.333", "num.555"}) {
    int num = 0;
    for (auto pos = props.lower_bound(prefix);
         pos != props.end() &&
             pos->first.compare(0, prefix.size(), prefix) == 0;
         ++pos) {
      ++num;
      auto key = prefix + "." + ToString(num);
      ASSERT_EQ(key, pos->first);
      ASSERT_EQ(ToString(num), pos->second);
    }
    ASSERT_EQ(3, num);
  }

  // prefixes that don't exist
  for (const std::string& prefix :
       {"num.000", "num.222", "num.444", "num.666"}) {
    auto pos = props.lower_bound(prefix);
    ASSERT_TRUE(pos == props.end() ||
                pos->first.compare(0, prefix.size(), prefix) != 0);
  }
}

// This test includes all the basic checks except those for index size and
// block size, which will be conducted in separate unit tests.
TEST_F(BlockBasedTableTest, BasicBlockBasedTableProperties) {
  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);

  c.Add("a1", "val1");
  c.Add("b2", "val2");
  c.Add("c3", "val3");
  c.Add("d4", "val4");
  c.Add("e5", "val5");
  c.Add("f6", "val6");
  c.Add("g7", "val7");
  c.Add("h8", "val8");
  c.Add("j9", "val9");
  uint64_t diff_internal_user_bytes = 9 * 8;  // 8 is seq size, 9 k-v totally

  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  Options options;
  options.compression = kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_restart_interval = 1;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options,
           GetPlainInternalComparator(options.comparator), &keys, &kvmap);

  auto& props = *c.GetTableReader()->GetTableProperties();
  ASSERT_EQ(kvmap.size(), props.num_entries);

  auto raw_key_size = kvmap.size() * 2ul;
  auto raw_value_size = kvmap.size() * 4ul;

  ASSERT_EQ(raw_key_size + diff_internal_user_bytes, props.raw_key_size);
  ASSERT_EQ(raw_value_size, props.raw_value_size);
  ASSERT_EQ(1ul, props.num_data_blocks);
  ASSERT_EQ("", props.filter_policy_name);  // no filter policy is used

  // Verify data size.
  BlockBuilder block_builder(1);
  for (const auto& item : kvmap) {
    block_builder.Add(item.first, item.second);
  }
  Slice content = block_builder.Finish();
  ASSERT_EQ(content.size() + kBlockTrailerSize + diff_internal_user_bytes,
            props.data_size);
  c.ResetTableReader();
}

TEST_F(BlockBasedTableTest, BlockBasedTableProperties2) {
  TableConstructor c(&reverse_key_comparator);
  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;

  {
    Options options;
    options.compression = CompressionType::kNoCompression;
    BlockBasedTableOptions table_options;
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));

    const ImmutableCFOptions ioptions(options);
    c.Finish(options, ioptions, table_options,
             GetPlainInternalComparator(options.comparator), &keys, &kvmap);

    auto& props = *c.GetTableReader()->GetTableProperties();

    // Default comparator
    ASSERT_EQ("leveldb.BytewiseComparator", props.comparator_name);
    // No merge operator
    ASSERT_EQ("nullptr", props.merge_operator_name);
    // No prefix extractor
    ASSERT_EQ("nullptr", props.prefix_extractor_name);
    // No property collectors
    ASSERT_EQ("[]", props.property_collectors_names);
    // No filter policy is used
    ASSERT_EQ("", props.filter_policy_name);
    // Compression type == that set:
    ASSERT_EQ("NoCompression", props.compression_name);
    c.ResetTableReader();
  }

  {
    Options options;
    BlockBasedTableOptions table_options;
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
    options.comparator = &reverse_key_comparator;
    options.merge_operator = MergeOperators::CreateUInt64AddOperator();
    options.prefix_extractor.reset(NewNoopTransform());
    options.table_properties_collector_factories.emplace_back(
        new DummyPropertiesCollectorFactory1());
    options.table_properties_collector_factories.emplace_back(
        new DummyPropertiesCollectorFactory2());

    const ImmutableCFOptions ioptions(options);
    c.Finish(options, ioptions, table_options,
             GetPlainInternalComparator(options.comparator), &keys, &kvmap);

    auto& props = *c.GetTableReader()->GetTableProperties();

    ASSERT_EQ("rocksdb.ReverseBytewiseComparator", props.comparator_name);
    ASSERT_EQ("UInt64AddOperator", props.merge_operator_name);
    ASSERT_EQ("rocksdb.Noop", props.prefix_extractor_name);
    ASSERT_EQ("[DummyPropertiesCollector1,DummyPropertiesCollector2]",
              props.property_collectors_names);
    ASSERT_EQ("", props.filter_policy_name);  // no filter policy is used
    c.ResetTableReader();
  }
}
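
// Writes two serialized range tombstones into the table and checks that
// NewRangeTombstoneIterator() returns them with their original start key,
// end key, and sequence number, even after the table reader is reset.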
TEST_F(BlockBasedTableTest, RangeDelBlock) {
  TableConstructor c(BytewiseComparator());
  std::vector<std::string> keys = {"1pika", "2chu"};
  std::vector<std::string> vals = {"p", "c"};

  for (int i = 0; i < 2; i++) {
    RangeTombstone t(keys[i], vals[i], i);
    std::pair<InternalKey, Slice> p = t.Serialize();
    c.Add(p.first.Encode().ToString(), p.second);
  }

  std::vector<std::string> sorted_keys;
  stl_wrappers::KVMap kvmap;
  Options options;
  options.compression = kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_restart_interval = 1;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  const ImmutableCFOptions ioptions(options);
  std::unique_ptr<InternalKeyComparator> internal_cmp(
      new InternalKeyComparator(options.comparator));
  c.Finish(options, ioptions, table_options, *internal_cmp, &sorted_keys,
           &kvmap);

  for (int j = 0; j < 2; ++j) {
    std::unique_ptr<InternalIterator> iter(
        c.GetTableReader()->NewRangeTombstoneIterator(ReadOptions()));
    if (j > 0) {
      // For second iteration, delete the table reader object and verify the
      // iterator can still access its metablock's range tombstones.
      c.ResetTableReader();
    }
    ASSERT_FALSE(iter->Valid());
    iter->SeekToFirst();
    ASSERT_TRUE(iter->Valid());
    for (int i = 0; i < 2; i++) {
      ASSERT_TRUE(iter->Valid());
      ParsedInternalKey parsed_key;
      ASSERT_TRUE(ParseInternalKey(iter->key(), &parsed_key));
      RangeTombstone t(parsed_key, iter->value());
      ASSERT_EQ(t.start_key_, keys[i]);
      ASSERT_EQ(t.end_key_, vals[i]);
      ASSERT_EQ(t.seq_, i);
      iter->Next();
    }
    ASSERT_TRUE(!iter->Valid());
  }
}

TEST_F(BlockBasedTableTest, FilterPolicyNameProperties) {
  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
  c.Add("a1", "val1");
  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  BlockBasedTableOptions table_options;
  table_options.filter_policy.reset(NewBloomFilterPolicy(10));
  Options options;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options,
           GetPlainInternalComparator(options.comparator), &keys, &kvmap);
  auto& props = *c.GetTableReader()->GetTableProperties();
  ASSERT_EQ("rocksdb.BuiltinBloomFilter", props.filter_policy_name);
  c.ResetTableReader();
}

//
// BlockBasedTableTest::PrefetchTest
//
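// Asserts, for every key in `keys_in_cache` (and its complement in
// `keys_not_in_cache`), whether the corresponding block is present in the
// block cache, optionally converting user keys to internal keys first.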
void AssertKeysInCache(BlockBasedTable* table_reader,
                       const std::vector<std::string>& keys_in_cache,
                       const std::vector<std::string>& keys_not_in_cache,
                       bool convert = false) {
  if (convert) {
    for (auto key : keys_in_cache) {
      InternalKey ikey(key, kMaxSequenceNumber, kTypeValue);
      ASSERT_TRUE(table_reader->TEST_KeyInCache(ReadOptions(), ikey.Encode()));
    }
    for (auto key : keys_not_in_cache) {
      InternalKey ikey(key, kMaxSequenceNumber, kTypeValue);
      ASSERT_TRUE(!table_reader->TEST_KeyInCache(ReadOptions(), ikey.Encode()));
    }
  } else {
    for (auto key : keys_in_cache) {
      ASSERT_TRUE(table_reader->TEST_KeyInCache(ReadOptions(), key));
    }
    for (auto key : keys_not_in_cache) {
      ASSERT_TRUE(!table_reader->TEST_KeyInCache(ReadOptions(), key));
    }
  }
}
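
// Resets the block cache, reopens the table, prefetches the given key range,
// and then checks the expected cache contents via AssertKeysInCache().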
void PrefetchRange(TableConstructor* c, Options* opt,
                   BlockBasedTableOptions* table_options, const char* key_begin,
                   const char* key_end,
                   const std::vector<std::string>& keys_in_cache,
                   const std::vector<std::string>& keys_not_in_cache,
                   const Status expected_status = Status::OK()) {
  // reset the cache and reopen the table
  table_options->block_cache = NewLRUCache(16 * 1024 * 1024, 4);
  opt->table_factory.reset(NewBlockBasedTableFactory(*table_options));
  const ImmutableCFOptions ioptions2(*opt);
  ASSERT_OK(c->Reopen(ioptions2));

  // prefetch
  auto* table_reader = dynamic_cast<BlockBasedTable*>(c->GetTableReader());
  Status s;
  unique_ptr<Slice> begin, end;
  unique_ptr<InternalKey> i_begin, i_end;
  if (key_begin != nullptr) {
    if (c->ConvertToInternalKey()) {
      i_begin.reset(new InternalKey(key_begin, kMaxSequenceNumber, kTypeValue));
      begin.reset(new Slice(i_begin->Encode()));
    } else {
      begin.reset(new Slice(key_begin));
    }
  }
  if (key_end != nullptr) {
    if (c->ConvertToInternalKey()) {
      i_end.reset(new InternalKey(key_end, kMaxSequenceNumber, kTypeValue));
      end.reset(new Slice(i_end->Encode()));
    } else {
      end.reset(new Slice(key_end));
    }
  }
  s = table_reader->Prefetch(begin.get(), end.get());

  ASSERT_TRUE(s.code() == expected_status.code());

  // assert our expectation in cache warmup
  AssertKeysInCache(table_reader, keys_in_cache, keys_not_in_cache,
                    c->ConvertToInternalKey());
  c->ResetTableReader();
}

TEST_F(BlockBasedTableTest, PrefetchTest) {
  // The purpose of this test is to test the prefetching operation built into
  // BlockBasedTable.
  Options opt;
  unique_ptr<InternalKeyComparator> ikc;
  ikc.reset(new test::PlainInternalKeyComparator(opt.comparator));
  opt.compression = kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_size = 1024;
  // big enough so we don't ever lose cached values.
  table_options.block_cache = NewLRUCache(16 * 1024 * 1024, 4);
  opt.table_factory.reset(NewBlockBasedTableFactory(table_options));

  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
  c.Add("k01", "hello");
  c.Add("k02", "hello2");
  c.Add("k03", std::string(10000, 'x'));
  c.Add("k04", std::string(200000, 'x'));
  c.Add("k05", std::string(300000, 'x'));
  c.Add("k06", "hello3");
  c.Add("k07", std::string(100000, 'x'));
  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  const ImmutableCFOptions ioptions(opt);
  c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap);
  c.ResetTableReader();

  // We get the following data spread :
  //
  // Data block          Index
  // ========================
  // [ k01 k02 k03 ]     k03
  // [ k04 ]             k04
  // [ k05 ]             k05
  // [ k06 k07 ]         k07

  // Simple
  PrefetchRange(&c, &opt, &table_options,
                /*key_range=*/"k01", "k05",
                /*keys_in_cache=*/{"k01", "k02", "k03", "k04", "k05"},
                /*keys_not_in_cache=*/{"k06", "k07"});
  PrefetchRange(&c, &opt, &table_options, "k01", "k01", {"k01", "k02", "k03"},
                {"k04", "k05", "k06", "k07"});
  // odd
  PrefetchRange(&c, &opt, &table_options, "a", "z",
                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, {});
  PrefetchRange(&c, &opt, &table_options, "k00", "k00", {"k01", "k02", "k03"},
                {"k04", "k05", "k06", "k07"});
  // Edge cases
  PrefetchRange(&c, &opt, &table_options, "k00", "k06",
                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, {});
  PrefetchRange(&c, &opt, &table_options, "k00", "zzz",
                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, {});
  // null keys
  PrefetchRange(&c, &opt, &table_options, nullptr, nullptr,
                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, {});
  PrefetchRange(&c, &opt, &table_options, "k04", nullptr,
                {"k04", "k05", "k06", "k07"}, {"k01", "k02", "k03"});
  PrefetchRange(&c, &opt, &table_options, nullptr, "k05",
                {"k01", "k02", "k03", "k04", "k05"}, {"k06", "k07"});
  // invalid
  PrefetchRange(&c, &opt, &table_options, "k06", "k00", {}, {},
                Status::InvalidArgument(Slice("k06 "), Slice("k07")));
  c.ResetTableReader();
}
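
// Builds tables under several index configurations (binary search, hash
// search, hash with collisions allowed, hash plus bloom filter) and verifies
// that a total-order seek still lands on the correct keys under each of them.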
TEST_F(BlockBasedTableTest, TotalOrderSeekOnHashIndex) {
  BlockBasedTableOptions table_options;
  for (int i = 0; i < 4; ++i) {
    Options options;
    // Make each key/value an individual block
    table_options.block_size = 64;
    switch (i) {
      case 0:
        // Binary search index
        table_options.index_type = BlockBasedTableOptions::kBinarySearch;
        options.table_factory.reset(new BlockBasedTableFactory(table_options));
        break;
      case 1:
        // Hash search index
        table_options.index_type = BlockBasedTableOptions::kHashSearch;
        options.table_factory.reset(new BlockBasedTableFactory(table_options));
        options.prefix_extractor.reset(NewFixedPrefixTransform(4));
        break;
      case 2:
        // Hash search index with hash_index_allow_collision
        table_options.index_type = BlockBasedTableOptions::kHashSearch;
        table_options.hash_index_allow_collision = true;
        options.table_factory.reset(new BlockBasedTableFactory(table_options));
        options.prefix_extractor.reset(NewFixedPrefixTransform(4));
        break;
      case 3:
        // Hash search index with filter policy
        table_options.index_type = BlockBasedTableOptions::kHashSearch;
        table_options.filter_policy.reset(NewBloomFilterPolicy(10));
        options.table_factory.reset(new BlockBasedTableFactory(table_options));
        options.prefix_extractor.reset(NewFixedPrefixTransform(4));
        break;
      case 4:
      default:
        // Two-level index search
        table_options.index_type = BlockBasedTableOptions::kTwoLevelIndexSearch;
        options.table_factory.reset(new BlockBasedTableFactory(table_options));
        break;
    }

    TableConstructor c(BytewiseComparator(),
                       true /* convert_to_internal_key_ */);
    c.Add("aaaa1", std::string('a', 56));
    c.Add("bbaa1", std::string('a', 56));
    c.Add("cccc1", std::string('a', 56));
    c.Add("bbbb1", std::string('a', 56));
    c.Add("baaa1", std::string('a', 56));
    c.Add("abbb1", std::string('a', 56));
    c.Add("cccc2", std::string('a', 56));
    std::vector<std::string> keys;
    stl_wrappers::KVMap kvmap;
    const ImmutableCFOptions ioptions(options);
    c.Finish(options, ioptions, table_options,
             GetPlainInternalComparator(options.comparator), &keys, &kvmap);
    auto props = c.GetTableReader()->GetTableProperties();
    ASSERT_EQ(7u, props->num_data_blocks);
    auto* reader = c.GetTableReader();
    ReadOptions ro;
    ro.total_order_seek = true;
    std::unique_ptr<InternalIterator> iter(reader->NewIterator(ro));

    iter->Seek(InternalKey("b", 0, kTypeValue).Encode());
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("baaa1", ExtractUserKey(iter->key()).ToString());
    iter->Next();
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("bbaa1", ExtractUserKey(iter->key()).ToString());

    iter->Seek(InternalKey("bb", 0, kTypeValue).Encode());
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("bbaa1", ExtractUserKey(iter->key()).ToString());
    iter->Next();
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("bbbb1", ExtractUserKey(iter->key()).ToString());

    iter->Seek(InternalKey("bbb", 0, kTypeValue).Encode());
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("bbbb1", ExtractUserKey(iter->key()).ToString());
    iter->Next();
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("cccc1", ExtractUserKey(iter->key()).ToString());
  }
}

TEST_F(BlockBasedTableTest, NoopTransformSeek) {
  BlockBasedTableOptions table_options;
  table_options.filter_policy.reset(NewBloomFilterPolicy(10));

  Options options;
  options.comparator = BytewiseComparator();
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  options.prefix_extractor.reset(NewNoopTransform());

  TableConstructor c(options.comparator);
  // To tickle the PrefixMayMatch bug it is important that the
  // user-key is a single byte so that the index key exactly matches
  // the user-key.
  InternalKey key("a", 1, kTypeValue);
  c.Add(key.Encode().ToString(), "b");
  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  const ImmutableCFOptions ioptions(options);
  const InternalKeyComparator internal_comparator(options.comparator);
  c.Finish(options, ioptions, table_options, internal_comparator, &keys,
           &kvmap);

  auto* reader = c.GetTableReader();
  for (int i = 0; i < 2; ++i) {
    ReadOptions ro;
    ro.total_order_seek = (i == 0);
    std::unique_ptr<InternalIterator> iter(reader->NewIterator(ro));

    iter->Seek(key.Encode());
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("a", ExtractUserKey(iter->key()).ToString());
  }
}

TEST_F(BlockBasedTableTest, SkipPrefixBloomFilter) {
  // if the DB is opened with a prefix extractor of a different name,
  // the prefix bloom filter is skipped when reading the file
  BlockBasedTableOptions table_options;
  table_options.filter_policy.reset(NewBloomFilterPolicy(2));
  table_options.whole_key_filtering = false;

  Options options;
  options.comparator = BytewiseComparator();
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  options.prefix_extractor.reset(NewFixedPrefixTransform(1));

  TableConstructor c(options.comparator);
  InternalKey key("abcdefghijk", 1, kTypeValue);
  c.Add(key.Encode().ToString(), "test");
  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  const ImmutableCFOptions ioptions(options);
  const InternalKeyComparator internal_comparator(options.comparator);
  c.Finish(options, ioptions, table_options, internal_comparator, &keys,
           &kvmap);
  options.prefix_extractor.reset(NewFixedPrefixTransform(9));
  const ImmutableCFOptions new_ioptions(options);
  c.Reopen(new_ioptions);
  auto reader = c.GetTableReader();
  std::unique_ptr<InternalIterator> db_iter(reader->NewIterator(ReadOptions()));

  // Test point lookup
  // only one kv
  for (auto& kv : kvmap) {
    db_iter->Seek(kv.first);
    ASSERT_TRUE(db_iter->Valid());
    ASSERT_OK(db_iter->status());
    ASSERT_EQ(db_iter->key(), kv.first);
    ASSERT_EQ(db_iter->value(), kv.second);
  }
}

static std::string RandomString(Random* rnd, int len) {
  std::string r;
  test::RandomString(rnd, len, &r);
  return r;
}

void AddInternalKey(TableConstructor* c, const std::string& prefix,
                    int suffix_len = 800) {
  static Random rnd(1023);
  InternalKey k(prefix + RandomString(&rnd, suffix_len), 0, kTypeValue);
  c->Add(k.Encode().ToString(), "v");
}
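
// Shared body for the Binary/Hash/Partition index tests below: builds a table
// whose keys share 3-byte prefixes, then seeks by prefix and by full key and
// checks the lower/upper bounds that the index search produces.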
void TableTest::IndexTest(BlockBasedTableOptions table_options) {
  TableConstructor c(BytewiseComparator());

  // keys with prefix length 3, make sure the key/value is big enough to fill
  // one block
  AddInternalKey(&c, "0015");
  AddInternalKey(&c, "0035");

  AddInternalKey(&c, "0054");
  AddInternalKey(&c, "0055");

  AddInternalKey(&c, "0056");
  AddInternalKey(&c, "0057");

  AddInternalKey(&c, "0058");
  AddInternalKey(&c, "0075");

  AddInternalKey(&c, "0076");
  AddInternalKey(&c, "0095");

  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  Options options;
  options.prefix_extractor.reset(NewFixedPrefixTransform(3));
  table_options.block_size = 1700;
  table_options.block_cache = NewLRUCache(1024, 4);
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  std::unique_ptr<InternalKeyComparator> comparator(
      new InternalKeyComparator(BytewiseComparator()));
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options, *comparator, &keys, &kvmap);
  auto reader = c.GetTableReader();

  auto props = reader->GetTableProperties();
  ASSERT_EQ(5u, props->num_data_blocks);

  std::unique_ptr<InternalIterator> index_iter(
      reader->NewIterator(ReadOptions()));

  // -- Find keys that do not exist, but have a common prefix.
  std::vector<std::string> prefixes = {"001", "003", "005", "007", "009"};
  std::vector<std::string> lower_bound = {keys[0], keys[1], keys[2],
                                          keys[7], keys[9], };

  // find the lower bound of the prefix
  for (size_t i = 0; i < prefixes.size(); ++i) {
    index_iter->Seek(InternalKey(prefixes[i], 0, kTypeValue).Encode());
    ASSERT_OK(index_iter->status());
    ASSERT_TRUE(index_iter->Valid());

    // seek the first element in the block
    ASSERT_EQ(lower_bound[i], index_iter->key().ToString());
    ASSERT_EQ("v", index_iter->value().ToString());
  }

  // find the upper bound of prefixes
  std::vector<std::string> upper_bound = {keys[1], keys[2], keys[7], keys[9], };

  // find existing keys
  for (const auto& item : kvmap) {
    auto ukey = ExtractUserKey(item.first).ToString();
    index_iter->Seek(ukey);

    // ASSERT_OK(regular_iter->status());
    ASSERT_OK(index_iter->status());

    // ASSERT_TRUE(regular_iter->Valid());
    ASSERT_TRUE(index_iter->Valid());

    ASSERT_EQ(item.first, index_iter->key().ToString());
    ASSERT_EQ(item.second, index_iter->value().ToString());
  }

  for (size_t i = 0; i < prefixes.size(); ++i) {
    // the key is greater than any existing keys.
    auto key = prefixes[i] + "9";
    index_iter->Seek(InternalKey(key, 0, kTypeValue).Encode());

    ASSERT_OK(index_iter->status());
    if (i == prefixes.size() - 1) {
      // last key
      ASSERT_TRUE(!index_iter->Valid());
    } else {
      ASSERT_TRUE(index_iter->Valid());
      // seek the first element in the block
      ASSERT_EQ(upper_bound[i], index_iter->key().ToString());
      ASSERT_EQ("v", index_iter->value().ToString());
    }
  }

  // find keys with prefix that don't match any of the existing prefixes.
  std::vector<std::string> non_exist_prefixes = {"002", "004", "006", "008"};
  for (const auto& prefix : non_exist_prefixes) {
    index_iter->Seek(InternalKey(prefix, 0, kTypeValue).Encode());
    // regular_iter->Seek(prefix);

    ASSERT_OK(index_iter->status());
    // Seeking to a non-existing prefix should yield either an invalid
    // iterator or a key whose prefix is greater than the target.
    if (index_iter->Valid()) {
      Slice ukey = ExtractUserKey(index_iter->key());
      Slice ukey_prefix = options.prefix_extractor->Transform(ukey);
      ASSERT_TRUE(BytewiseComparator()->Compare(prefix, ukey_prefix) < 0);
    }
  }
  c.ResetTableReader();
}
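
// Run the shared IndexTest() body against each index configuration: binary
// search, hash search, and the partitioned (two-level) index with a range of
// metadata block sizes.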
TEST_F(TableTest, BinaryIndexTest) {
  BlockBasedTableOptions table_options;
  table_options.index_type = BlockBasedTableOptions::kBinarySearch;
  IndexTest(table_options);
}

TEST_F(TableTest, HashIndexTest) {
  BlockBasedTableOptions table_options;
  table_options.index_type = BlockBasedTableOptions::kHashSearch;
  IndexTest(table_options);
}

TEST_F(TableTest, PartitionIndexTest) {
  const int max_index_keys = 5;
  const int est_max_index_key_value_size = 32;
  const int est_max_index_size = max_index_keys * est_max_index_key_value_size;
  for (int i = 1; i <= est_max_index_size + 1; i++) {
    BlockBasedTableOptions table_options;
    table_options.index_type = BlockBasedTableOptions::kTwoLevelIndexSearch;
    table_options.metadata_block_size = i;
    IndexTest(table_options);
  }
}

// It's very hard to figure out the index block size of a block accurately.
// To make sure we get the index size, we just make sure that as the number of
// keys grows, the index block size also grows.
TEST_F(BlockBasedTableTest, IndexSizeStat) {
  uint64_t last_index_size = 0;

  // we need to use random keys since purely human-readable text
  // may compress well, resulting in an insignificant change of index
  // block size.
  Random rnd(test::RandomSeed());
  std::vector<std::string> keys;

  for (int i = 0; i < 100; ++i) {
    keys.push_back(RandomString(&rnd, 10000));
  }

  // Each time we load one more key into the table, the table index block
  // size is expected to be larger than last time's.
  for (size_t i = 1; i < keys.size(); ++i) {
    TableConstructor c(BytewiseComparator(),
                       true /* convert_to_internal_key_ */);
    for (size_t j = 0; j < i; ++j) {
      c.Add(keys[j], "val");
    }

    std::vector<std::string> ks;
    stl_wrappers::KVMap kvmap;
    Options options;
    options.compression = kNoCompression;
    BlockBasedTableOptions table_options;
    table_options.block_restart_interval = 1;
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));

    const ImmutableCFOptions ioptions(options);
    c.Finish(options, ioptions, table_options,
             GetPlainInternalComparator(options.comparator), &ks, &kvmap);
    auto index_size = c.GetTableReader()->GetTableProperties()->index_size;
    ASSERT_GT(index_size, last_index_size);
    last_index_size = index_size;
    c.ResetTableReader();
  }
}

TEST_F(BlockBasedTableTest, NumBlockStat) {
  Random rnd(test::RandomSeed());
  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
  Options options;
  options.compression = kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_restart_interval = 1;
  table_options.block_size = 1000;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  for (int i = 0; i < 10; ++i) {
    // the key/val are slightly smaller than block size, so that each block
    // holds roughly one key/value pair.
    c.Add(RandomString(&rnd, 900), "val");
  }

  std::vector<std::string> ks;
  stl_wrappers::KVMap kvmap;
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options,
           GetPlainInternalComparator(options.comparator), &ks, &kvmap);
  ASSERT_EQ(kvmap.size(),
            c.GetTableReader()->GetTableProperties()->num_data_blocks);
  c.ResetTableReader();
}

// A simple tool that takes the snapshot of block cache statistics.
class BlockCachePropertiesSnapshot {
 public:
  explicit BlockCachePropertiesSnapshot(Statistics* statistics) {
    block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_MISS);
    block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_HIT);
    index_block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_INDEX_MISS);
    index_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_INDEX_HIT);
    data_block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_DATA_MISS);
    data_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_DATA_HIT);
    filter_block_cache_miss =
        statistics->getTickerCount(BLOCK_CACHE_FILTER_MISS);
    filter_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_FILTER_HIT);
    block_cache_bytes_read = statistics->getTickerCount(BLOCK_CACHE_BYTES_READ);
    block_cache_bytes_write =
        statistics->getTickerCount(BLOCK_CACHE_BYTES_WRITE);
  }

  void AssertIndexBlockStat(int64_t expected_index_block_cache_miss,
                            int64_t expected_index_block_cache_hit) {
    ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss);
    ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit);
  }

  void AssertFilterBlockStat(int64_t expected_filter_block_cache_miss,
                             int64_t expected_filter_block_cache_hit) {
    ASSERT_EQ(expected_filter_block_cache_miss, filter_block_cache_miss);
    ASSERT_EQ(expected_filter_block_cache_hit, filter_block_cache_hit);
  }

  // Check if the fetched props match the expected ones.
  // TODO(kailiu) Use this only when you have disabled the filter policy!
  void AssertEqual(int64_t expected_index_block_cache_miss,
                   int64_t expected_index_block_cache_hit,
                   int64_t expected_data_block_cache_miss,
                   int64_t expected_data_block_cache_hit) const {
    ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss);
    ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit);
    ASSERT_EQ(expected_data_block_cache_miss, data_block_cache_miss);
    ASSERT_EQ(expected_data_block_cache_hit, data_block_cache_hit);
    ASSERT_EQ(expected_index_block_cache_miss + expected_data_block_cache_miss,
              block_cache_miss);
    ASSERT_EQ(expected_index_block_cache_hit + expected_data_block_cache_hit,
              block_cache_hit);
  }

  int64_t GetCacheBytesRead() { return block_cache_bytes_read; }

  int64_t GetCacheBytesWrite() { return block_cache_bytes_write; }

 private:
  int64_t block_cache_miss = 0;
  int64_t block_cache_hit = 0;
  int64_t index_block_cache_miss = 0;
  int64_t index_block_cache_hit = 0;
  int64_t data_block_cache_miss = 0;
  int64_t data_block_cache_hit = 0;
  int64_t filter_block_cache_miss = 0;
  int64_t filter_block_cache_hit = 0;
  int64_t block_cache_bytes_read = 0;
  int64_t block_cache_bytes_write = 0;
};
|
|
|
|
|
2014-02-19 23:38:57 +00:00
|
|
|
// Make sure, by default, index/filter blocks were pre-loaded (meaning we won't
|
|
|
|
// use block cache to store them).
|
2015-03-17 21:08:00 +00:00
|
|
|
TEST_F(BlockBasedTableTest, BlockCacheDisabledTest) {
|
2014-02-19 23:38:57 +00:00
|
|
|
Options options;
|
|
|
|
options.create_if_missing = true;
|
|
|
|
options.statistics = CreateDBStatistics();
|
|
|
|
BlockBasedTableOptions table_options;
|
2016-04-07 20:51:47 +00:00
|
|
|
table_options.block_cache = NewLRUCache(1024, 4);
|
2014-08-25 21:22:05 +00:00
|
|
|
table_options.filter_policy.reset(NewBloomFilterPolicy(10));
|
2014-02-19 23:38:57 +00:00
|
|
|
options.table_factory.reset(new BlockBasedTableFactory(table_options));
|
|
|
|
std::vector<std::string> keys;
|
2015-09-02 20:58:22 +00:00
|
|
|
stl_wrappers::KVMap kvmap;
|
2014-02-19 23:38:57 +00:00
|
|
|
|
2016-08-19 22:10:31 +00:00
|
|
|
TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
|
2014-02-19 23:38:57 +00:00
|
|
|
c.Add("key", "value");
|
2014-09-04 23:18:36 +00:00
|
|
|
const ImmutableCFOptions ioptions(options);
|
|
|
|
c.Finish(options, ioptions, table_options,
|
2014-08-25 21:22:05 +00:00
|
|
|
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
|
2014-02-19 23:38:57 +00:00
|
|
|
|
|
|
|
// preloading filter/index blocks is enabled.
|
2014-08-25 23:14:30 +00:00
|
|
|
auto reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
|
2014-02-19 23:38:57 +00:00
|
|
|
ASSERT_TRUE(reader->TEST_filter_block_preloaded());
|
2014-03-01 02:19:07 +00:00
|
|
|
ASSERT_TRUE(reader->TEST_index_reader_preloaded());
|
2014-02-19 23:38:57 +00:00
|
|
|
|
|
|
|
{
|
|
|
|
// nothing happens in the beginning
|
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
|
|
|
props.AssertIndexBlockStat(0, 0);
|
|
|
|
props.AssertFilterBlockStat(0, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
{
|
2014-09-29 18:09:09 +00:00
|
|
|
GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
|
2015-03-03 18:59:36 +00:00
|
|
|
GetContext::kNotFound, Slice(), nullptr, nullptr,
|
2016-11-04 01:40:23 +00:00
|
|
|
nullptr, nullptr, nullptr);
|
2014-02-19 23:38:57 +00:00
|
|
|
// a hack just to trigger BlockBasedTable::GetFilter.
|
2014-09-29 18:09:09 +00:00
|
|
|
reader->Get(ReadOptions(), "non-exist-key", &get_context);
|
2014-02-19 23:38:57 +00:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
|
|
|
props.AssertIndexBlockStat(0, 0);
|
|
|
|
props.AssertFilterBlockStat(0, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Due to the difficulties of the interaction between statistics, this test
|
|
|
|
// only tests the case when "index block is put to block cache"
|
2015-03-17 21:08:00 +00:00
|
|
|
TEST_F(BlockBasedTableTest, FilterBlockInBlockCache) {
|
2013-11-13 06:46:51 +00:00
|
|
|
// -- Table construction
|
2013-11-20 06:00:48 +00:00
|
|
|
Options options;
|
2013-11-13 06:46:51 +00:00
|
|
|
options.create_if_missing = true;
|
2017-05-02 20:39:09 +00:00
|
|
|
options.statistics = CreateDBStatistics();
|
2014-01-24 18:57:15 +00:00
|
|
|
|
|
|
|
// Enable the cache for index/filter blocks
|
|
|
|
BlockBasedTableOptions table_options;
|
2016-04-07 20:51:47 +00:00
|
|
|
table_options.block_cache = NewLRUCache(1024, 4);
|
2014-01-24 18:57:15 +00:00
|
|
|
table_options.cache_index_and_filter_blocks = true;
|
|
|
|
options.table_factory.reset(new BlockBasedTableFactory(table_options));
|
2013-11-13 06:46:51 +00:00
|
|
|
std::vector<std::string> keys;
|
2015-09-02 20:58:22 +00:00
|
|
|
stl_wrappers::KVMap kvmap;
|
2013-11-13 06:46:51 +00:00
|
|
|
|
2016-08-19 22:10:31 +00:00
|
|
|
TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
|
2013-11-13 06:46:51 +00:00
|
|
|
c.Add("key", "value");
|
2014-09-04 23:18:36 +00:00
|
|
|
const ImmutableCFOptions ioptions(options);
|
|
|
|
c.Finish(options, ioptions, table_options,
|
2014-08-25 21:22:05 +00:00
|
|
|
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
|
2014-02-19 23:38:57 +00:00
|
|
|
// preloading filter/index blocks is prohibited.
|
2014-10-22 18:52:35 +00:00
|
|
|
auto* reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
|
2014-02-19 23:38:57 +00:00
|
|
|
ASSERT_TRUE(!reader->TEST_filter_block_preloaded());
|
2014-03-01 02:19:07 +00:00
|
|
|
ASSERT_TRUE(!reader->TEST_index_reader_preloaded());
|
2013-11-13 06:46:51 +00:00
|
|
|
|
|
|
|
// -- PART 1: Open with regular block cache.
|
|
|
|
// The block cache is enabled here, so the block accesses below are tracked
|
2015-10-12 22:06:38 +00:00
|
|
|
unique_ptr<InternalIterator> iter;
|
2013-11-13 06:46:51 +00:00
|
|
|
|
2015-10-07 22:17:20 +00:00
|
|
|
int64_t last_cache_bytes_read = 0;
|
2013-11-13 06:46:51 +00:00
|
|
|
// At first, no block will be accessed.
|
|
|
|
{
|
2014-02-19 23:38:57 +00:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2013-11-13 06:46:51 +00:00
|
|
|
// index will be added to block cache.
|
2014-02-05 00:21:47 +00:00
|
|
|
props.AssertEqual(1, // index block miss
|
|
|
|
0, 0, 0);
|
2015-10-07 22:17:20 +00:00
|
|
|
ASSERT_EQ(props.GetCacheBytesRead(), 0);
|
|
|
|
ASSERT_EQ(props.GetCacheBytesWrite(),
|
|
|
|
table_options.block_cache->GetUsage());
|
|
|
|
last_cache_bytes_read = props.GetCacheBytesRead();
|
2013-11-13 06:46:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Only index block will be accessed
|
|
|
|
{
|
|
|
|
iter.reset(c.NewIterator());
|
2014-02-19 23:38:57 +00:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2013-11-13 06:46:51 +00:00
|
|
|
// NOTE: to better highlight the "delta" of each ticker, I use
|
|
|
|
// <last_value> + <added_value> to indicate the increment of changed
|
|
|
|
// value; other numbers remain the same.
|
2014-02-05 00:21:47 +00:00
|
|
|
props.AssertEqual(1, 0 + 1, // index block hit
|
|
|
|
0, 0);
|
2015-10-07 22:17:20 +00:00
|
|
|
// Cache hit, bytes read from cache should increase
|
|
|
|
ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read);
|
|
|
|
ASSERT_EQ(props.GetCacheBytesWrite(),
|
|
|
|
table_options.block_cache->GetUsage());
|
|
|
|
last_cache_bytes_read = props.GetCacheBytesRead();
|
2013-11-13 06:46:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Only data block will be accessed
|
|
|
|
{
|
|
|
|
iter->SeekToFirst();
|
2014-02-19 23:38:57 +00:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2014-02-05 00:21:47 +00:00
|
|
|
props.AssertEqual(1, 1, 0 + 1, // data block miss
|
|
|
|
0);
|
2015-10-07 22:17:20 +00:00
|
|
|
// Cache miss, Bytes read from cache should not change
|
|
|
|
ASSERT_EQ(props.GetCacheBytesRead(), last_cache_bytes_read);
|
|
|
|
ASSERT_EQ(props.GetCacheBytesWrite(),
|
|
|
|
table_options.block_cache->GetUsage());
|
|
|
|
last_cache_bytes_read = props.GetCacheBytesRead();
|
2013-11-13 06:46:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Data block will be in cache
|
|
|
|
{
|
|
|
|
iter.reset(c.NewIterator());
|
|
|
|
iter->SeekToFirst();
|
2014-02-19 23:38:57 +00:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2014-02-05 00:21:47 +00:00
|
|
|
props.AssertEqual(1, 1 + 1, /* index block hit */
|
|
|
|
1, 0 + 1 /* data block hit */);
|
2015-10-07 22:17:20 +00:00
|
|
|
// Cache hit, bytes read from cache should increase
|
|
|
|
ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read);
|
|
|
|
ASSERT_EQ(props.GetCacheBytesWrite(),
|
|
|
|
table_options.block_cache->GetUsage());
|
2013-11-13 06:46:51 +00:00
|
|
|
}
|
|
|
|
// release the iterator so that the block cache can reset correctly.
|
|
|
|
iter.reset();
|
2017-05-02 20:39:09 +00:00
|
|
|
|
Adding pin_l0_filter_and_index_blocks_in_cache feature and related fixes.
Summary:
When a block based table file is opened, if prefetch_index_and_filter is true, it will prefetch the index and filter blocks, putting them into the block cache.
What this feature adds: when a L0 block based table file is opened, if pin_l0_filter_and_index_blocks_in_cache is true in the options (and prefetch_index_and_filter is true), then the filter and index blocks aren't released back to the block cache at the end of BlockBasedTableReader::Open(). Instead the table reader takes ownership of them, hence pinning them, ie. the LRU cache will never push them out. Meanwhile in the table reader, further accesses will not hit the block cache, thus avoiding lock contention.
Test Plan:
'export TEST_TMPDIR=/dev/shm/ && DISABLE_JEMALLOC=1 OPT=-g make all valgrind_check -j32' is OK.
I didn't run the Java tests, I don't have Java set up on my devserver.
Reviewers: sdong
Reviewed By: sdong
Subscribers: andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D56133
2016-04-01 17:42:39 +00:00
|
|
|
c.ResetTableReader();
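// Hedged sketch (an aside prompted by the commit message above, not exercised at this
// point in the test): pinning L0 index/filter blocks would be enabled roughly like
//
//   BlockBasedTableOptions bbto;
//   bbto.cache_index_and_filter_blocks = true;
//   bbto.pin_l0_filter_and_index_blocks_in_cache = true;  // never evicted for L0 files
//   options.table_factory.reset(NewBlockBasedTableFactory(bbto));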
|
|
|
|
|
2014-10-22 18:52:35 +00:00
|
|
|
// -- PART 2: Open with very small block cache
|
2013-11-13 06:46:51 +00:00
|
|
|
// In this test, no block will ever get hit since the block cache is
|
|
|
|
// too small to fit even one entry.
|
2016-04-07 20:51:47 +00:00
|
|
|
table_options.block_cache = NewLRUCache(1, 4);
|
2017-05-02 20:39:09 +00:00
|
|
|
options.statistics = CreateDBStatistics();
|
2014-08-25 21:22:05 +00:00
|
|
|
options.table_factory.reset(new BlockBasedTableFactory(table_options));
|
2014-09-04 23:18:36 +00:00
|
|
|
const ImmutableCFOptions ioptions2(options);
|
|
|
|
c.Reopen(ioptions2);
|
2013-11-13 06:46:51 +00:00
|
|
|
{
|
2014-02-19 23:38:57 +00:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2014-02-05 00:21:47 +00:00
|
|
|
props.AssertEqual(1, // index block miss
|
|
|
|
0, 0, 0);
|
2015-10-07 22:17:20 +00:00
|
|
|
// Cache miss, Bytes read from cache should not change
|
|
|
|
ASSERT_EQ(props.GetCacheBytesRead(), 0);
|
2013-11-13 06:46:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
// Both index and data block get accessed.
|
|
|
|
// It first caches the index block, then the data block. But since the cache size
|
|
|
|
// is only 1, the index block will be purged after the data block is inserted.
|
|
|
|
iter.reset(c.NewIterator());
|
2014-02-19 23:38:57 +00:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2014-02-05 00:21:47 +00:00
|
|
|
props.AssertEqual(1 + 1, // index block miss
|
|
|
|
0, 0, // data block miss
|
|
|
|
0);
|
2015-10-07 22:17:20 +00:00
|
|
|
// Cache miss, bytes read from cache should not change
|
|
|
|
ASSERT_EQ(props.GetCacheBytesRead(), 0);
|
2013-11-13 06:46:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
// SeekToFirst() accesses the data block. For a similar reason, we expect a data
|
|
|
|
// block cache miss.
|
|
|
|
iter->SeekToFirst();
|
2014-02-19 23:38:57 +00:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
2014-02-05 00:21:47 +00:00
|
|
|
props.AssertEqual(2, 0, 0 + 1, // data block miss
|
|
|
|
0);
|
2015-10-07 22:17:20 +00:00
|
|
|
// Cache miss, Bytes read from cache should not change
|
|
|
|
ASSERT_EQ(props.GetCacheBytesRead(), 0);
|
2013-11-13 06:46:51 +00:00
|
|
|
}
|
2014-10-22 18:52:35 +00:00
|
|
|
iter.reset();
|
2016-04-01 17:42:39 +00:00
|
|
|
c.ResetTableReader();
|
2014-10-22 18:52:35 +00:00
|
|
|
|
|
|
|
// -- PART 3: Open table with bloom filter enabled but not in SST file
|
2016-04-07 20:51:47 +00:00
|
|
|
table_options.block_cache = NewLRUCache(4096, 4);
|
2014-10-22 18:52:35 +00:00
|
|
|
table_options.cache_index_and_filter_blocks = false;
|
|
|
|
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
|
|
|
|
|
|
|
TableConstructor c3(BytewiseComparator());
|
2014-10-22 20:53:35 +00:00
|
|
|
std::string user_key = "k01";
|
|
|
|
InternalKey internal_key(user_key, 0, kTypeValue);
|
|
|
|
c3.Add(internal_key.Encode().ToString(), "hello");
|
2014-10-22 18:52:35 +00:00
|
|
|
ImmutableCFOptions ioptions3(options);
|
|
|
|
// Generate table without filter policy
|
|
|
|
c3.Finish(options, ioptions3, table_options,
|
2016-04-01 17:42:39 +00:00
|
|
|
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
|
|
|
|
c3.ResetTableReader();
|
|
|
|
|
2014-10-22 18:52:35 +00:00
|
|
|
// Open table with filter policy
|
|
|
|
table_options.filter_policy.reset(NewBloomFilterPolicy(1));
|
|
|
|
options.table_factory.reset(new BlockBasedTableFactory(table_options));
|
2017-05-02 20:39:09 +00:00
|
|
|
options.statistics = CreateDBStatistics();
|
2014-10-22 18:52:35 +00:00
|
|
|
ImmutableCFOptions ioptions4(options);
|
|
|
|
ASSERT_OK(c3.Reopen(ioptions4));
|
|
|
|
reader = dynamic_cast<BlockBasedTable*>(c3.GetTableReader());
|
|
|
|
ASSERT_TRUE(!reader->TEST_filter_block_preloaded());
|
2017-03-13 18:44:50 +00:00
|
|
|
PinnableSlice value;
|
2014-10-22 18:52:35 +00:00
|
|
|
GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
|
2015-03-03 18:59:36 +00:00
|
|
|
GetContext::kNotFound, user_key, &value, nullptr,
|
2016-11-04 01:40:23 +00:00
|
|
|
nullptr, nullptr, nullptr);
|
2014-10-22 20:53:35 +00:00
|
|
|
ASSERT_OK(reader->Get(ReadOptions(), user_key, &get_context));
|
2017-03-13 18:44:50 +00:00
|
|
|
ASSERT_STREQ(value.data(), "hello");
|
2014-10-22 18:52:35 +00:00
|
|
|
BlockCachePropertiesSnapshot props(options.statistics.get());
|
|
|
|
props.AssertFilterBlockStat(0, 0);
|
2016-04-01 17:42:39 +00:00
|
|
|
c3.ResetTableReader();
|
2013-11-13 06:46:51 +00:00
|
|
|
}
|
|
|
|
|
2016-01-04 18:51:00 +00:00
|
|
|
void ValidateBlockSizeDeviation(int value, int expected) {
|
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.block_size_deviation = value;
|
|
|
|
BlockBasedTableFactory* factory = new BlockBasedTableFactory(table_options);
|
|
|
|
|
|
|
|
const BlockBasedTableOptions* normalized_table_options =
|
|
|
|
(const BlockBasedTableOptions*)factory->GetOptions();
|
|
|
|
ASSERT_EQ(normalized_table_options->block_size_deviation, expected);
|
|
|
|
|
|
|
|
delete factory;
|
|
|
|
}
|
|
|
|
|
|
|
|
void ValidateBlockRestartInterval(int value, int expected) {
|
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.block_restart_interval = value;
|
|
|
|
BlockBasedTableFactory* factory = new BlockBasedTableFactory(table_options);
|
|
|
|
|
|
|
|
const BlockBasedTableOptions* normalized_table_options =
|
|
|
|
(const BlockBasedTableOptions*)factory->GetOptions();
|
|
|
|
ASSERT_EQ(normalized_table_options->block_restart_interval, expected);
|
|
|
|
|
|
|
|
delete factory;
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(BlockBasedTableTest, InvalidOptions) {
|
|
|
|
// invalid values for block_size_deviation (<0 or >100) are silently set to 0
|
|
|
|
ValidateBlockSizeDeviation(-10, 0);
|
|
|
|
ValidateBlockSizeDeviation(-1, 0);
|
|
|
|
ValidateBlockSizeDeviation(0, 0);
|
|
|
|
ValidateBlockSizeDeviation(1, 1);
|
|
|
|
ValidateBlockSizeDeviation(99, 99);
|
|
|
|
ValidateBlockSizeDeviation(100, 100);
|
|
|
|
ValidateBlockSizeDeviation(101, 0);
|
|
|
|
ValidateBlockSizeDeviation(1000, 0);
|
|
|
|
|
|
|
|
// invalid values for block_restart_interval (<1) are silently set to 1
|
|
|
|
ValidateBlockRestartInterval(-10, 1);
|
|
|
|
ValidateBlockRestartInterval(-1, 1);
|
|
|
|
ValidateBlockRestartInterval(0, 1);
|
|
|
|
ValidateBlockRestartInterval(1, 1);
|
|
|
|
ValidateBlockRestartInterval(2, 2);
|
|
|
|
ValidateBlockRestartInterval(1000, 1000);
|
|
|
|
}
|
|
|
|
|
2015-09-02 22:36:47 +00:00
|
|
|
TEST_F(BlockBasedTableTest, BlockReadCountTest) {
|
|
|
|
// bloom_filter_type = 0 -- block-based filter
|
|
|
|
// bloom_filter_type = 1 -- full filter
|
|
|
|
for (int bloom_filter_type = 0; bloom_filter_type < 2; ++bloom_filter_type) {
|
|
|
|
for (int index_and_filter_in_cache = 0; index_and_filter_in_cache < 2;
|
|
|
|
++index_and_filter_in_cache) {
|
|
|
|
Options options;
|
|
|
|
options.create_if_missing = true;
|
|
|
|
|
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.block_cache = NewLRUCache(1, 0);
|
|
|
|
table_options.cache_index_and_filter_blocks = index_and_filter_in_cache;
|
|
|
|
table_options.filter_policy.reset(
|
|
|
|
NewBloomFilterPolicy(10, bloom_filter_type == 0));
|
|
|
|
options.table_factory.reset(new BlockBasedTableFactory(table_options));
|
|
|
|
std::vector<std::string> keys;
|
2015-09-02 23:05:53 +00:00
|
|
|
stl_wrappers::KVMap kvmap;
|
2015-09-02 22:36:47 +00:00
|
|
|
|
|
|
|
TableConstructor c(BytewiseComparator());
|
|
|
|
std::string user_key = "k04";
|
|
|
|
InternalKey internal_key(user_key, 0, kTypeValue);
|
|
|
|
std::string encoded_key = internal_key.Encode().ToString();
|
|
|
|
c.Add(encoded_key, "hello");
|
|
|
|
ImmutableCFOptions ioptions(options);
|
|
|
|
// Generate table with filter policy
|
|
|
|
c.Finish(options, ioptions, table_options,
|
|
|
|
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
|
|
|
|
auto reader = c.GetTableReader();
|
2017-03-13 18:44:50 +00:00
|
|
|
PinnableSlice value;
|
2015-09-02 22:36:47 +00:00
|
|
|
GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
|
|
|
|
GetContext::kNotFound, user_key, &value, nullptr,
|
2016-11-04 01:40:23 +00:00
|
|
|
nullptr, nullptr, nullptr);
|
2017-06-03 00:12:39 +00:00
|
|
|
get_perf_context()->Reset();
|
2015-09-02 22:36:47 +00:00
|
|
|
ASSERT_OK(reader->Get(ReadOptions(), encoded_key, &get_context));
|
|
|
|
if (index_and_filter_in_cache) {
|
|
|
|
// data, index and filter block
|
2017-06-03 00:12:39 +00:00
|
|
|
ASSERT_EQ(get_perf_context()->block_read_count, 3);
|
2015-09-02 22:36:47 +00:00
|
|
|
} else {
|
|
|
|
// just the data block
|
2017-06-03 00:12:39 +00:00
|
|
|
ASSERT_EQ(get_perf_context()->block_read_count, 1);
|
2015-09-02 22:36:47 +00:00
|
|
|
}
|
|
|
|
ASSERT_EQ(get_context.State(), GetContext::kFound);
|
2017-03-13 18:44:50 +00:00
|
|
|
ASSERT_STREQ(value.data(), "hello");
|
2015-09-02 22:36:47 +00:00
|
|
|
|
|
|
|
// Get non-existing key
|
|
|
|
user_key = "does-not-exist";
|
|
|
|
internal_key = InternalKey(user_key, 0, kTypeValue);
|
|
|
|
encoded_key = internal_key.Encode().ToString();
|
|
|
|
|
2017-03-13 18:44:50 +00:00
|
|
|
value.Reset();
|
2015-09-02 22:36:47 +00:00
|
|
|
get_context = GetContext(options.comparator, nullptr, nullptr, nullptr,
|
|
|
|
GetContext::kNotFound, user_key, &value, nullptr,
|
2016-11-04 01:40:23 +00:00
|
|
|
nullptr, nullptr, nullptr);
|
2017-06-03 00:12:39 +00:00
|
|
|
get_perf_context()->Reset();
|
2015-09-02 22:36:47 +00:00
|
|
|
ASSERT_OK(reader->Get(ReadOptions(), encoded_key, &get_context));
|
|
|
|
ASSERT_EQ(get_context.State(), GetContext::kNotFound);
|
|
|
|
|
|
|
|
if (index_and_filter_in_cache) {
|
|
|
|
if (bloom_filter_type == 0) {
|
|
|
|
// with block-based, we read index and then the filter
|
2017-06-03 00:12:39 +00:00
|
|
|
ASSERT_EQ(get_perf_context()->block_read_count, 2);
|
2015-09-02 22:36:47 +00:00
|
|
|
} else {
|
|
|
|
// with full-filter, we read filter first and then we stop
|
2017-06-03 00:12:39 +00:00
|
|
|
ASSERT_EQ(get_perf_context()->block_read_count, 1);
|
2015-09-02 22:36:47 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// filter is already in memory and it figures out that the key doesn't
|
|
|
|
// exist
|
2017-06-03 00:12:39 +00:00
|
|
|
ASSERT_EQ(get_perf_context()->block_read_count, 0);
|
2015-09-02 22:36:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
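// Hedged sketch (illustration only, not part of the test above): an application can
// observe the same counter that BlockReadCountTest checks via the perf context;
// 'db' and 'value' below are hypothetical placeholders.
//
//   SetPerfLevel(PerfLevel::kEnableCount);
//   get_perf_context()->Reset();
//   db->Get(ReadOptions(), "some-key", &value);
//   uint64_t blocks_read_from_file = get_perf_context()->block_read_count;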
|
|
|
|
|
2017-05-06 03:10:56 +00:00
|
|
|
// A wrapper around LRUCache that also keeps track of data blocks (in contrast
|
|
|
|
// with the objects) in the cache. The class is very simple and can be used only
|
|
|
|
// for trivial tests.
|
|
|
|
class MockCache : public LRUCache {
|
|
|
|
public:
|
|
|
|
MockCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
|
|
|
|
double high_pri_pool_ratio)
|
|
|
|
: LRUCache(capacity, num_shard_bits, strict_capacity_limit,
|
|
|
|
high_pri_pool_ratio) {}
|
|
|
|
virtual Status Insert(const Slice& key, void* value, size_t charge,
|
|
|
|
void (*deleter)(const Slice& key, void* value),
|
|
|
|
Handle** handle = nullptr,
|
|
|
|
Priority priority = Priority::LOW) override {
|
|
|
|
// Replace the deleter with our own so that we keep track of data blocks
|
|
|
|
// erased from the cache
|
|
|
|
deleters_[key.ToString()] = deleter;
|
|
|
|
return ShardedCache::Insert(key, value, charge, &MockDeleter, handle,
|
|
|
|
priority);
|
|
|
|
}
|
|
|
|
// This is called by the application right after inserting a data block
|
|
|
|
virtual void TEST_mark_as_data_block(const Slice& key,
|
|
|
|
size_t charge) override {
|
|
|
|
marked_data_in_cache_[key.ToString()] = charge;
|
|
|
|
marked_size_ += charge;
|
|
|
|
}
|
|
|
|
using DeleterFunc = void (*)(const Slice& key, void* value);
|
|
|
|
static std::map<std::string, DeleterFunc> deleters_;
|
|
|
|
static std::map<std::string, size_t> marked_data_in_cache_;
|
|
|
|
static size_t marked_size_;
|
|
|
|
static void MockDeleter(const Slice& key, void* value) {
|
|
|
|
// If the item was marked as a data block, subtract its charge from the
|
|
|
|
// total data block usage of the cache
|
|
|
|
if (marked_data_in_cache_.find(key.ToString()) !=
|
|
|
|
marked_data_in_cache_.end()) {
|
|
|
|
marked_size_ -= marked_data_in_cache_[key.ToString()];
|
|
|
|
}
|
|
|
|
// Then call the original deleter
|
|
|
|
assert(deleters_.find(key.ToString()) != deleters_.end());
|
|
|
|
auto deleter = deleters_[key.ToString()];
|
|
|
|
deleter(key, value);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
size_t MockCache::marked_size_ = 0;
|
|
|
|
std::map<std::string, MockCache::DeleterFunc> MockCache::deleters_;
|
|
|
|
std::map<std::string, size_t> MockCache::marked_data_in_cache_;
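// (Usage note, hedged: in the test below a MockCache instance is installed as
// table_options.block_cache, so every insertion goes through MockCache::Insert() and
// every eviction through MockDeleter(); the test can then compare the cache's reported
// usage against the data-block bytes accumulated in marked_size_.)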
|
|
|
|
|
|
|
|
// Block cache can contain raw data blocks as well as general objects. If an
|
|
|
|
// object depends on the table to be live, it then must be destructed before the
|
2017-08-18 17:53:03 +00:00
|
|
|
// table is closed. This test makes sure that the only items remaining in the
|
2017-05-06 03:10:56 +00:00
|
|
|
// cache after the table is closed are raw data blocks.
|
|
|
|
TEST_F(BlockBasedTableTest, NoObjectInCacheAfterTableClose) {
|
|
|
|
for (auto index_type :
|
|
|
|
{BlockBasedTableOptions::IndexType::kBinarySearch,
|
|
|
|
BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch}) {
|
|
|
|
for (bool block_based_filter : {true, false}) {
|
|
|
|
for (bool partition_filter : {true, false}) {
|
|
|
|
if (partition_filter &&
|
|
|
|
(block_based_filter ||
|
|
|
|
index_type !=
|
|
|
|
BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
for (bool index_and_filter_in_cache : {true, false}) {
|
|
|
|
for (bool pin_l0 : {true, false}) {
|
|
|
|
if (pin_l0 && !index_and_filter_in_cache) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
// Create a table
|
|
|
|
Options opt;
|
|
|
|
unique_ptr<InternalKeyComparator> ikc;
|
|
|
|
ikc.reset(new test::PlainInternalKeyComparator(opt.comparator));
|
|
|
|
opt.compression = kNoCompression;
|
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.block_size = 1024;
|
|
|
|
table_options.index_type =
|
|
|
|
BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
|
|
|
|
table_options.pin_l0_filter_and_index_blocks_in_cache = pin_l0;
|
|
|
|
table_options.partition_filters = partition_filter;
|
|
|
|
table_options.cache_index_and_filter_blocks =
|
|
|
|
index_and_filter_in_cache;
|
|
|
|
// big enough so we don't ever lose cached values.
|
|
|
|
table_options.block_cache = std::shared_ptr<rocksdb::Cache>(
|
|
|
|
new MockCache(16 * 1024 * 1024, 4, false, 0.0));
|
|
|
|
table_options.filter_policy.reset(
|
|
|
|
rocksdb::NewBloomFilterPolicy(10, block_based_filter));
|
|
|
|
opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
|
|
|
|
|
|
|
TableConstructor c(BytewiseComparator());
|
|
|
|
std::string user_key = "k01";
|
|
|
|
std::string key =
|
|
|
|
InternalKey(user_key, 0, kTypeValue).Encode().ToString();
|
|
|
|
c.Add(key, "hello");
|
|
|
|
std::vector<std::string> keys;
|
|
|
|
stl_wrappers::KVMap kvmap;
|
|
|
|
const ImmutableCFOptions ioptions(opt);
|
|
|
|
c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap);
|
|
|
|
|
|
|
|
// Do a read to get the index/filter blocks loaded into the cache
|
|
|
|
auto table_reader =
|
|
|
|
dynamic_cast<BlockBasedTable*>(c.GetTableReader());
|
|
|
|
PinnableSlice value;
|
|
|
|
GetContext get_context(opt.comparator, nullptr, nullptr, nullptr,
|
|
|
|
GetContext::kNotFound, user_key, &value,
|
|
|
|
nullptr, nullptr, nullptr, nullptr);
|
|
|
|
InternalKey ikey(user_key, 0, kTypeValue);
|
|
|
|
auto s = table_reader->Get(ReadOptions(), key, &get_context);
|
|
|
|
ASSERT_EQ(get_context.State(), GetContext::kFound);
|
|
|
|
ASSERT_STREQ(value.data(), "hello");
|
|
|
|
|
|
|
|
// Close the table
|
|
|
|
c.ResetTableReader();
|
|
|
|
|
|
|
|
auto usage = table_options.block_cache->GetUsage();
|
|
|
|
auto pinned_usage = table_options.block_cache->GetPinnedUsage();
|
|
|
|
// The only usage must be for marked data blocks
|
|
|
|
ASSERT_EQ(usage, MockCache::marked_size_);
|
|
|
|
// There must be some pinned data since PinnableSlice has not
|
|
|
|
// released them yet
|
|
|
|
ASSERT_GT(pinned_usage, 0);
|
|
|
|
// Release pinnable slice resources
|
|
|
|
value.Reset();
|
|
|
|
pinned_usage = table_options.block_cache->GetPinnedUsage();
|
|
|
|
ASSERT_EQ(pinned_usage, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-17 21:08:00 +00:00
|
|
|
TEST_F(BlockBasedTableTest, BlockCacheLeak) {
|
2014-01-24 20:14:08 +00:00
|
|
|
// Check that when we reopen a table we don't lose access to blocks already
|
|
|
|
// in the cache. This test checks whether the Table actually makes use of the
|
|
|
|
// unique ID from the file.
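// (Hedged note: block cache keys are built from a per-file prefix derived from the
// file's unique ID plus the block's offset, which is why blocks cached before the
// reopen below can still be found afterwards.)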
|
|
|
|
|
|
|
|
Options opt;
|
2014-01-27 21:53:22 +00:00
|
|
|
unique_ptr<InternalKeyComparator> ikc;
|
|
|
|
ikc.reset(new test::PlainInternalKeyComparator(opt.comparator));
|
2014-01-24 20:14:08 +00:00
|
|
|
opt.compression = kNoCompression;
|
2014-08-25 21:22:05 +00:00
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.block_size = 1024;
|
|
|
|
// big enough so we don't ever lose cached values.
|
2016-04-07 20:51:47 +00:00
|
|
|
table_options.block_cache = NewLRUCache(16 * 1024 * 1024, 4);
|
2014-08-25 21:22:05 +00:00
|
|
|
opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
2014-01-24 20:14:08 +00:00
|
|
|
|
2016-08-19 22:10:31 +00:00
|
|
|
TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
|
2014-01-24 20:14:08 +00:00
|
|
|
c.Add("k01", "hello");
|
|
|
|
c.Add("k02", "hello2");
|
|
|
|
c.Add("k03", std::string(10000, 'x'));
|
|
|
|
c.Add("k04", std::string(200000, 'x'));
|
|
|
|
c.Add("k05", std::string(300000, 'x'));
|
|
|
|
c.Add("k06", "hello3");
|
|
|
|
c.Add("k07", std::string(100000, 'x'));
|
|
|
|
std::vector<std::string> keys;
|
2015-09-02 20:58:22 +00:00
|
|
|
stl_wrappers::KVMap kvmap;
|
2014-09-04 23:18:36 +00:00
|
|
|
const ImmutableCFOptions ioptions(opt);
|
|
|
|
c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap);
|
2014-01-24 20:14:08 +00:00
|
|
|
|
2015-10-12 22:06:38 +00:00
|
|
|
unique_ptr<InternalIterator> iter(c.NewIterator());
|
2014-01-24 20:14:08 +00:00
|
|
|
iter->SeekToFirst();
|
|
|
|
while (iter->Valid()) {
|
|
|
|
iter->key();
|
|
|
|
iter->value();
|
|
|
|
iter->Next();
|
|
|
|
}
|
|
|
|
ASSERT_OK(iter->status());
|
|
|
|
|
2014-09-04 23:18:36 +00:00
|
|
|
const ImmutableCFOptions ioptions1(opt);
|
|
|
|
ASSERT_OK(c.Reopen(ioptions1));
|
2014-08-25 23:14:30 +00:00
|
|
|
auto table_reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
|
2014-01-25 05:10:19 +00:00
|
|
|
for (const std::string& key : keys) {
|
2014-01-28 05:58:46 +00:00
|
|
|
ASSERT_TRUE(table_reader->TEST_KeyInCache(ReadOptions(), key));
|
2014-01-24 20:14:08 +00:00
|
|
|
}
|
2016-04-01 17:42:39 +00:00
|
|
|
c.ResetTableReader();
|
2014-06-20 08:23:02 +00:00
|
|
|
|
|
|
|
// rerun with different block cache
|
2016-04-07 20:51:47 +00:00
|
|
|
table_options.block_cache = NewLRUCache(16 * 1024 * 1024, 4);
|
2014-08-25 21:22:05 +00:00
|
|
|
opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
2014-09-04 23:18:36 +00:00
|
|
|
const ImmutableCFOptions ioptions2(opt);
|
|
|
|
ASSERT_OK(c.Reopen(ioptions2));
|
2014-08-25 23:14:30 +00:00
|
|
|
table_reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
|
2014-06-20 08:23:02 +00:00
|
|
|
for (const std::string& key : keys) {
|
|
|
|
ASSERT_TRUE(!table_reader->TEST_KeyInCache(ReadOptions(), key));
|
|
|
|
}
|
2016-04-01 17:42:39 +00:00
|
|
|
c.ResetTableReader();
|
2014-01-24 20:14:08 +00:00
|
|
|
}
|
|
|
|
|
2016-08-24 01:20:41 +00:00
|
|
|
TEST_F(BlockBasedTableTest, NewIndexIteratorLeak) {
|
|
|
|
// A regression test to avoid data race described in
|
|
|
|
// https://github.com/facebook/rocksdb/issues/1267
|
|
|
|
TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
|
|
|
|
std::vector<std::string> keys;
|
|
|
|
stl_wrappers::KVMap kvmap;
|
|
|
|
c.Add("a1", "val1");
|
|
|
|
Options options;
|
|
|
|
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
|
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.index_type = BlockBasedTableOptions::kHashSearch;
|
|
|
|
table_options.cache_index_and_filter_blocks = true;
|
|
|
|
table_options.block_cache = NewLRUCache(0);
|
|
|
|
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
|
|
|
const ImmutableCFOptions ioptions(options);
|
|
|
|
c.Finish(options, ioptions, table_options,
|
|
|
|
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
|
|
|
|
|
|
|
|
rocksdb::SyncPoint::GetInstance()->LoadDependencyAndMarkers(
|
|
|
|
{
|
|
|
|
{"BlockBasedTable::NewIndexIterator::thread1:1",
|
|
|
|
"BlockBasedTable::NewIndexIterator::thread2:2"},
|
|
|
|
{"BlockBasedTable::NewIndexIterator::thread2:3",
|
|
|
|
"BlockBasedTable::NewIndexIterator::thread1:4"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
{"BlockBasedTableTest::NewIndexIteratorLeak:Thread1Marker",
|
|
|
|
"BlockBasedTable::NewIndexIterator::thread1:1"},
|
|
|
|
{"BlockBasedTableTest::NewIndexIteratorLeak:Thread1Marker",
|
|
|
|
"BlockBasedTable::NewIndexIterator::thread1:4"},
|
|
|
|
{"BlockBasedTableTest::NewIndexIteratorLeak:Thread2Marker",
|
|
|
|
"BlockBasedTable::NewIndexIterator::thread2:2"},
|
|
|
|
{"BlockBasedTableTest::NewIndexIteratorLeak:Thread2Marker",
|
|
|
|
"BlockBasedTable::NewIndexIterator::thread2:3"},
|
|
|
|
});
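// (Hedged note on the setup above: each {A, B} pair in the first list makes sync point
// B wait until A has been reached, while the marker pairs in the second list scope
// those dependencies to the threads that pass through the named markers.)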
|
|
|
|
|
|
|
|
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
|
|
|
|
ReadOptions ro;
|
|
|
|
auto* reader = c.GetTableReader();
|
|
|
|
|
|
|
|
std::function<void()> func1 = [&]() {
|
|
|
|
TEST_SYNC_POINT("BlockBasedTableTest::NewIndexIteratorLeak:Thread1Marker");
|
|
|
|
std::unique_ptr<InternalIterator> iter(reader->NewIterator(ro));
|
|
|
|
iter->Seek(InternalKey("a1", 0, kTypeValue).Encode());
|
|
|
|
};
|
|
|
|
|
|
|
|
std::function<void()> func2 = [&]() {
|
|
|
|
TEST_SYNC_POINT("BlockBasedTableTest::NewIndexIteratorLeak:Thread2Marker");
|
|
|
|
std::unique_ptr<InternalIterator> iter(reader->NewIterator(ro));
|
|
|
|
};
|
|
|
|
|
2017-02-06 22:43:55 +00:00
|
|
|
auto thread1 = port::Thread(func1);
|
|
|
|
auto thread2 = port::Thread(func2);
|
2016-08-24 01:20:41 +00:00
|
|
|
thread1.join();
|
|
|
|
thread2.join();
|
|
|
|
rocksdb::SyncPoint::GetInstance()->DisableProcessing();
|
|
|
|
c.ResetTableReader();
|
|
|
|
}
|
|
|
|
|
2015-07-20 18:09:14 +00:00
|
|
|
// Plain table is not supported in ROCKSDB_LITE
|
|
|
|
#ifndef ROCKSDB_LITE
|
2015-03-17 21:08:00 +00:00
|
|
|
TEST_F(PlainTableTest, BasicPlainTableProperties) {
|
2014-07-18 07:08:38 +00:00
|
|
|
PlainTableOptions plain_table_options;
|
|
|
|
plain_table_options.user_key_len = 8;
|
|
|
|
plain_table_options.bloom_bits_per_key = 8;
|
|
|
|
plain_table_options.hash_table_ratio = 0;
|
|
|
|
|
|
|
|
PlainTableFactory factory(plain_table_options);
|
2015-08-05 14:33:27 +00:00
|
|
|
test::StringSink sink;
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a thin layer for better portability. Code that is less platform dependent should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Envs in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-17 23:16:11 +00:00
|
|
|
unique_ptr<WritableFileWriter> file_writer(
|
2015-08-05 14:33:27 +00:00
|
|
|
test::GetWritableFileWriter(new test::StringSink()));
|
2014-01-27 21:53:22 +00:00
|
|
|
Options options;
|
2014-09-04 23:18:36 +00:00
|
|
|
const ImmutableCFOptions ioptions(options);
|
2014-01-27 21:53:22 +00:00
|
|
|
InternalKeyComparator ikc(options.comparator);
|
A new callback to TablePropertiesCollector to let users know whether the entry is an add, delete or merge
Summary:
Currently users have no way to tell from the TablePropertiesCollector callback whether a key is an add, delete or merge. Add a new function to convey it.
Also refactor the code so that
(1) make table property collector and internal table property collector two separate data structures with the latter one now exposed
(2) table builders only receive internal table properties
Test Plan: Add cases in table_properties_collector_test to cover both of old and new ways of using TablePropertiesCollector.
Reviewers: yhchiang, igor.sugak, rven, igor
Reviewed By: rven, igor
Subscribers: meyering, yoshinorim, maykov, leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D35373
2015-04-06 17:04:30 +00:00
|
|
|
std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
|
|
|
|
int_tbl_prop_collector_factories;
|
2016-04-07 06:10:32 +00:00
|
|
|
std::string column_family_name;
|
2016-09-18 05:30:43 +00:00
|
|
|
int unknown_level = -1;
|
2015-04-06 17:04:30 +00:00
|
|
|
std::unique_ptr<TableBuilder> builder(factory.NewTableBuilder(
|
|
|
|
TableBuilderOptions(ioptions, ikc, &int_tbl_prop_collector_factories,
|
2016-04-07 06:10:32 +00:00
|
|
|
kNoCompression, CompressionOptions(),
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 00:36:03 +00:00
|
|
|
nullptr /* compression_dict */,
|
2016-09-18 05:30:43 +00:00
|
|
|
false /* skip_filters */, column_family_name,
|
|
|
|
unknown_level),
|
2015-10-08 23:57:35 +00:00
|
|
|
TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
|
2015-07-17 23:16:11 +00:00
|
|
|
file_writer.get()));
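// (Hedged aside, prompted by the dictionary-compression commit message earlier; the
// feature is unrelated to this PlainTable test. Assuming the CompressionOptions field
// introduced by that change, it would be configured roughly as:
//
//   Options o;
//   o.compression = kZlibCompression;
//   o.compression_opts.max_dict_bytes = 16 * 1024;  // 0 (default) disables the dictionary
// )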
|
2014-01-24 20:14:08 +00:00
|
|
|
|
|
|
|
for (char c = 'a'; c <= 'z'; ++c) {
|
2014-01-27 21:53:22 +00:00
|
|
|
std::string key(8, c);
|
|
|
|
key.append("\1 "); // PlainTable expects internal key structure
|
2014-01-24 20:14:08 +00:00
|
|
|
std::string value(28, c + 42);
|
|
|
|
builder->Add(key, value);
|
|
|
|
}
|
|
|
|
ASSERT_OK(builder->Finish());
|
2015-07-17 23:16:11 +00:00
|
|
|
file_writer->Flush();
|
2014-01-24 20:14:08 +00:00
|
|
|
|
2015-08-05 14:33:27 +00:00
|
|
|
test::StringSink* ss =
|
|
|
|
static_cast<test::StringSink*>(file_writer->writable_file());
|
2015-07-17 23:16:11 +00:00
|
|
|
unique_ptr<RandomAccessFileReader> file_reader(
|
|
|
|
test::GetRandomAccessFileReader(
|
2015-08-05 14:33:27 +00:00
|
|
|
new test::StringSource(ss->contents(), 72242, true)));
|
2014-01-24 20:14:08 +00:00
|
|
|
|
2014-02-08 03:26:49 +00:00
|
|
|
TableProperties* props = nullptr;
|
2015-07-17 23:16:11 +00:00
|
|
|
auto s = ReadTableProperties(file_reader.get(), ss->contents().size(),
|
2016-07-19 16:44:03 +00:00
|
|
|
kPlainTableMagicNumber, ioptions,
|
2014-01-25 05:10:19 +00:00
|
|
|
&props);
|
2014-02-08 06:43:58 +00:00
|
|
|
std::unique_ptr<TableProperties> props_guard(props);
|
2014-01-24 20:14:08 +00:00
|
|
|
ASSERT_OK(s);
|
|
|
|
|
2014-02-08 03:26:49 +00:00
|
|
|
ASSERT_EQ(0ul, props->index_size);
|
|
|
|
ASSERT_EQ(0ul, props->filter_size);
|
|
|
|
ASSERT_EQ(16ul * 26, props->raw_key_size);
|
|
|
|
ASSERT_EQ(28ul * 26, props->raw_value_size);
|
|
|
|
ASSERT_EQ(26ul, props->num_entries);
|
|
|
|
ASSERT_EQ(1ul, props->num_data_blocks);
|
2014-01-24 20:14:08 +00:00
|
|
|
}
|
2015-07-20 18:09:14 +00:00
|
|
|
#endif // !ROCKSDB_LITE
|
2014-01-24 20:14:08 +00:00
|
|
|
|
2015-03-17 21:08:00 +00:00
|
|
|
TEST_F(GeneralTableTest, ApproximateOffsetOfPlain) {
|
2016-08-19 22:10:31 +00:00
|
|
|
TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
|
2011-03-18 22:37:00 +00:00
|
|
|
c.Add("k01", "hello");
|
|
|
|
c.Add("k02", "hello2");
|
|
|
|
c.Add("k03", std::string(10000, 'x'));
|
|
|
|
c.Add("k04", std::string(200000, 'x'));
|
|
|
|
c.Add("k05", std::string(300000, 'x'));
|
|
|
|
c.Add("k06", "hello3");
|
|
|
|
c.Add("k07", std::string(100000, 'x'));
|
|
|
|
std::vector<std::string> keys;
|
2015-09-02 20:58:22 +00:00
|
|
|
stl_wrappers::KVMap kvmap;
|
2013-11-20 06:00:48 +00:00
|
|
|
Options options;
|
2014-01-27 21:53:22 +00:00
|
|
|
test::PlainInternalKeyComparator internal_comparator(options.comparator);
|
2011-03-18 22:37:00 +00:00
|
|
|
options.compression = kNoCompression;
|
2014-08-25 21:22:05 +00:00
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.block_size = 1024;
|
2014-09-04 23:18:36 +00:00
|
|
|
const ImmutableCFOptions ioptions(options);
|
|
|
|
c.Finish(options, ioptions, table_options, internal_comparator,
|
|
|
|
&keys, &kvmap);
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
|
2016-08-19 22:10:31 +00:00
|
|
|
// k04 and k05 will be in two consecutive blocks, the index is
|
|
|
|
// an arbitrary slice between k04 and k05, either before or after k04a
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 10000, 211000));
|
2011-03-18 22:37:00 +00:00
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
|
2012-04-17 15:36:46 +00:00
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
|
2016-04-01 17:42:39 +00:00
|
|
|
c.ResetTableReader();
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2014-01-24 19:09:04 +00:00
|
|
|
static void DoCompressionTest(CompressionType comp) {
|
2011-03-18 22:37:00 +00:00
|
|
|
Random rnd(301);
|
2016-08-19 22:10:31 +00:00
|
|
|
TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
|
2011-03-18 22:37:00 +00:00
|
|
|
std::string tmp;
|
|
|
|
c.Add("k01", "hello");
|
|
|
|
c.Add("k02", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
|
|
|
|
c.Add("k03", "hello3");
|
|
|
|
c.Add("k04", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
|
|
|
|
std::vector<std::string> keys;
|
2015-09-02 20:58:22 +00:00
|
|
|
stl_wrappers::KVMap kvmap;
|
2013-11-20 06:00:48 +00:00
|
|
|
Options options;
|
2014-01-27 21:53:22 +00:00
|
|
|
test::PlainInternalKeyComparator ikc(options.comparator);
|
2012-06-28 06:41:33 +00:00
|
|
|
options.compression = comp;
|
2014-08-25 21:22:05 +00:00
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
table_options.block_size = 1024;
|
2014-09-04 23:18:36 +00:00
|
|
|
const ImmutableCFOptions ioptions(options);
|
|
|
|
c.Finish(options, ioptions, table_options, ikc, &keys, &kvmap);
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 2000, 3000));
|
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 2000, 3000));
|
2013-12-20 17:35:24 +00:00
|
|
|
ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 4000, 6100));
|
2016-04-01 17:42:39 +00:00
|
|
|
c.ResetTableReader();
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2015-03-17 21:08:00 +00:00
|
|
|
TEST_F(GeneralTableTest, ApproximateOffsetOfCompressed) {
|
2014-02-11 01:02:02 +00:00
|
|
|
std::vector<CompressionType> compression_state;
|
2015-04-06 19:50:44 +00:00
|
|
|
if (!Snappy_Supported()) {
|
2012-06-28 06:41:33 +00:00
|
|
|
fprintf(stderr, "skipping snappy compression tests\n");
|
|
|
|
} else {
|
2014-02-11 01:02:02 +00:00
|
|
|
compression_state.push_back(kSnappyCompression);
|
2012-06-28 06:41:33 +00:00
|
|
|
}
|
|
|
|
|
2015-04-06 19:50:44 +00:00
|
|
|
if (!Zlib_Supported()) {
|
2012-06-28 06:41:33 +00:00
|
|
|
fprintf(stderr, "skipping zlib compression tests\n");
|
|
|
|
} else {
|
2014-02-11 01:02:02 +00:00
|
|
|
compression_state.push_back(kZlibCompression);
|
2012-06-28 06:41:33 +00:00
|
|
|
}
|
|
|
|
|
2014-02-11 01:02:02 +00:00
|
|
|
// TODO(kailiu) DoCompressionTest() doesn't work with BZip2.
|
|
|
|
/*
|
2015-04-06 19:50:44 +00:00
|
|
|
if (!BZip2_Supported()) {
|
2014-02-08 02:12:30 +00:00
|
|
|
fprintf(stderr, "skipping bzip2 compression tests\n");
|
|
|
|
} else {
|
2014-02-11 01:02:02 +00:00
|
|
|
compression_state.push_back(kBZip2Compression);
|
2014-02-08 02:12:30 +00:00
|
|
|
}
|
2014-02-11 01:02:02 +00:00
|
|
|
*/
|
2014-02-08 02:12:30 +00:00
|
|
|
|
2015-04-06 19:50:44 +00:00
|
|
|
if (!LZ4_Supported()) {
|
|
|
|
fprintf(stderr, "skipping lz4 and lz4hc compression tests\n");
|
2014-02-08 02:12:30 +00:00
|
|
|
} else {
|
2014-02-11 01:02:02 +00:00
|
|
|
compression_state.push_back(kLZ4Compression);
|
|
|
|
compression_state.push_back(kLZ4HCCompression);
|
2012-06-28 06:41:33 +00:00
|
|
|
}
|
|
|
|
|
2016-04-20 05:54:24 +00:00
|
|
|
if (!XPRESS_Supported()) {
|
|
|
|
fprintf(stderr, "skipping xpress and xpress compression tests\n");
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
compression_state.push_back(kXpressCompression);
|
|
|
|
}
|
|
|
|
|
2014-02-11 01:02:02 +00:00
|
|
|
for (auto state : compression_state) {
|
|
|
|
DoCompressionTest(state);
|
2012-06-28 06:41:33 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-17 21:08:00 +00:00
|
|
|
TEST_F(HarnessTest, Randomized) {
|
2013-11-20 06:00:48 +00:00
|
|
|
std::vector<TestArgs> args = GenerateArgList();
|
2013-11-10 09:17:32 +00:00
|
|
|
for (unsigned int i = 0; i < args.size(); i++) {
|
|
|
|
Init(args[i]);
|
|
|
|
Random rnd(test::RandomSeed() + 5);
|
|
|
|
for (int num_entries = 0; num_entries < 2000;
|
|
|
|
num_entries += (num_entries < 50 ? 1 : 200)) {
|
|
|
|
if ((num_entries % 10) == 0) {
|
2014-02-05 00:21:47 +00:00
|
|
|
fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
|
|
|
|
static_cast<int>(args.size()), num_entries);
|
2013-11-10 09:17:32 +00:00
|
|
|
}
|
|
|
|
for (int e = 0; e < num_entries; e++) {
|
|
|
|
std::string v;
|
|
|
|
Add(test::RandomKey(&rnd, rnd.Skewed(4)),
|
|
|
|
test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
|
|
|
|
}
|
|
|
|
Test(&rnd);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-10-13 17:32:05 +00:00
|
|
|
#ifndef ROCKSDB_LITE
|
2015-03-17 21:08:00 +00:00
|
|
|
TEST_F(HarnessTest, RandomizedLongDB) {
|
2013-11-10 09:17:32 +00:00
|
|
|
Random rnd(test::RandomSeed());
|
2015-12-30 13:49:06 +00:00
|
|
|
TestArgs args = {DB_TEST, false, 16, kNoCompression, 0, false};
|
2013-11-10 09:17:32 +00:00
|
|
|
Init(args);
|
|
|
|
int num_entries = 100000;
|
|
|
|
for (int e = 0; e < num_entries; e++) {
|
|
|
|
std::string v;
|
|
|
|
Add(test::RandomKey(&rnd, rnd.Skewed(4)),
|
|
|
|
test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
|
|
|
|
}
|
|
|
|
Test(&rnd);
|
|
|
|
|
|
|
|
// We must have created enough data to force merging
|
|
|
|
int files = 0;
|
|
|
|
for (int level = 0; level < db()->NumberLevels(); level++) {
|
|
|
|
std::string value;
|
|
|
|
char name[100];
|
|
|
|
snprintf(name, sizeof(name), "rocksdb.num-files-at-level%d", level);
|
|
|
|
ASSERT_TRUE(db()->GetProperty(name, &value));
|
|
|
|
files += atoi(value.c_str());
|
|
|
|
}
|
|
|
|
ASSERT_GT(files, 0);
|
|
|
|
}
|
2015-10-13 17:32:05 +00:00
|
|
|
#endif // ROCKSDB_LITE
|
2013-11-10 09:17:32 +00:00
|
|
|
|

class MemTableTest : public testing::Test {};

TEST_F(MemTableTest, Simple) {
  InternalKeyComparator cmp(BytewiseComparator());
  auto table_factory = std::make_shared<SkipListFactory>();
  Options options;
  options.memtable_factory = table_factory;
  ImmutableCFOptions ioptions(options);
  WriteBufferManager wb(options.db_write_buffer_size);
  MemTable* memtable =
      new MemTable(cmp, ioptions, MutableCFOptions(options), &wb,
                   kMaxSequenceNumber, 0 /* column_family_id */);
  memtable->Ref();
  WriteBatch batch;
  WriteBatchInternal::SetSequence(&batch, 100);
  batch.Put(std::string("k1"), std::string("v1"));
  batch.Put(std::string("k2"), std::string("v2"));
  batch.Put(std::string("k3"), std::string("v3"));
  batch.Put(std::string("largekey"), std::string("vlarge"));
  batch.DeleteRange(std::string("chi"), std::string("xigua"));
  batch.DeleteRange(std::string("begin"), std::string("end"));
  ColumnFamilyMemTablesDefault cf_mems_default(memtable);
  ASSERT_TRUE(
      WriteBatchInternal::InsertInto(&batch, &cf_mems_default, nullptr).ok());

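  // Two passes over the memtable: the first uses a regular point-key
  // iterator, the second uses the range-tombstone iterator produced by the
  // DeleteRange calls above.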
  for (int i = 0; i < 2; ++i) {
    Arena arena;
    ScopedArenaIterator arena_iter_guard;
    std::unique_ptr<InternalIterator> iter_guard;
    InternalIterator* iter;
    if (i == 0) {
      iter = memtable->NewIterator(ReadOptions(), &arena);
      arena_iter_guard.set(iter);
    } else {
      iter = memtable->NewRangeTombstoneIterator(ReadOptions());
      iter_guard.reset(iter);
    }
    if (iter == nullptr) {
      continue;
    }
    iter->SeekToFirst();
    while (iter->Valid()) {
      fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
              iter->value().ToString().c_str());
      iter->Next();
    }
  }

  delete memtable->Unref();
}

// Test the empty key
TEST_F(HarnessTest, SimpleEmptyKey) {
  auto args = GenerateArgList();
  for (const auto& arg : args) {
    Init(arg);
    Random rnd(test::RandomSeed() + 1);
    Add("", "v");
    Test(&rnd);
  }
}

TEST_F(HarnessTest, SimpleSingle) {
  auto args = GenerateArgList();
  for (const auto& arg : args) {
    Init(arg);
    Random rnd(test::RandomSeed() + 2);
    Add("abc", "v");
    Test(&rnd);
  }
}

TEST_F(HarnessTest, SimpleMulti) {
  auto args = GenerateArgList();
  for (const auto& arg : args) {
    Init(arg);
    Random rnd(test::RandomSeed() + 3);
    Add("abc", "v");
    Add("abcd", "v");
    Add("ac", "v2");
    Test(&rnd);
  }
}

TEST_F(HarnessTest, SimpleSpecialKey) {
  auto args = GenerateArgList();
  for (const auto& arg : args) {
    Init(arg);
    Random rnd(test::RandomSeed() + 4);
    Add("\xff\xff", "v3");
    Test(&rnd);
  }
}
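
// Footer encode/decode round trips: legacy magic numbers should be
// upconverted to the current ones, and version 0 footers (which carry no
// checksum type field) should decode with the default kCRC32c checksum.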
TEST_F(HarnessTest, FooterTests) {
  {
    // upconvert legacy block based
    std::string encoded;
    Footer footer(kLegacyBlockBasedTableMagicNumber, 0);
    BlockHandle meta_index(10, 5), index(20, 15);
    footer.set_metaindex_handle(meta_index);
    footer.set_index_handle(index);
    footer.EncodeTo(&encoded);
    Footer decoded_footer;
    Slice encoded_slice(encoded);
    decoded_footer.DecodeFrom(&encoded_slice);
    ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
    ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
    ASSERT_EQ(decoded_footer.version(), 0U);
  }
  {
    // xxhash block based
    std::string encoded;
    Footer footer(kBlockBasedTableMagicNumber, 1);
    BlockHandle meta_index(10, 5), index(20, 15);
    footer.set_metaindex_handle(meta_index);
    footer.set_index_handle(index);
    footer.set_checksum(kxxHash);
    footer.EncodeTo(&encoded);
    Footer decoded_footer;
    Slice encoded_slice(encoded);
    decoded_footer.DecodeFrom(&encoded_slice);
    ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
    ASSERT_EQ(decoded_footer.checksum(), kxxHash);
    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
    ASSERT_EQ(decoded_footer.version(), 1U);
  }
  // Plain table is not supported in ROCKSDB_LITE
#ifndef ROCKSDB_LITE
  {
    // upconvert legacy plain table
    std::string encoded;
    Footer footer(kLegacyPlainTableMagicNumber, 0);
    BlockHandle meta_index(10, 5), index(20, 15);
    footer.set_metaindex_handle(meta_index);
    footer.set_index_handle(index);
    footer.EncodeTo(&encoded);
    Footer decoded_footer;
    Slice encoded_slice(encoded);
    decoded_footer.DecodeFrom(&encoded_slice);
    ASSERT_EQ(decoded_footer.table_magic_number(), kPlainTableMagicNumber);
    ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
    ASSERT_EQ(decoded_footer.version(), 0U);
  }
  {
    // xxhash plain table
    std::string encoded;
    Footer footer(kPlainTableMagicNumber, 1);
    BlockHandle meta_index(10, 5), index(20, 15);
    footer.set_metaindex_handle(meta_index);
    footer.set_index_handle(index);
    footer.set_checksum(kxxHash);
    footer.EncodeTo(&encoded);
    Footer decoded_footer;
    Slice encoded_slice(encoded);
    decoded_footer.DecodeFrom(&encoded_slice);
    ASSERT_EQ(decoded_footer.table_magic_number(), kPlainTableMagicNumber);
    ASSERT_EQ(decoded_footer.checksum(), kxxHash);
    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
    ASSERT_EQ(decoded_footer.version(), 1U);
  }
#endif  // !ROCKSDB_LITE
  {
    // version == 2
    std::string encoded;
    Footer footer(kBlockBasedTableMagicNumber, 2);
    BlockHandle meta_index(10, 5), index(20, 15);
    footer.set_metaindex_handle(meta_index);
    footer.set_index_handle(index);
    footer.EncodeTo(&encoded);
    Footer decoded_footer;
    Slice encoded_slice(encoded);
    decoded_footer.DecodeFrom(&encoded_slice);
    ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
    ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
    ASSERT_EQ(decoded_footer.version(), 2U);
  }
}
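
// Exercises index_block_restart_interval values {-1, 0, 1, 8, 16, 32}; a
// small data block size keeps the index block large enough for the restart
// interval to have a visible effect.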
class IndexBlockRestartIntervalTest
    : public BlockBasedTableTest,
      public ::testing::WithParamInterface<int> {
 public:
  static std::vector<int> GetRestartValues() { return {-1, 0, 1, 8, 16, 32}; }
};

INSTANTIATE_TEST_CASE_P(
    IndexBlockRestartIntervalTest, IndexBlockRestartIntervalTest,
    ::testing::ValuesIn(IndexBlockRestartIntervalTest::GetRestartValues()));

TEST_P(IndexBlockRestartIntervalTest, IndexBlockRestartInterval) {
  const int kKeysInTable = 10000;
  const int kKeySize = 100;
  const int kValSize = 500;

  int index_block_restart_interval = GetParam();

  Options options;
  BlockBasedTableOptions table_options;
  table_options.block_size = 64;  // small block size to get big index block
  table_options.index_block_restart_interval = index_block_restart_interval;
  options.table_factory.reset(new BlockBasedTableFactory(table_options));

  TableConstructor c(BytewiseComparator());
  static Random rnd(301);
  for (int i = 0; i < kKeysInTable; i++) {
    InternalKey k(RandomString(&rnd, kKeySize), 0, kTypeValue);
    c.Add(k.Encode().ToString(), RandomString(&rnd, kValSize));
  }

  std::vector<std::string> keys;
  stl_wrappers::KVMap kvmap;
  std::unique_ptr<InternalKeyComparator> comparator(
      new InternalKeyComparator(BytewiseComparator()));
  const ImmutableCFOptions ioptions(options);
  c.Finish(options, ioptions, table_options, *comparator, &keys, &kvmap);
  auto reader = c.GetTableReader();

  std::unique_ptr<InternalIterator> db_iter(reader->NewIterator(ReadOptions()));

  // Test point lookup
  for (auto& kv : kvmap) {
    db_iter->Seek(kv.first);

    ASSERT_TRUE(db_iter->Valid());
    ASSERT_OK(db_iter->status());
    ASSERT_EQ(db_iter->key(), kv.first);
    ASSERT_EQ(db_iter->value(), kv.second);
  }

  // Test iterating
  auto kv_iter = kvmap.begin();
  for (db_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next()) {
    ASSERT_EQ(db_iter->key(), kv_iter->first);
    ASSERT_EQ(db_iter->value(), kv_iter->second);
    kv_iter++;
  }
  ASSERT_EQ(kv_iter, kvmap.end());

  c.ResetTableReader();
}

class PrefixTest : public testing::Test {
 public:
  PrefixTest() : testing::Test() {}
  ~PrefixTest() {}
};

namespace {
// A simple PrefixExtractor that only works for test PrefixAndWholeKeyTest
class TestPrefixExtractor : public rocksdb::SliceTransform {
 public:
  ~TestPrefixExtractor() override {}
  const char* Name() const override { return "TestPrefixExtractor"; }

  rocksdb::Slice Transform(const rocksdb::Slice& src) const override {
    assert(IsValid(src));
    return rocksdb::Slice(src.data(), 3);
  }

  bool InDomain(const rocksdb::Slice& src) const override {
    assert(IsValid(src));
    return true;
  }

  bool InRange(const rocksdb::Slice& dst) const override { return true; }

  bool IsValid(const rocksdb::Slice& src) const {
    if (src.size() != 4) {
      return false;
    }
    if (src[0] != '[') {
      return false;
    }
    if (src[1] < '0' || src[1] > '9') {
      return false;
    }
    if (src[2] != ']') {
      return false;
    }
    if (src[3] < '0' || src[3] > '9') {
      return false;
    }
    return true;
  }
};
}  // namespace
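
// Builds a DB whose SST files carry both prefix bloom filters
// (TestPrefixExtractor) and whole-key bloom filters, then forces a full
// compaction.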
TEST_F(PrefixTest, PrefixAndWholeKeyTest) {
  rocksdb::Options options;
  options.compaction_style = rocksdb::kCompactionStyleUniversal;
  options.num_levels = 20;
  options.create_if_missing = true;
  options.optimize_filters_for_hits = false;
  options.target_file_size_base = 268435456;
  options.prefix_extractor = std::make_shared<TestPrefixExtractor>();
  rocksdb::BlockBasedTableOptions bbto;
  bbto.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
  bbto.block_size = 262144;
  bbto.whole_key_filtering = true;

  const std::string kDBPath = test::TmpDir() + "/table_prefix_test";
  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
  DestroyDB(kDBPath, options);
  rocksdb::DB* db;
  ASSERT_OK(rocksdb::DB::Open(options, kDBPath, &db));

  // Create a bunch of keys with 10 filters.
  for (int i = 0; i < 10; i++) {
    std::string prefix = "[" + std::to_string(i) + "]";
    for (int j = 0; j < 10; j++) {
      std::string key = prefix + std::to_string(j);
      db->Put(rocksdb::WriteOptions(), key, "1");
    }
  }

  // Trigger compaction.
  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  delete db;
  // In the second round, turn whole_key_filtering off and expect
  // rocksdb still works.
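  //
  // A minimal sketch of that second round (not part of the original test;
  // it reuses kDBPath and the key layout written above):
  //
  //   bbto.whole_key_filtering = false;
  //   options.table_factory.reset(NewBlockBasedTableFactory(bbto));
  //   ASSERT_OK(rocksdb::DB::Open(options, kDBPath, &db));
  //   std::string value;
  //   ASSERT_OK(db->Get(rocksdb::ReadOptions(), "[0]0", &value));
  //   delete db;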
}

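// An external SST file stores every key with sequence number 0 and records a
// global sequence number in its table properties; the table reader is
// expected to report that global value as the sequence number of every key.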
TEST_F(BlockBasedTableTest, TableWithGlobalSeqno) {
  BlockBasedTableOptions bbto;
  test::StringSink* sink = new test::StringSink();
  unique_ptr<WritableFileWriter> file_writer(test::GetWritableFileWriter(sink));
  Options options;
  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
  const ImmutableCFOptions ioptions(options);
  InternalKeyComparator ikc(options.comparator);
  std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
      int_tbl_prop_collector_factories;
  int_tbl_prop_collector_factories.emplace_back(
      new SstFileWriterPropertiesCollectorFactory(2 /* version */,
                                                  0 /* global_seqno*/));
  std::string column_family_name;
  std::unique_ptr<TableBuilder> builder(options.table_factory->NewTableBuilder(
      TableBuilderOptions(ioptions, ikc, &int_tbl_prop_collector_factories,
                          kNoCompression, CompressionOptions(),
                          nullptr /* compression_dict */,
                          false /* skip_filters */, column_family_name, -1),
      TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
      file_writer.get()));

  for (char c = 'a'; c <= 'z'; ++c) {
    std::string key(8, c);
    std::string value = key;
    InternalKey ik(key, 0, kTypeValue);

    builder->Add(ik.Encode(), value);
  }
  ASSERT_OK(builder->Finish());
  file_writer->Flush();

  test::RandomRWStringSink ss_rw(sink);
  uint32_t version;
  uint64_t global_seqno;
  uint64_t global_seqno_offset;

  // Helper function to get version, global_seqno, global_seqno_offset
  std::function<void()> GetVersionAndGlobalSeqno = [&]() {
    unique_ptr<RandomAccessFileReader> file_reader(
        test::GetRandomAccessFileReader(
            new test::StringSource(ss_rw.contents(), 73342, true)));

    TableProperties* props = nullptr;
    ASSERT_OK(ReadTableProperties(file_reader.get(), ss_rw.contents().size(),
                                  kBlockBasedTableMagicNumber, ioptions,
                                  &props));

    UserCollectedProperties user_props = props->user_collected_properties;
    version = DecodeFixed32(
        user_props[ExternalSstFilePropertyNames::kVersion].c_str());
    global_seqno = DecodeFixed64(
        user_props[ExternalSstFilePropertyNames::kGlobalSeqno].c_str());
    global_seqno_offset =
        props->properties_offsets[ExternalSstFilePropertyNames::kGlobalSeqno];

    delete props;
  };

  // Helper function to update the value of the global seqno in the file
  std::function<void(uint64_t)> SetGlobalSeqno = [&](uint64_t val) {
    std::string new_global_seqno;
    PutFixed64(&new_global_seqno, val);

    ASSERT_OK(ss_rw.Write(global_seqno_offset, new_global_seqno));
  };

  // Helper function to get the contents of the table InternalIterator
  unique_ptr<TableReader> table_reader;
  std::function<InternalIterator*()> GetTableInternalIter = [&]() {
    unique_ptr<RandomAccessFileReader> file_reader(
        test::GetRandomAccessFileReader(
            new test::StringSource(ss_rw.contents(), 73342, true)));

    options.table_factory->NewTableReader(
        TableReaderOptions(ioptions, EnvOptions(), ikc), std::move(file_reader),
        ss_rw.contents().size(), &table_reader);

    return table_reader->NewIterator(ReadOptions());
  };

  GetVersionAndGlobalSeqno();
  ASSERT_EQ(2, version);
  ASSERT_EQ(0, global_seqno);

  InternalIterator* iter = GetTableInternalIter();
  char current_c = 'a';
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ParsedInternalKey pik;
    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));

    ASSERT_EQ(pik.type, ValueType::kTypeValue);
    ASSERT_EQ(pik.sequence, 0);
    ASSERT_EQ(pik.user_key, iter->value());
    ASSERT_EQ(pik.user_key.ToString(), std::string(8, current_c));
    current_c++;
  }
  ASSERT_EQ(current_c, 'z' + 1);
  delete iter;

  // Update global sequence number to 10
  SetGlobalSeqno(10);
  GetVersionAndGlobalSeqno();
  ASSERT_EQ(2, version);
  ASSERT_EQ(10, global_seqno);

  iter = GetTableInternalIter();
  current_c = 'a';
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ParsedInternalKey pik;
    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));

    ASSERT_EQ(pik.type, ValueType::kTypeValue);
    ASSERT_EQ(pik.sequence, 10);
    ASSERT_EQ(pik.user_key, iter->value());
    ASSERT_EQ(pik.user_key.ToString(), std::string(8, current_c));
    current_c++;
  }
  ASSERT_EQ(current_c, 'z' + 1);

  // Verify Seek
  for (char c = 'a'; c <= 'z'; c++) {
    std::string k = std::string(8, c);
    InternalKey ik(k, 10, kValueTypeForSeek);
    iter->Seek(ik.Encode());
    ASSERT_TRUE(iter->Valid());

    ParsedInternalKey pik;
    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));

    ASSERT_EQ(pik.type, ValueType::kTypeValue);
    ASSERT_EQ(pik.sequence, 10);
    ASSERT_EQ(pik.user_key.ToString(), k);
    ASSERT_EQ(iter->value().ToString(), k);
  }
  delete iter;

  // Update global sequence number to 3
  SetGlobalSeqno(3);
  GetVersionAndGlobalSeqno();
  ASSERT_EQ(2, version);
  ASSERT_EQ(3, global_seqno);

  iter = GetTableInternalIter();
  current_c = 'a';
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ParsedInternalKey pik;
    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));

    ASSERT_EQ(pik.type, ValueType::kTypeValue);
    ASSERT_EQ(pik.sequence, 3);
    ASSERT_EQ(pik.user_key, iter->value());
    ASSERT_EQ(pik.user_key.ToString(), std::string(8, current_c));
    current_c++;
  }
  ASSERT_EQ(current_c, 'z' + 1);

  // Verify Seek
  for (char c = 'a'; c <= 'z'; c++) {
    std::string k = std::string(8, c);
    // The keys' global seqno (3) is less than 4, so a seek at seqno 4
    // should still find our key.
    InternalKey ik(k, 4, kValueTypeForSeek);
    iter->Seek(ik.Encode());
    ASSERT_TRUE(iter->Valid());

    ParsedInternalKey pik;
    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));

    ASSERT_EQ(pik.type, ValueType::kTypeValue);
    ASSERT_EQ(pik.sequence, 3);
    ASSERT_EQ(pik.user_key.ToString(), k);
    ASSERT_EQ(iter->value().ToString(), k);
  }

  delete iter;
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}