// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <stdint.h>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "options/cf_options.h"
#include "rocksdb/options.h"
#include "rocksdb/persistent_cache.h"
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"
#include "rocksdb/table.h"
#include "table/block.h"
#include "table/filter_block.h"
#include "table/format.h"
#include "table/persistent_cache_helper.h"
#include "table/table_properties_internal.h"
#include "table/table_reader.h"
#include "table/two_level_iterator.h"
#include "util/coding.h"
#include "util/file_reader_writer.h"

namespace rocksdb {

class BlockHandle;
class Cache;
class FilterBlockReader;
class BlockBasedFilterBlockReader;
class FullFilterBlockReader;
class Footer;
class InternalKeyComparator;
class Iterator;
class RandomAccessFile;
class TableCache;
class TableReader;
class WritableFile;
struct BlockBasedTableOptions;
struct EnvOptions;
struct ReadOptions;
class GetContext;
class InternalIterator;

using std::unique_ptr;

typedef std::vector<std::pair<std::string, std::string>> KVPairBlock;

// A Table is a sorted map from strings to strings.  Tables are
// immutable and persistent.  A Table may be safely accessed from
// multiple threads without external synchronization.
class BlockBasedTable : public TableReader {
 public:
  static const std::string kFilterBlockPrefix;
  static const std::string kFullFilterBlockPrefix;
  static const std::string kPartitionedFilterBlockPrefix;
  // The longest prefix of the cache key used to identify blocks.
  // For Posix files the unique ID is three varints.
  static const size_t kMaxCacheKeyPrefixSize = kMaxVarint64Length * 3 + 1;

  // Attempt to open the table that is stored in bytes [0..file_size)
  // of "file", and read the metadata entries necessary to allow
  // retrieving data from the table.
  //
  // If successful, returns ok and sets "*table_reader" to the newly opened
  // table. The client should delete "*table_reader" when no longer needed.
  // If there was an error while initializing the table, sets "*table_reader"
  // to nullptr and returns a non-ok status.
  //
  // @param file must remain live while this Table is in use.
  // @param prefetch_index_and_filter_in_cache can be used to disable
  //    prefetching of index and filter blocks into block cache at startup.
  // @param skip_filters Disables loading/accessing the filter block. Overrides
  //    prefetch_index_and_filter_in_cache, so filter will be skipped if both
  //    are set.
  static Status Open(const ImmutableCFOptions& ioptions,
                     const EnvOptions& env_options,
                     const BlockBasedTableOptions& table_options,
                     const InternalKeyComparator& internal_key_comparator,
                     unique_ptr<RandomAccessFileReader>&& file,
                     uint64_t file_size, unique_ptr<TableReader>* table_reader,
                     bool prefetch_index_and_filter_in_cache = true,
                     bool skip_filters = false, int level = -1);
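
  // Illustrative usage sketch (not part of the original header; assumes
  // `ioptions`, `env_options`, `table_options`, `icomp`, `file`, and
  // `file_size` have already been prepared by the caller):
  //
  //   std::unique_ptr<TableReader> table_reader;
  //   Status s = BlockBasedTable::Open(ioptions, env_options, table_options,
  //                                    icomp, std::move(file), file_size,
  //                                    &table_reader);
  //   if (s.ok()) {
  //     std::unique_ptr<InternalIterator> iter(
  //         table_reader->NewIterator(ReadOptions()));
  //     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
  //       // iter->key()/iter->value() yield internal key/value pairs.
  //     }
  //   }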

  bool PrefixMayMatch(const Slice& internal_key);

  // Returns a new iterator over the table contents.
  // The result of NewIterator() is initially invalid (caller must
  // call one of the Seek methods on the iterator before using it).
  // @param skip_filters Disables loading/accessing the filter block
  InternalIterator* NewIterator(
      const ReadOptions&, Arena* arena = nullptr,
      bool skip_filters = false) override;

  InternalIterator* NewRangeTombstoneIterator(
      const ReadOptions& read_options) override;

  // @param skip_filters Disables loading/accessing the filter block
  Status Get(const ReadOptions& readOptions, const Slice& key,
             GetContext* get_context, bool skip_filters = false) override;

  // Pre-fetch the disk blocks that correspond to the key range specified by
  // (kbegin, kend). The call will return error status in the event of
  // IO or iteration error.
  Status Prefetch(const Slice* begin, const Slice* end) override;

  // Given a key, return an approximate byte offset in the file where
  // the data for that key begins (or would begin if the key were
  // present in the file).  The returned value is in terms of file
  // bytes, and so includes effects like compression of the underlying data.
  // E.g., the approximate offset of the last key in the table will
  // be close to the file length.
  uint64_t ApproximateOffsetOf(const Slice& key) override;

  // Returns true if the block for the specified key is in cache.
  // REQUIRES: key is in this table && block cache enabled
  bool TEST_KeyInCache(const ReadOptions& options, const Slice& key);

  // Set up the table for Compaction. Might change some parameters with
  // posix_fadvise
  void SetupForCompaction() override;

  std::shared_ptr<const TableProperties> GetTableProperties() const override;

  size_t ApproximateMemoryUsage() const override;

  // convert SST file to a human readable form
  Status DumpTable(WritableFile* out_file) override;

  Status VerifyChecksum() override;

  void Close() override;

  ~BlockBasedTable();

  bool TEST_filter_block_preloaded() const;
  bool TEST_index_reader_preloaded() const;

  // IndexReader is the interface that provides the functionality for index
  // access.
  class IndexReader {
   public:
    explicit IndexReader(const InternalKeyComparator* icomparator,
                         Statistics* stats)
        : icomparator_(icomparator), statistics_(stats) {}

    virtual ~IndexReader() {}

    // Create an iterator for index access.
    // If iter is null then a new object is created on the heap and the caller
    // will have the ownership. If a non-null iter is passed in it will be
    // used, and the returned value is either the same as iter or a new
    // on-heap object that wraps the passed iter. In the latter case the
    // return value points to a different object than iter and the caller has
    // the ownership of the returned object.
    // See the calling-convention sketch after this class.
    virtual InternalIterator* NewIterator(BlockIter* iter = nullptr,
                                          bool total_order_seek = true,
                                          bool fill_cache = true) = 0;

    // The size of the index.
    virtual size_t size() const = 0;
    // Memory usage of the index block
    virtual size_t usable_size() const = 0;
    // return the statistics pointer
    virtual Statistics* statistics() const { return statistics_; }
    // Report an approximation of how much memory has been used other than
    // memory that was allocated in block cache.
    virtual size_t ApproximateMemoryUsage() const = 0;

    virtual void CacheDependencies(bool /* unused */) {}

    // Prefetch all the blocks referenced by this index to the buffer
    void PrefetchBlocks(FilePrefetchBuffer* buf);

   protected:
    const InternalKeyComparator* icomparator_;

   private:
    Statistics* statistics_;
  };
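
  // Calling-convention sketch for IndexReader::NewIterator() (illustrative
  // only, not part of the original header; `index_reader` stands for any
  // concrete IndexReader implementation):
  //
  //   // No iterator supplied: a new heap-allocated iterator is returned and
  //   // the caller owns it.
  //   std::unique_ptr<InternalIterator> it(index_reader->NewIterator());
  //
  //   // Reusing a caller-owned BlockIter: the return value may be
  //   // &block_iter itself, so it must not be deleted separately in that
  //   // case.
  //   BlockIter block_iter;
  //   InternalIterator* it2 = index_reader->NewIterator(&block_iter);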

  static Slice GetCacheKey(const char* cache_key_prefix,
                           size_t cache_key_prefix_size,
                           const BlockHandle& handle, char* cache_key);
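
  // Buffer-sizing sketch (illustrative, not part of the original header;
  // `prefix`, `prefix_size`, and `handle` are assumed to be available). The
  // constants above suggest the full key is the prefix plus one varint64
  // identifying the block:
  //
  //   char cache_key_buf[BlockBasedTable::kMaxCacheKeyPrefixSize +
  //                      kMaxVarint64Length];
  //   Slice key = BlockBasedTable::GetCacheKey(prefix, prefix_size, handle,
  //                                            cache_key_buf);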

  // Retrieve all key value pairs from data blocks in the table.
  // The keys retrieved are internal keys.
  Status GetKVPairsFromDataBlocks(std::vector<KVPairBlock>* kv_pair_blocks);

  template <class TValue>
  struct CachableEntry;
  struct Rep;

  Rep* get_rep() { return rep_; }

  // input_iter: if it is not null, update this one and return it as Iterator
  static BlockIter* NewDataBlockIterator(Rep* rep, const ReadOptions& ro,
                                         const Slice& index_value,
                                         BlockIter* input_iter = nullptr,
                                         bool is_index = false,
                                         GetContext* get_context = nullptr);
  static BlockIter* NewDataBlockIterator(Rep* rep, const ReadOptions& ro,
                                         const BlockHandle& block_handle,
                                         BlockIter* input_iter = nullptr,
                                         bool is_index = false,
                                         GetContext* get_context = nullptr,
                                         Status s = Status());

  class PartitionedIndexIteratorState;

  friend class PartitionIndexReader;

 protected:
  Rep* rep_;
  explicit BlockBasedTable(Rep* rep) : rep_(rep) {}

 private:
  friend class MockedBlockBasedTable;
  static std::atomic<uint64_t> next_cache_key_id_;

  // If block cache enabled (compressed or uncompressed), looks for the block
  // identified by handle in (1) uncompressed cache, (2) compressed cache, and
  // then (3) file. If found, inserts into the cache(s) that were searched
  // unsuccessfully (e.g., if found in file, will add to both uncompressed and
  // compressed caches if they're enabled).
  //
  // @param block_entry value is set to the uncompressed block if found. If
  //    in uncompressed block cache, also sets cache_handle to reference that
  //    block.
  static Status MaybeLoadDataBlockToCache(FilePrefetchBuffer* prefetch_buffer,
                                          Rep* rep, const ReadOptions& ro,
                                          const BlockHandle& handle,
                                          Slice compression_dict,
                                          CachableEntry<Block>* block_entry,
                                          bool is_index = false,
                                          GetContext* get_context = nullptr);

  // For the following two functions:
  // if `no_io == true`, we will not try to read the filter/index from the sst
  // file if they are not already present in cache.
  CachableEntry<FilterBlockReader> GetFilter(
      FilePrefetchBuffer* prefetch_buffer = nullptr, bool no_io = false,
      GetContext* get_context = nullptr) const;
  virtual CachableEntry<FilterBlockReader> GetFilter(
      FilePrefetchBuffer* prefetch_buffer, const BlockHandle& filter_blk_handle,
      const bool is_a_filter_partition, bool no_io,
      GetContext* get_context) const;

  // Get the iterator from the index reader.
  // If input_iter is not set, return new Iterator
  // If input_iter is set, update it and return it as Iterator
  //
  // Note: ErrorIterator with Status::Incomplete shall be returned if all the
  // following conditions are met:
  //  1. We enabled table_options.cache_index_and_filter_blocks.
  //  2. index is not present in block cache.
  //  3. We disallowed any io to be performed, that is, read_options ==
  //     kBlockCacheTier
  InternalIterator* NewIndexIterator(
      const ReadOptions& read_options, BlockIter* input_iter = nullptr,
      CachableEntry<IndexReader>* index_entry = nullptr,
      GetContext* get_context = nullptr);
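
  // Behavior sketch for the kBlockCacheTier case described above
  // (illustrative only, not part of the original header):
  //
  //   ReadOptions ro;
  //   ro.read_tier = kBlockCacheTier;  // forbid I/O; serve from cache only
  //   std::unique_ptr<InternalIterator> it(NewIndexIterator(ro));
  //   // If cache_index_and_filter_blocks is enabled and the index block is
  //   // not already in the block cache, it->status() is Status::Incomplete().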

  // Read the block from the block caches, if they are set: block_cache and
  // block_cache_compressed.
  // On success, Status::OK will be returned and @block will be populated with
  // a pointer to the block as well as its block handle.
  // @param compression_dict Data for presetting the compression library's
  //    dictionary.
  static Status GetDataBlockFromCache(
      const Slice& block_cache_key, const Slice& compressed_block_cache_key,
      Cache* block_cache, Cache* block_cache_compressed,
      const ImmutableCFOptions& ioptions, const ReadOptions& read_options,
      BlockBasedTable::CachableEntry<Block>* block, uint32_t format_version,
      const Slice& compression_dict, size_t read_amp_bytes_per_bit,
      bool is_index = false, GetContext* get_context = nullptr);

  // Put a raw block (maybe compressed) into the corresponding block caches.
  // This method will perform decompression against raw_block if needed and
  // then populate the block caches.
  // On success, Status::OK will be returned; also @block will be populated
  // with the uncompressed block and its cache handle.
  //
  // REQUIRES: raw_block is heap-allocated. PutDataBlockToCache() will be
  // responsible for releasing its memory if an error occurs.
  // @param compression_dict Data for presetting the compression library's
  //    dictionary.
  static Status PutDataBlockToCache(
      const Slice& block_cache_key, const Slice& compressed_block_cache_key,
      Cache* block_cache, Cache* block_cache_compressed,
      const ReadOptions& read_options, const ImmutableCFOptions& ioptions,
      CachableEntry<Block>* block, Block* raw_block, uint32_t format_version,
      const Slice& compression_dict, size_t read_amp_bytes_per_bit,
      bool is_index = false, Cache::Priority pri = Cache::Priority::LOW,
      GetContext* get_context = nullptr);

  // Calls (*handle_result)(arg, ...) repeatedly, starting with the entry found
  // after a call to Seek(key), until handle_result returns false.
  // May not make such a call if filter policy says that key is not present.
  friend class TableCache;
  friend class BlockBasedTableBuilder;

  void ReadMeta(const Footer& footer);

  // Create an index reader based on the index type stored in the table.
  // Optionally, user can pass a preloaded meta_index_iter for index types
  // that need to access extra meta blocks for index construction. This
  // parameter helps avoid re-reading the meta index block if the caller
  // already created one.
  Status CreateIndexReader(
      FilePrefetchBuffer* prefetch_buffer, IndexReader** index_reader,
      InternalIterator* preloaded_meta_index_iter = nullptr,
      const int level = -1);

  bool FullFilterKeyMayMatch(const ReadOptions& read_options,
                             FilterBlockReader* filter, const Slice& user_key,
                             const bool no_io) const;

  // Read the meta block from sst.
  static Status ReadMetaBlock(Rep* rep, FilePrefetchBuffer* prefetch_buffer,
                              std::unique_ptr<Block>* meta_block,
                              std::unique_ptr<InternalIterator>* iter);

  Status VerifyChecksumInBlocks(InternalIterator* index_iter);

  // Create the filter from the filter block.
  FilterBlockReader* ReadFilter(FilePrefetchBuffer* prefetch_buffer,
                                const BlockHandle& filter_handle,
                                const bool is_a_filter_partition) const;

  static void SetupCacheKeyPrefix(Rep* rep, uint64_t file_size);

  // Generate a cache key prefix from the file
  static void GenerateCachePrefix(Cache* cc,
      RandomAccessFile* file, char* buffer, size_t* size);
  static void GenerateCachePrefix(Cache* cc,
      WritableFile* file, char* buffer, size_t* size);

  // Helper functions for DumpTable()
  Status DumpIndexBlock(WritableFile* out_file);
  Status DumpDataBlocks(WritableFile* out_file);
  void DumpKeyValue(const Slice& key, const Slice& value,
                    WritableFile* out_file);

  // No copying allowed
  explicit BlockBasedTable(const TableReader&) = delete;
  void operator=(const TableReader&) = delete;

  friend class PartitionedFilterBlockReader;
  friend class PartitionedFilterBlockTest;
};

// Maintains state of a two-level iteration on a partitioned index structure.
class BlockBasedTable::PartitionedIndexIteratorState
    : public TwoLevelIteratorState {
 public:
  PartitionedIndexIteratorState(
      BlockBasedTable* table,
      std::unordered_map<uint64_t, CachableEntry<Block>>* block_map = nullptr);
  InternalIterator* NewSecondaryIterator(const Slice& index_value) override;

 private:
  // Don't own table_
  BlockBasedTable* table_;
  std::unordered_map<uint64_t, CachableEntry<Block>>* block_map_;
};

// CachableEntry represents the entries that *may* be fetched from block cache.
//  field `value` is the item we want to get.
//  field `cache_handle` is the cache handle to the block cache. If the value
//    was not read from cache, `cache_handle` will be nullptr.
template <class TValue>
struct BlockBasedTable::CachableEntry {
  CachableEntry(TValue* _value, Cache::Handle* _cache_handle)
      : value(_value), cache_handle(_cache_handle) {}
  CachableEntry() : CachableEntry(nullptr, nullptr) {}
  void Release(Cache* cache, bool force_erase = false) {
    if (cache_handle) {
      cache->Release(cache_handle, force_erase);
      value = nullptr;
      cache_handle = nullptr;
    }
  }
  bool IsSet() const { return cache_handle != nullptr; }

  TValue* value = nullptr;
  // if the entry is from the cache, cache_handle will be populated.
  Cache::Handle* cache_handle = nullptr;
};
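
// Usage sketch for CachableEntry (illustrative, not part of the original
// header; `block_cache` stands for the table's configured block cache):
//
//   BlockBasedTable::CachableEntry<Block> entry;
//   // ... entry is filled in by a cache lookup or a block read ...
//   if (entry.IsSet()) {
//     // Drop this reference; pass force_erase = true to also erase the
//     // block from the cache once it is no longer referenced.
//     entry.Release(block_cache);
//   }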

struct BlockBasedTable::Rep {
  Rep(const ImmutableCFOptions& _ioptions, const EnvOptions& _env_options,
      const BlockBasedTableOptions& _table_opt,
      const InternalKeyComparator& _internal_comparator, bool skip_filters)
      : ioptions(_ioptions),
        env_options(_env_options),
        table_options(_table_opt),
        filter_policy(skip_filters ? nullptr : _table_opt.filter_policy.get()),
        internal_comparator(_internal_comparator),
        filter_type(FilterType::kNoFilter),
        index_type(BlockBasedTableOptions::IndexType::kBinarySearch),
        hash_index_allow_collision(false),
        whole_key_filtering(_table_opt.whole_key_filtering),
        prefix_filtering(true),
        range_del_handle(BlockHandle::NullBlockHandle()),
        global_seqno(kDisableGlobalSequenceNumber) {}

  const ImmutableCFOptions& ioptions;
  const EnvOptions& env_options;
  const BlockBasedTableOptions& table_options;
  const FilterPolicy* const filter_policy;
  const InternalKeyComparator& internal_comparator;
  Status status;
  unique_ptr<RandomAccessFileReader> file;
  char cache_key_prefix[kMaxCacheKeyPrefixSize];
  size_t cache_key_prefix_size = 0;
  char persistent_cache_key_prefix[kMaxCacheKeyPrefixSize];
  size_t persistent_cache_key_prefix_size = 0;
  char compressed_cache_key_prefix[kMaxCacheKeyPrefixSize];
  size_t compressed_cache_key_prefix_size = 0;
  uint64_t dummy_index_reader_offset =
      0;  // ID that is unique for the block cache.
  PersistentCacheOptions persistent_cache_options;

  // Footer contains the fixed table information
  Footer footer;
  // index_reader and filter will be populated and used only when
  // options.block_cache is nullptr; otherwise we will get the index block via
  // the block cache.
  unique_ptr<IndexReader> index_reader;
  unique_ptr<FilterBlockReader> filter;

  enum class FilterType {
    kNoFilter,
    kFullFilter,
    kBlockFilter,
    kPartitionedFilter,
  };
  FilterType filter_type;
  BlockHandle filter_handle;

  std::shared_ptr<const TableProperties> table_properties;
  // Block containing the data for the compression dictionary. We take
  // ownership of the entire block struct, even though we only use its Slice
  // member. This is easier because the Slice member depends on the continued
  // existence of another member ("allocation").
  std::unique_ptr<const BlockContents> compression_dict_block;
  BlockBasedTableOptions::IndexType index_type;
  bool hash_index_allow_collision;
  bool whole_key_filtering;
  bool prefix_filtering;
  // TODO(kailiu) It is very ugly to use internal key in table, since table
  // module should not be relying on db module. However to make things easier
  // and compatible with existing code, we introduce a wrapper that allows
  // block to extract prefix without knowing if a key is internal or not.
  unique_ptr<SliceTransform> internal_prefix_transform;

  // Only used in level-0 files:
  // when pin_l0_filter_and_index_blocks_in_cache is true, we still use the
  // LRU cache, but we always keep the filter & index block's handle checked
  // out here (i.e. we don't call Release()), plus the parsed-out objects.
  // The LRU cache will therefore never push them out; they are pinned.
  CachableEntry<FilterBlockReader> filter_entry;
  CachableEntry<IndexReader> index_entry;
  // range deletion meta-block is pinned through reader's lifetime when LRU
  // cache is enabled.
  CachableEntry<Block> range_del_entry;
  BlockHandle range_del_handle;

  // If global_seqno is used, all keys in this file will have the same
  // seqno with value `global_seqno`.
  //
  // A value of kDisableGlobalSequenceNumber means that this feature is
  // disabled and every key has its own seqno.
  SequenceNumber global_seqno;

  // If false, blocks in this file are definitely all uncompressed. Knowing
  // this before reading individual blocks enables certain optimizations.
  bool blocks_maybe_compressed = true;

  bool closed = false;
};

class BlockBasedTableIterator : public InternalIterator {
 public:
  BlockBasedTableIterator(BlockBasedTable* table,
                          const ReadOptions& read_options,
                          const InternalKeyComparator& icomp,
                          InternalIterator* index_iter, bool check_filter)
      : table_(table),
        read_options_(read_options),
        icomp_(icomp),
        index_iter_(index_iter),
        pinned_iters_mgr_(nullptr),
        block_iter_points_to_real_block_(false),
        check_filter_(check_filter) {}

  ~BlockBasedTableIterator() { delete index_iter_; }

  void Seek(const Slice& target) override;
  void SeekForPrev(const Slice& target) override;
  void SeekToFirst() override;
  void SeekToLast() override;
  void Next() override;
  void Prev() override;
  bool Valid() const override {
    return block_iter_points_to_real_block_ && data_block_iter_.Valid();
  }
  Slice key() const override {
    assert(Valid());
    return data_block_iter_.key();
  }
  Slice value() const override {
    assert(Valid());
    return data_block_iter_.value();
  }
  Status status() const override {
    if (!index_iter_->status().ok()) {
      return index_iter_->status();
    } else if (block_iter_points_to_real_block_) {
      return data_block_iter_.status();
    } else {
      return Status::OK();
    }
  }

  bool IsOutOfBound() override { return is_out_of_bound_; }

  void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) override {
    pinned_iters_mgr_ = pinned_iters_mgr;
  }
  bool IsKeyPinned() const override {
    return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
           block_iter_points_to_real_block_ && data_block_iter_.IsKeyPinned();
  }
  bool IsValuePinned() const override {
    // BlockIter::IsValuePinned() is always true. No need to check it here.
    return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
           block_iter_points_to_real_block_;
  }

  bool CheckPrefixMayMatch(const Slice& ikey) {
    if (check_filter_ && !table_->PrefixMayMatch(ikey)) {
      // TODO: remember that the iterator was invalidated because of the
      // prefix check. This can prevent the upper-level file iterator from
      // falsely believing the position is the end of the SST file and moving
      // to the first key of the next file.
      ResetDataIter();
      return false;
    }
    return true;
  }

  void ResetDataIter() {
    if (block_iter_points_to_real_block_) {
      if (pinned_iters_mgr_ != nullptr && pinned_iters_mgr_->PinningEnabled()) {
        data_block_iter_.DelegateCleanupsTo(pinned_iters_mgr_);
      }
      data_block_iter_.Invalidate(Status::OK());
      block_iter_points_to_real_block_ = false;
    }
  }

  void SavePrevIndexValue() {
    if (block_iter_points_to_real_block_) {
      // Reseek. If they end up with the same data block, we shouldn't re-fetch
      // the same data block.
      Slice v = index_iter_->value();
      prev_index_value_.assign(v.data(), v.size());
    }
  }

  void InitDataBlock();
  void FindKeyForward();
  void FindKeyBackward();

 private:
  BlockBasedTable* table_;
  const ReadOptions read_options_;
  const InternalKeyComparator& icomp_;
  InternalIterator* index_iter_;
  PinnedIteratorsManager* pinned_iters_mgr_;
  BlockIter data_block_iter_;
  bool block_iter_points_to_real_block_;
  bool is_out_of_bound_ = false;
  bool check_filter_;
  // TODO use block offset instead
  std::string prev_index_value_;

  static const size_t kInitReadaheadSize = 8 * 1024;
  // Found that 256 KB readahead size provides the best performance, based on
  // experiments.
  static const size_t kMaxReadaheadSize;
  size_t readahead_size_ = kInitReadaheadSize;
  size_t readahead_limit_ = 0;
  int num_file_reads_ = 0;
};

}  // namespace rocksdb