// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef ROCKSDB_LITE

#include "table/plain/plain_table_reader.h"

#include <string>
#include <vector>

#include "db/dbformat.h"

#include "rocksdb/cache.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

#include "table/block_based/block.h"
#include "table/block_based/filter_block.h"
#include "table/format.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/meta_blocks.h"
#include "table/plain/plain_table_bloom.h"
#include "table/plain/plain_table_factory.h"
#include "table/plain/plain_table_key_coding.h"
#include "table/two_level_iterator.h"

#include "memory/arena.h"
#include "monitoring/histogram.h"
#include "monitoring/perf_context_imp.h"
#include "util/coding.h"
#include "util/dynamic_bloom.h"
#include "util/hash.h"
#include "util/stop_watch.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

namespace {

// Safely getting a uint32_t element from a char array, where, starting from
// `base`, every 4 bytes are considered as a fixed 32-bit integer.
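// For example (illustrative, not a call made in this file),
// GetFixed32Element(base, 2) decodes the four bytes at base + 8 as a
// little-endian uint32_t.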
inline uint32_t GetFixed32Element(const char* base, size_t offset) {
  return DecodeFixed32(base + offset * sizeof(uint32_t));
}
}  // namespace

// Iterator to iterate IndexedTable
class PlainTableIterator : public InternalIterator {
 public:
  explicit PlainTableIterator(PlainTableReader* table, bool use_prefix_seek);
  // No copying allowed
  PlainTableIterator(const PlainTableIterator&) = delete;
  void operator=(const PlainTableIterator&) = delete;

  ~PlainTableIterator() override;

  bool Valid() const override;

  void SeekToFirst() override;

  void SeekToLast() override;

  void Seek(const Slice& target) override;

  void SeekForPrev(const Slice& target) override;

  void Next() override;

  void Prev() override;

  Slice key() const override;

  Slice value() const override;

  Status status() const override;

 private:
  PlainTableReader* table_;
  PlainTableKeyDecoder decoder_;
  bool use_prefix_seek_;
  uint32_t offset_;
  uint32_t next_offset_;
  Slice key_;
  Slice value_;
  Status status_;
};

extern const uint64_t kPlainTableMagicNumber;

PlainTableReader::PlainTableReader(
    const ImmutableCFOptions& ioptions,
    std::unique_ptr<RandomAccessFileReader>&& file,
    const EnvOptions& storage_options, const InternalKeyComparator& icomparator,
    EncodingType encoding_type, uint64_t file_size,
    const TableProperties* table_properties,
    const SliceTransform* prefix_extractor)
    : internal_comparator_(icomparator),
      encoding_type_(encoding_type),
      full_scan_mode_(false),
      user_key_len_(static_cast<uint32_t>(table_properties->fixed_key_len)),
      prefix_extractor_(prefix_extractor),
      enable_bloom_(false),
      bloom_(6),
      file_info_(std::move(file), storage_options,
                 static_cast<uint32_t>(table_properties->data_size)),
      ioptions_(ioptions),
      file_size_(file_size),
      table_properties_(nullptr) {}

PlainTableReader::~PlainTableReader() {
}

Status PlainTableReader::Open(
    const ImmutableCFOptions& ioptions, const EnvOptions& env_options,
    const InternalKeyComparator& internal_comparator,
    std::unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
    std::unique_ptr<TableReader>* table_reader, const int bloom_bits_per_key,
    double hash_table_ratio, size_t index_sparseness, size_t huge_page_tlb_size,
    bool full_scan_mode, const bool immortal_table,
    const SliceTransform* prefix_extractor) {
  if (file_size > PlainTableIndex::kMaxFileSize) {
    return Status::NotSupported("File is too large for PlainTableReader!");
  }

  TableProperties* props_ptr = nullptr;
  auto s = ReadTableProperties(file.get(), file_size, kPlainTableMagicNumber,
                               ioptions, &props_ptr,
                               true /* compression_type_missing */);
  std::shared_ptr<TableProperties> props(props_ptr);
  if (!s.ok()) {
    return s;
  }

  assert(hash_table_ratio >= 0.0);
  auto& user_props = props->user_collected_properties;
  auto prefix_extractor_in_file = props->prefix_extractor_name;

  if (!full_scan_mode &&
      !prefix_extractor_in_file.empty() /* old version sst file*/
      && prefix_extractor_in_file != "nullptr") {
    if (!prefix_extractor) {
      return Status::InvalidArgument(
          "Prefix extractor is missing when opening a PlainTable built "
          "using a prefix extractor");
    } else if (prefix_extractor_in_file.compare(prefix_extractor->Name()) !=
               0) {
      return Status::InvalidArgument(
          "Prefix extractor given doesn't match the one used to build "
          "PlainTable");
    }
  }

  EncodingType encoding_type = kPlain;
  auto encoding_type_prop =
      user_props.find(PlainTablePropertyNames::kEncodingType);
  if (encoding_type_prop != user_props.end()) {
    encoding_type = static_cast<EncodingType>(
        DecodeFixed32(encoding_type_prop->second.c_str()));
  }

  std::unique_ptr<PlainTableReader> new_reader(new PlainTableReader(
      ioptions, std::move(file), env_options, internal_comparator,
      encoding_type, file_size, props.get(), prefix_extractor));

  s = new_reader->MmapDataIfNeeded();
  if (!s.ok()) {
    return s;
  }

  if (!full_scan_mode) {
    s = new_reader->PopulateIndex(props.get(), bloom_bits_per_key,
                                  hash_table_ratio, index_sparseness,
                                  huge_page_tlb_size);
    if (!s.ok()) {
      return s;
    }
  } else {
    // Flag to indicate it is in full scan mode so that none of the indexes
    // can be used.
    new_reader->full_scan_mode_ = true;
  }
  // PopulateIndex can add to the props, so don't store them until now
  new_reader->table_properties_ = props;

  if (immortal_table && new_reader->file_info_.is_mmap_mode) {
    new_reader->dummy_cleanable_.reset(new Cleanable());
  }

  *table_reader = std::move(new_reader);
  return s;
}
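
// Usage sketch (illustrative only, not part of this file): PlainTableReader
// is normally created through PlainTableFactory rather than by calling
// Open() directly. A minimal setup, with assumed option values, might be:
//
//   Options options;
//   options.prefix_extractor.reset(NewFixedPrefixTransform(8));
//   options.allow_mmap_reads = true;  // PlainTable is designed for mmap
//   PlainTableOptions plain_table_options;
//   plain_table_options.bloom_bits_per_key = 10;
//   plain_table_options.hash_table_ratio = 0.75;
//   options.table_factory.reset(NewPlainTableFactory(plain_table_options));
//   DB* db = nullptr;
//   Status s = DB::Open(options, "/tmp/plain_table_example", &db);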

void PlainTableReader::SetupForCompaction() {
}

InternalIterator* PlainTableReader::NewIterator(
    const ReadOptions& options, const SliceTransform* /* prefix_extractor */,
    Arena* arena, bool /*skip_filters*/, TableReaderCaller /*caller*/,
    size_t /*compaction_readahead_size*/,
    bool /* allow_unprepared_value */) {
  // Not necessarily used here, but make sure this has been initialized
  assert(table_properties_);

  // Auto prefix mode is not implemented in PlainTable.
  bool use_prefix_seek = !IsTotalOrderMode() && !options.total_order_seek &&
                         !options.auto_prefix_mode;
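  // If no arena is given, allocate the iterator on the heap; otherwise
  // placement-new it into the arena so the whole iterator tree can share
  // one allocation and be freed together.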
  if (arena == nullptr) {
    return new PlainTableIterator(this, use_prefix_seek);
  } else {
    auto mem = arena->AllocateAligned(sizeof(PlainTableIterator));
    return new (mem) PlainTableIterator(this, use_prefix_seek);
  }
}
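
// Scans every record in the file once, feeding each key's prefix and file
// offset to the index builder, and collecting one hash per distinct prefix
// (used afterwards to size and fill the bloom filter in prefix mode).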
Status PlainTableReader::PopulateIndexRecordList(
    PlainTableIndexBuilder* index_builder,
    std::vector<uint32_t>* prefix_hashes) {
  Slice prev_key_prefix_slice;
  std::string prev_key_prefix_buf;
  uint32_t pos = data_start_offset_;

  bool is_first_record = true;
  Slice key_prefix_slice;
  PlainTableKeyDecoder decoder(&file_info_, encoding_type_, user_key_len_,
                               prefix_extractor_);
  while (pos < file_info_.data_end_offset) {
    uint32_t key_offset = pos;
    ParsedInternalKey key;
    Slice value_slice;
    bool seekable = false;
    Status s = Next(&decoder, &pos, &key, nullptr, &value_slice, &seekable);
    if (!s.ok()) {
      return s;
    }

    key_prefix_slice = GetPrefix(key);
    if (enable_bloom_) {
      bloom_.AddHash(GetSliceHash(key.user_key));
    } else {
      if (is_first_record || prev_key_prefix_slice != key_prefix_slice) {
        if (!is_first_record) {
          prefix_hashes->push_back(GetSliceHash(prev_key_prefix_slice));
        }
        if (file_info_.is_mmap_mode) {
          prev_key_prefix_slice = key_prefix_slice;
        } else {
          prev_key_prefix_buf = key_prefix_slice.ToString();
          prev_key_prefix_slice = prev_key_prefix_buf;
        }
      }
    }

    index_builder->AddKeyPrefix(GetPrefix(key), key_offset);

    if (!seekable && is_first_record) {
      return Status::Corruption("Key for a prefix is not seekable");
    }

    is_first_record = false;
  }

  prefix_hashes->push_back(GetSliceHash(key_prefix_slice));
  auto s = index_.InitFromRawData(index_builder->Finish());
  return s;
}
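
// For a sense of scale (illustrative arithmetic, not code from this file):
// with 1,000,000 keys and bloom_bits_per_key = 10, AllocateBloom() reserves
// 10,000,000 bits, i.e. about 1.25 MB of filter memory.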
void PlainTableReader::AllocateBloom(int bloom_bits_per_key, int num_keys,
                                     size_t huge_page_tlb_size) {
  uint32_t bloom_total_bits = num_keys * bloom_bits_per_key;
  if (bloom_total_bits > 0) {
    enable_bloom_ = true;
    bloom_.SetTotalBits(&arena_, bloom_total_bits, ioptions_.bloom_locality,
                        huge_page_tlb_size, ioptions_.info_log);
  }
}

void PlainTableReader::FillBloom(const std::vector<uint32_t>& prefix_hashes) {
  assert(bloom_.IsInitialized());
  for (const auto prefix_hash : prefix_hashes) {
    bloom_.AddHash(prefix_hash);
  }
}

Status PlainTableReader::MmapDataIfNeeded() {
  if (file_info_.is_mmap_mode) {
    // Get mmapped memory.
    return file_info_.file->Read(IOOptions(), 0,
                                 static_cast<size_t>(file_size_),
                                 &file_info_.file_data, nullptr, nullptr);
  }
  return Status::OK();
}

Status PlainTableReader::PopulateIndex(TableProperties* props,
                                       int bloom_bits_per_key,
                                       double hash_table_ratio,
                                       size_t index_sparseness,
                                       size_t huge_page_tlb_size) {
  assert(props != nullptr);

  BlockContents index_block_contents;
  Status s = ReadMetaBlock(file_info_.file.get(), nullptr /* prefetch_buffer */,
                           file_size_, kPlainTableMagicNumber, ioptions_,
                           PlainTableIndexBuilder::kPlainTableIndexBlock,
                           BlockType::kIndex, &index_block_contents,
                           true /* compression_type_missing */);

  bool index_in_file = s.ok();

  BlockContents bloom_block_contents;
  bool bloom_in_file = false;
  // We only need to read the bloom block if index block is in file.
  if (index_in_file) {
    s = ReadMetaBlock(file_info_.file.get(), nullptr /* prefetch_buffer */,
                      file_size_, kPlainTableMagicNumber, ioptions_,
                      BloomBlockBuilder::kBloomBlock, BlockType::kFilter,
                      &bloom_block_contents,
                      true /* compression_type_missing */);
    bloom_in_file = s.ok() && bloom_block_contents.data.size() > 0;
  }

  Slice* bloom_block;
  if (bloom_in_file) {
    // If bloom_block_contents.allocation is not empty (which will be the case
    // for non-mmap mode), it holds the allocated memory for the bloom block.
    // It needs to be kept alive to keep `bloom_block` valid.
    bloom_block_alloc_ = std::move(bloom_block_contents.allocation);
    bloom_block = &bloom_block_contents.data;
  } else {
    bloom_block = nullptr;
  }

  Slice* index_block;
  if (index_in_file) {
    // If index_block_contents.allocation is not empty (which will be the case
    // for non-mmap mode), it holds the allocated memory for the index block.
    // It needs to be kept alive to keep `index_block` valid.
    index_block_alloc_ = std::move(index_block_contents.allocation);
    index_block = &index_block_contents.data;
  } else {
    index_block = nullptr;
  }

  if ((prefix_extractor_ == nullptr) && (hash_table_ratio != 0)) {
    // moptions.prefix_extractor is required for a hash-based look-up.
    return Status::NotSupported(
        "PlainTable requires a prefix extractor to enable prefix hash mode.");
  }

  // First, read the whole file: for every kIndexIntervalForSamePrefixKeys rows
  // of a prefix (starting from the first one), generate a record of (hash,
  // offset) and append it to IndexRecordList, a data structure created to
  // store them.

  if (!index_in_file) {
    // Allocate bloom filter here for total order mode.
    if (IsTotalOrderMode()) {
      AllocateBloom(bloom_bits_per_key,
                    static_cast<uint32_t>(props->num_entries),
                    huge_page_tlb_size);
    }
  } else if (bloom_in_file) {
    enable_bloom_ = true;
    auto num_blocks_property = props->user_collected_properties.find(
        PlainTablePropertyNames::kNumBloomBlocks);

    uint32_t num_blocks = 0;
    if (num_blocks_property != props->user_collected_properties.end()) {
      Slice temp_slice(num_blocks_property->second);
      if (!GetVarint32(&temp_slice, &num_blocks)) {
        num_blocks = 0;
      }
    }
    // cast away const qualifier, because bloom_ won't be changed
    bloom_.SetRawData(const_cast<char*>(bloom_block->data()),
                      static_cast<uint32_t>(bloom_block->size()) * 8,
                      num_blocks);
  } else {
    // Index in file but no bloom in file. Disable bloom filter in this case.
    enable_bloom_ = false;
    bloom_bits_per_key = 0;
  }

  PlainTableIndexBuilder index_builder(&arena_, ioptions_, prefix_extractor_,
                                       index_sparseness, hash_table_ratio,
                                       huge_page_tlb_size);

  std::vector<uint32_t> prefix_hashes;
  if (!index_in_file) {
    // Populates bloom_ if enabled (total order mode)
    s = PopulateIndexRecordList(&index_builder, &prefix_hashes);
    if (!s.ok()) {
      return s;
    }
  } else {
    s = index_.InitFromRawData(*index_block);
    if (!s.ok()) {
      return s;
    }
  }

  if (!index_in_file) {
    if (!IsTotalOrderMode()) {
      // Calculate bloom filter size and allocate memory for
      // bloom filter based on the number of prefixes, then fill it.
      AllocateBloom(bloom_bits_per_key, index_.GetNumPrefixes(),
                    huge_page_tlb_size);
      if (enable_bloom_) {
        FillBloom(prefix_hashes);
      }
    }
  }

  // Fill two table properties.
  if (!index_in_file) {
    props->user_collected_properties["plain_table_hash_table_size"] =
        ToString(index_.GetIndexSize() * PlainTableIndex::kOffsetLen);
    props->user_collected_properties["plain_table_sub_index_size"] =
        ToString(index_.GetSubIndexSize());
  } else {
    props->user_collected_properties["plain_table_hash_table_size"] =
        ToString(0);
    props->user_collected_properties["plain_table_sub_index_size"] =
        ToString(0);
  }

  return Status::OK();
}
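
// Index layout, for reference: a bucket in the hash index either points
// directly at a file offset (single prefix) or at a sub-index, an array of
// fixed 32-bit file offsets that GetOffset() binary-searches below.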
Status PlainTableReader::GetOffset(PlainTableKeyDecoder* decoder,
                                   const Slice& target, const Slice& prefix,
                                   uint32_t prefix_hash, bool& prefix_matched,
                                   uint32_t* offset) const {
  prefix_matched = false;
  uint32_t prefix_index_offset;
  auto res = index_.GetOffset(prefix_hash, &prefix_index_offset);
  if (res == PlainTableIndex::kNoPrefixForBucket) {
    *offset = file_info_.data_end_offset;
    return Status::OK();
  } else if (res == PlainTableIndex::kDirectToFile) {
    *offset = prefix_index_offset;
    return Status::OK();
  }

  // point to sub-index, need to do a binary search
  uint32_t upper_bound;
  const char* base_ptr =
      index_.GetSubIndexBasePtrAndUpperBound(prefix_index_offset, &upper_bound);
  uint32_t low = 0;
  uint32_t high = upper_bound;
  ParsedInternalKey mid_key;
  ParsedInternalKey parsed_target;
  if (!ParseInternalKey(target, &parsed_target)) {
    return Status::Corruption(Slice());
  }

  // The key is somewhere in [low, high). Do a binary search within it.
  while (high - low > 1) {
    uint32_t mid = (high + low) / 2;
    uint32_t file_offset = GetFixed32Element(base_ptr, mid);
    uint32_t tmp;
    Status s = decoder->NextKeyNoValue(file_offset, &mid_key, nullptr, &tmp);
    if (!s.ok()) {
      return s;
    }
    int cmp_result = internal_comparator_.Compare(mid_key, parsed_target);
    if (cmp_result < 0) {
      low = mid;
    } else {
      if (cmp_result == 0) {
        // Happened to have found the exact key or target is smaller than the
        // first key after base_offset.
        prefix_matched = true;
        *offset = file_offset;
        return Status::OK();
      } else {
        high = mid;
      }
    }
  }
  // Either the key at position low or the one at low+1 could share the same
  // prefix as target. We need to rule out one of them to avoid going to the
  // wrong prefix.
  ParsedInternalKey low_key;
  uint32_t tmp;
  uint32_t low_key_offset = GetFixed32Element(base_ptr, low);
  Status s = decoder->NextKeyNoValue(low_key_offset, &low_key, nullptr, &tmp);
  if (!s.ok()) {
    return s;
  }

  if (GetPrefix(low_key) == prefix) {
    prefix_matched = true;
    *offset = low_key_offset;
  } else if (low + 1 < upper_bound) {
    // There is possibly a next prefix; return it.
    prefix_matched = false;
    *offset = GetFixed32Element(base_ptr, low + 1);
  } else {
    // target is larger than a key of the last prefix in this bucket
    // but with a different prefix. Key does not exist.
    *offset = file_info_.data_end_offset;
  }
  return Status::OK();
}

bool PlainTableReader::MatchBloom(uint32_t hash) const {
  if (!enable_bloom_) {
    return true;
  }

  if (bloom_.MayContainHash(hash)) {
    PERF_COUNTER_ADD(bloom_sst_hit_count, 1);
    return true;
  } else {
    PERF_COUNTER_ADD(bloom_sst_miss_count, 1);
    return false;
  }
}

Status PlainTableReader::Next(PlainTableKeyDecoder* decoder, uint32_t* offset,
                              ParsedInternalKey* parsed_key,
                              Slice* internal_key, Slice* value,
                              bool* seekable) const {
  if (*offset == file_info_.data_end_offset) {
    *offset = file_info_.data_end_offset;
    return Status::OK();
  }

  if (*offset > file_info_.data_end_offset) {
    return Status::Corruption("Offset is out of file size");
  }

  uint32_t bytes_read;
  Status s = decoder->NextKey(*offset, parsed_key, internal_key, value,
                              &bytes_read, seekable);
  if (!s.ok()) {
    return s;
  }
  *offset = *offset + bytes_read;
  return Status::OK();
}

void PlainTableReader::Prepare(const Slice& target) {
  if (enable_bloom_) {
    uint32_t prefix_hash = GetSliceHash(GetPrefix(target));
    bloom_.Prefetch(prefix_hash);
  }
}
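
// Point lookup path: consult the bloom filter, use the index to find the
// first record of the matching prefix, then scan records forward until a
// key >= target is found or the prefix changes.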
Status PlainTableReader::Get(const ReadOptions& /*ro*/, const Slice& target,
                             GetContext* get_context,
                             const SliceTransform* /* prefix_extractor */,
                             bool /*skip_filters*/) {
  // Check bloom filter first.
  Slice prefix_slice;
  uint32_t prefix_hash;
  if (IsTotalOrderMode()) {
    if (full_scan_mode_) {
      status_ =
          Status::InvalidArgument("Get() is not allowed in full scan mode.");
    }
    // Match whole user key for bloom filter check.
    if (!MatchBloom(GetSliceHash(GetUserKey(target)))) {
      return Status::OK();
    }
    // In total order mode, there is only one bucket 0, and we always use an
    // empty prefix.
    prefix_slice = Slice();
    prefix_hash = 0;
  } else {
    prefix_slice = GetPrefix(target);
    prefix_hash = GetSliceHash(prefix_slice);
    if (!MatchBloom(prefix_hash)) {
      return Status::OK();
    }
  }
  uint32_t offset;
  bool prefix_match;
  PlainTableKeyDecoder decoder(&file_info_, encoding_type_, user_key_len_,
                               prefix_extractor_);
  Status s = GetOffset(&decoder, target, prefix_slice, prefix_hash,
                       prefix_match, &offset);

  if (!s.ok()) {
    return s;
  }
  ParsedInternalKey found_key;
  ParsedInternalKey parsed_target;
  if (!ParseInternalKey(target, &parsed_target)) {
    return Status::Corruption(Slice());
  }
  Slice found_value;
  while (offset < file_info_.data_end_offset) {
    s = Next(&decoder, &offset, &found_key, nullptr, &found_value);
    if (!s.ok()) {
      return s;
    }
    if (!prefix_match) {
      // Need to verify prefix for the first key found if it is not yet
      // checked.
      if (GetPrefix(found_key) != prefix_slice) {
        return Status::OK();
      }
      prefix_match = true;
    }
    // TODO(ljin): since we know the key comparison result here,
    // can we enable the fast path?
    if (internal_comparator_.Compare(found_key, parsed_target) >= 0) {
      bool dont_care __attribute__((__unused__));
      if (!get_context->SaveValue(found_key, found_value, &dont_care,
                                  dummy_cleanable_.get())) {
        break;
      }
    }
  }
  return Status::OK();
}

uint64_t PlainTableReader::ApproximateOffsetOf(const Slice& /*key*/,
                                               TableReaderCaller /*caller*/) {
  return 0;
}

uint64_t PlainTableReader::ApproximateSize(const Slice& /*start*/,
                                           const Slice& /*end*/,
                                           TableReaderCaller /*caller*/) {
  return 0;
}

PlainTableIterator::PlainTableIterator(PlainTableReader* table,
                                       bool use_prefix_seek)
    : table_(table),
      decoder_(&table_->file_info_, table_->encoding_type_,
               table_->user_key_len_, table_->prefix_extractor_),
      use_prefix_seek_(use_prefix_seek) {
  next_offset_ = offset_ = table_->file_info_.data_end_offset;
}

PlainTableIterator::~PlainTableIterator() {
}

bool PlainTableIterator::Valid() const {
  return offset_ < table_->file_info_.data_end_offset &&
         offset_ >= table_->data_start_offset_;
}

void PlainTableIterator::SeekToFirst() {
  status_ = Status::OK();
  next_offset_ = table_->data_start_offset_;
  if (next_offset_ >= table_->file_info_.data_end_offset) {
    next_offset_ = offset_ = table_->file_info_.data_end_offset;
  } else {
    Next();
  }
}

void PlainTableIterator::SeekToLast() {
  assert(false);
  status_ = Status::NotSupported("SeekToLast() is not supported in PlainTable");
  next_offset_ = offset_ = table_->file_info_.data_end_offset;
}
|
|
|
|
|
|
|
|
void PlainTableIterator::Seek(const Slice& target) {
|
Fix interaction between CompactionFilter::Decision::kRemoveAndSkipUnt…
Summary:
Fixes the following scenario:
1. Set prefix extractor. Enable bloom filters, with `whole_key_filtering = false`. Use compaction filter that sometimes returns `kRemoveAndSkipUntil`.
2. Do a compaction.
3. Compaction creates an iterator with `total_order_seek = false`, calls `SeekToFirst()` on it, then repeatedly calls `Next()`.
4. At some point compaction filter returns `kRemoveAndSkipUntil`.
5. Compaction calls `Seek(skip_until)` on the iterator. The key that it seeks to happens to have prefix that doesn't match the bloom filter. Since `total_order_seek = false`, iterator becomes invalid, and compaction thinks that it has reached the end. The rest of the compaction input is silently discarded.
The fix is to make compaction iterator use `total_order_seek = true`.
The implementation for PlainTable is quite awkward. I've made `kRemoveAndSkipUntil` officially incompatible with PlainTable. If you try to use them together, compaction will fail, and DB will enter read-only mode (`bg_error_`). That's not a very graceful way to communicate a misconfiguration, but the alternatives don't seem worth the implementation time and complexity. To be able to check in advance that `kRemoveAndSkipUntil` is not going to be used with PlainTable, we'd need to extend the interface of either `CompactionFilter` or `InternalIterator`. It seems unlikely that anyone will ever want to use `kRemoveAndSkipUntil` with PlainTable: PlainTable probably has very few users, and `kRemoveAndSkipUntil` has only one user so far: us (logdevice).
Closes https://github.com/facebook/rocksdb/pull/2349
Differential Revision: D5110388
Pulled By: lightmark
fbshipit-source-id: ec29101a99d9dcd97db33923b87f72bce56cc17a
2017-06-02 21:56:31 +00:00
|
|
|
if (use_prefix_seek_ != !table_->IsTotalOrderMode()) {
|
|
|
|
// This check is done here instead of NewIterator() to permit creating an
|
|
|
|
// iterator with total_order_seek = true even if we won't be able to Seek()
|
|
|
|
// it. This is needed for compaction: it creates iterator with
|
|
|
|
// total_order_seek = true but usually never does Seek() on it,
|
|
|
|
// only SeekToFirst().
|
|
|
|
status_ =
|
|
|
|
Status::InvalidArgument(
|
|
|
|
"total_order_seek not implemented for PlainTable.");
|
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-02-08 00:25:38 +00:00
|
|
|
// If the user doesn't set prefix seek option and we are not able to do a
|
|
|
|
// total Seek(). assert failure.
|
Fix interaction between CompactionFilter::Decision::kRemoveAndSkipUnt…
Summary:
Fixes the following scenario:
1. Set prefix extractor. Enable bloom filters, with `whole_key_filtering = false`. Use compaction filter that sometimes returns `kRemoveAndSkipUntil`.
2. Do a compaction.
3. Compaction creates an iterator with `total_order_seek = false`, calls `SeekToFirst()` on it, then repeatedly calls `Next()`.
4. At some point compaction filter returns `kRemoveAndSkipUntil`.
5. Compaction calls `Seek(skip_until)` on the iterator. The key that it seeks to happens to have prefix that doesn't match the bloom filter. Since `total_order_seek = false`, iterator becomes invalid, and compaction thinks that it has reached the end. The rest of the compaction input is silently discarded.
The fix is to make compaction iterator use `total_order_seek = true`.
The implementation for PlainTable is quite awkward. I've made `kRemoveAndSkipUntil` officially incompatible with PlainTable. If you try to use them together, compaction will fail, and DB will enter read-only mode (`bg_error_`). That's not a very graceful way to communicate a misconfiguration, but the alternatives don't seem worth the implementation time and complexity. To be able to check in advance that `kRemoveAndSkipUntil` is not going to be used with PlainTable, we'd need to extend the interface of either `CompactionFilter` or `InternalIterator`. It seems unlikely that anyone will ever want to use `kRemoveAndSkipUntil` with PlainTable: PlainTable probably has very few users, and `kRemoveAndSkipUntil` has only one user so far: us (logdevice).
Closes https://github.com/facebook/rocksdb/pull/2349
Differential Revision: D5110388
Pulled By: lightmark
fbshipit-source-id: ec29101a99d9dcd97db33923b87f72bce56cc17a
2017-06-02 21:56:31 +00:00
|
|
|
if (table_->IsTotalOrderMode()) {
|
2014-07-18 23:58:13 +00:00
|
|
|
if (table_->full_scan_mode_) {
|
2014-06-18 23:36:48 +00:00
|
|
|
status_ =
|
|
|
|
Status::InvalidArgument("Seek() is not allowed in full scan mode.");
|
2015-09-16 23:57:43 +00:00
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
2014-06-18 23:36:48 +00:00
|
|
|
return;
|
2014-07-18 23:58:13 +00:00
|
|
|
} else if (table_->GetIndexSize() > 1) {
|
2014-06-18 23:36:48 +00:00
|
|
|
assert(false);
|
|
|
|
status_ = Status::NotSupported(
|
|
|
|
"PlainTable cannot issue non-prefix seek unless in total order "
|
|
|
|
"mode.");
|
2015-09-16 23:57:43 +00:00
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
2014-06-18 23:36:48 +00:00
|
|
|
return;
|
|
|
|
}
|
2014-02-08 00:25:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Slice prefix_slice = table_->GetPrefix(target);
|
2014-04-01 22:00:48 +00:00
|
|
|
uint32_t prefix_hash = 0;
|
|
|
|
// Bloom filter is ignored in total-order mode.
|
|
|
|
if (!table_->IsTotalOrderMode()) {
|
2014-02-08 00:25:38 +00:00
|
|
|
prefix_hash = GetSliceHash(prefix_slice);
|
2014-04-01 22:00:48 +00:00
|
|
|
if (!table_->MatchBloom(prefix_hash)) {
|
Change and clarify the relationship between Valid(), status() and Seek*() for all iterators. Also fix some bugs
Summary:
Before this PR, Iterator/InternalIterator may simultaneously have non-ok status() and Valid() = true. That state means that the last operation failed, but the iterator is nevertheless positioned on some unspecified record. Likely intended uses of that are:
* If some sst files are corrupted, a normal iterator can be used to read the data from files that are not corrupted.
* When using read_tier = kBlockCacheTier, read the data that's in block cache, skipping over the data that is not.
However, this behavior wasn't documented well (and until recently the wiki on github had misleading incorrect information). In the code there's a lot of confusion about the relationship between status() and Valid(), and about whether Seek()/SeekToLast()/etc reset the status or not. There were a number of bugs caused by this confusion, both inside rocksdb and in the code that uses rocksdb (including ours).
This PR changes the convention to:
* If status() is not ok, Valid() always returns false.
* Any seek operation resets status. (Before the PR, it depended on iterator type and on particular error.)
This does sacrifice the two use cases listed above, but siying said it's ok.
Overview of the changes:
* A commit that adds missing status checks in MergingIterator. This fixes a bug that actually affects us, and we need it fixed. `DBIteratorTest.NonBlockingIterationBugRepro` explains the scenario.
* Changes to lots of iterator types to make all of them conform to the new convention. Some bug fixes along the way. By far the biggest changes are in DBIter, which is a big messy piece of code; I tried to make it less big and messy but mostly failed.
* A stress-test for DBIter, to gain some confidence that I didn't break it. It does a few million random operations on the iterator, while occasionally modifying the underlying data (like ForwardIterator does) and occasionally returning non-ok status from internal iterator.
To find the iterator types that needed changes I searched for "public .*Iterator" in the code. Here's an overview of all 27 iterator types:
Iterators that didn't need changes:
* status() is always ok(), or Valid() is always false: MemTableIterator, ModelIter, TestIterator, KVIter (2 classes with this name in anonymous namespaces), LoggingForwardVectorIterator, VectorIterator, MockTableIterator, EmptyIterator, EmptyInternalIterator.
* Thin wrappers that always pass through Valid() and status(): ArenaWrappedDBIter, TtlIterator, InternalIteratorFromIterator.
Iterators with changes (see inline comments for details):
* DBIter - an overhaul:
- It used to silently skip corrupted keys (`FindParseableKey()`), which seems dangerous. This PR makes it just stop immediately after encountering a corrupted key, just like it would for other kinds of corruption. Let me know if there was actually some deeper meaning in this behavior and I should put it back.
- It had a few code paths silently discarding subiterator's status. The stress test caught a few.
- The backwards iteration code path was expecting the internal iterator's set of keys to be immutable. It's probably always true in practice at the moment, since ForwardIterator doesn't support backwards iteration, but this PR fixes it anyway. See added DBIteratorTest.ReverseToForwardBug for an example.
- Some parts of the backwards iteration code path even did things like `assert(iter_->Valid())` after a seek, which is never a safe assumption.
- It used to not reset status on seek for some types of errors.
- Some simplifications and better comments.
- Some things got more complicated from the added error handling. I'm open to ideas for how to make it nicer.
* MergingIterator - check status after every operation on every subiterator, and in some places assert that valid subiterators have ok status.
* ForwardIterator - changed to the new convention, also slightly simplified.
* ForwardLevelIterator - fixed some bugs and simplified.
* LevelIterator - simplified.
* TwoLevelIterator - changed to the new convention. Also fixed a bug that would make SeekForPrev() sometimes silently ignore errors from first_level_iter_.
* BlockBasedTableIterator - minor changes.
* BlockIter - replaced `SetStatus()` with `Invalidate()` to make sure non-ok BlockIter is always invalid.
* PlainTableIterator - some seeks used to not reset status.
* CuckooTableIterator - tiny code cleanup.
* ManagedIterator - fixed some bugs.
* BaseDeltaIterator - changed to the new convention and fixed a bug.
* BlobDBIterator - seeks used to not reset status.
* KeyConvertingIterator - some small change.
Closes https://github.com/facebook/rocksdb/pull/3810
Differential Revision: D7888019
Pulled By: al13n321
fbshipit-source-id: 4aaf6d3421c545d16722a815b2fa2e7912bc851d
2018-05-17 09:44:14 +00:00
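As a usage sketch of the new convention (the helper function and `db` handle are assumptions for illustration, not part of this patch): a forward scan needs only a single status check once `Valid()` turns false, because a non-ok status now implies an invalid iterator.

#include <memory>
#include "rocksdb/db.h"
#include "rocksdb/iterator.h"

// Minimal sketch, assuming `db` is an open ROCKSDB_NAMESPACE::DB*.
void ScanAll(ROCKSDB_NAMESPACE::DB* db) {
  std::unique_ptr<ROCKSDB_NAMESPACE::Iterator> it(
      db->NewIterator(ROCKSDB_NAMESPACE::ReadOptions()));
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    // Use it->key() / it->value(); Valid() == true now implies status().ok().
  }
  if (!it->status().ok()) {
    // The scan hit an error. Under the new convention the iterator is
    // guaranteed to be invalid here instead of pointing at some record.
  }
}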
|
|
|
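// The bloom filter says no key with this prefix exists. Park the iterator
// past the end with an OK status: this is a clean not-found, not an error.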
status_ = Status::OK();
|
2015-09-16 23:57:43 +00:00
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
2014-04-01 22:00:48 +00:00
|
|
|
return;
|
|
|
|
}
|
2013-11-21 23:13:45 +00:00
|
|
|
}
|
2013-11-21 19:11:02 +00:00
|
|
|
bool prefix_match;
|
2015-11-18 02:29:40 +00:00
|
|
|
status_ = table_->GetOffset(&decoder_, target, prefix_slice, prefix_hash,
|
|
|
|
prefix_match, &next_offset_);
|
2013-12-20 17:35:24 +00:00
|
|
|
if (!status_.ok()) {
|
2015-09-16 23:57:43 +00:00
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
2013-12-20 17:35:24 +00:00
|
|
|
return;
|
|
|
|
}
|
2013-11-21 19:11:02 +00:00
|
|
|
|
2015-09-16 23:57:43 +00:00
|
|
|
if (next_offset_ < table_->file_info_.data_end_offset) {
|
2013-11-21 19:11:02 +00:00
|
|
|
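// GetOffset() may position us before the target within the same prefix
// bucket, so scan forward linearly until the first key >= target.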
for (Next(); status_.ok() && Valid(); Next()) {
|
|
|
|
if (!prefix_match) {
|
|
|
|
// Need to verify the first key's prefix
|
2013-12-20 17:35:24 +00:00
|
|
|
if (table_->GetPrefix(key()) != prefix_slice) {
|
2015-09-16 23:57:43 +00:00
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
2013-11-21 19:11:02 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
prefix_match = true;
|
|
|
|
}
|
2014-01-27 21:53:22 +00:00
|
|
|
if (table_->internal_comparator_.Compare(key(), target) >= 0) {
|
2013-11-21 19:11:02 +00:00
|
|
|
break;
|
|
|
|
}
|
2013-10-29 03:34:02 +00:00
|
|
|
}
|
2013-11-21 19:11:02 +00:00
|
|
|
} else {
|
2015-09-16 23:57:43 +00:00
|
|
|
offset_ = table_->file_info_.data_end_offset;
|
2013-10-29 03:34:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-05 21:08:17 +00:00
|
|
|
void PlainTableIterator::SeekForPrev(const Slice& /*target*/) {
|
2016-09-28 01:20:57 +00:00
|
|
|
assert(false);
|
|
|
|
status_ =
|
|
|
|
Status::NotSupported("SeekForPrev() is not supported in PlainTable");
|
2018-05-17 09:44:14 +00:00
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
2016-09-28 01:20:57 +00:00
|
|
|
}
|
|
|
|
|
2013-10-29 03:34:02 +00:00
|
|
|
void PlainTableIterator::Next() {
|
|
|
|
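// Move to the entry at next_offset_ and decode it. On a decode error, park
// the iterator past the end so that Valid() returns false.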
offset_ = next_offset_;
|
2015-09-16 23:57:43 +00:00
|
|
|
if (offset_ < table_->file_info_.data_end_offset) {
|
2014-01-27 21:53:22 +00:00
|
|
|
Slice tmp_slice;
|
|
|
|
ParsedInternalKey parsed_key;
|
2014-06-18 23:36:48 +00:00
|
|
|
status_ =
|
|
|
|
table_->Next(&decoder_, &next_offset_, &parsed_key, &key_, &value_);
|
|
|
|
if (!status_.ok()) {
|
2015-09-16 23:57:43 +00:00
|
|
|
offset_ = next_offset_ = table_->file_info_.data_end_offset;
|
2014-01-27 21:53:22 +00:00
|
|
|
}
|
|
|
|
}
|
2013-10-29 03:34:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void PlainTableIterator::Prev() {
|
|
|
|
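// PlainTable only supports forward iteration; calling Prev() is a bug in
// the caller.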
assert(false);
|
|
|
|
}
|
|
|
|
|
|
|
|
Slice PlainTableIterator::key() const {
|
2014-01-27 21:53:22 +00:00
|
|
|
assert(Valid());
|
2014-06-18 23:36:48 +00:00
|
|
|
return key_;
|
2013-10-29 03:34:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Slice PlainTableIterator::value() const {
|
2014-01-27 21:53:22 +00:00
|
|
|
assert(Valid());
|
2013-10-29 03:34:02 +00:00
|
|
|
return value_;
|
|
|
|
}
|
|
|
|
|
|
|
|
Status PlainTableIterator::status() const {
|
|
|
|
return status_;
|
|
|
|
}
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2014-04-15 20:39:26 +00:00
|
|
|
#endif // ROCKSDB_LITE
|