// Copyright (c) 2013, Facebook, Inc.  All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/memtable.h"

#include <memory>
#include <algorithm>
#include <limits>

#include "db/dbformat.h"
#include "db/merge_context.h"
#include "db/writebuffer.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/iterator.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/slice_transform.h"
#include "table/internal_iterator.h"
#include "table/merger.h"
#include "util/arena.h"
#include "util/coding.h"
#include "util/murmurhash.h"
#include "util/mutexlock.h"
#include "util/perf_context_imp.h"
#include "util/statistics.h"
#include "util/stop_watch.h"

namespace rocksdb {

MemTableOptions::MemTableOptions(
    const ImmutableCFOptions& ioptions,
    const MutableCFOptions& mutable_cf_options)
    : write_buffer_size(mutable_cf_options.write_buffer_size),
      arena_block_size(mutable_cf_options.arena_block_size),
      memtable_prefix_bloom_bits(mutable_cf_options.memtable_prefix_bloom_bits),
      memtable_prefix_bloom_probes(
          mutable_cf_options.memtable_prefix_bloom_probes),
      memtable_prefix_bloom_huge_page_tlb_size(
          mutable_cf_options.memtable_prefix_bloom_huge_page_tlb_size),
      inplace_update_support(ioptions.inplace_update_support),
      inplace_update_num_locks(mutable_cf_options.inplace_update_num_locks),
      inplace_callback(ioptions.inplace_callback),
      max_successive_merges(mutable_cf_options.max_successive_merges),
      filter_deletes(mutable_cf_options.filter_deletes),
      statistics(ioptions.statistics),
      merge_operator(ioptions.merge_operator),
      info_log(ioptions.info_log) {}

MemTable::MemTable(const InternalKeyComparator& cmp,
                   const ImmutableCFOptions& ioptions,
                   const MutableCFOptions& mutable_cf_options,
                   WriteBuffer* write_buffer, SequenceNumber earliest_seq)
    : comparator_(cmp),
      moptions_(ioptions, mutable_cf_options),
      refs_(0),
      kArenaBlockSize(OptimizeBlockSize(moptions_.arena_block_size)),
      arena_(moptions_.arena_block_size),
      allocator_(&arena_, write_buffer),
      table_(ioptions.memtable_factory->CreateMemTableRep(
          comparator_, &allocator_, ioptions.prefix_extractor,
          ioptions.info_log)),
      data_size_(0),
      num_entries_(0),
      num_deletes_(0),
      flush_in_progress_(false),
      flush_completed_(false),
      file_number_(0),
      first_seqno_(0),
      earliest_seqno_(earliest_seq),
      mem_next_logfile_number_(0),
      locks_(moptions_.inplace_update_support
                 ? moptions_.inplace_update_num_locks
                 : 0),
      prefix_extractor_(ioptions.prefix_extractor),
      should_flush_(ShouldFlushNow()),
      flush_scheduled_(false),
      env_(ioptions.env) {
  // if should_flush_ == true without an entry inserted, something must have
  // gone wrong already.
  assert(!should_flush_);
  if (prefix_extractor_ && moptions_.memtable_prefix_bloom_bits > 0) {
    prefix_bloom_.reset(new DynamicBloom(
        &allocator_,
        moptions_.memtable_prefix_bloom_bits, ioptions.bloom_locality,
        moptions_.memtable_prefix_bloom_probes, nullptr,
        moptions_.memtable_prefix_bloom_huge_page_tlb_size,
        ioptions.info_log));
  }
}

MemTable::~MemTable() { assert(refs_ == 0); }

size_t MemTable::ApproximateMemoryUsage() {
  size_t arena_usage = arena_.ApproximateMemoryUsage();
  size_t table_usage = table_->ApproximateMemoryUsage();
  // let MAX_USAGE = std::numeric_limits<size_t>::max()
  // then if arena_usage + table_usage >= MAX_USAGE, return MAX_USAGE.
  // the following variation is to avoid numeric overflow.
  if (arena_usage >= std::numeric_limits<size_t>::max() - table_usage) {
    return std::numeric_limits<size_t>::max();
  }
  // otherwise, return the actual usage
  return arena_usage + table_usage;
}
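
// Illustrative note (editor's addition, not part of the original file): the
// guard above avoids wrap-around. If, say, arena_usage == max - 10 and
// table_usage == 20, the naive sum would overflow and report a tiny number;
// returning std::numeric_limits<size_t>::max() keeps the estimate conservative.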

bool MemTable::ShouldFlushNow() const {
  // It is often impossible to allocate arena blocks that exactly match the
  // buffer size. Thus we have to decide whether to over-allocate or
  // under-allocate.
  // This constant can be interpreted as: if we still have more than
  // "kAllowOverAllocationRatio * kArenaBlockSize" space left, we'd try to
  // over-allocate one more block.
  const double kAllowOverAllocationRatio = 0.6;

  // If the arena still has room for a new block allocation, we can safely say
  // it shouldn't flush.
  auto allocated_memory =
      table_->ApproximateMemoryUsage() + arena_.MemoryAllocatedBytes();

  // if we can still allocate one more block without exceeding the
  // over-allocation ratio, then we should not flush.
  if (allocated_memory + kArenaBlockSize <
      moptions_.write_buffer_size +
      kArenaBlockSize * kAllowOverAllocationRatio) {
    return false;
  }

  // if the user keeps adding entries that exceed moptions.write_buffer_size,
  // we need to flush earlier even though we still have much available
  // memory left.
  if (allocated_memory > moptions_.write_buffer_size +
      kArenaBlockSize * kAllowOverAllocationRatio) {
    return true;
  }

  // In this code path, the arena has already allocated its "last block",
  // which means the total allocated memory is either:
  //  (1) "moderately" over-allocated (by no more than `0.6 * arena block
  //  size`), or
  //  (2) still less than the write buffer size, but allocating one more arena
  //  block would over-allocate too much more (half of the arena block size)
  //  memory.
  //
  // In either case, to avoid over-allocating, the last block stops accepting
  // new entries when its usage reaches a certain ratio. We carefully choose
  // "0.75 full" as the stop condition because it addresses the following
  // issue with great simplicity: What if the next inserted entry's size is
  // bigger than AllocatedAndUnused()?
  //
  // The answer is: if the entry size is also bigger than 0.25 *
  // kArenaBlockSize, a dedicated block will be allocated for it; otherwise
  // arena will anyway skip the AllocatedAndUnused() and allocate a new, empty
  // and regular block. In either case, we *overly* over-allocated.
  //
  // Therefore, setting the last block to be at most "0.75 full" avoids both
  // cases.
  //
  // NOTE: the average fraction of wasted space with this approach can be
  // estimated as "arena block size * 0.25 / write buffer size". Users who
  // specify a small write buffer size and/or a big arena block size may
  // suffer.
  return arena_.AllocatedAndUnused() < kArenaBlockSize / 4;
}
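
// Illustrative arithmetic (an assumption for clarity, not part of the file):
// with a hypothetical write_buffer_size of 64 MB and kArenaBlockSize of 8 MB,
// the two checks above translate to
//   allocated_memory + 8 MB  < 64 MB + 0.6 * 8 MB (= 68.8 MB)  -> no flush
//   allocated_memory         > 64 MB + 0.6 * 8 MB (= 68.8 MB)  -> flush
// and anything in between falls through to the "last block is more than
// 0.75 full" test on the final line of ShouldFlushNow().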

int MemTable::KeyComparator::operator()(const char* prefix_len_key1,
                                        const char* prefix_len_key2) const {
  // Internal keys are encoded as length-prefixed strings.
  Slice k1 = GetLengthPrefixedSlice(prefix_len_key1);
  Slice k2 = GetLengthPrefixedSlice(prefix_len_key2);
  return comparator.Compare(k1, k2);
}

int MemTable::KeyComparator::operator()(const char* prefix_len_key,
                                        const Slice& key) const {
  // Internal keys are encoded as length-prefixed strings.
  Slice a = GetLengthPrefixedSlice(prefix_len_key);
  return comparator.Compare(a, key);
}

Slice MemTableRep::UserKey(const char* key) const {
  Slice slice = GetLengthPrefixedSlice(key);
  return Slice(slice.data(), slice.size() - 8);
}

KeyHandle MemTableRep::Allocate(const size_t len, char** buf) {
  *buf = allocator_->Allocate(len);
  return static_cast<KeyHandle>(*buf);
}

// Encode a suitable internal key target for "target" and return it.
// Uses *scratch as scratch space, and the returned pointer will point
// into this scratch space.
const char* EncodeKey(std::string* scratch, const Slice& target) {
  scratch->clear();
  PutVarint32(scratch, static_cast<uint32_t>(target.size()));
  scratch->append(target.data(), target.size());
  return scratch->data();
}
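
// Worked example (editor's illustration, not part of the original file): for
// target = "abc", *scratch becomes "\x03abc" -- a varint32 length prefix
// followed by the key bytes -- and the returned pointer aliases
// scratch->data(), so it stays valid only while *scratch is unmodified.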

class MemTableIterator : public InternalIterator {
 public:
  MemTableIterator(
      const MemTable& mem, const ReadOptions& read_options, Arena* arena)
      : bloom_(nullptr),
        prefix_extractor_(mem.prefix_extractor_),
        valid_(false),
        arena_mode_(arena != nullptr) {
    if (prefix_extractor_ != nullptr && !read_options.total_order_seek) {
      bloom_ = mem.prefix_bloom_.get();
      iter_ = mem.table_->GetDynamicPrefixIterator(arena);
    } else {
      iter_ = mem.table_->GetIterator(arena);
    }
  }

  ~MemTableIterator() {
    if (arena_mode_) {
      iter_->~Iterator();
    } else {
      delete iter_;
    }
  }

  virtual bool Valid() const override { return valid_; }
  virtual void Seek(const Slice& k) override {
    PERF_TIMER_GUARD(seek_on_memtable_time);
    PERF_COUNTER_ADD(seek_on_memtable_count, 1);
    if (bloom_ != nullptr) {
      if (!bloom_->MayContain(
              prefix_extractor_->Transform(ExtractUserKey(k)))) {
        PERF_COUNTER_ADD(bloom_memtable_miss_count, 1);
        valid_ = false;
        return;
      } else {
        PERF_COUNTER_ADD(bloom_memtable_hit_count, 1);
      }
    }
    iter_->Seek(k, nullptr);
    valid_ = iter_->Valid();
  }
  virtual void SeekToFirst() override {
    iter_->SeekToFirst();
    valid_ = iter_->Valid();
  }
  virtual void SeekToLast() override {
    iter_->SeekToLast();
    valid_ = iter_->Valid();
  }
  virtual void Next() override {
    assert(Valid());
    iter_->Next();
    valid_ = iter_->Valid();
  }
  virtual void Prev() override {
    assert(Valid());
    iter_->Prev();
    valid_ = iter_->Valid();
  }
  virtual Slice key() const override {
    assert(Valid());
    return GetLengthPrefixedSlice(iter_->key());
  }
  virtual Slice value() const override {
    assert(Valid());
    Slice key_slice = GetLengthPrefixedSlice(iter_->key());
    return GetLengthPrefixedSlice(key_slice.data() + key_slice.size());
  }

  virtual Status status() const override { return Status::OK(); }

  virtual Status PinData() override {
    // memtable data is always pinned
    return Status::OK();
  }

  virtual Status ReleasePinnedData() override {
    // memtable data is always pinned
    return Status::OK();
  }

  virtual bool IsKeyPinned() const override {
    // memtable data is always pinned
    return true;
  }

 private:
  DynamicBloom* bloom_;
  const SliceTransform* const prefix_extractor_;
  MemTableRep::Iterator* iter_;
  bool valid_;
  bool arena_mode_;

  // No copying allowed
  MemTableIterator(const MemTableIterator&);
  void operator=(const MemTableIterator&);
};

InternalIterator* MemTable::NewIterator(const ReadOptions& read_options,
                                        Arena* arena) {
  assert(arena != nullptr);
  auto mem = arena->AllocateAligned(sizeof(MemTableIterator));
  return new (mem) MemTableIterator(*this, read_options, arena);
}
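
// Hedged usage sketch (an assumption for illustration, not part of the file):
// a caller that owns an Arena can obtain a memtable iterator allocated inside
// that arena. Because the iterator is constructed with placement new, it is
// destroyed explicitly rather than deleted; the arena reclaims the memory.
//
//   Arena arena;
//   ReadOptions read_options;
//   InternalIterator* iter = mem->NewIterator(read_options, &arena);
//   for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
//     // iter->key() is the internal key; iter->value() is the value slice.
//   }
//   iter->~InternalIterator();  // destroy in place; memory freed with arena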

port::RWMutex* MemTable::GetLock(const Slice& key) {
  static murmur_hash hash;
  return &locks_[hash(key) % locks_.size()];
}

uint64_t MemTable::ApproximateSize(const Slice& start_ikey,
                                   const Slice& end_ikey) {
  uint64_t entry_count = table_->ApproximateNumEntries(start_ikey, end_ikey);
  if (entry_count == 0) {
    return 0;
  }
  uint64_t n = num_entries_.load(std::memory_order_relaxed);
  if (n == 0) {
    return 0;
  }
  if (entry_count > n) {
    // table_->ApproximateNumEntries() is just an estimate so it can be larger
    // than actual entries we have. Cap it to entries we have to limit the
    // inaccuracy.
    entry_count = n;
  }
  uint64_t data_size = data_size_.load(std::memory_order_relaxed);
  return entry_count * (data_size / n);
}
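
// Worked example (editor's illustration, not part of the original file): with
// n = 1,000 entries totalling data_size = 64,000 bytes, and
// ApproximateNumEntries() reporting 250 entries in [start_ikey, end_ikey),
// the estimate is 250 * (64,000 / 1,000) = 16,000 bytes, i.e. the average
// entry size times the estimated entry count.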

void MemTable::Add(SequenceNumber s, ValueType type,
                   const Slice& key, /* user key */
                   const Slice& value) {
  // Format of an entry is concatenation of:
  //  key_size     : varint32 of internal_key.size()
  //  key bytes    : char[internal_key.size()]
  //  value_size   : varint32 of value.size()
  //  value bytes  : char[value.size()]
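  //
  // Worked example (editor's illustration, not part of the original comment):
  // for user key "abc" and value "xy", internal_key_size = 3 + 8 = 11, so the
  // entry is
  //   varint32(11) | "abc" | 8-byte packed (seq, type) tag | varint32(2) | "xy"
  // giving encoded_len = 1 + 11 + 1 + 2 = 15 bytes.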
  uint32_t key_size = static_cast<uint32_t>(key.size());
  uint32_t val_size = static_cast<uint32_t>(value.size());
  uint32_t internal_key_size = key_size + 8;
  const uint32_t encoded_len = VarintLength(internal_key_size) +
                               internal_key_size + VarintLength(val_size) +
                               val_size;
  char* buf = nullptr;
  KeyHandle handle = table_->Allocate(encoded_len, &buf);
  assert(buf != nullptr);
  char* p = EncodeVarint32(buf, internal_key_size);
  memcpy(p, key.data(), key_size);
  p += key_size;
  uint64_t packed = PackSequenceAndType(s, type);
  EncodeFixed64(p, packed);
  p += 8;
  p = EncodeVarint32(p, val_size);
  memcpy(p, value.data(), val_size);
  assert((unsigned)(p + val_size - buf) == (unsigned)encoded_len);
  table_->Insert(handle);
  num_entries_.store(num_entries_.load(std::memory_order_relaxed) + 1,
                     std::memory_order_relaxed);
  data_size_.store(data_size_.load(std::memory_order_relaxed) + encoded_len,
                   std::memory_order_relaxed);
  if (type == kTypeDeletion) {
    num_deletes_++;
  }

  if (prefix_bloom_) {
    assert(prefix_extractor_);
    prefix_bloom_->Add(prefix_extractor_->Transform(key));
  }

  // The first sequence number inserted into the memtable
  assert(first_seqno_ == 0 || s > first_seqno_);
  if (first_seqno_ == 0) {
    first_seqno_ = s;

    if (earliest_seqno_ == kMaxSequenceNumber) {
      earliest_seqno_ = first_seqno_;
    }
    assert(first_seqno_ >= earliest_seqno_);
  }

  should_flush_ = ShouldFlushNow();
}
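
// Note (editor's addition for clarity): PackSequenceAndType() above combines
// the 56-bit sequence number and the 8-bit ValueType into one fixed64,
// conceptually (s << 8) | type; UnPackSequenceAndType() in SaveValue() and
// MemTable::Update() splits that same tag back apart.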

// Callback from MemTable::Get()
namespace {

struct Saver {
  Status* status;
  const LookupKey* key;
  bool* found_final_value;  // Is value set correctly? Used by KeyMayExist
  bool* merge_in_progress;
  std::string* value;
  SequenceNumber seq;
  const MergeOperator* merge_operator;
  // the merge operations encountered;
  MergeContext* merge_context;
  MemTable* mem;
  Logger* logger;
  Statistics* statistics;
  bool inplace_update_support;
  Env* env_;
};
}  // namespace
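
// Note on the callback protocol (editor's wording, not the original authors'):
// MemTableRep::Get() invokes SaveValue() below once per matching entry,
// newest first. Returning true asks the rep to keep scanning (used while
// collecting merge operands); returning false stops the scan because a final
// value, a deletion, or an error has been found.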

static bool SaveValue(void* arg, const char* entry) {
  Saver* s = reinterpret_cast<Saver*>(arg);
  MergeContext* merge_context = s->merge_context;
  const MergeOperator* merge_operator = s->merge_operator;

  assert(s != nullptr && merge_context != nullptr);

  // entry format is:
  //    klength  varint32
  //    userkey  char[klength-8]
  //    tag      uint64
  //    vlength  varint32
  //    value    char[vlength]
  // Check that it belongs to same user key. We do not check the
  // sequence number since the Seek() call above should have skipped
  // all entries with overly large sequence numbers.
  uint32_t key_length;
  const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
  if (s->mem->GetInternalKeyComparator().user_comparator()->Equal(
          Slice(key_ptr, key_length - 8), s->key->user_key())) {
    // Correct user key
    const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
    ValueType type;
    UnPackSequenceAndType(tag, &s->seq, &type);

    switch (type) {
      case kTypeValue: {
        if (s->inplace_update_support) {
          s->mem->GetLock(s->key->user_key())->ReadLock();
        }
        Slice v = GetLengthPrefixedSlice(key_ptr + key_length);
        *(s->status) = Status::OK();
        if (*(s->merge_in_progress)) {
          assert(merge_operator);
          bool merge_success = false;
          {
            StopWatchNano timer(s->env_, s->statistics != nullptr);
            PERF_TIMER_GUARD(merge_operator_time_nanos);
            merge_success = merge_operator->FullMerge(
                s->key->user_key(), &v, merge_context->GetOperands(), s->value,
                s->logger);
            RecordTick(s->statistics, MERGE_OPERATION_TOTAL_TIME,
                       timer.ElapsedNanos());
          }
          if (!merge_success) {
            RecordTick(s->statistics, NUMBER_MERGE_FAILURES);
            *(s->status) =
                Status::Corruption("Error: Could not perform merge.");
          }
        } else {
          s->value->assign(v.data(), v.size());
        }
        if (s->inplace_update_support) {
          s->mem->GetLock(s->key->user_key())->ReadUnlock();
        }
        *(s->found_final_value) = true;
        return false;
      }
      case kTypeDeletion:
      case kTypeSingleDeletion: {
        if (*(s->merge_in_progress)) {
          assert(merge_operator != nullptr);
          *(s->status) = Status::OK();
          bool merge_success = false;
          {
            StopWatchNano timer(s->env_, s->statistics != nullptr);
            PERF_TIMER_GUARD(merge_operator_time_nanos);
            merge_success = merge_operator->FullMerge(
                s->key->user_key(), nullptr, merge_context->GetOperands(),
                s->value, s->logger);
            RecordTick(s->statistics, MERGE_OPERATION_TOTAL_TIME,
                       timer.ElapsedNanos());
          }
          if (!merge_success) {
            RecordTick(s->statistics, NUMBER_MERGE_FAILURES);
            *(s->status) =
                Status::Corruption("Error: Could not perform merge.");
          }
        } else {
          *(s->status) = Status::NotFound();
        }
        *(s->found_final_value) = true;
        return false;
      }
      case kTypeMerge: {
        if (!merge_operator) {
          *(s->status) = Status::InvalidArgument(
              "merge_operator is not properly initialized.");
          // Normally we continue the loop (return true) when we see a merge
          // operand. But in case of an error, we should stop the loop
          // immediately and pretend we have found the value to stop further
          // seek. Otherwise, the later call will override this error status.
          *(s->found_final_value) = true;
          return false;
        }
        Slice v = GetLengthPrefixedSlice(key_ptr + key_length);
        *(s->merge_in_progress) = true;
        merge_context->PushOperand(v);
        return true;
      }
      default:
        assert(false);
        return true;
    }
  }

  // s->status could be Corruption, MergeInProgress, or NotFound at this point.
  return false;
}
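
// Illustrative walk-through (an assumption for clarity, not in the original
// file): suppose a key's memtable entries, newest first, are
//   Merge("+2"), Merge("+1"), Put("10").
// SaveValue() is called once per entry. The two merge operands are pushed
// into merge_context and SaveValue() returns true to keep scanning; when the
// Put is reached, FullMerge() combines the base value "10" with the collected
// operands (a counter-style merge operator would produce "13"),
// found_final_value is set, and the scan stops.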

bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
                   MergeContext* merge_context, SequenceNumber* seq) {
  // The sequence number is updated synchronously in version_set.h
  if (IsEmpty()) {
    // Avoiding recording stats for speed.
    return false;
  }
  PERF_TIMER_GUARD(get_from_memtable_time);

  Slice user_key = key.user_key();
  bool found_final_value = false;
  bool merge_in_progress = s->IsMergeInProgress();
  bool const may_contain =
      nullptr == prefix_bloom_
          ? false
          : prefix_bloom_->MayContain(prefix_extractor_->Transform(user_key));
  if (prefix_bloom_ && !may_contain) {
    // skip the lookup entirely if the prefix bloom says the key is not present
    PERF_COUNTER_ADD(bloom_memtable_miss_count, 1);
    *seq = kMaxSequenceNumber;
  } else {
    if (prefix_bloom_) {
      PERF_COUNTER_ADD(bloom_memtable_hit_count, 1);
    }
    Saver saver;
    saver.status = s;
    saver.found_final_value = &found_final_value;
    saver.merge_in_progress = &merge_in_progress;
    saver.key = &key;
    saver.value = value;
    saver.seq = kMaxSequenceNumber;
    saver.mem = this;
    saver.merge_context = merge_context;
    saver.merge_operator = moptions_.merge_operator;
    saver.logger = moptions_.info_log;
    saver.inplace_update_support = moptions_.inplace_update_support;
    saver.statistics = moptions_.statistics;
    saver.env_ = env_;
    table_->Get(key, &saver, SaveValue);

    *seq = saver.seq;
  }

  // No change to value, since we have not yet found a Put/Delete
  if (!found_final_value && merge_in_progress) {
    *s = Status::MergeInProgress();
  }
  PERF_COUNTER_ADD(get_from_memtable_count, 1);
  return found_final_value;
}
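
// Hedged usage sketch (an assumption, not part of the original file): a
// minimal write/read round trip through Add() and Get() above, ignoring
// column families and error handling.
//
//   mem->Add(/*s=*/1, kTypeValue, "k1", "v1");
//   std::string value;
//   Status status;
//   MergeContext merge_context;
//   SequenceNumber seq;
//   bool found = mem->Get(LookupKey("k1", /*sequence=*/2), &value, &status,
//                         &merge_context, &seq);
//   // found == true, value == "v1", seq == 1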
|
|
|
|
|
2014-01-14 15:55:16 +00:00
|
|
|
void MemTable::Update(SequenceNumber seq,
|
In-place updates for equal keys and similar sized values
Summary:
Currently for each put, a fresh memory is allocated, and a new entry is added to the memtable with a new sequence number irrespective of whether the key already exists in the memtable. This diff is an attempt to update the value inplace for existing keys. It currently handles a very simple case:
1. Key already exists in the current memtable. Does not inplace update values in immutable memtable or snapshot
2. Latest value type is a 'put' ie kTypeValue
3. New value size is less than existing value, to avoid reallocating memory
TODO: For a put of an existing key, deallocate memory take by values, for other value types till a kTypeValue is found, ie. remove kTypeMerge.
TODO: Update the transaction log, to allow consistent reload of the memtable.
Test Plan: Added a unit test verifying the inplace update. But some other unit tests broken due to invalid sequence number checks. WIll fix them next.
Reviewers: xinyaohu, sumeet, haobo, dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D12423
Automatic commit by arc
2013-08-19 21:12:47 +00:00
|
|
|
const Slice& key,
|
|
|
|
const Slice& value) {
|
|
|
|
LookupKey lkey(key, seq);
|
2013-11-27 22:27:02 +00:00
|
|
|
Slice mem_key = lkey.memtable_key();
|
In-place updates for equal keys and similar sized values
Summary:
Currently for each put, a fresh memory is allocated, and a new entry is added to the memtable with a new sequence number irrespective of whether the key already exists in the memtable. This diff is an attempt to update the value inplace for existing keys. It currently handles a very simple case:
1. Key already exists in the current memtable. Does not inplace update values in immutable memtable or snapshot
2. Latest value type is a 'put' ie kTypeValue
3. New value size is less than existing value, to avoid reallocating memory
TODO: For a put of an existing key, deallocate memory take by values, for other value types till a kTypeValue is found, ie. remove kTypeMerge.
TODO: Update the transaction log, to allow consistent reload of the memtable.
Test Plan: Added a unit test verifying the inplace update. But some other unit tests broken due to invalid sequence number checks. WIll fix them next.
Reviewers: xinyaohu, sumeet, haobo, dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D12423
Automatic commit by arc
2013-08-19 21:12:47 +00:00
|
|
|
|
2014-01-16 02:17:58 +00:00
|
|
|
std::unique_ptr<MemTableRep::Iterator> iter(
|
2014-06-25 00:52:30 +00:00
|
|
|
table_->GetDynamicPrefixIterator());
|
2014-01-25 01:50:59 +00:00
|
|
|
iter->Seek(lkey.internal_key(), mem_key.data());
|
In-place updates for equal keys and similar sized values
Summary:
Currently for each put, a fresh memory is allocated, and a new entry is added to the memtable with a new sequence number irrespective of whether the key already exists in the memtable. This diff is an attempt to update the value inplace for existing keys. It currently handles a very simple case:
1. Key already exists in the current memtable. Does not inplace update values in immutable memtable or snapshot
2. Latest value type is a 'put' ie kTypeValue
3. New value size is less than existing value, to avoid reallocating memory
TODO: For a put of an existing key, deallocate memory take by values, for other value types till a kTypeValue is found, ie. remove kTypeMerge.
TODO: Update the transaction log, to allow consistent reload of the memtable.
Test Plan: Added a unit test verifying the inplace update. But some other unit tests broken due to invalid sequence number checks. WIll fix them next.
Reviewers: xinyaohu, sumeet, haobo, dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D12423
Automatic commit by arc
2013-08-19 21:12:47 +00:00
|
|
|
|
|
|
|
if (iter->Valid()) {
|
|
|
|
// entry format is:
|
2014-01-14 15:55:16 +00:00
|
|
|
// key_length varint32
|
In-place updates for equal keys and similar sized values
Summary:
Currently for each put, a fresh memory is allocated, and a new entry is added to the memtable with a new sequence number irrespective of whether the key already exists in the memtable. This diff is an attempt to update the value inplace for existing keys. It currently handles a very simple case:
1. Key already exists in the current memtable. Does not inplace update values in immutable memtable or snapshot
2. Latest value type is a 'put' ie kTypeValue
3. New value size is less than existing value, to avoid reallocating memory
TODO: For a put of an existing key, deallocate memory take by values, for other value types till a kTypeValue is found, ie. remove kTypeMerge.
TODO: Update the transaction log, to allow consistent reload of the memtable.
Test Plan: Added a unit test verifying the inplace update. But some other unit tests broken due to invalid sequence number checks. WIll fix them next.
Reviewers: xinyaohu, sumeet, haobo, dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D12423
Automatic commit by arc
2013-08-19 21:12:47 +00:00
|
|
|
// userkey char[klength-8]
|
|
|
|
// tag uint64
|
|
|
|
// vlength varint32
|
|
|
|
// value char[vlength]
|
|
|
|
// Check that it belongs to same user key. We do not check the
|
|
|
|
// sequence number since the Seek() call above should have skipped
|
|
|
|
// all entries with overly large sequence numbers.
|
|
|
|
const char* entry = iter->key();
|
2014-01-17 20:22:39 +00:00
|
|
|
uint32_t key_length = 0;
|
In-place updates for equal keys and similar sized values
Summary:
Currently for each put, a fresh memory is allocated, and a new entry is added to the memtable with a new sequence number irrespective of whether the key already exists in the memtable. This diff is an attempt to update the value inplace for existing keys. It currently handles a very simple case:
1. Key already exists in the current memtable. Does not inplace update values in immutable memtable or snapshot
2. Latest value type is a 'put' ie kTypeValue
3. New value size is less than existing value, to avoid reallocating memory
TODO: For a put of an existing key, deallocate memory take by values, for other value types till a kTypeValue is found, ie. remove kTypeMerge.
TODO: Update the transaction log, to allow consistent reload of the memtable.
Test Plan: Added a unit test verifying the inplace update. But some other unit tests broken due to invalid sequence number checks. WIll fix them next.
Reviewers: xinyaohu, sumeet, haobo, dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D12423
Automatic commit by arc
2013-08-19 21:12:47 +00:00
|
|
|
const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
|
2015-09-08 22:30:49 +00:00
|
|
|
if (comparator_.comparator.user_comparator()->Equal(
|
|
|
|
Slice(key_ptr, key_length - 8), lkey.user_key())) {
|
In-place updates for equal keys and similar sized values
Summary:
Currently for each put, a fresh memory is allocated, and a new entry is added to the memtable with a new sequence number irrespective of whether the key already exists in the memtable. This diff is an attempt to update the value inplace for existing keys. It currently handles a very simple case:
1. Key already exists in the current memtable. Does not inplace update values in immutable memtable or snapshot
2. Latest value type is a 'put' ie kTypeValue
3. New value size is less than existing value, to avoid reallocating memory
TODO: For a put of an existing key, deallocate memory take by values, for other value types till a kTypeValue is found, ie. remove kTypeMerge.
TODO: Update the transaction log, to allow consistent reload of the memtable.
Test Plan: Added a unit test verifying the inplace update. But some other unit tests broken due to invalid sequence number checks. WIll fix them next.
Reviewers: xinyaohu, sumeet, haobo, dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D12423
Automatic commit by arc
2013-08-19 21:12:47 +00:00
|
|
|
// Correct user key
|
|
|
|
const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
|
2015-05-29 21:36:35 +00:00
|
|
|
ValueType type;
|
|
|
|
SequenceNumber unused;
|
|
|
|
UnPackSequenceAndType(tag, &unused, &type);
|
|
|
|
switch (type) {
|
In-place updates for equal keys and similar sized values
Summary:
Currently for each put, a fresh memory is allocated, and a new entry is added to the memtable with a new sequence number irrespective of whether the key already exists in the memtable. This diff is an attempt to update the value inplace for existing keys. It currently handles a very simple case:
1. Key already exists in the current memtable. Does not inplace update values in immutable memtable or snapshot
2. Latest value type is a 'put' ie kTypeValue
3. New value size is less than existing value, to avoid reallocating memory
TODO: For a put of an existing key, deallocate memory take by values, for other value types till a kTypeValue is found, ie. remove kTypeMerge.
TODO: Update the transaction log, to allow consistent reload of the memtable.
Test Plan: Added a unit test verifying the inplace update. But some other unit tests broken due to invalid sequence number checks. WIll fix them next.
Reviewers: xinyaohu, sumeet, haobo, dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D12423
Automatic commit by arc
2013-08-19 21:12:47 +00:00
|
|
|
case kTypeValue: {
|
2014-01-14 15:55:16 +00:00
|
|
|
Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length);
|
2014-11-11 21:47:22 +00:00
|
|
|
uint32_t prev_size = static_cast<uint32_t>(prev_value.size());
|
|
|
|
uint32_t new_size = static_cast<uint32_t>(value.size());
|
2014-01-14 15:55:16 +00:00
|
|
|
|
Allow callback to change size of existing value. Change return type of the callback function to an enum status to handle 3 cases.
Summary:
This diff fixes 2 hacks:
* The callback function can modify the existing value in place if the merged value fits within the existing buffer size, but currently the existing buffer size is not being updated. Now the callback receives an int* so the size can be modified. Since the size is encoded as a varint in the memtable's internal entry, the entire value might have to be copied to a new location if the new size varint is smaller than the existing size varint.
* The callback function has 3 possible outcomes:
  1. Modify the existing buffer in place and update the size correspondingly; to indicate that, it returns 1.
  2. Generate a new buffer containing the merged value; it returns 2.
  3. Fail to do either of the above, based on application logic; it returns 0.
Test Plan: Just make all for now. Another unit test covering each scenario is in progress.
Reviewers: dhruba, haobo
Reviewed By: haobo
CC: leveldb, sdong, kailiu, xinyaohu, sumeet, danguo
Differential Revision: https://reviews.facebook.net/D15195
2014-01-16 23:11:19 +00:00
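To make the three outcomes concrete, here is a minimal sketch of an application-supplied inplace_callback. The parameter types follow the call site in UpdateCallback() below (char* existing value, uint32_t* size, Slice delta, std::string* merged output); the overwrite policy itself is hypothetical and only illustrative.

// Sketch of an inplace_callback honoring the UPDATED_INPLACE / UPDATED /
// UPDATE_FAILED contract. Assumes the standard rocksdb public headers.
#include <cstring>
#include <string>
#include "rocksdb/options.h"
#include "rocksdb/slice.h"

rocksdb::UpdateStatus OverwriteOrCopy(char* existing_value,
                                      uint32_t* existing_value_size,
                                      rocksdb::Slice delta_value,
                                      std::string* merged_value) {
  if (existing_value == nullptr) {
    // No previous value was found: materialize the delta as the new value.
    merged_value->assign(delta_value.data(), delta_value.size());
    return rocksdb::UPDATED;
  }
  if (delta_value.size() <= *existing_value_size) {
    // Fits in the existing buffer: update in place and shrink the recorded
    // size (it must never grow, per the assert in UpdateCallback()).
    std::memcpy(existing_value, delta_value.data(), delta_value.size());
    *existing_value_size = static_cast<uint32_t>(delta_value.size());
    return rocksdb::UPDATED_INPLACE;
  }
  // Does not fit: hand back a freshly built value; the memtable then adds a
  // new entry holding it.
  merged_value->assign(delta_value.data(), delta_value.size());
  return rocksdb::UPDATED;
}
// Wired up via: options.inplace_callback = OverwriteOrCopy;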
|
|
|
// Update value, if new value size <= previous value size
|
|
|
|
if (new_size <= prev_size) {
|
2013-08-19 21:12:47 +00:00
|
|
|
char* p = EncodeVarint32(const_cast<char*>(key_ptr) + key_length,
|
2014-01-16 23:11:19 +00:00
|
|
|
new_size);
|
2013-08-19 21:12:47 +00:00
|
|
|
WriteLock wl(GetLock(lkey.user_key()));
|
|
|
|
memcpy(p, value.data(), value.size());
|
2014-02-03 21:48:30 +00:00
|
|
|
assert((unsigned)((p + value.size()) - entry) ==
|
|
|
|
(unsigned)(VarintLength(key_length) + key_length +
|
|
|
|
VarintLength(value.size()) + value.size()));
|
2014-01-14 15:55:16 +00:00
|
|
|
return;
|
2013-08-19 21:12:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
// If the latest value is kTypeDeletion, kTypeMerge or kTypeLogData
|
2014-01-14 15:55:16 +00:00
|
|
|
// or we don't have enough space for an in-place update,
|
|
|
|
Add(seq, kTypeValue, key, value);
|
|
|
|
return;
|
2013-08-19 21:12:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-14 15:55:16 +00:00
|
|
|
// key doesn't exist
|
|
|
|
Add(seq, kTypeValue, key, value);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool MemTable::UpdateCallback(SequenceNumber seq,
|
2014-01-16 23:11:19 +00:00
|
|
|
const Slice& key,
|
2014-09-09 01:46:52 +00:00
|
|
|
const Slice& delta) {
|
2014-01-14 15:55:16 +00:00
|
|
|
LookupKey lkey(key, seq);
|
|
|
|
Slice memkey = lkey.memtable_key();
|
|
|
|
|
2014-04-23 04:14:25 +00:00
|
|
|
std::unique_ptr<MemTableRep::Iterator> iter(
|
2014-06-25 00:52:30 +00:00
|
|
|
table_->GetDynamicPrefixIterator());
|
2014-01-25 01:50:59 +00:00
|
|
|
iter->Seek(lkey.internal_key(), memkey.data());
|
2014-01-14 15:55:16 +00:00
|
|
|
|
|
|
|
if (iter->Valid()) {
|
|
|
|
// entry format is:
|
|
|
|
// key_length varint32
|
|
|
|
// userkey char[klength-8]
|
|
|
|
// tag uint64
|
|
|
|
// vlength varint32
|
|
|
|
// value char[vlength]
|
|
|
|
// Check that it belongs to same user key. We do not check the
|
|
|
|
// sequence number since the Seek() call above should have skipped
|
|
|
|
// all entries with overly large sequence numbers.
|
|
|
|
const char* entry = iter->key();
|
2014-01-17 20:22:39 +00:00
|
|
|
uint32_t key_length = 0;
|
2014-01-14 15:55:16 +00:00
|
|
|
const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
|
2015-09-08 22:30:49 +00:00
|
|
|
if (comparator_.comparator.user_comparator()->Equal(
|
|
|
|
Slice(key_ptr, key_length - 8), lkey.user_key())) {
|
2014-01-14 15:55:16 +00:00
|
|
|
// Correct user key
|
|
|
|
const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
|
2015-05-29 21:36:35 +00:00
|
|
|
ValueType type;
|
|
|
|
uint64_t unused;
|
|
|
|
UnPackSequenceAndType(tag, &unused, &type);
|
|
|
|
switch (type) {
|
2014-01-14 15:55:16 +00:00
|
|
|
case kTypeValue: {
|
|
|
|
Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length);
|
2014-11-11 21:47:22 +00:00
|
|
|
uint32_t prev_size = static_cast<uint32_t>(prev_value.size());
|
2014-01-16 23:11:19 +00:00
|
|
|
|
|
|
|
char* prev_buffer = const_cast<char*>(prev_value.data());
|
2014-11-11 21:47:22 +00:00
|
|
|
uint32_t new_prev_size = prev_size;
|
2014-01-14 15:55:16 +00:00
|
|
|
|
|
|
|
std::string str_value;
|
2014-01-16 23:11:19 +00:00
|
|
|
WriteLock wl(GetLock(lkey.user_key()));
|
2014-09-09 01:46:52 +00:00
|
|
|
auto status = moptions_.inplace_callback(prev_buffer, &new_prev_size,
|
|
|
|
delta, &str_value);
|
2014-01-16 23:11:19 +00:00
|
|
|
if (status == UpdateStatus::UPDATED_INPLACE) {
|
2014-01-14 15:55:16 +00:00
|
|
|
// Value already updated by callback.
|
2014-01-16 23:11:19 +00:00
|
|
|
assert(new_prev_size <= prev_size);
|
|
|
|
if (new_prev_size < prev_size) {
|
|
|
|
// overwrite the encoded value size with the new, smaller prev_size
|
|
|
|
char* p = EncodeVarint32(const_cast<char*>(key_ptr) + key_length,
|
|
|
|
new_prev_size);
|
|
|
|
if (VarintLength(new_prev_size) < VarintLength(prev_size)) {
|
|
|
|
// shift the value buffer as well.
|
|
|
|
memcpy(p, prev_buffer, new_prev_size);
|
|
|
|
}
|
|
|
|
}
|
2014-10-27 19:10:13 +00:00
|
|
|
RecordTick(moptions_.statistics, NUMBER_KEYS_UPDATED);
|
2014-03-12 23:40:14 +00:00
|
|
|
should_flush_ = ShouldFlushNow();
|
2014-01-14 15:55:16 +00:00
|
|
|
return true;
|
2014-01-16 23:11:19 +00:00
|
|
|
} else if (status == UpdateStatus::UPDATED) {
|
|
|
|
Add(seq, kTypeValue, key, Slice(str_value));
|
2014-10-27 19:10:13 +00:00
|
|
|
RecordTick(moptions_.statistics, NUMBER_KEYS_WRITTEN);
|
2014-03-12 23:40:14 +00:00
|
|
|
should_flush_ = ShouldFlushNow();
|
2014-01-14 15:55:16 +00:00
|
|
|
return true;
|
2014-01-16 23:11:19 +00:00
|
|
|
} else if (status == UpdateStatus::UPDATE_FAILED) {
|
|
|
|
// No action required. Return.
|
2014-03-12 23:40:14 +00:00
|
|
|
should_flush_ = ShouldFlushNow();
|
2014-01-14 15:55:16 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// If the latest value is not kTypeValue
|
|
|
|
// or key doesn't exist
|
2013-08-19 21:12:47 +00:00
|
|
|
return false;
|
|
|
|
}
|
2014-01-11 01:33:56 +00:00
|
|
|
|
|
|
|
size_t MemTable::CountSuccessiveMergeEntries(const LookupKey& key) {
|
|
|
|
Slice memkey = key.memtable_key();
|
|
|
|
|
|
|
|
// A total ordered iterator is costly for some memtablerep (prefix aware
|
|
|
|
// reps). By passing in the user key, we allow efficient iterator creation.
|
|
|
|
// The iterator only needs to be ordered within the same user key.
|
2014-01-16 02:17:58 +00:00
|
|
|
std::unique_ptr<MemTableRep::Iterator> iter(
|
2014-06-25 00:52:30 +00:00
|
|
|
table_->GetDynamicPrefixIterator());
|
2014-01-25 01:50:59 +00:00
|
|
|
iter->Seek(key.internal_key(), memkey.data());
|
2014-01-11 01:33:56 +00:00
|
|
|
|
|
|
|
size_t num_successive_merges = 0;
|
|
|
|
|
|
|
|
for (; iter->Valid(); iter->Next()) {
|
|
|
|
const char* entry = iter->key();
|
2014-01-17 20:22:39 +00:00
|
|
|
uint32_t key_length = 0;
|
2014-01-11 01:33:56 +00:00
|
|
|
const char* iter_key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
|
2015-09-08 22:30:49 +00:00
|
|
|
if (!comparator_.comparator.user_comparator()->Equal(
|
|
|
|
Slice(iter_key_ptr, key_length - 8), key.user_key())) {
|
2014-01-11 01:33:56 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
const uint64_t tag = DecodeFixed64(iter_key_ptr + key_length - 8);
|
2015-05-29 21:36:35 +00:00
|
|
|
ValueType type;
|
|
|
|
uint64_t unused;
|
|
|
|
UnPackSequenceAndType(tag, &unused, &type);
|
|
|
|
if (type != kTypeMerge) {
|
2014-01-11 01:33:56 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
++num_successive_merges;
|
|
|
|
}
|
|
|
|
|
|
|
|
return num_successive_merges;
|
|
|
|
}
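CountSuccessiveMergeEntries() is used by the write path to support the max_successive_merges option: once the number of consecutive kTypeMerge entries for a key reaches that limit, the operands are folded into a full value instead of stacking another merge entry. A minimal configuration sketch follows; MyMergeOperator stands in for any application-provided rocksdb::MergeOperator, and the path and key are hypothetical.

// Sketch: bounding the merge-operand chain kept in the memtable.
#include <memory>
#include "rocksdb/db.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/options.h"

void BoundedMergeChainDemo(std::shared_ptr<rocksdb::MergeOperator> my_operator) {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.merge_operator = my_operator;
  // After 5 successive merge entries on the same key, the write path resolves
  // the accumulated operands rather than appending a sixth entry.
  options.max_successive_merges = 5;

  rocksdb::DB* db = nullptr;
  if (!rocksdb::DB::Open(options, "/tmp/merge_demo", &db).ok()) return;
  for (int i = 0; i < 10; ++i) {
    db->Merge(rocksdb::WriteOptions(), "key", "operand");
  }
  delete db;
}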
|
|
|
|
|
2014-02-11 17:46:30 +00:00
|
|
|
void MemTableRep::Get(const LookupKey& k, void* callback_args,
|
|
|
|
bool (*callback_func)(void* arg, const char* entry)) {
|
2014-06-25 00:52:30 +00:00
|
|
|
auto iter = GetDynamicPrefixIterator();
|
2014-02-11 17:46:30 +00:00
|
|
|
for (iter->Seek(k.internal_key(), k.memtable_key().data());
|
|
|
|
iter->Valid() && callback_func(callback_args, iter->key());
|
|
|
|
iter->Next()) {
|
|
|
|
}
|
|
|
|
}
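MemTableRep::Get() above drives iteration through callback_func, stopping as soon as the callback returns false. A minimal sketch of such a callback follows, decoding the entry layout documented earlier in this file (klength varint32, user key, 8-byte tag, vlength varint32, value). CountForKeyState and CountMatchingEntries are hypothetical names; GetVarint32Ptr, DecodeFixed64, UnPackSequenceAndType and GetLengthPrefixedSlice are the helpers already used by the surrounding code, so the sketch assumes this file's headers.

// Hypothetical callback for MemTableRep::Get(): counts entries whose user key
// matches a target and stops the scan at the first non-matching key.
struct CountForKeyState {
  Slice target_user_key;
  const Comparator* user_comparator;
  int matches = 0;
};

static bool CountMatchingEntries(void* arg, const char* entry) {
  auto* state = static_cast<CountForKeyState*>(arg);
  // entry layout: klength varint32 | user key | tag (8 bytes) | vlength varint32 | value
  uint32_t key_length = 0;
  const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
  if (!state->user_comparator->Equal(Slice(key_ptr, key_length - 8),
                                     state->target_user_key)) {
    return false;  // different user key: stop iterating
  }
  const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
  SequenceNumber seq;
  ValueType type;
  UnPackSequenceAndType(tag, &seq, &type);
  Slice value = GetLengthPrefixedSlice(key_ptr + key_length);
  (void)seq; (void)type; (void)value;  // a real callback would inspect these
  ++state->matches;
  return true;  // keep iterating
}
// A caller would invoke it as: rep->Get(lookup_key, &state, CountMatchingEntries);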
|
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
} // namespace rocksdb
|