// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/memtable.h"

#include <memory>

#include "db/dbformat.h"
#include "leveldb/comparator.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"
#include "leveldb/merge_operator.h"
#include "util/coding.h"

namespace leveldb {
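
// Helper for decoding an entry fragment: reads the varint32 length stored at
// "data" and returns a Slice over the bytes that immediately follow it.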
static Slice GetLengthPrefixedSlice(const char* data) {
  uint32_t len;
  const char* p = data;
  p = GetVarint32Ptr(p, p + 5, &len);  // +5: we assume "p" is not corrupted
  return Slice(p, len);
}

MemTable::MemTable(const InternalKeyComparator& cmp,
                   std::shared_ptr<MemTableRepFactory> table_factory,
                   int numlevel,
                   const Options& options)
    : comparator_(cmp),
      refs_(0),
      arena_impl_(options.arena_block_size),
      table_(table_factory->CreateMemTableRep(comparator_, &arena_impl_)),
      flush_in_progress_(false),
      flush_completed_(false),
      file_number_(0),
      edit_(numlevel),
      first_seqno_(0),
      mem_logfile_number_(0) { }

MemTable::~MemTable() {
  assert(refs_ == 0);
}

size_t MemTable::ApproximateMemoryUsage() {
  return arena_impl_.ApproximateMemoryUsage();
}

int MemTable::KeyComparator::operator()(const char* aptr, const char* bptr)
    const {
  // Internal keys are encoded as length-prefixed strings.
  Slice a = GetLengthPrefixedSlice(aptr);
  Slice b = GetLengthPrefixedSlice(bptr);
  return comparator.Compare(a, b);
}

// Encode a suitable internal key target for "target" and return it.
// Uses *scratch as scratch space, and the returned pointer will point
// into this scratch space.
static const char* EncodeKey(std::string* scratch, const Slice& target) {
  scratch->clear();
  PutVarint32(scratch, target.size());
  scratch->append(target.data(), target.size());
  return scratch->data();
}
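
// Iterator over memtable entries that strips the length prefixes written by
// MemTable::Add, exposing internal keys and values as Slices.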
class MemTableIterator: public Iterator {
 public:
  explicit MemTableIterator(MemTableRep* table)
    : iter_(table->GetIterator()) { }

  virtual bool Valid() const { return iter_->Valid(); }
  virtual void Seek(const Slice& k) { iter_->Seek(EncodeKey(&tmp_, k)); }
  virtual void SeekToFirst() { iter_->SeekToFirst(); }
  virtual void SeekToLast() { iter_->SeekToLast(); }
  virtual void Next() { iter_->Next(); }
  virtual void Prev() { iter_->Prev(); }
  virtual Slice key() const {
    return GetLengthPrefixedSlice(iter_->key());
  }
  virtual Slice value() const {
    Slice key_slice = GetLengthPrefixedSlice(iter_->key());
    return GetLengthPrefixedSlice(key_slice.data() + key_slice.size());
  }

  virtual Status status() const { return Status::OK(); }

 private:
  std::shared_ptr<MemTableRep::Iterator> iter_;
  std::string tmp_;       // For passing to EncodeKey

  // No copying allowed
  MemTableIterator(const MemTableIterator&);
  void operator=(const MemTableIterator&);
};

Iterator* MemTable::NewIterator() {
  return new MemTableIterator(table_.get());
}

void MemTable::Add(SequenceNumber s, ValueType type,
                   const Slice& key,
                   const Slice& value) {
  // Format of an entry is concatenation of:
  //  key_size     : varint32 of internal_key.size()
  //  key bytes    : char[internal_key.size()]
  //  value_size   : varint32 of value.size()
  //  value bytes  : char[value.size()]
  size_t key_size = key.size();
  size_t val_size = value.size();
  size_t internal_key_size = key_size + 8;
  const size_t encoded_len =
      VarintLength(internal_key_size) + internal_key_size +
      VarintLength(val_size) + val_size;
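  // Illustrative example (not part of the original code): for user key "foo",
  // sequence number 5, type kTypeValue and value "bar", internal_key_size is
  // 11, so the entry is encoded as
  //   0x0b | "foo" | fixed64((5 << 8) | kTypeValue) | 0x03 | "bar"
  // giving encoded_len = 1 + 11 + 1 + 3 = 16 bytes.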
  char* buf = arena_impl_.Allocate(encoded_len);
  char* p = EncodeVarint32(buf, internal_key_size);
  memcpy(p, key.data(), key_size);
  p += key_size;
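  // The 8-byte tag packs the sequence number into the upper 56 bits and the
  // value type into the low 8 bits.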
  EncodeFixed64(p, (s << 8) | type);
  p += 8;
  p = EncodeVarint32(p, val_size);
  memcpy(p, value.data(), val_size);
  assert((p + val_size) - buf == (unsigned)encoded_len);
  table_->Insert(buf);

  // The first sequence number inserted into the memtable
  assert(first_seqno_ == 0 || s > first_seqno_);
  if (first_seqno_ == 0) {
    first_seqno_ = s;
  }
}

bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
                   std::deque<std::string>* operands, const Options& options) {
  Slice memkey = key.memtable_key();
  std::shared_ptr<MemTableRep::Iterator> iter(table_.get()->GetIterator());
  iter->Seek(memkey.data());

  // It is the caller's responsibility to allocate/delete operands list
  assert(operands != nullptr);

  bool merge_in_progress = s->IsMergeInProgress();
  auto merge_operator = options.merge_operator;
  auto logger = options.info_log;
  std::string merge_result;
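
  // Entries for this user key are scanned from newest to oldest; merge
  // operands are stacked until a Put/Delete (or the end of the key's history)
  // resolves them.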
  for (; iter->Valid(); iter->Next()) {
    // entry format is:
    //    klength  varint32
    //    userkey  char[klength-8]
    //    tag      uint64
    //    vlength  varint32
    //    value    char[vlength]
    // Check that it belongs to same user key.  We do not check the
    // sequence number since the Seek() call above should have skipped
    // all entries with overly large sequence numbers.
    const char* entry = iter->key();
    uint32_t key_length;
    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
    if (comparator_.comparator.user_comparator()->Compare(
            Slice(key_ptr, key_length - 8),
            key.user_key()) == 0) {
      // Correct user key
      const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
      switch (static_cast<ValueType>(tag & 0xff)) {
        case kTypeValue: {
          Slice v = GetLengthPrefixedSlice(key_ptr + key_length);
          *s = Status::OK();
          if (merge_in_progress) {
            assert(merge_operator);
            if (!merge_operator->Merge(key.user_key(), &v, *operands,
                                       value, logger.get())) {
              *s = Status::Corruption("Error: Could not perform merge.");
            }
          } else {
            value->assign(v.data(), v.size());
          }
          return true;
        }
        case kTypeDeletion: {
          if (merge_in_progress) {
            assert(merge_operator);
            *s = Status::OK();
            if (!merge_operator->Merge(key.user_key(), nullptr, *operands,
                                       value, logger.get())) {
              *s = Status::Corruption("Error: Could not perform merge.");
            }
          } else {
            *s = Status::NotFound(Slice());
          }
          return true;
        }
        case kTypeMerge: {
          Slice v = GetLengthPrefixedSlice(key_ptr + key_length);
          merge_in_progress = true;
          operands->push_front(v.ToString());
          while (operands->size() >= 2) {
            // Attempt an associative (partial) merge; returns true on success.
            if (merge_operator->PartialMerge(key.user_key(),
                                             Slice((*operands)[0]),
                                             Slice((*operands)[1]),
                                             &merge_result,
                                             logger.get())) {
              operands->pop_front();
              swap(operands->front(), merge_result);
            } else {
              // Keep the operands stacked; they cannot be merged associatively.
              break;
            }
          }
          break;
        }
      }
    } else {
      // exit loop if user key does not match
      break;
    }
  }

  // No change to value, since we have not yet found a Put/Delete
  if (merge_in_progress) {
    *s = Status::MergeInProgress("");
  }
  return false;
}

}  // namespace leveldb