//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
//  This source code is licensed under the BSD-style license found in the
//  LICENSE file in the root directory of this source tree. An additional grant
//  of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/db.h"

#include <memory>
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "rocksdb/env.h"
#include "rocksdb/memtablerep.h"
#include "util/logging.h"
#include "util/testharness.h"

namespace rocksdb {

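// Applies the batch to a fresh MemTable and renders every entry found there
// as "<Type>(<key>[, <value>])@<sequence>". A failed insert appends the
// status text; a count that disagrees with WriteBatchInternal::Count()
// appends "CountMismatch()".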
static std::string PrintContents(WriteBatch* b) {
  InternalKeyComparator cmp(BytewiseComparator());
  auto factory = std::make_shared<SkipListFactory>();
  Options options;
  options.memtable_factory = factory;
  MemTable* mem = new MemTable(cmp, options);
  mem->Ref();
  std::string state;
  Status s = WriteBatchInternal::InsertInto(b, mem, &options);
  int count = 0;
  Iterator* iter = mem->NewIterator();
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ParsedInternalKey ikey;
    memset((void *)&ikey, 0, sizeof(ikey));
    ASSERT_TRUE(ParseInternalKey(iter->key(), &ikey));
    switch (ikey.type) {
      case kTypeValue:
        state.append("Put(");
        state.append(ikey.user_key.ToString());
        state.append(", ");
        state.append(iter->value().ToString());
        state.append(")");
        count++;
        break;
      case kTypeMerge:
        state.append("Merge(");
        state.append(ikey.user_key.ToString());
        state.append(", ");
        state.append(iter->value().ToString());
        state.append(")");
        count++;
        break;
      case kTypeDeletion:
        state.append("Delete(");
        state.append(ikey.user_key.ToString());
        state.append(")");
        count++;
        break;
      case kTypeColumnFamilyDeletion:
      case kTypeColumnFamilyValue:
      case kTypeColumnFamilyMerge:
      case kTypeLogData:
        assert(false);
        break;
    }
    state.append("@");
    state.append(NumberToString(ikey.sequence));
  }
  delete iter;
  if (!s.ok()) {
    state.append(s.ToString());
  } else if (count != WriteBatchInternal::Count(b)) {
    state.append("CountMismatch()");
  }
  delete mem->Unref();
  return state;
}

class WriteBatchTest { };

TEST(WriteBatchTest, Empty) {
  WriteBatch batch;
  ASSERT_EQ("", PrintContents(&batch));
  ASSERT_EQ(0, WriteBatchInternal::Count(&batch));
  ASSERT_EQ(0, batch.Count());
}
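
// Records in a batch receive consecutive sequence numbers starting at the
// batch's sequence; PrintContents then reports them in the memtable's key
// order.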
TEST(WriteBatchTest, Multiple) {
  WriteBatch batch;
  batch.Put(Slice("foo"), Slice("bar"));
  batch.Delete(Slice("box"));
  batch.Put(Slice("baz"), Slice("boo"));
  WriteBatchInternal::SetSequence(&batch, 100);
  ASSERT_EQ(100U, WriteBatchInternal::Sequence(&batch));
  ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
  ASSERT_EQ("Put(baz, boo)@102"
            "Delete(box)@101"
            "Put(foo, bar)@100",
            PrintContents(&batch));
  ASSERT_EQ(3, batch.Count());
}
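
// Dropping the last byte of the serialized batch leaves the Delete record
// truncated; the intact Put is still applied and the failure surfaces as a
// Corruption status.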
TEST(WriteBatchTest, Corruption) {
  WriteBatch batch;
  batch.Put(Slice("foo"), Slice("bar"));
  batch.Delete(Slice("box"));
  WriteBatchInternal::SetSequence(&batch, 200);
  Slice contents = WriteBatchInternal::Contents(&batch);
  WriteBatchInternal::SetContents(&batch,
                                  Slice(contents.data(), contents.size() - 1));
  ASSERT_EQ("Put(foo, bar)@200"
            "Corruption: bad WriteBatch Delete",
            PrintContents(&batch));
}
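
// WriteBatchInternal::Append copies b2's records onto the end of b1; the
// copied records are numbered from b1's starting sequence (200), not from
// b2's (300).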
TEST(WriteBatchTest, Append) {
  WriteBatch b1, b2;
  WriteBatchInternal::SetSequence(&b1, 200);
  WriteBatchInternal::SetSequence(&b2, 300);
  WriteBatchInternal::Append(&b1, &b2);
  ASSERT_EQ("", PrintContents(&b1));
  ASSERT_EQ(0, b1.Count());
  b2.Put("a", "va");
  WriteBatchInternal::Append(&b1, &b2);
  ASSERT_EQ("Put(a, va)@200", PrintContents(&b1));
  ASSERT_EQ(1, b1.Count());
  b2.Clear();
  b2.Put("b", "vb");
  WriteBatchInternal::Append(&b1, &b2);
  ASSERT_EQ("Put(a, va)@200"
            "Put(b, vb)@201",
            PrintContents(&b1));
  ASSERT_EQ(2, b1.Count());
  b2.Delete("foo");
  WriteBatchInternal::Append(&b1, &b2);
  ASSERT_EQ("Put(a, va)@200"
            "Put(b, vb)@202"
            "Put(b, vb)@201"
            "Delete(foo)@203",
            PrintContents(&b1));
  ASSERT_EQ(4, b1.Count());
}

namespace {
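// Appends a textual record of every callback to 'seen', using the *CF forms
// when the column family id is non-zero, so tests can assert on the exact
// replay order.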
struct TestHandler : public WriteBatch::Handler {
  std::string seen;
  virtual void PutCF(uint32_t column_family_id, const Slice& key,
                     const Slice& value) {
    if (column_family_id == 0) {
      seen += "Put(" + key.ToString() + ", " + value.ToString() + ")";
    } else {
      seen += "PutCF(" + std::to_string(column_family_id) + ", " +
              key.ToString() + ", " + value.ToString() + ")";
    }
  }
  virtual void MergeCF(uint32_t column_family_id, const Slice& key,
                       const Slice& value) {
    if (column_family_id == 0) {
      seen += "Merge(" + key.ToString() + ", " + value.ToString() + ")";
    } else {
      seen += "MergeCF(" + std::to_string(column_family_id) + ", " +
              key.ToString() + ", " + value.ToString() + ")";
    }
  }
  virtual void LogData(const Slice& blob) {
    seen += "LogData(" + blob.ToString() + ")";
  }
  virtual void DeleteCF(uint32_t column_family_id, const Slice& key) {
    if (column_family_id == 0) {
      seen += "Delete(" + key.ToString() + ")";
    } else {
      seen += "DeleteCF(" + std::to_string(column_family_id) + ", " +
              key.ToString() + ")";
    }
  }
};
}  // namespace

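// PutLogData blobs are not counted as writes and never reach the memtable,
// but they are replayed to Iterate handlers in their original position.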
TEST(WriteBatchTest, Blob) {
  WriteBatch batch;
  batch.Put(Slice("k1"), Slice("v1"));
  batch.Put(Slice("k2"), Slice("v2"));
  batch.Put(Slice("k3"), Slice("v3"));
  batch.PutLogData(Slice("blob1"));
  batch.Delete(Slice("k2"));
  batch.PutLogData(Slice("blob2"));
  batch.Merge(Slice("foo"), Slice("bar"));
  ASSERT_EQ(5, batch.Count());
  ASSERT_EQ("Merge(foo, bar)@4"
            "Put(k1, v1)@0"
            "Delete(k2)@3"
            "Put(k2, v2)@1"
            "Put(k3, v3)@2",
            PrintContents(&batch));

  TestHandler handler;
  batch.Iterate(&handler);
  ASSERT_EQ(
      "Put(k1, v1)"
      "Put(k2, v2)"
      "Put(k3, v3)"
      "LogData(blob1)"
      "Delete(k2)"
      "LogData(blob2)"
      "Merge(foo, bar)",
      handler.seen);
}
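
// Returning false from Handler::Continue() stops Iterate early; only the
// first three records below are replayed.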
TEST(WriteBatchTest, Continue) {
  WriteBatch batch;

  struct Handler : public TestHandler {
    int num_seen = 0;
    virtual void PutCF(uint32_t column_family_id, const Slice& key,
                       const Slice& value) {
      ++num_seen;
      TestHandler::PutCF(column_family_id, key, value);
    }
    virtual void MergeCF(uint32_t column_family_id, const Slice& key,
                         const Slice& value) {
      ++num_seen;
      TestHandler::MergeCF(column_family_id, key, value);
    }
    virtual void LogData(const Slice& blob) {
      ++num_seen;
      TestHandler::LogData(blob);
    }
    virtual void DeleteCF(uint32_t column_family_id, const Slice& key) {
      ++num_seen;
      TestHandler::DeleteCF(column_family_id, key);
    }
    virtual bool Continue() override {
      return num_seen < 3;
    }
  } handler;

  batch.Put(Slice("k1"), Slice("v1"));
  batch.PutLogData(Slice("blob1"));
  batch.Delete(Slice("k1"));
  batch.PutLogData(Slice("blob2"));
  batch.Merge(Slice("foo"), Slice("bar"));
  batch.Iterate(&handler);
  ASSERT_EQ(
      "Put(k1, v1)"
      "LogData(blob1)"
      "Delete(k1)",
      handler.seen);
}
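
// Put(SliceParts, SliceParts) accepts keys and values supplied as several
// fragments; the fragments are concatenated before being stored.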
TEST(WriteBatchTest, PutGatherSlices) {
  WriteBatch batch;
  batch.Put(Slice("foo"), Slice("bar"));

  {
    // Try a write where the key is one slice but the value is two
    Slice key_slice("baz");
    Slice value_slices[2] = { Slice("header"), Slice("payload") };
    batch.Put(SliceParts(&key_slice, 1),
              SliceParts(value_slices, 2));
  }

  {
    // One where the key is composite but the value is a single slice
    Slice key_slices[3] = { Slice("key"), Slice("part2"), Slice("part3") };
    Slice value_slice("value");
    batch.Put(SliceParts(key_slices, 3),
              SliceParts(&value_slice, 1));
  }

  WriteBatchInternal::SetSequence(&batch, 100);
  ASSERT_EQ("Put(baz, headerpayload)@101"
            "Put(foo, bar)@100"
            "Put(keypart2part3, value)@102",
            PrintContents(&batch));
  ASSERT_EQ(3, batch.Count());
}
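
// Writes tagged with a non-default column family id are delivered through
// the *CF handler callbacks; id 0 (the default family) is reported through
// the plain Put/Delete/Merge forms.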
TEST(WriteBatchTest, ColumnFamiliesBatchTest) {
  WriteBatch batch;
  batch.Put(0, Slice("foo"), Slice("bar"));
  batch.Put(2, Slice("twofoo"), Slice("bar2"));
  batch.Put(8, Slice("eightfoo"), Slice("bar8"));
  batch.Delete(8, Slice("eightfoo"));
  batch.Merge(3, Slice("threethree"), Slice("3three"));
  batch.Put(0, Slice("foo"), Slice("bar"));
  batch.Merge(Slice("omom"), Slice("nom"));

  TestHandler handler;
  batch.Iterate(&handler);
  ASSERT_EQ(
      "Put(foo, bar)"
      "PutCF(2, twofoo, bar2)"
      "PutCF(8, eightfoo, bar8)"
      "DeleteCF(8, eightfoo)"
      "MergeCF(3, threethree, 3three)"
      "Put(foo, bar)"
      "Merge(omom, nom)",
      handler.seen);
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  return rocksdb::test::RunAllTests();
}