Mirror of https://github.com/facebook/rocksdb.git, synced 2024-11-26 07:30:54 +00:00

Commit 961c7590d6
Summary:
Preliminary user-timestamp support for delete. If ["a", ts=100] exists, you can delete it by calling `DB::Delete(write_options, key)` in which `write_options.timestamp` points to a `ts` higher than 100.

Implementation:
A new ValueType, `kTypeDeletionWithTimestamp`, is added for deletion markers that carry a timestamp. The reason for a separate `kTypeDeletionWithTimestamp`: RocksDB may drop tombstones (keys with `kTypeDeletion`) when compacting them to the bottom level. This is fine and useful when timestamps are disabled, but if we kept reusing `kTypeDeletion` with timestamps enabled, we might drop a tombstone carrying a more recent timestamp, causing deleted keys to re-appear.

Test plan (dev server):
```
make check
```

Pull Request resolved: https://github.com/facebook/rocksdb/pull/6253

Reviewed By: ltamasi

Differential Revision: D20995328

Pulled By: riversand963

fbshipit-source-id: a9e5c22968ad76f98e3dc6ee0151265a3f0df619
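To make the usage described in the summary concrete, here is a minimal sketch of the delete-with-timestamp flow. It follows the summary's statement that `write_options.timestamp` points to the timestamp; the 8-byte timestamp encoding and the timestamp-aware comparator the DB would need are assumptions for illustration, not part of this commit.

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/slice.h"

// Hypothetical helper: encode a u64 timestamp into the fixed-width byte
// string the (assumed) timestamp-aware comparator expects.
std::string EncodeU64Ts(uint64_t ts) {
  std::string out(sizeof(ts), '\0');
  memcpy(&out[0], &ts, sizeof(ts));
  return out;
}

void DeleteWithTimestampExample(rocksdb::DB* db) {
  // Suppose ["a", ts=100] was written earlier. Per the summary, the key can
  // be deleted by passing a timestamp greater than 100 via
  // write_options.timestamp.
  std::string ts = EncodeU64Ts(101);
  rocksdb::Slice ts_slice(ts);

  rocksdb::WriteOptions write_options;
  write_options.timestamp = &ts_slice;  // as described in the summary above

  rocksdb::Status s = db->Delete(write_options, "a");
  assert(s.ok());
}
```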
207 lines · 6.8 KiB · C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/dbformat.h"

#include <stdio.h>

#include <cinttypes>

#include "monitoring/perf_context_imp.h"
#include "port/port.h"
#include "util/coding.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

// kValueTypeForSeek defines the ValueType that should be passed when
// constructing a ParsedInternalKey object for seeking to a particular
// sequence number (since we sort sequence numbers in decreasing order
// and the value type is embedded as the low 8 bits in the sequence
// number in internal keys, we need to use the highest-numbered
// ValueType, not the lowest).
const ValueType kValueTypeForSeek = kTypeDeletionWithTimestamp;
const ValueType kValueTypeForSeekForPrev = kTypeDeletion;

uint64_t PackSequenceAndType(uint64_t seq, ValueType t) {
  assert(seq <= kMaxSequenceNumber);
  assert(IsExtendedValueType(t));
  return (seq << 8) | t;
}

EntryType GetEntryType(ValueType value_type) {
  switch (value_type) {
    case kTypeValue:
      return kEntryPut;
    case kTypeDeletion:
      return kEntryDelete;
    case kTypeSingleDeletion:
      return kEntrySingleDelete;
    case kTypeMerge:
      return kEntryMerge;
    case kTypeRangeDeletion:
      return kEntryRangeDeletion;
    case kTypeBlobIndex:
      return kEntryBlobIndex;
    default:
      return kEntryOther;
  }
}

bool ParseFullKey(const Slice& internal_key, FullKey* fkey) {
  ParsedInternalKey ikey;
  if (!ParseInternalKey(internal_key, &ikey)) {
    return false;
  }
  fkey->user_key = ikey.user_key;
  fkey->sequence = ikey.sequence;
  fkey->type = GetEntryType(ikey.type);
  return true;
}

void UnPackSequenceAndType(uint64_t packed, uint64_t* seq, ValueType* t) {
  *seq = packed >> 8;
  *t = static_cast<ValueType>(packed & 0xff);

  assert(*seq <= kMaxSequenceNumber);
  assert(IsExtendedValueType(*t));
}

void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
  result->append(key.user_key.data(), key.user_key.size());
  PutFixed64(result, PackSequenceAndType(key.sequence, key.type));
}

void AppendInternalKeyWithDifferentTimestamp(std::string* result,
                                             const ParsedInternalKey& key,
                                             const Slice& ts) {
  assert(key.user_key.size() >= ts.size());
  result->append(key.user_key.data(), key.user_key.size() - ts.size());
  result->append(ts.data(), ts.size());
  PutFixed64(result, PackSequenceAndType(key.sequence, key.type));
}

void AppendInternalKeyFooter(std::string* result, SequenceNumber s,
                             ValueType t) {
  PutFixed64(result, PackSequenceAndType(s, t));
}

std::string ParsedInternalKey::DebugString(bool hex) const {
  char buf[50];
  snprintf(buf, sizeof(buf), "' seq:%" PRIu64 ", type:%d", sequence,
           static_cast<int>(type));
  std::string result = "'";
  result += user_key.ToString(hex);
  result += buf;
  return result;
}

std::string InternalKey::DebugString(bool hex) const {
  std::string result;
  ParsedInternalKey parsed;
  if (ParseInternalKey(rep_, &parsed)) {
    result = parsed.DebugString(hex);
  } else {
    result = "(bad)";
    result.append(EscapeString(rep_));
  }
  return result;
}

const char* InternalKeyComparator::Name() const { return name_.c_str(); }

int InternalKeyComparator::Compare(const ParsedInternalKey& a,
                                   const ParsedInternalKey& b) const {
  // Order by:
  //    increasing user key (according to user-supplied comparator)
  //    decreasing sequence number
  //    decreasing type (though sequence# should be enough to disambiguate)
  int r = user_comparator_.Compare(a.user_key, b.user_key);
  if (r == 0) {
    if (a.sequence > b.sequence) {
      r = -1;
    } else if (a.sequence < b.sequence) {
      r = +1;
    } else if (a.type > b.type) {
      r = -1;
    } else if (a.type < b.type) {
      r = +1;
    }
  }
  return r;
}

void InternalKeyComparator::FindShortestSeparator(std::string* start,
                                                  const Slice& limit) const {
  // Attempt to shorten the user portion of the key
  Slice user_start = ExtractUserKey(*start);
  Slice user_limit = ExtractUserKey(limit);
  std::string tmp(user_start.data(), user_start.size());
  user_comparator_.FindShortestSeparator(&tmp, user_limit);
  if (tmp.size() <= user_start.size() &&
      user_comparator_.Compare(user_start, tmp) < 0) {
    // User key has become shorter physically, but larger logically.
    // Tack on the earliest possible number to the shortened user key.
    PutFixed64(&tmp,
               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
    assert(this->Compare(*start, tmp) < 0);
    assert(this->Compare(tmp, limit) < 0);
    start->swap(tmp);
  }
}

void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
  Slice user_key = ExtractUserKey(*key);
  std::string tmp(user_key.data(), user_key.size());
  user_comparator_.FindShortSuccessor(&tmp);
  if (tmp.size() <= user_key.size() &&
      user_comparator_.Compare(user_key, tmp) < 0) {
    // User key has become shorter physically, but larger logically.
    // Tack on the earliest possible number to the shortened user key.
    PutFixed64(&tmp,
               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
    assert(this->Compare(*key, tmp) < 0);
    key->swap(tmp);
  }
}

LookupKey::LookupKey(const Slice& _user_key, SequenceNumber s,
                     const Slice* ts) {
  size_t usize = _user_key.size();
  size_t ts_sz = (nullptr == ts) ? 0 : ts->size();
  size_t needed = usize + ts_sz + 13;  // A conservative estimate
  char* dst;
  if (needed <= sizeof(space_)) {
    dst = space_;
  } else {
    dst = new char[needed];
  }
  start_ = dst;
  // NOTE: We don't support user keys of more than 2GB :)
  dst = EncodeVarint32(dst, static_cast<uint32_t>(usize + ts_sz + 8));
  kstart_ = dst;
  memcpy(dst, _user_key.data(), usize);
  dst += usize;
  if (nullptr != ts) {
    memcpy(dst, ts->data(), ts_sz);
    dst += ts_sz;
  }
  EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek));
  dst += 8;
  end_ = dst;
}

void IterKey::EnlargeBuffer(size_t key_size) {
  // If size is smaller than buffer size, continue using current buffer,
  // or the static allocated one, as default
  assert(key_size > buf_size_);
  // Need to enlarge the buffer.
  ResetBuffer();
  buf_ = new char[key_size];
  buf_size_ = key_size;
}

}  // namespace ROCKSDB_NAMESPACE
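As a quick reference for the encoding implemented above, here is a small standalone sketch of how the sequence number and value type share the trailing 8 bytes of an internal key (`user_key` + optional timestamp + fixed64 footer). The enum and function names are simplified stand-ins, not the real definitions from `dbformat.h`.

```cpp
#include <cassert>
#include <cstdint>

// Simplified stand-ins for the real ValueType values in dbformat.h.
enum SimpleValueType : uint8_t { kSimpleDelete = 0x0, kSimplePut = 0x1 };

// Mirrors PackSequenceAndType: sequence number in the high 56 bits,
// value type in the low 8 bits.
uint64_t Pack(uint64_t seq, SimpleValueType t) { return (seq << 8) | t; }

// Mirrors UnPackSequenceAndType.
void Unpack(uint64_t packed, uint64_t* seq, SimpleValueType* t) {
  *seq = packed >> 8;
  *t = static_cast<SimpleValueType>(packed & 0xff);
}

int main() {
  // The footer for ("a", seq=100, put) packs both fields into one uint64_t.
  uint64_t packed = Pack(/*seq=*/100, kSimplePut);
  assert(packed == ((100u << 8) | 0x1));

  uint64_t seq;
  SimpleValueType t;
  Unpack(packed, &seq, &t);
  assert(seq == 100 && t == kSimplePut);

  // Because InternalKeyComparator orders equal user keys by decreasing
  // sequence number, a seek key built with kMaxSequenceNumber and the
  // highest-numbered type sorts before every live entry for that user key.
  return 0;
}
```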