// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/dbformat.h"

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include <inttypes.h>
#include <stdio.h>
#include "port/port.h"
#include "util/coding.h"
#include "util/perf_context_imp.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
namespace rocksdb {
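
// An internal key is a user key followed by an 8-byte trailer that packs the
// 56-bit sequence number together with the 8-bit value type as
// (seq << 8) | type. For example, PackSequenceAndType(5, kTypeValue) yields
// 0x501, since kTypeValue is 0x1.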
uint64_t PackSequenceAndType(uint64_t seq, ValueType t) {
  assert(seq <= kMaxSequenceNumber);
  assert(IsValueType(t));
  return (seq << 8) | t;
}
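
// The exact inverse of PackSequenceAndType(): unpacking 0x501 recovers
// sequence 5 and kTypeValue.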
void UnPackSequenceAndType(uint64_t packed, uint64_t* seq, ValueType* t) {
  *seq = packed >> 8;
  *t = static_cast<ValueType>(packed & 0xff);

  assert(*seq <= kMaxSequenceNumber);
  assert(IsValueType(*t));
}
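
// AppendInternalKey() materializes a ParsedInternalKey: the user key bytes
// followed by the packed trailer as a little-endian fixed64. The type tag
// therefore lands in the first trailer byte, and an internal key is always
// exactly user_key.size() + 8 bytes long.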
void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
  result->append(key.user_key.data(), key.user_key.size());
  PutFixed64(result, PackSequenceAndType(key.sequence, key.type));
}
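
// For example, user key "foo" written at sequence 5 with kTypeValue renders
// as: 'foo' @ 5: 1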
std::string ParsedInternalKey::DebugString(bool hex) const {
  char buf[50];
  snprintf(buf, sizeof(buf), "' @ %" PRIu64 ": %d", sequence,
           static_cast<int>(type));
  std::string result = "'";
  result += user_key.ToString(hex);
  result += buf;
  return result;
}
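
// Same rendering, but starting from the serialized representation;
// unparseable keys print as "(bad)" followed by the escaped raw bytes.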
std::string InternalKey::DebugString(bool hex) const {
  std::string result;
  ParsedInternalKey parsed;
  if (ParseInternalKey(rep_, &parsed)) {
    result = parsed.DebugString(hex);
  } else {
    result = "(bad)";
    result.append(EscapeString(rep_));
  }
  return result;
}

const char* InternalKeyComparator::Name() const {
  return name_.c_str();
}
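
// Both Compare() overloads order by increasing user key, then decreasing
// sequence number, then decreasing type, so that for a given user key the
// most recent entry sorts first. In the Slice overload the sequence/type
// tie-break falls out of comparing the raw packed trailers: a larger packed
// value means a newer sequence number (or, at equal sequence, a larger type).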
int InternalKeyComparator::Compare(const Slice& akey, const Slice& bkey) const {
  // Order by:
  //    increasing user key (according to user-supplied comparator)
  //    decreasing sequence number
  //    decreasing type (though sequence# should be enough to disambiguate)
  int r = user_comparator_->Compare(ExtractUserKey(akey), ExtractUserKey(bkey));
  PERF_COUNTER_ADD(user_key_comparison_count, 1);
  if (r == 0) {
    const uint64_t anum = DecodeFixed64(akey.data() + akey.size() - 8);
    const uint64_t bnum = DecodeFixed64(bkey.data() + bkey.size() - 8);
    if (anum > bnum) {
      r = -1;
    } else if (anum < bnum) {
      r = +1;
    }
  }
  return r;
}
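
// The parsed-key overload has no packed trailer to compare, so the
// sequence/type tie-break is spelled out explicitly.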
int InternalKeyComparator::Compare(const ParsedInternalKey& a,
                                   const ParsedInternalKey& b) const {
  // Order by:
  //    increasing user key (according to user-supplied comparator)
  //    decreasing sequence number
  //    decreasing type (though sequence# should be enough to disambiguate)
  int r = user_comparator_->Compare(a.user_key, b.user_key);
  PERF_COUNTER_ADD(user_key_comparison_count, 1);
  if (r == 0) {
    if (a.sequence > b.sequence) {
      r = -1;
    } else if (a.sequence < b.sequence) {
      r = +1;
    } else if (a.type > b.type) {
      r = -1;
    } else if (a.type < b.type) {
      r = +1;
    }
  }
  return r;
}
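
// FindShortestSeparator() shrinks *start to a compact key that is still
// >= *start and < limit; the table builder uses this to keep index-block
// boundary keys short. With a bytewise user comparator, for instance, a
// start of "helloworld" and a limit of "hellozzz" can shorten to the user
// key "hellox".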
void InternalKeyComparator::FindShortestSeparator(std::string* start,
                                                  const Slice& limit) const {
  // Attempt to shorten the user portion of the key
  Slice user_start = ExtractUserKey(*start);
  Slice user_limit = ExtractUserKey(limit);
  std::string tmp(user_start.data(), user_start.size());
  user_comparator_->FindShortestSeparator(&tmp, user_limit);
  if (tmp.size() < user_start.size() &&
      user_comparator_->Compare(user_start, tmp) < 0) {
    // User key has become shorter physically, but larger logically.
    // Tack on the earliest possible number to the shortened user key.
    PutFixed64(&tmp,
               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
    assert(this->Compare(*start, tmp) < 0);
    assert(this->Compare(tmp, limit) < 0);
    start->swap(tmp);
  }
}
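
// FindShortSuccessor() replaces *key with a short key >= *key. Because
// sequence numbers sort in decreasing order, appending the
// (kMaxSequenceNumber, kValueTypeForSeek) trailer yields the smallest
// possible internal key for the shortened user key, which still orders
// after the original.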
void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
  Slice user_key = ExtractUserKey(*key);
  std::string tmp(user_key.data(), user_key.size());
  user_comparator_->FindShortSuccessor(&tmp);
  if (tmp.size() < user_key.size() &&
      user_comparator_->Compare(user_key, tmp) < 0) {
    // User key has become shorter physically, but larger logically.
    // Tack on the earliest possible number to the shortened user key.
    PutFixed64(&tmp,
               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
    assert(this->Compare(*key, tmp) < 0);
    key->swap(tmp);
  }
}
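
// LookupKey packs a user key and a snapshot sequence number into the format
// MemTable lookups expect:
//   varint32 of (usize + 8) | user key bytes | 8-byte (sequence, type) trailer
// The accessors in dbformat.h slice this buffer three ways: memtable_key()
// spans everything, internal_key() skips the length prefix, and user_key()
// also drops the trailer. The "usize + 13" estimate below is the worst case:
// a 5-byte varint32 plus the 8-byte trailer.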
LookupKey::LookupKey(const Slice& _user_key, SequenceNumber s) {
  size_t usize = _user_key.size();
  size_t needed = usize + 13;  // A conservative estimate
  char* dst;
  if (needed <= sizeof(space_)) {
    dst = space_;
  } else {
    dst = new char[needed];
  }
  start_ = dst;
  // NOTE: We don't support user keys of more than 2GB :)
  dst = EncodeVarint32(dst, static_cast<uint32_t>(usize + 8));
  kstart_ = dst;
  memcpy(dst, _user_key.data(), usize);
  dst += usize;
  EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek));
  dst += 8;
  end_ = dst;
}

} // namespace rocksdb