Mirror of https://github.com/facebook/rocksdb.git
allow nullptr Slice only as sentinel
Summary:
Allow a `Slice` holding nullptr as a sentinel value, but not in comparisons. This new restriction eliminates the need for the manual checks in 39ef900551, while still conforming to glibc's `memcmp` API. Thanks siying for the idea. Users may need to migrate, so this is mentioned in HISTORY.md.
Closes https://github.com/facebook/rocksdb/pull/2777
Differential Revision: D5686016
Pulled By: ajkr
fbshipit-source-id: 03a2ca3fd9a0ebade9d0d5686c81d59a9534f563
commit 234f33a3f9
parent ccf7f833e3
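In user terms, a `Slice` may still hold nullptr as a "no value" sentinel, but it must be normalized to a real (empty) slice before being compared. Below is a minimal sketch of the new contract; it is illustrative only, not code from this commit.

#include <cassert>
#include "rocksdb/slice.h"

int main() {
  rocksdb::Slice empty;                 // default slice: data() == "", size() == 0
  rocksdb::Slice sentinel(nullptr, 0);  // still allowed as a "no value" marker

  assert(sentinel.data() == nullptr);   // fine: inspecting the sentinel
  empty.compare(rocksdb::Slice("a"));   // fine: both data pointers are valid

  // No longer allowed: comparing a nullptr-holding Slice trips an assert in
  // debug builds (and is undefined behavior via memcmp otherwise). Normalize
  // the sentinel first, e.g. sentinel = rocksdb::Slice();
  // sentinel.compare(rocksdb::Slice("a"));
  return 0;
}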
HISTORY.md
@@ -2,6 +2,7 @@
 ## Unreleased
 ### Public API Change
 * Users of `Statistics::getHistogramString()` will see fewer histogram buckets and different bucket endpoints.
+* `Slice::compare` and BytewiseComparator `Compare` no longer accept `Slice`s containing nullptr.
 
 ### New Features
 * Add Iterator::Refresh(), which allows users to update the iterator state so that they can avoid some initialization costs of recreating iterators.
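The same restriction applies when comparisons go through the default comparator, since the built-in bytewise comparator defers to `Slice::compare`. A hedged sketch of what is and is not allowed after this change (illustrative only, not part of this commit):

#include "rocksdb/comparator.h"
#include "rocksdb/slice.h"

void BytewiseExample() {
  const rocksdb::Comparator* cmp = rocksdb::BytewiseComparator();

  // Fine: default-constructed slices point at "" with size 0.
  cmp->Compare(rocksdb::Slice(), rocksdb::Slice("a"));

  // Not allowed anymore: a Slice holding nullptr may be kept as a sentinel,
  // but must not be passed to Compare.
  // cmp->Compare(rocksdb::Slice(nullptr, 0), rocksdb::Slice("a"));
}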
include/rocksdb/slice.h
@@ -214,16 +214,8 @@ inline bool operator!=(const Slice& x, const Slice& y) {
 }
 
 inline int Slice::compare(const Slice& b) const {
-  const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
-  if (min_len == 0) {
-    if (size_ > 0) {
-      return 1;
-    } else if (b.size_ > 0) {
-      return -1;
-    }
-    return 0;
-  }
+  assert(data_ != nullptr && b.data_ != nullptr);
+  const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
   int r = memcmp(data_, b.data_, min_len);
   if (r == 0) {
     if (size_ < b.size_) r = -1;
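The removed branch existed because, per the C standard, passing a null pointer to `memcmp` is undefined behavior even when the length is zero; the new code calls `memcmp` unconditionally and documents the non-null precondition with an assert instead. Callers that may still hold nullptr slices can keep the old tolerant ordering with a small wrapper of their own; a sketch under that assumption (the helper name is hypothetical, not part of RocksDB):

#include "rocksdb/slice.h"

// Hypothetical caller-side helper: reproduces the old tolerant three-way
// ordering for slices that may still hold nullptr.
inline int CompareAllowingNullptr(const rocksdb::Slice& a,
                                  const rocksdb::Slice& b) {
  if (a.size() == 0 || b.size() == 0) {
    // Never hand a possibly-null pointer to Slice::compare / memcmp.
    if (a.size() > 0) return 1;
    if (b.size() > 0) return -1;
    return 0;
  }
  return a.compare(b);  // both non-empty, so both data pointers must be valid
}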
table/cuckoo_table_builder_test.cc
@@ -109,7 +109,11 @@ class CuckooBuilderTest : public testing::Test {
           expected_locations.begin();
       if (key_idx == keys.size()) {
         // i is not one of the expected locations. Empty bucket.
-        ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0);
+        if (read_slice.data() == nullptr) {
+          ASSERT_EQ(0, expected_unused_bucket.size());
+        } else {
+          ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0);
+        }
       } else {
         keys_found[key_idx] = true;
         ASSERT_EQ(read_slice.compare(keys[key_idx] + values[key_idx]), 0);
utilities/merge_operators/max.cc
@@ -25,6 +25,8 @@ class MaxOperator : public MergeOperator {
     if (merge_in.existing_value) {
       max = Slice(merge_in.existing_value->data(),
                   merge_in.existing_value->size());
+    } else if (max.data() == nullptr) {
+      max = Slice();
     }
 
     for (const auto& op : merge_in.operand_list) {
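This is the sentinel pattern the commit message refers to: a Slice whose data() is nullptr marks "no value seen yet", and it is swapped for a usable value before any comparison. A minimal standalone sketch of the same idea (MaxOf is a hypothetical helper, not RocksDB code; inputs are assumed to be valid, non-null slices):

#include <vector>
#include "rocksdb/slice.h"

// Returns the bytewise-largest slice, or the nullptr sentinel if `values`
// is empty. The sentinel is never passed to Slice::compare.
rocksdb::Slice MaxOf(const std::vector<rocksdb::Slice>& values) {
  rocksdb::Slice max(nullptr, 0);  // sentinel: nothing seen yet
  for (const auto& v : values) {
    // Short-circuit keeps the sentinel out of compare().
    if (max.data() == nullptr || max.compare(v) < 0) {
      max = v;
    }
  }
  return max;
}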