mirror of
https://github.com/facebook/rocksdb.git
synced 2024-11-25 22:44:05 +00:00
36a5f8ed7f
- Replace raw slice comparison with a call to user comparator. Added test for custom comparators. - Fix end of namespace comments. - Fixed bug in picking inputs for a level-0 compaction. When finding overlapping files, the covered range may expand as files are added to the input set. We now correctly expand the range when this happens instead of continuing to use the old range. For example, suppose L0 contains files with the following ranges: F1: a .. d F2: c .. g F3: f .. j and the initial compaction target is F3. We used to search for range f..j which yielded {F2,F3}. However we now expand the range as soon as another file is added. In this case, when F2 is added, we expand the range to c..j and restart the search. That picks up file F1 as well. This change fixes a bug related to deleted keys showing up incorrectly after a compaction as described in Issue 44. (Sync with upstream @25072954)
69 lines
1.9 KiB
C++
69 lines
1.9 KiB
C++
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
#include "util/arena.h"
|
|
#include <assert.h>
|
|
|
|
namespace leveldb {
|
|
|
|
// Size in bytes of each standard allocation block handed out by the arena.
static const int kBlockSize = 4096;
|
|
|
|
// Start with no block; the first allocation request will create one.
Arena::Arena()
    : alloc_ptr_(NULL),
      alloc_bytes_remaining_(0),
      blocks_memory_(0) {
}
|
|
|
|
// Free every block this arena owns; all memory previously returned by
// Allocate()/AllocateAligned() becomes invalid after destruction.
Arena::~Arena() {
  size_t i = 0;
  while (i < blocks_.size()) {
    delete[] blocks_[i];
    ++i;
  }
}
|
|
|
|
// Slow path taken when the current block cannot satisfy the request.
// Large requests get a dedicated block; small ones start a fresh
// standard-size block and carve the allocation from its front.
char* Arena::AllocateFallback(size_t bytes) {
  if (bytes > kBlockSize / 4) {
    // The request exceeds a quarter of the standard block size.  Give it
    // its own block so the tail of the active block is not wasted on it.
    return AllocateNewBlock(bytes);
  }

  // Abandon whatever space remains in the current block and switch to a
  // brand-new standard-size block.
  alloc_ptr_ = AllocateNewBlock(kBlockSize);
  alloc_bytes_remaining_ = kBlockSize;

  char* out = alloc_ptr_;
  alloc_ptr_ += bytes;
  alloc_bytes_remaining_ -= bytes;
  return out;
}
|
|
|
|
char* Arena::AllocateAligned(size_t bytes) {
|
|
const int align = sizeof(void*); // We'll align to pointer size
|
|
assert((align & (align-1)) == 0); // Pointer size should be a power of 2
|
|
size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align-1);
|
|
size_t slop = (current_mod == 0 ? 0 : align - current_mod);
|
|
size_t needed = bytes + slop;
|
|
char* result;
|
|
if (needed <= alloc_bytes_remaining_) {
|
|
result = alloc_ptr_ + slop;
|
|
alloc_ptr_ += needed;
|
|
alloc_bytes_remaining_ -= needed;
|
|
} else {
|
|
// AllocateFallback always returned aligned memory
|
|
result = AllocateFallback(bytes);
|
|
}
|
|
assert((reinterpret_cast<uintptr_t>(result) & (align-1)) == 0);
|
|
return result;
|
|
}
|
|
|
|
char* Arena::AllocateNewBlock(size_t block_bytes) {
|
|
char* result = new char[block_bytes];
|
|
blocks_memory_ += block_bytes;
|
|
blocks_.push_back(result);
|
|
return result;
|
|
}
|
|
|
|
} // namespace leveldb
|