2013-10-16 21:59:46 +00:00
|
|
|
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under the BSD-style license found in the
|
|
|
|
// LICENSE file in the root directory of this source tree. An additional grant
|
|
|
|
// of patent rights can be found in the PATENTS file in the same directory.
|
|
|
|
//
|
2011-03-18 22:37:00 +00:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
2014-01-31 01:18:17 +00:00
|
|
|
#include "util/arena.h"
|
2014-05-04 20:55:53 +00:00
|
|
|
#include <sys/mman.h>
|
2014-01-08 23:06:07 +00:00
|
|
|
#include <algorithm>
|
2014-05-04 22:52:23 +00:00
|
|
|
#include "rocksdb/env.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
namespace rocksdb {
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2014-01-31 01:18:17 +00:00
|
|
|
// Bounds for the per-arena block size; OptimizeBlockSize() clamps into
// [kMinBlockSize, kMaxBlockSize].
const size_t Arena::kMinBlockSize = 4096;
// 2GB. Written as `2u << 30`: the previous `2 << 30` shifted a signed
// 32-bit int past INT_MAX (undefined/implementation-defined behavior),
// which in practice yielded a negative int that converted to a huge
// size_t and silently disabled the upper clamp.
const size_t Arena::kMaxBlockSize = 2u << 30;
// Every allocation is aligned to (a multiple of) the pointer size.
static const int kAlignUnit = sizeof(void*);
|
|
|
|
|
|
|
|
size_t OptimizeBlockSize(size_t block_size) {
|
|
|
|
// Make sure block_size is in optimal range
|
2014-01-31 01:18:17 +00:00
|
|
|
block_size = std::max(Arena::kMinBlockSize, block_size);
|
|
|
|
block_size = std::min(Arena::kMaxBlockSize, block_size);
|
2014-01-08 23:06:07 +00:00
|
|
|
|
|
|
|
// make sure block_size is the multiple of kAlignUnit
|
|
|
|
if (block_size % kAlignUnit != 0) {
|
|
|
|
block_size = (1 + block_size / kAlignUnit) * kAlignUnit;
|
Make arena block size configurable
Summary:
Add an option for arena block size, default value 4096 bytes. Arena will allocate blocks with such size.
I am not sure about passing parameter to skiplist in the new virtualized framework, though I talked to Jim a bit. So add Jim as reviewer.
Test Plan:
new unit test, I am running db_test.
For passing paramter from configured option to Arena, I tried tests like:
TEST(DBTest, Arena_Option) {
std::string dbname = test::TmpDir() + "/db_arena_option_test";
DestroyDB(dbname, Options());
DB* db = nullptr;
Options opts;
opts.create_if_missing = true;
opts.arena_block_size = 1000000; // tested 99, 999999
Status s = DB::Open(opts, dbname, &db);
db->Put(WriteOptions(), "a", "123");
}
and printed some debug info. The results look good. Any suggestion for such a unit-test?
Reviewers: haobo, dhruba, emayanke, jpaton
Reviewed By: dhruba
CC: leveldb, zshao
Differential Revision: https://reviews.facebook.net/D11799
2013-07-31 19:42:23 +00:00
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2014-01-08 23:06:07 +00:00
|
|
|
return block_size;
|
|
|
|
}
|
|
|
|
|
2014-01-31 01:18:17 +00:00
|
|
|
// Construct an arena whose blocks are OptimizeBlockSize(block_size) bytes.
Arena::Arena(size_t block_size) : kBlockSize(OptimizeBlockSize(block_size)) {
  // OptimizeBlockSize() must have established all three invariants.
  assert(kBlockSize >= kMinBlockSize);
  assert(kBlockSize <= kMaxBlockSize);
  assert(kBlockSize % kAlignUnit == 0);
}
|
|
|
|
|
2014-01-31 01:18:17 +00:00
|
|
|
// Release everything the arena owns: heap blocks first, then any
// mmap'ed huge-page regions.
Arena::~Arena() {
  for (auto& block : blocks_) {
    delete[] block;
  }
  for (const auto& mmap_info : huge_blocks_) {
    if (munmap(mmap_info.addr_, mmap_info.length_) != 0) {
      // munmap failed; nothing useful to do here.
      // TODO(sdong): Better handling
    }
  }
}
|
|
|
|
|
2014-01-31 01:18:17 +00:00
|
|
|
char* Arena::AllocateFallback(size_t bytes, bool aligned) {
|
2014-01-08 23:06:07 +00:00
|
|
|
if (bytes > kBlockSize / 4) {
|
2014-03-12 23:40:14 +00:00
|
|
|
++irregular_block_num;
|
2011-03-18 22:37:00 +00:00
|
|
|
// Object is more than a quarter of our block size. Allocate it separately
|
|
|
|
// to avoid wasting too much space in leftover bytes.
|
2014-01-08 23:06:07 +00:00
|
|
|
return AllocateNewBlock(bytes);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// We waste the remaining space in the current block.
|
2014-01-08 23:06:07 +00:00
|
|
|
auto block_head = AllocateNewBlock(kBlockSize);
|
|
|
|
alloc_bytes_remaining_ = kBlockSize - bytes;
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2014-01-08 23:06:07 +00:00
|
|
|
if (aligned) {
|
|
|
|
aligned_alloc_ptr_ = block_head + bytes;
|
|
|
|
unaligned_alloc_ptr_ = block_head + kBlockSize;
|
|
|
|
return block_head;
|
|
|
|
} else {
|
|
|
|
aligned_alloc_ptr_ = block_head;
|
|
|
|
unaligned_alloc_ptr_ = block_head + kBlockSize - bytes;
|
|
|
|
return unaligned_alloc_ptr_;
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2014-05-04 22:52:23 +00:00
|
|
|
char* Arena::AllocateAligned(size_t bytes, size_t huge_page_tlb_size,
|
|
|
|
Logger* logger) {
|
2014-01-08 23:06:07 +00:00
|
|
|
assert((kAlignUnit & (kAlignUnit - 1)) ==
|
|
|
|
0); // Pointer size should be a power of 2
|
2014-05-04 20:55:53 +00:00
|
|
|
|
|
|
|
#ifdef OS_LINUX
|
|
|
|
if (huge_page_tlb_size > 0 && bytes > 0) {
|
|
|
|
// Allocate from a huge page TBL table.
|
2014-05-04 22:52:23 +00:00
|
|
|
assert(logger != nullptr); // logger need to be passed in.
|
2014-05-04 20:55:53 +00:00
|
|
|
size_t reserved_size =
|
|
|
|
((bytes - 1U) / huge_page_tlb_size + 1U) * huge_page_tlb_size;
|
|
|
|
assert(reserved_size >= bytes);
|
|
|
|
void* addr = mmap(nullptr, reserved_size, (PROT_READ | PROT_WRITE),
|
|
|
|
(MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB), 0, 0);
|
|
|
|
if (addr == MAP_FAILED) {
|
2014-05-04 22:52:23 +00:00
|
|
|
Warn(logger, "AllocateAligned fail to allocate huge TLB pages: %s",
|
|
|
|
strerror(errno));
|
2014-05-04 20:55:53 +00:00
|
|
|
// fail back to malloc
|
|
|
|
} else {
|
|
|
|
blocks_memory_ += reserved_size;
|
|
|
|
huge_blocks_.push_back(MmapInfo(addr, reserved_size));
|
|
|
|
return reinterpret_cast<char*>(addr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2014-01-08 23:06:07 +00:00
|
|
|
size_t current_mod =
|
|
|
|
reinterpret_cast<uintptr_t>(aligned_alloc_ptr_) & (kAlignUnit - 1);
|
|
|
|
size_t slop = (current_mod == 0 ? 0 : kAlignUnit - current_mod);
|
2011-03-18 22:37:00 +00:00
|
|
|
size_t needed = bytes + slop;
|
|
|
|
char* result;
|
|
|
|
if (needed <= alloc_bytes_remaining_) {
|
2014-01-08 23:06:07 +00:00
|
|
|
result = aligned_alloc_ptr_ + slop;
|
|
|
|
aligned_alloc_ptr_ += needed;
|
2011-03-18 22:37:00 +00:00
|
|
|
alloc_bytes_remaining_ -= needed;
|
|
|
|
} else {
|
|
|
|
// AllocateFallback always returned aligned memory
|
2014-01-08 23:06:07 +00:00
|
|
|
result = AllocateFallback(bytes, true /* aligned */);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2014-01-08 23:06:07 +00:00
|
|
|
assert((reinterpret_cast<uintptr_t>(result) & (kAlignUnit - 1)) == 0);
|
2011-03-18 22:37:00 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2014-01-31 01:18:17 +00:00
|
|
|
char* Arena::AllocateNewBlock(size_t block_bytes) {
|
2014-01-08 23:06:07 +00:00
|
|
|
char* block = new char[block_bytes];
|
2011-03-18 22:37:00 +00:00
|
|
|
blocks_memory_ += block_bytes;
|
2014-01-08 23:06:07 +00:00
|
|
|
blocks_.push_back(block);
|
|
|
|
return block;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
} // namespace rocksdb
|