rocksdb/memory/arena.cc
Peter Dillinger 8367f0d2d7 Improve / refactor anonymous mmap capabilities (#10810)
Summary:
The motivation for this change is a planned feature (related to HyperClockCache) that will depend on a large array that can essentially grow automatically, up to some bound, without the pointer address changing and with guaranteed zero-initialization of the data. Anonymous mmaps provide such functionality, and this change provides an internal API for that.
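A minimal sketch of the general technique (plain POSIX mmap, not the new
internal RocksDB API itself; MAP_NORESERVE behavior here is Linux-specific):

    #include <sys/mman.h>

    // Reserve a large anonymous mapping up front; physical pages are only
    // committed (and demand-zeroed) when first touched, so the region
    // effectively "grows" in place up to max_size.
    size_t max_size = size_t{1} << 30;  // fixed upper bound
    void* base = mmap(nullptr, max_size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    // base never changes, and reads of untouched pages return zeros;
    // note mmap reports failure with MAP_FAILED, not nullptr.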

The other existing use of anonymous mmap in RocksDB is for allocating in huge pages. That code and other related Arena code used some awkward non-RAII and pre-C++11 idioms, so I cleaned up much of that as well, with RAII, move semantics, constexpr, etc.

More specifics:
* Minimize conditional compilation
* Add Windows support for anonymous mmaps
* Use std::deque instead of std::vector for a more efficient bag (see the sketch below)
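
On the std::deque point: deque never relocates existing elements on
push_back, so it can hold move-only RAII handles such as MemMapping without
the wholesale element moves a std::vector performs when it reallocates:

    std::deque<MemMapping> huge_blocks_;
    // ...
    huge_blocks_.push_back(std::move(mm));  // earlier entries stay in place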

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10810

Test Plan: unit test added for new functionality

Reviewed By: riversand963

Differential Revision: D40347204

Pulled By: pdillinger

fbshipit-source-id: ca83fcc47e50fabf7595069380edd2954f4f879c
2022-10-17 17:10:16 -07:00


// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "memory/arena.h"
#include <algorithm>
#include "logging/logging.h"
#include "port/malloc.h"
#include "port/port.h"
#include "rocksdb/env.h"
#include "test_util/sync_point.h"
#include "util/string_util.h"
namespace ROCKSDB_NAMESPACE {
size_t Arena::OptimizeBlockSize(size_t block_size) {
  // Make sure block_size is in optimal range
  block_size = std::max(Arena::kMinBlockSize, block_size);
  block_size = std::min(Arena::kMaxBlockSize, block_size);

  // make sure block_size is the multiple of kAlignUnit
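  // (e.g. if kAlignUnit were 8, a block_size of 4100 would round up to 4104)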
  if (block_size % kAlignUnit != 0) {
    block_size = (1 + block_size / kAlignUnit) * kAlignUnit;
  }

  return block_size;
}

Arena::Arena(size_t block_size, AllocTracker* tracker, size_t huge_page_size)
    : kBlockSize(OptimizeBlockSize(block_size)), tracker_(tracker) {
  assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize &&
         kBlockSize % kAlignUnit == 0);
  TEST_SYNC_POINT_CALLBACK("Arena::Arena:0", const_cast<size_t*>(&kBlockSize));
  alloc_bytes_remaining_ = sizeof(inline_block_);
  blocks_memory_ += alloc_bytes_remaining_;
  aligned_alloc_ptr_ = inline_block_;
  unaligned_alloc_ptr_ = inline_block_ + alloc_bytes_remaining_;
  if (MemMapping::kHugePageSupported) {
    hugetlb_size_ = huge_page_size;
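    // Round hugetlb_size_ up to the multiple of huge_page_size that covers a
    // whole block, e.g. a 3 MiB kBlockSize with 2 MiB huge pages yields
    // 4 MiB mappings.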
    if (hugetlb_size_ && kBlockSize > hugetlb_size_) {
      hugetlb_size_ = ((kBlockSize - 1U) / hugetlb_size_ + 1U) * hugetlb_size_;
    }
  }
  if (tracker_ != nullptr) {
    tracker_->Allocate(kInlineSize);
  }
}

Arena::~Arena() {
  if (tracker_ != nullptr) {
    assert(tracker_->is_freed());
    tracker_->FreeMem();
  }
}

char* Arena::AllocateFallback(size_t bytes, bool aligned) {
  if (bytes > kBlockSize / 4) {
    ++irregular_block_num;
    // Object is more than a quarter of our block size. Allocate it separately
    // to avoid wasting too much space in leftover bytes.
    return AllocateNewBlock(bytes);
  }

  // We waste the remaining space in the current block.
  size_t size = 0;
  char* block_head = nullptr;
  if (MemMapping::kHugePageSupported && hugetlb_size_ > 0) {
    size = hugetlb_size_;
    block_head = AllocateFromHugePage(size);
  }
  if (!block_head) {
    size = kBlockSize;
    block_head = AllocateNewBlock(size);
  }
  alloc_bytes_remaining_ = size - bytes;
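  // A fresh block serves both kinds of requests: aligned allocations are
  // carved from the front and unaligned allocations from the back, so the
  // two ends share one block without extra padding between them.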
  if (aligned) {
    aligned_alloc_ptr_ = block_head + bytes;
    unaligned_alloc_ptr_ = block_head + size;
    return block_head;
  } else {
    aligned_alloc_ptr_ = block_head;
    unaligned_alloc_ptr_ = block_head + size - bytes;
    return unaligned_alloc_ptr_;
  }
}

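// MemMapping::AllocateHuge wraps an anonymous huge-page mapping in an RAII
// handle. As a rough sketch of the underlying idea (an assumption about the
// implementation; on Linux this is approximately):
//
//   void* addr = mmap(nullptr, length, PROT_READ | PROT_WRITE,
//                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
//
// On failure mmap returns MAP_FAILED, and MemMapping::Get() yields nullptr.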
char* Arena::AllocateFromHugePage(size_t bytes) {
  MemMapping mm = MemMapping::AllocateHuge(bytes);
  auto addr = static_cast<char*>(mm.Get());
  if (addr) {
    huge_blocks_.push_back(std::move(mm));
    blocks_memory_ += bytes;
    if (tracker_ != nullptr) {
      tracker_->Allocate(bytes);
    }
  }
  return addr;
}

char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
                             Logger* logger) {
  if (MemMapping::kHugePageSupported && hugetlb_size_ > 0 &&
      huge_page_size > 0 && bytes > 0) {
    // Allocate from a huge page TLB table.
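    // Round the request up to a whole number of huge pages, e.g. with
    // 2 MiB pages a 3 MiB request reserves 4 MiB.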
    size_t reserved_size =
        ((bytes - 1U) / huge_page_size + 1U) * huge_page_size;
    assert(reserved_size >= bytes);
    char* addr = AllocateFromHugePage(reserved_size);
    if (addr == nullptr) {
      ROCKS_LOG_WARN(logger,
                     "AllocateAligned fail to allocate huge TLB pages: %s",
                     errnoStr(errno).c_str());
      // fall back to malloc
    } else {
      return addr;
    }
  }

  size_t current_mod =
      reinterpret_cast<uintptr_t>(aligned_alloc_ptr_) & (kAlignUnit - 1);
  size_t slop = (current_mod == 0 ? 0 : kAlignUnit - current_mod);
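  // e.g. with kAlignUnit == 8, a pointer ending in 0x...5 gives
  // current_mod == 5, so slop == 3 restores 8-byte alignment.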
  size_t needed = bytes + slop;
  char* result;
  if (needed <= alloc_bytes_remaining_) {
    result = aligned_alloc_ptr_ + slop;
    aligned_alloc_ptr_ += needed;
    alloc_bytes_remaining_ -= needed;
  } else {
    // AllocateFallback always returns aligned memory
    result = AllocateFallback(bytes, true /* aligned */);
  }
  assert((reinterpret_cast<uintptr_t>(result) & (kAlignUnit - 1)) == 0);
  return result;
}

char* Arena::AllocateNewBlock(size_t block_bytes) {
  auto uniq = std::make_unique<char[]>(block_bytes);
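  // make_unique<char[]> value-initializes, so new blocks come back zeroed.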
  char* block = uniq.get();
  blocks_.push_back(std::move(uniq));

  size_t allocated_size;
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
  allocated_size = malloc_usable_size(block);
#ifndef NDEBUG
  // It's hard to predict what malloc_usable_size() returns.
  // A callback can allow users to change the costed size.
  std::pair<size_t*, size_t*> pair(&allocated_size, &block_bytes);
  TEST_SYNC_POINT_CALLBACK("Arena::AllocateNewBlock:0", &pair);
#endif  // NDEBUG
#else
  allocated_size = block_bytes;
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
  blocks_memory_ += allocated_size;
  if (tracker_ != nullptr) {
    tracker_->Allocate(allocated_size);
  }
  return block;
}

}  // namespace ROCKSDB_NAMESPACE