Mirror of https://github.com/facebook/rocksdb.git, synced 2024-11-25 14:31:35 +00:00
8367f0d2d7
Summary: The motivation for this change is a planned feature (related to HyperClockCache) that will depend on a large array that can essentially grow automatically, up to some bound, without the pointer address changing and with guaranteed zero-initialization of the data. Anonymous mmaps provide such functionality, and this change provides an internal API for that.

The other existing use of anonymous mmap in RocksDB is for allocating in huge pages. That code and other related Arena code used some awkward non-RAII and pre-C++11 idioms, so I cleaned up much of that as well, with RAII, move semantics, constexpr, etc.

More specifics:
* Minimize conditional compilation
* Add Windows support for anonymous mmaps
* Use std::deque instead of std::vector for a more efficient bag

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10810

Test Plan: unit test added for new functionality

Reviewed By: riversand963

Differential Revision: D40347204

Pulled By: pdillinger

fbshipit-source-id: ca83fcc47e50fabf7595069380edd2954f4f879c
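To illustrate the property the summary relies on, here is a minimal POSIX-only sketch of an RAII wrapper around an anonymous mmap (this is an illustration, not the MemMapping API the change actually adds via port/mmap.h): the full bound is reserved up front, physical pages are materialized lazily and zero-filled by the kernel, and the base address never changes.

#include <sys/mman.h>

#include <cstddef>
#include <new>

// Move-only RAII owner of an anonymous mapping (POSIX; the real change also
// covers Windows through port/mmap.h).
class AnonMapping {
 public:
  explicit AnonMapping(size_t max_bytes) : len_(max_bytes) {
    // MAP_NORESERVE: reserve address space only; physical pages appear
    // lazily on first touch, already zero-initialized.
    addr_ = mmap(nullptr, len_, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    if (addr_ == MAP_FAILED) {
      throw std::bad_alloc();
    }
  }
  AnonMapping(AnonMapping&& other) noexcept
      : addr_(other.addr_), len_(other.len_) {
    other.addr_ = nullptr;
    other.len_ = 0;
  }
  AnonMapping(const AnonMapping&) = delete;
  AnonMapping& operator=(const AnonMapping&) = delete;
  ~AnonMapping() {
    if (addr_ != nullptr) {
      munmap(addr_, len_);
    }
  }
  char* data() const { return static_cast<char*>(addr_); }
  size_t size() const { return len_; }

 private:
  void* addr_ = nullptr;
  size_t len_ = 0;
};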
136 lines
5.1 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// Arena is an implementation of the Allocator class. For a small request, it
// allocates from a block of a pre-defined block size. For a big request, it
// uses malloc to directly get the requested size.

#pragma once

#include <cstddef>
#include <deque>

#include "memory/allocator.h"
#include "port/mmap.h"
#include "rocksdb/env.h"

namespace ROCKSDB_NAMESPACE {

class Arena : public Allocator {
 public:
  // No copying allowed
  Arena(const Arena&) = delete;
  void operator=(const Arena&) = delete;

  static constexpr size_t kInlineSize = 2048;
  static constexpr size_t kMinBlockSize = 4096;
  static constexpr size_t kMaxBlockSize = 2u << 30;

  static constexpr unsigned kAlignUnit = alignof(std::max_align_t);
  static_assert((kAlignUnit & (kAlignUnit - 1)) == 0,
                "Pointer size should be power of 2");

  // huge_page_size: if 0, don't use huge page TLB. If > 0 (should be set to
  // the supported huge page size of the system), block allocation will try
  // huge page TLB first. If that allocation fails, it will fall back to the
  // normal case.
  explicit Arena(size_t block_size = kMinBlockSize,
                 AllocTracker* tracker = nullptr, size_t huge_page_size = 0);
  ~Arena();

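  // Example (illustrative, not from this header): an Arena with 64 KiB
  // blocks that attempts 2 MiB huge pages (a typical x86-64 huge page size)
  // and silently falls back to normal pages if none are reserved:
  //
  //   Arena arena(/*block_size=*/64 * 1024, /*tracker=*/nullptr,
  //               /*huge_page_size=*/2 * 1024 * 1024);
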
  char* Allocate(size_t bytes) override;

  // huge_page_size: if > 0, will try to allocate from huge page TLB.
  // The argument is the huge page size of the system. Bytes will be rounded
  // up to a multiple of that page size so the allocation can go through
  // anonymous mmap with the huge page option on; the extra space allocated
  // is wasted. If the allocation fails, it will fall back to the normal
  // case. To enable this, huge pages need to be reserved beforehand, e.g.:
  //   sysctl -w vm.nr_hugepages=20
  // See the Linux doc Documentation/vm/hugetlbpage.txt for details.
  // Because huge page allocation can fail and fall back to the normal case,
  // the relevant messages are logged to the logger. So when calling with
  // huge_page_size > 0, we highly recommend that a logger is passed in.
  // Otherwise, error messages will be printed to stderr directly.
  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
                        Logger* logger = nullptr) override;

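  // A minimal Linux-only sketch of the try-then-fall-back idea described
  // above (the real logic lives behind port/mmap.h, not here):
  //
  //   void* p = mmap(nullptr, rounded_up_bytes, PROT_READ | PROT_WRITE,
  //                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
  //   if (p == MAP_FAILED) {
  //     // log the failure, then retry without MAP_HUGETLB
  //   }
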
  // Returns an estimate of the total memory usage of data allocated
  // by the arena (excluding the space allocated but not yet used for
  // future allocations).
  size_t ApproximateMemoryUsage() const {
    return blocks_memory_ + blocks_.size() * sizeof(char*) -
           alloc_bytes_remaining_;
  }

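  // For example (illustrative numbers): with three 4096-byte blocks tracked
  // and 1000 bytes still unused in the active one, the estimate would be
  // 3 * 4096 + 3 * sizeof(char*) - 1000.
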
  size_t MemoryAllocatedBytes() const { return blocks_memory_; }

  size_t AllocatedAndUnused() const { return alloc_bytes_remaining_; }

  // If an allocation is too big, we'll allocate an irregular block with the
  // same size as that allocation.
  size_t IrregularBlockNum() const { return irregular_block_num; }

  size_t BlockSize() const override { return kBlockSize; }

  bool IsInInlineBlock() const {
    return blocks_.empty() && huge_blocks_.empty();
  }

  // Check and adjust the block_size so that the return value is
  // 1. in the range of [kMinBlockSize, kMaxBlockSize], and
  // 2. a multiple of the align unit.
  static size_t OptimizeBlockSize(size_t block_size);

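  // A sketch of that adjustment (an approximation from the contract above;
  // the actual implementation is in arena.cc):
  //
  //   block_size = std::min(kMaxBlockSize, std::max(kMinBlockSize, block_size));
  //   if (block_size % kAlignUnit != 0) {
  //     block_size = (1 + block_size / kAlignUnit) * kAlignUnit;
  //   }
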
 private:
  alignas(std::max_align_t) char inline_block_[kInlineSize];
  // Number of bytes allocated in one block
  const size_t kBlockSize;
  // Allocated memory blocks
  std::deque<std::unique_ptr<char[]>> blocks_;
  // Huge page allocations
  std::deque<MemMapping> huge_blocks_;
  size_t irregular_block_num = 0;

  // Stats for the current active block.
  // For each block, we allocate aligned memory chunks from one end and
  // unaligned memory chunks from the other end. Otherwise the memory waste
  // for alignment will be higher if we allocate both types of memory from
  // the same direction.
  char* unaligned_alloc_ptr_ = nullptr;
  char* aligned_alloc_ptr_ = nullptr;
  // How many bytes are left in the currently active block?
  size_t alloc_bytes_remaining_ = 0;

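  // Sketch of the padding ("slop") an aligned request pays at the aligned
  // end (an approximation; the real logic is in arena.cc):
  //
  //   size_t mod = reinterpret_cast<uintptr_t>(aligned_alloc_ptr_) &
  //                (kAlignUnit - 1);
  //   size_t slop = (mod == 0 ? 0 : kAlignUnit - mod);
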
  size_t hugetlb_size_ = 0;

  char* AllocateFromHugePage(size_t bytes);
  char* AllocateFallback(size_t bytes, bool aligned);
  char* AllocateNewBlock(size_t block_bytes);

  // Bytes of memory in blocks allocated so far
  size_t blocks_memory_ = 0;
  // Non-owned
  AllocTracker* tracker_;
};

inline char* Arena::Allocate(size_t bytes) {
  // The semantics of what to return are a bit messy if we allow
  // 0-byte allocations, so we disallow them here (we don't need
  // them for our internal use).
  assert(bytes > 0);
  if (bytes <= alloc_bytes_remaining_) {
    unaligned_alloc_ptr_ -= bytes;
    alloc_bytes_remaining_ -= bytes;
    return unaligned_alloc_ptr_;
  }
  return AllocateFallback(bytes, false /* unaligned */);
}

} // namespace ROCKSDB_NAMESPACE
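For reference, a minimal usage sketch of the API above (illustrative only; error handling and AllocTracker integration are omitted, and the byte counts are arbitrary):

#include <cstring>

#include "memory/arena.h"

using ROCKSDB_NAMESPACE::Arena;

int main() {
  Arena arena(Arena::kMinBlockSize);
  // Small request: carved from the unaligned end of the active block.
  char* a = arena.Allocate(16);
  std::memcpy(a, "hello", 6);
  // Aligned request: carved from the aligned end of the same block.
  char* b = arena.AllocateAligned(64);
  (void)b;
  // Everything is freed at once when `arena` goes out of scope.
  return 0;
}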