diff --git a/memory/arena.cc b/memory/arena.cc
index d42868edf0..0a920203dc 100644
--- a/memory/arena.cc
+++ b/memory/arena.cc
@@ -143,9 +143,10 @@ char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
 }
 
 char* Arena::AllocateNewBlock(size_t block_bytes) {
-  auto uniq = std::make_unique<char[]>(block_bytes);
-  char* block = uniq.get();
-  blocks_.push_back(std::move(uniq));
+  // NOTE: std::make_unique zero-initializes the block so is not appropriate
+  // here
+  char* block = new char[block_bytes];
+  blocks_.push_back(std::unique_ptr<char[]>(block));
 
   size_t allocated_size;
 #ifdef ROCKSDB_MALLOC_USABLE_SIZE
diff --git a/memory/arena_test.cc b/memory/arena_test.cc
index 30887c23a0..21bf7ed628 100644
--- a/memory/arena_test.cc
+++ b/memory/arena_test.cc
@@ -256,6 +256,36 @@ TEST(MmapTest, AllocateLazyZeroed) {
   }
 }
 
+TEST_F(ArenaTest, UnmappedAllocation) {
+  // Verify that it's possible to get unmapped pages in large allocations,
+  // for memory efficiency and to ensure we don't accidentally waste time &
+  // space initializing the memory.
+  constexpr size_t kBlockSize = 2U << 20;
+  Arena arena(kBlockSize);
+
+  // The allocator might give us back recycled memory for a while, but
+  // shouldn't last forever.
+  for (int i = 0;; ++i) {
+    char* p = arena.Allocate(kBlockSize);
+
+    // Start counting page faults
+    PopMinorPageFaultCount();
+
+    // Overwrite the whole allocation
+    for (size_t j = 0; j < kBlockSize; ++j) {
+      p[j] = static_cast<char>(j & 255);
+    }
+
+    size_t faults = PopMinorPageFaultCount();
+    if (faults >= kBlockSize * 3 / 4 / port::kPageSize) {
+      // Most of the access generated page faults => GOOD
+      break;
+    }
+    // Should have succeeded after enough tries
+    ASSERT_LT(i, 1000);
+  }
+}
+
 }  // namespace ROCKSDB_NAMESPACE
 
 int main(int argc, char** argv) {
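
For context (a standalone sketch, not part of the patch above): the NOTE in AllocateNewBlock hinges on the fact that std::make_unique<char[]>(n) value-initializes the array, writing a zero into every byte and thereby touching every page, whereas plain new char[n] leaves the bytes indeterminate, so freshly obtained pages can typically stay unmapped until first use. The snippet below only illustrates that distinction; the 2 MiB size mirrors the test's kBlockSize, and the names and output are illustrative rather than taken from RocksDB. On C++20, std::make_unique_for_overwrite<char[]>(n) would be another way to get default-initialized storage under unique_ptr ownership.

#include <cstddef>
#include <cstdio>
#include <memory>

int main() {
  constexpr std::size_t n = 2U << 20;  // 2 MiB, matching the test's kBlockSize

  // Value-initialized: equivalent to `new char[n]()`; every byte is zeroed,
  // which touches (and maps) every page up front.
  std::unique_ptr<char[]> zeroed = std::make_unique<char[]>(n);

  // Default-initialized: contents are indeterminate; the OS can usually defer
  // mapping physical pages until the program actually writes to them.
  std::unique_ptr<char[]> uninit(new char[n]);

  // Only the zeroed buffer has a guaranteed value before being written.
  std::printf("zeroed[0] == %d (guaranteed)\n", static_cast<int>(zeroed[0]));

  // Touch the uninitialized buffer the way the test does; this is what
  // finally forces its pages to be mapped.
  for (std::size_t j = 0; j < n; ++j) {
    uninit[j] = static_cast<char>(j & 255);
  }
  return 0;
}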