// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <cassert>

#include "memory/allocator.h"
#include "memory/arena.h"
#include "rocksdb/write_buffer_manager.h"

namespace ROCKSDB_NAMESPACE {
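
// AllocTracker accumulates the number of bytes handed out by a memtable
// allocator and reports them to the WriteBufferManager (when one is in use)
// so that memtable memory can be charged and later released. It is typically
// owned by a memtable and passed to its Arena, which calls Allocate() as it
// hands out memory.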
AllocTracker::AllocTracker(WriteBufferManager* write_buffer_manager)
    : write_buffer_manager_(write_buffer_manager),
      bytes_allocated_(0),
      done_allocating_(false),
      freed_(false) {}

AllocTracker::~AllocTracker() { FreeMem(); }
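
// Track an allocation of `bytes`. When the WriteBufferManager is enabled or
// charges memtable memory to the block cache, the bytes are added to the
// local counter and reserved with the manager; otherwise the update is
// skipped entirely, since maintaining the atomic counter is costly under
// concurrent memtable inserts (skipping it improved parallel insert
// throughput by roughly 10% in the original benchmark).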
void AllocTracker::Allocate(size_t bytes) {
  assert(write_buffer_manager_ != nullptr);
  if (write_buffer_manager_->enabled() ||
      write_buffer_manager_->cost_to_cache()) {
    bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
    write_buffer_manager_->ReserveMem(bytes);
  }
}
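
// Signal that this tracker will not record any further allocations. If the
// WriteBufferManager is in use, the accumulated bytes are scheduled to be
// freed; the actual release happens later in FreeMem(). Only the first call
// has an effect.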
void AllocTracker::DoneAllocating() {
  if (write_buffer_manager_ != nullptr && !done_allocating_) {
    if (write_buffer_manager_->enabled() ||
        write_buffer_manager_->cost_to_cache()) {
      write_buffer_manager_->ScheduleFreeMem(
          bytes_allocated_.load(std::memory_order_relaxed));
    } else {
      assert(bytes_allocated_.load(std::memory_order_relaxed) == 0);
    }
    done_allocating_ = true;
  }
}
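
// Release the tracked bytes back to the WriteBufferManager. Calls
// DoneAllocating() first if it has not run yet; also invoked from the
// destructor. Only the first call has an effect.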
void AllocTracker::FreeMem() {
  if (!done_allocating_) {
    DoneAllocating();
  }
  if (write_buffer_manager_ != nullptr && !freed_) {
    if (write_buffer_manager_->enabled() ||
        write_buffer_manager_->cost_to_cache()) {
      write_buffer_manager_->FreeMem(
          bytes_allocated_.load(std::memory_order_relaxed));
    } else {
      assert(bytes_allocated_.load(std::memory_order_relaxed) == 0);
    }
    freed_ = true;
  }
}
} // namespace ROCKSDB_NAMESPACE