diff --git a/HISTORY.md b/HISTORY.md index d7fcc90165..bbe3e61a29 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -5,6 +5,7 @@ ### New Features * Added support for pipelined & parallel compression optimization for `BlockBasedTableBuilder`. This optimization makes block building, block compression and block appending a pipeline, and uses multiple threads to accelerate block compression. Users can set `CompressionOptions::parallel_threads` greater than 1 to enable compression parallelism. +* Provide an allocator for memkind to be used with block cache. This is to work with memory technologies (Intel DCPMM is one such technology currently available) that require different libraries for allocation and management (such as PMDK and memkind). The high capacities available make it possible to provision large caches (up to several TBs in size) beyond what is achievable with DRAM. ### Bug Fixes * Fix a bug when making options.bottommost_compression, options.compression_opts and options.bottommost_compression_opts dynamically changeable: the modified values are not written to option files or returned back to users when being queried. 
diff --git a/memory/memkind_kmem_allocator.cc b/memory/memkind_kmem_allocator.cc index 2f16219bd8..0ce985f7e1 100644 --- a/memory/memkind_kmem_allocator.cc +++ b/memory/memkind_kmem_allocator.cc @@ -13,7 +13,7 @@ namespace rocksdb { void* MemkindKmemAllocator::Allocate(size_t size) { void* p = memkind_malloc(MEMKIND_DAX_KMEM, size); if (p == NULL) { - throw std::bad_alloc(); + throw std::bad_alloc(); } return p; } @@ -23,7 +23,8 @@ void MemkindKmemAllocator::Deallocate(void* p) { } #ifdef ROCKSDB_MALLOC_USABLE_SIZE -size_t MemkindKmemAllocator::UsableSize(void* p, size_t /*allocation_size*/) const { +size_t MemkindKmemAllocator::UsableSize(void* p, + size_t /*allocation_size*/) const { return memkind_malloc_usable_size(MEMKIND_DAX_KMEM, p); } #endif // ROCKSDB_MALLOC_USABLE_SIZE diff --git a/memory/memkind_kmem_allocator.h b/memory/memkind_kmem_allocator.h index 1448567743..8bf3c6c723 100644 --- a/memory/memkind_kmem_allocator.h +++ b/memory/memkind_kmem_allocator.h @@ -25,4 +25,3 @@ class MemkindKmemAllocator : public MemoryAllocator { } // namespace rocksdb #endif // MEMKIND - diff --git a/memory/memkind_kmem_allocator_test.cc b/memory/memkind_kmem_allocator_test.cc index 6231dc8947..4153f0c233 100644 --- a/memory/memkind_kmem_allocator_test.cc +++ b/memory/memkind_kmem_allocator_test.cc @@ -8,11 +8,11 @@ #ifdef MEMKIND #include "memkind_kmem_allocator.h" -#include "test_util/testharness.h" #include "rocksdb/cache.h" #include "rocksdb/db.h" #include "rocksdb/options.h" #include "table/block_based/block_based_table_factory.h" +#include "test_util/testharness.h" namespace rocksdb { TEST(MemkindKmemAllocatorTest, Allocate) { @@ -44,8 +44,8 @@ TEST(MemkindKmemAllocatorTest, DatabaseBlockCache) { ASSERT_OK(DestroyDB(dbname, options)); options.create_if_missing = true; - std::shared_ptr<Cache> cache = NewLRUCache(1024 * 1024, 6, false, false, - std::make_shared<MemkindKmemAllocator>()); + std::shared_ptr<Cache> cache = NewLRUCache( 1024 * 1024, 6, false, false, std::make_shared<MemkindKmemAllocator>()); 
BlockBasedTableOptions table_options; table_options.block_cache = cache; options.table_factory.reset(NewBlockBasedTableFactory(table_options)); @@ -65,7 +65,8 @@ TEST(MemkindKmemAllocatorTest, DatabaseBlockCache) { s = db->Put(wo, Slice(key), Slice(val)); ASSERT_OK(s); } - ASSERT_OK(db->Flush(FlushOptions())); // Flush all data from memtable so that reads are from block cache + ASSERT_OK(db->Flush(FlushOptions())); // Flush all data from memtable so that + // reads are from block cache // Read and check block cache usage ReadOptions ro; @@ -93,7 +94,9 @@ int main(int argc, char** argv) { #else int main(int /*argc*/, char** /*argv*/) { - printf("Skip memkind_kmem_allocator_test as the required library memkind is missing."); + printf( "Skip memkind_kmem_allocator_test as the required library memkind is " "missing."); } #endif // MEMKIND diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index 3726081da6..e7e71d118e 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -2638,7 +2638,7 @@ class Benchmark { } return cache; } else { - if(FLAGS_use_cache_memkind_kmem_allocator) { + if (FLAGS_use_cache_memkind_kmem_allocator) { #ifdef MEMKIND return NewLRUCache( static_cast<size_t>(capacity), FLAGS_cache_numshardbits, diff --git a/utilities/transactions/transaction_lock_mgr_test.cc b/utilities/transactions/transaction_lock_mgr_test.cc index e66758692a..f42caed86e 100644 --- a/utilities/transactions/transaction_lock_mgr_test.cc +++ b/utilities/transactions/transaction_lock_mgr_test.cc @@ -5,12 +5,12 @@ #ifndef ROCKSDB_LITE +#include "utilities/transactions/transaction_lock_mgr.h" #include "port/port.h" #include "port/stack_trace.h" +#include "rocksdb/utilities/transaction_db.h" #include "test_util/testharness.h" #include "test_util/testutil.h" -#include "rocksdb/utilities/transaction_db.h" -#include "utilities/transactions/transaction_lock_mgr.h" #include "utilities/transactions/transaction_db_mutex_impl.h" namespace ROCKSDB_NAMESPACE { @@ -29,9 +29,9 @@ 
class TransactionLockMgrTest : public testing::Test { txn_opt.transaction_lock_timeout = 0; ASSERT_OK(TransactionDB::Open(opt, txn_opt, db_dir_, &db_)); - locker_.reset(new TransactionLockMgr( - db_, txn_opt.num_stripes, txn_opt.max_num_locks, - txn_opt.max_num_deadlocks, mutex_factory_)); + locker_.reset( + new TransactionLockMgr(db_, txn_opt.num_stripes, txn_opt.max_num_locks, + txn_opt.max_num_deadlocks, mutex_factory_)); } void TearDown() override {