Mirror of https://github.com/facebook/rocksdb.git (synced 2024-11-26 16:30:56 +00:00)
Get rid of some shared_ptrs
Summary: I went through all remaining shared_ptrs and removed the ones I found unnecessary. Only GenerateCachePrefix() is called fairly often, so don't expect much of a perf win. The ones that are left are accessed infrequently, and I think we're fine keeping them.

Test Plan: make asan_check

Reviewers: dhruba, haobo

Reviewed By: dhruba

CC: leveldb

Differential Revision: https://reviews.facebook.net/D14427
This commit is contained in:
Parent: 930cb0b9ee
Commit: 043fc14c3e
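For context, every hunk below follows one pattern: ownership of objects such as the memtable factory, the info log, statistics, and the block caches stays with the shared_ptr held in Options, while classes that merely use those objects now receive a raw pointer obtained via .get(). The sketch below is a minimal, self-contained illustration of that pattern; the definitions of Logger, Options, and DBIterSketch are simplified stand-ins written for this example, not the actual RocksDB declarations.

// Minimal sketch (hypothetical, simplified types): the shared_ptr in Options
// keeps ownership; the consumer holds only a non-owning raw pointer.
#include <iostream>
#include <memory>
#include <string>

struct Logger {
  void Log(const std::string& msg) { std::cout << msg << "\n"; }
};

struct Options {
  std::shared_ptr<Logger> info_log;  // owning handle, lives as long as Options
};

class DBIterSketch {
 public:
  // Takes a raw pointer: the iterator observes the logger but does not
  // participate in its lifetime management.
  explicit DBIterSketch(Logger* logger) : logger_(logger) {}

  void Next() {
    if (logger_ != nullptr) {
      logger_->Log("advancing iterator");
    }
  }

 private:
  Logger* logger_;  // non-owning; no refcount traffic when passed around
};

int main() {
  Options options;
  options.info_log = std::make_shared<Logger>();

  // Safe because `options` (and thus the shared_ptr) outlives the iterator;
  // passing info_log.get() avoids an extra shared_ptr copy.
  DBIterSketch iter(options.info_log.get());
  iter.Next();
  return 0;
}

Passing the raw pointer documents that the callee does not own the object and avoids refcount overhead on frequently exercised paths; it relies on the invariant that Options outlives every component holding the pointer, which is the assumption this commit depends on.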
@@ -235,7 +235,7 @@ DBImpl::DBImpl(const Options& options, const std::string& dbname)
       mutex_(options.use_adaptive_mutex),
       shutting_down_(nullptr),
       bg_cv_(&mutex_),
-      mem_rep_factory_(options_.memtable_factory),
+      mem_rep_factory_(options_.memtable_factory.get()),
       mem_(new MemTable(internal_comparator_, mem_rep_factory_,
                         NumberLevels(), options_)),
       logfile_number_(0),
@@ -315,7 +315,7 @@ class DBImpl : public DB {
   port::Mutex mutex_;
   port::AtomicPointer shutting_down_;
   port::CondVar bg_cv_;          // Signalled when background work finishes
-  std::shared_ptr<MemTableRepFactory> mem_rep_factory_;
+  MemTableRepFactory* mem_rep_factory_;
   MemTable* mem_;
   MemTableList imm_;             // Memtable that are not changing
   uint64_t logfile_number_;
@@ -61,7 +61,7 @@ class DBIter: public Iterator {
          const Comparator* cmp, Iterator* iter, SequenceNumber s)
       : dbname_(dbname),
         env_(env),
-        logger_(options.info_log),
+        logger_(options.info_log.get()),
         user_comparator_(cmp),
         user_merge_operator_(options.merge_operator.get()),
         iter_(iter),
@@ -122,7 +122,7 @@ class DBIter: public Iterator {
   const std::string* const dbname_;
   Env* const env_;
-  shared_ptr<Logger> logger_;
+  Logger* logger_;
   const Comparator* const user_comparator_;
   const MergeOperator* const user_merge_operator_;
   Iterator* const iter_;
@@ -293,7 +293,7 @@ void DBIter::MergeValuesNewToOld() {
         // ignore corruption if there is any.
         const Slice value = iter_->value();
         user_merge_operator_->FullMerge(ikey.user_key, &value, operands,
-                                        &saved_value_, logger_.get());
+                                        &saved_value_, logger_);
         // iter_ is positioned after put
         iter_->Next();
         return;
@@ -310,7 +310,7 @@ void DBIter::MergeValuesNewToOld() {
                                          Slice(operands[0]),
                                          Slice(operands[1]),
                                          &merge_result,
-                                         logger_.get())) {
+                                         logger_)) {
       operands.pop_front();
       swap(operands.front(), merge_result);
     } else {
@@ -327,7 +327,7 @@ void DBIter::MergeValuesNewToOld() {
   // feed null as the existing value to the merge operator, such that
   // client can differentiate this scenario and do things accordingly.
   user_merge_operator_->FullMerge(saved_key_, nullptr, operands,
-                                  &saved_value_, logger_.get());
+                                  &saved_value_, logger_);
 }
 
 void DBIter::Prev() {
@@ -33,7 +33,7 @@ struct hash<rocksdb::Slice> {
 namespace rocksdb {
 
 MemTable::MemTable(const InternalKeyComparator& cmp,
-                   std::shared_ptr<MemTableRepFactory> table_factory,
+                   MemTableRepFactory* table_factory,
                    int numlevel,
                    const Options& options)
     : comparator_(cmp),
@@ -274,7 +274,7 @@ bool MemTable::Update(SequenceNumber seq, ValueType type,
   Slice memkey = lkey.memtable_key();
 
   std::shared_ptr<MemTableRep::Iterator> iter(
-    table_.get()->GetIterator(lkey.user_key()));
+    table_->GetIterator(lkey.user_key()));
   iter->Seek(memkey.data());
 
   if (iter->Valid()) {
@@ -35,7 +35,7 @@ class MemTable {
   // is zero and the caller must call Ref() at least once.
   explicit MemTable(
       const InternalKeyComparator& comparator,
-      std::shared_ptr<MemTableRepFactory> table_factory,
+      MemTableRepFactory* table_factory,
       int numlevel = 7,
       const Options& options = Options());
 
@@ -196,7 +196,7 @@ class Repairer {
     std::string scratch;
     Slice record;
     WriteBatch batch;
-    MemTable* mem = new MemTable(icmp_, options_.memtable_factory,
+    MemTable* mem = new MemTable(icmp_, options_.memtable_factory.get(),
                                  options_.num_levels);
     mem->Ref();
     int counter = 0;
@@ -22,7 +22,7 @@ namespace rocksdb {
 static std::string PrintContents(WriteBatch* b) {
   InternalKeyComparator cmp(BytewiseComparator());
   auto factory = std::make_shared<SkipListFactory>();
-  MemTable* mem = new MemTable(cmp, factory);
+  MemTable* mem = new MemTable(cmp, factory.get());
   mem->Ref();
   std::string state;
   Options options;
@@ -127,9 +127,10 @@ BlockBasedTableBuilder::BlockBasedTableBuilder(
     rep_->filter_block->StartBlock(0);
   }
   if (options.block_cache_compressed.get() != nullptr) {
-    BlockBasedTable::GenerateCachePrefix(options.block_cache_compressed, file,
-                                         &rep_->compressed_cache_key_prefix[0],
-                                         &rep_->compressed_cache_key_prefix_size);
+    BlockBasedTable::GenerateCachePrefix(
+        options.block_cache_compressed.get(), file,
+        &rep_->compressed_cache_key_prefix[0],
+        &rep_->compressed_cache_key_prefix_size);
   }
 }
 
@@ -95,18 +95,18 @@ void BlockBasedTable::SetupCacheKeyPrefix(Rep* rep) {
   rep->cache_key_prefix_size = 0;
   rep->compressed_cache_key_prefix_size = 0;
   if (rep->options.block_cache != nullptr) {
-    GenerateCachePrefix(rep->options.block_cache, rep->file.get(),
+    GenerateCachePrefix(rep->options.block_cache.get(), rep->file.get(),
                         &rep->cache_key_prefix[0],
                         &rep->cache_key_prefix_size);
   }
   if (rep->options.block_cache_compressed != nullptr) {
-    GenerateCachePrefix(rep->options.block_cache_compressed, rep->file.get(),
-                        &rep->compressed_cache_key_prefix[0],
+    GenerateCachePrefix(rep->options.block_cache_compressed.get(),
+                        rep->file.get(), &rep->compressed_cache_key_prefix[0],
                         &rep->compressed_cache_key_prefix_size);
   }
 }
 
-void BlockBasedTable::GenerateCachePrefix(shared_ptr<Cache> cc,
+void BlockBasedTable::GenerateCachePrefix(Cache* cc,
     RandomAccessFile* file, char* buffer, size_t* size) {
 
   // generate an id from the file
@@ -120,7 +120,7 @@ void BlockBasedTable::GenerateCachePrefix(shared_ptr<Cache> cc,
   }
 }
 
-void BlockBasedTable::GenerateCachePrefix(shared_ptr<Cache> cc,
+void BlockBasedTable::GenerateCachePrefix(Cache* cc,
     WritableFile* file, char* buffer, size_t* size) {
 
   // generate an id from the file
@@ -167,9 +167,9 @@ class BlockBasedTable : public TableReader {
     rep_ = rep;
   }
   // Generate a cache key prefix from the file
-  static void GenerateCachePrefix(shared_ptr<Cache> cc,
+  static void GenerateCachePrefix(Cache* cc,
       RandomAccessFile* file, char* buffer, size_t* size);
-  static void GenerateCachePrefix(shared_ptr<Cache> cc,
+  static void GenerateCachePrefix(Cache* cc,
       WritableFile* file, char* buffer, size_t* size);
 
   // The longest prefix of the cache key used to identify blocks.
@@ -370,7 +370,7 @@ class MemTableConstructor: public Constructor {
       : Constructor(cmp),
         internal_comparator_(cmp),
         table_factory_(new SkipListFactory) {
-    memtable_ = new MemTable(internal_comparator_, table_factory_);
+    memtable_ = new MemTable(internal_comparator_, table_factory_.get());
     memtable_->Ref();
   }
   ~MemTableConstructor() {
@@ -378,7 +378,7 @@ class MemTableConstructor: public Constructor {
   }
   virtual Status FinishImpl(const Options& options, const KVMap& data) {
     delete memtable_->Unref();
-    memtable_ = new MemTable(internal_comparator_, table_factory_);
+    memtable_ = new MemTable(internal_comparator_, table_factory_.get());
     memtable_->Ref();
     int seq = 1;
     for (KVMap::const_iterator it = data.begin();
@@ -930,19 +930,19 @@ TEST(TableTest, NumBlockStat) {
 
 class BlockCacheProperties {
  public:
-  explicit BlockCacheProperties(std::shared_ptr<Statistics> statistics) {
+  explicit BlockCacheProperties(Statistics* statistics) {
     block_cache_miss =
-      statistics.get()->getTickerCount(BLOCK_CACHE_MISS);
+      statistics->getTickerCount(BLOCK_CACHE_MISS);
     block_cache_hit =
-      statistics.get()->getTickerCount(BLOCK_CACHE_HIT);
+      statistics->getTickerCount(BLOCK_CACHE_HIT);
     index_block_cache_miss =
-      statistics.get()->getTickerCount(BLOCK_CACHE_INDEX_MISS);
+      statistics->getTickerCount(BLOCK_CACHE_INDEX_MISS);
     index_block_cache_hit =
-      statistics.get()->getTickerCount(BLOCK_CACHE_INDEX_HIT);
+      statistics->getTickerCount(BLOCK_CACHE_INDEX_HIT);
     data_block_cache_miss =
-      statistics.get()->getTickerCount(BLOCK_CACHE_DATA_MISS);
+      statistics->getTickerCount(BLOCK_CACHE_DATA_MISS);
     data_block_cache_hit =
-      statistics.get()->getTickerCount(BLOCK_CACHE_DATA_HIT);
+      statistics->getTickerCount(BLOCK_CACHE_DATA_HIT);
   }
 
   // Check if the fetched props matches the expected ones.
@@ -993,7 +993,7 @@ TEST(TableTest, BlockCacheTest) {
 
   // At first, no block will be accessed.
   {
-    BlockCacheProperties props(options.statistics);
+    BlockCacheProperties props(options.statistics.get());
     // index will be added to block cache.
     props.AssertEqual(
         1,  // index block miss
@@ -1006,7 +1006,7 @@ TEST(TableTest, BlockCacheTest) {
   // Only index block will be accessed
   {
     iter.reset(c.NewIterator());
-    BlockCacheProperties props(options.statistics);
+    BlockCacheProperties props(options.statistics.get());
     // NOTE: to help better highlight the "detla" of each ticker, I use
     // <last_value> + <added_value> to indicate the increment of changed
     // value; other numbers remain the same.
@@ -1021,7 +1021,7 @@ TEST(TableTest, BlockCacheTest) {
   // Only data block will be accessed
   {
     iter->SeekToFirst();
-    BlockCacheProperties props(options.statistics);
+    BlockCacheProperties props(options.statistics.get());
     props.AssertEqual(
         1,
         1,
@@ -1034,7 +1034,7 @@ TEST(TableTest, BlockCacheTest) {
   {
     iter.reset(c.NewIterator());
     iter->SeekToFirst();
-    BlockCacheProperties props(options.statistics);
+    BlockCacheProperties props(options.statistics.get());
     props.AssertEqual(
         1,
         1 + 1,  // index block hit
@@ -1054,7 +1054,7 @@ TEST(TableTest, BlockCacheTest) {
     iter.reset(c.NewIterator());
     iter->SeekToFirst();
     ASSERT_EQ("key", iter->key().ToString());
-    BlockCacheProperties props(options.statistics);
+    BlockCacheProperties props(options.statistics.get());
     // Nothing is affected at all
     props.AssertEqual(0, 0, 0, 0);
   }
@@ -1065,7 +1065,7 @@ TEST(TableTest, BlockCacheTest) {
   options.block_cache = NewLRUCache(1);
   c.Reopen(options);
   {
-    BlockCacheProperties props(options.statistics);
+    BlockCacheProperties props(options.statistics.get());
     props.AssertEqual(
         1,  // index block miss
         0,
@@ -1080,7 +1080,7 @@ TEST(TableTest, BlockCacheTest) {
     // It first cache index block then data block. But since the cache size
     // is only 1, index block will be purged after data block is inserted.
     iter.reset(c.NewIterator());
-    BlockCacheProperties props(options.statistics);
+    BlockCacheProperties props(options.statistics.get());
     props.AssertEqual(
         1 + 1,  // index block miss
         0,
@@ -1093,7 +1093,7 @@ TEST(TableTest, BlockCacheTest) {
     // SeekToFirst() accesses data block. With similar reason, we expect data
     // block's cache miss.
     iter->SeekToFirst();
-    BlockCacheProperties props(options.statistics);
+    BlockCacheProperties props(options.statistics.get());
     props.AssertEqual(
         2,
         0,
@@ -1268,7 +1268,7 @@ class MemTableTest { };
 TEST(MemTableTest, Simple) {
   InternalKeyComparator cmp(BytewiseComparator());
   auto table_factory = std::make_shared<SkipListFactory>();
-  MemTable* memtable = new MemTable(cmp, table_factory);
+  MemTable* memtable = new MemTable(cmp, table_factory.get());
   memtable->Ref();
   WriteBatch batch;
   Options options;