2013-10-16 21:59:46 +00:00
|
|
|
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under the BSD-style license found in the
|
|
|
|
// LICENSE file in the root directory of this source tree. An additional grant
|
|
|
|
// of patent rights can be found in the PATENTS file in the same directory.
|
|
|
|
//
|
2011-03-18 22:37:00 +00:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
|
|
|
#include "db/table_cache.h"
|
|
|
|
|
2015-06-23 17:25:45 +00:00
|
|
|
#include "db/dbformat.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
#include "db/filename.h"
|
2014-01-07 04:29:17 +00:00
|
|
|
#include "db/version_edit.h"
|
2013-02-25 21:58:34 +00:00
|
|
|
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/statistics.h"
|
In DB::NewIterator(), try to allocate the whole iterator tree in an arena
Summary:
In this patch, try to allocate the whole iterator tree starting from DBIter from an arena
1. ArenaWrappedDBIter is created when serves as the entry point of an iterator tree, with an arena in it.
2. Add an option to create iterator from arena for following iterators: DBIter, MergingIterator, MemtableIterator, all mem table's iterators, all table reader's iterators and two level iterator.
3. MergeIteratorBuilder is created to incrementally build the tree of internal iterators. It is passed to mem table list and version set and add iterators to it.
Limitations:
(1) Only DB::NewIterator() without tailing uses the arena. Other cases, including readonly DB and compactions are still from malloc
(2) Two level iterator itself is allocated in arena, but not iterators inside it.
Test Plan: make all check
Reviewers: ljin, haobo
Reviewed By: haobo
Subscribers: leveldb, dhruba, yhchiang, igor
Differential Revision: https://reviews.facebook.net/D18513
2014-06-02 23:38:00 +00:00
|
|
|
#include "table/iterator_wrapper.h"
|
2014-01-28 05:58:46 +00:00
|
|
|
#include "table/table_reader.h"
|
2014-09-29 18:09:09 +00:00
|
|
|
#include "table/get_context.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
#include "util/coding.h"
|
2013-06-07 17:02:28 +00:00
|
|
|
#include "util/stop_watch.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
namespace rocksdb {
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2015-06-23 17:25:45 +00:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
template <class T>
|
2011-03-18 22:37:00 +00:00
|
|
|
static void DeleteEntry(const Slice& key, void* value) {
|
2015-06-23 17:25:45 +00:00
|
|
|
T* typed_value = reinterpret_cast<T*>(value);
|
|
|
|
delete typed_value;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Iterator cleanup callback: releases the table-cache handle that was
// pinning a TableReader alive for the lifetime of an iterator.
static void UnrefEntry(void* arg1, void* arg2) {
  auto* owning_cache = reinterpret_cast<Cache*>(arg1);
  auto* pinned_handle = reinterpret_cast<Cache::Handle*>(arg2);
  owning_cache->Release(pinned_handle);
}
|
|
|
|
|
2014-06-13 22:54:19 +00:00
|
|
|
// Builds the table-cache key for a file: the raw little-endian bytes of
// the file number. The caller must keep *file_number alive for as long
// as the returned Slice is in use (it does not own the storage).
static Slice GetSliceForFileNumber(const uint64_t* file_number) {
  const char* bytes = reinterpret_cast<const char*>(file_number);
  return Slice(bytes, sizeof(*file_number));
}
|
|
|
|
|
2015-06-23 17:25:45 +00:00
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
|
|
|
|
void AppendVarint64(IterKey* key, uint64_t v) {
|
|
|
|
char buf[10];
|
|
|
|
auto ptr = EncodeVarint64(buf, v);
|
|
|
|
key->TrimAppend(key->Size(), buf, ptr - buf);
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif // ROCKSDB_LITE
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
2014-09-04 23:18:36 +00:00
|
|
|
// A TableCache maps file numbers to open TableReader objects. The LRU
// cache passed in may be shared with other TableCache instances (one
// per column family over a single global cache).
TableCache::TableCache(const ImmutableCFOptions& ioptions,
                       const EnvOptions& env_options, Cache* const cache)
    : ioptions_(ioptions), env_options_(env_options), cache_(cache) {
  if (!ioptions_.row_cache) {
    return;
  }
  // If the same row cache is shared by multiple instances, prefix our
  // keys with a per-instance id to disambiguate their entries.
  PutVarint64(&row_cache_id_, ioptions_.row_cache->NewId());
}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
// Cached TableReaders are owned by cache_ and destroyed via
// DeleteEntry<TableReader> on eviction, so there is nothing to tear
// down here.
TableCache::~TableCache() {}
|
|
|
|
|
2014-01-07 04:29:17 +00:00
|
|
|
// Returns the TableReader stored under a cache handle previously
// obtained from FindTable(). Does not affect the handle's refcount.
TableReader* TableCache::GetTableReaderFromHandle(Cache::Handle* handle) {
  void* stored = cache_->Value(handle);
  return reinterpret_cast<TableReader*>(stored);
}
|
|
|
|
|
|
|
|
// Drops the reference that is pinning a table-cache entry.
void TableCache::ReleaseHandle(Cache::Handle* handle) { cache_->Release(handle); }
|
|
|
|
|
2014-09-04 23:18:36 +00:00
|
|
|
// Looks up the TableReader for `fd` in the table cache, opening the
// file and inserting a new entry on a miss. On success, *handle pins
// the cache entry and the caller must eventually ReleaseHandle() it.
// When no_io is true, a cache miss returns Status::Incomplete instead
// of touching the filesystem.
Status TableCache::FindTable(const EnvOptions& env_options,
                             const InternalKeyComparator& internal_comparator,
                             const FileDescriptor& fd, Cache::Handle** handle,
                             const bool no_io) {
  Status s;
  // Cache key is the raw file number; `number` must outlive `key`.
  uint64_t number = fd.GetNumber();
  Slice key = GetSliceForFileNumber(&number);
  *handle = cache_->Lookup(key);
  if (*handle == nullptr) {
    if (no_io) {  // Don't do IO and return a not-found status
      return Status::Incomplete("Table not found in table_cache, no_io is set");
    }
    std::string fname =
        TableFileName(ioptions_.db_paths, fd.GetNumber(), fd.GetPathId());
    unique_ptr<RandomAccessFile> file;
    unique_ptr<TableReader> table_reader;
    s = ioptions_.env->NewRandomAccessFile(fname, &file, env_options);
    // Counted regardless of success; failures are tallied separately
    // below as NO_FILE_ERRORS.
    RecordTick(ioptions_.statistics, NO_FILE_OPENS);
    if (s.ok()) {
      if (ioptions_.advise_random_on_open) {
        file->Hint(RandomAccessFile::RANDOM);
      }
      // Time only the table-open I/O; the StopWatch stops when it goes
      // out of scope at the end of this block.
      StopWatch sw(ioptions_.env, ioptions_.statistics, TABLE_OPEN_IO_MICROS);
      s = ioptions_.table_factory->NewTableReader(
          ioptions_, env_options, internal_comparator, std::move(file),
          fd.GetFileSize(), &table_reader);
    }

    if (!s.ok()) {
      assert(table_reader == nullptr);
      RecordTick(ioptions_.statistics, NO_FILE_ERRORS);
      // We do not cache error results so that if the error is transient,
      // or somebody repairs the file, we recover automatically.
    } else {
      // Ownership of the reader transfers to the cache; DeleteEntry
      // frees it on eviction. Charge of 1 makes capacity a file count.
      *handle = cache_->Insert(key, table_reader.release(), 1,
                               &DeleteEntry<TableReader>);
    }
  }
  return s;
}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-04-17 15:36:46 +00:00
|
|
|
// Returns an iterator over the table identified by `fd`. If `arena` is
// non-null the iterator is allocated from it (caller destroys it via
// ~Iterator, not delete). If the file descriptor carries a pre-loaded
// reader it is used directly; otherwise the table is pinned in the
// cache and unpinned when the iterator is destroyed. On success, if
// table_reader_ptr is non-null it receives the reader used (valid only
// while the returned iterator is alive).
Iterator* TableCache::NewIterator(const ReadOptions& options,
                                  const EnvOptions& env_options,
                                  const InternalKeyComparator& icomparator,
                                  const FileDescriptor& fd,
                                  TableReader** table_reader_ptr,
                                  bool for_compaction, Arena* arena) {
  if (table_reader_ptr != nullptr) {
    *table_reader_ptr = nullptr;
  }
  TableReader* table_reader = fd.table_reader;
  Cache::Handle* handle = nullptr;
  Status s;
  if (table_reader == nullptr) {
    // No pre-loaded reader: pin the table in the cache (miss opens the
    // file unless the read tier forbids I/O).
    s = FindTable(env_options, icomparator, fd, &handle,
                  options.read_tier == kBlockCacheTier);
    if (!s.ok()) {
      // Error iterators are also arena-allocated so the caller's
      // cleanup path is uniform.
      return NewErrorIterator(s, arena);
    }
    table_reader = GetTableReaderFromHandle(handle);
  }

  Iterator* result = table_reader->NewIterator(options, arena);
  if (handle != nullptr) {
    // Unpin the cache entry when the iterator is destroyed.
    result->RegisterCleanup(&UnrefEntry, cache_, handle);
  }
  if (table_reader_ptr != nullptr) {
    *table_reader_ptr = table_reader;
  }

  if (for_compaction) {
    table_reader->SetupForCompaction();
  }

  return result;
}
|
|
|
|
|
2012-04-17 15:36:46 +00:00
|
|
|
// Point lookup of internal key `k` in the table identified by `fd`,
// reporting results through *get_context. In non-LITE builds, when the
// row cache is configured, a hit replays the cached GetContext log and
// returns without touching the table; a miss records a replay log
// during the table Get and inserts it into the row cache afterwards.
// With read_tier == kBlockCacheTier, a table-cache miss is reported as
// "key may exist" rather than performing I/O.
Status TableCache::Get(const ReadOptions& options,
                       const InternalKeyComparator& internal_comparator,
                       const FileDescriptor& fd, const Slice& k,
                       GetContext* get_context) {
  TableReader* t = fd.table_reader;
  Status s;
  Cache::Handle* handle = nullptr;
  // Non-null only when a row-cache miss means we should record a replay
  // log (points at row_cache_entry_buffer below).
  std::string* row_cache_entry = nullptr;

#ifndef ROCKSDB_LITE
  IterKey row_cache_key;
  std::string row_cache_entry_buffer;

  if (ioptions_.row_cache) {
    uint64_t fd_number = fd.GetNumber();
    auto user_key = ExtractUserKey(k);
    // We use the user key as cache key instead of the internal key,
    // otherwise the whole cache would be invalidated every time the
    // sequence key increases. However, to support caching snapshot
    // reads, we append the sequence number (incremented by 1 to
    // distinguish from 0) only in this case.
    uint64_t seq_no =
        options.snapshot == nullptr ? 0 : 1 + GetInternalKeySeqno(k);

    // Compute row cache key: instance id | file number | seq | user key.
    row_cache_key.TrimAppend(row_cache_key.Size(), row_cache_id_.data(),
                             row_cache_id_.size());
    AppendVarint64(&row_cache_key, fd_number);
    AppendVarint64(&row_cache_key, seq_no);
    row_cache_key.TrimAppend(row_cache_key.Size(), user_key.data(),
                             user_key.size());

    if (auto row_handle = ioptions_.row_cache->Lookup(row_cache_key.GetKey())) {
      // Row cache hit: replay the stored GetContext log and skip the
      // table lookup entirely.
      auto found_row_cache_entry = static_cast<const std::string*>(
          ioptions_.row_cache->Value(row_handle));
      replayGetContextLog(*found_row_cache_entry, user_key, get_context);
      ioptions_.row_cache->Release(row_handle);
      RecordTick(ioptions_.statistics, ROW_CACHE_HIT);
      return Status::OK();
    }

    // Not found, setting up the replay log.
    RecordTick(ioptions_.statistics, ROW_CACHE_MISS);
    row_cache_entry = &row_cache_entry_buffer;
  }
#endif  // ROCKSDB_LITE

  if (!t) {
    // No pre-loaded reader on the file descriptor: pin one via the
    // table cache (no I/O if the read tier forbids it).
    s = FindTable(env_options_, internal_comparator, fd, &handle,
                  options.read_tier == kBlockCacheTier);
    if (s.ok()) {
      t = GetTableReaderFromHandle(handle);
    }
  }
  if (s.ok()) {
    get_context->SetReplayLog(row_cache_entry);  // nullptr if no cache.
    s = t->Get(options, k, get_context);
    // Detach the replay log before the buffer can go out of scope.
    get_context->SetReplayLog(nullptr);
    if (handle != nullptr) {
      ReleaseHandle(handle);
    }
  } else if (options.read_tier && s.IsIncomplete()) {
    // Couldn't find Table in cache but treat as kFound if no_io set
    get_context->MarkKeyMayExist();
    return Status::OK();
  }

#ifndef ROCKSDB_LITE
  // Put the replay log in row cache only if something was found.
  if (s.ok() && row_cache_entry && !row_cache_entry->empty()) {
    size_t charge =
        row_cache_key.Size() + row_cache_entry->size() + sizeof(std::string);
    // The cached string is owned by the row cache; DeleteEntry frees it
    // on eviction.
    void* row_ptr = new std::string(std::move(*row_cache_entry));
    auto row_handle = ioptions_.row_cache->Insert(
        row_cache_key.GetKey(), row_ptr, charge, &DeleteEntry<std::string>);
    ioptions_.row_cache->Release(row_handle);
  }
#endif  // ROCKSDB_LITE

  return s;
}
|
2014-09-04 23:18:36 +00:00
|
|
|
|
2014-02-14 00:28:21 +00:00
|
|
|
// Fetches the table properties for `fd` into *properties. A reader
// pre-loaded on the file descriptor is used directly; otherwise the
// table is pinned in the cache just long enough to copy the shared_ptr
// out. With no_io set, a cache miss fails instead of opening the file.
Status TableCache::GetTableProperties(
    const EnvOptions& env_options,
    const InternalKeyComparator& internal_comparator, const FileDescriptor& fd,
    std::shared_ptr<const TableProperties>* properties, bool no_io) {
  // Fast path: table already been pre-loaded?
  if (fd.table_reader != nullptr) {
    *properties = fd.table_reader->GetTableProperties();
    return Status::OK();
  }

  Cache::Handle* pinned = nullptr;
  Status status =
      FindTable(env_options, internal_comparator, fd, &pinned, no_io);
  if (!status.ok()) {
    return status;
  }
  assert(pinned);
  TableReader* reader = GetTableReaderFromHandle(pinned);
  *properties = reader->GetTableProperties();
  ReleaseHandle(pinned);
  return status;
}
|
2012-04-17 15:36:46 +00:00
|
|
|
|
2014-08-05 18:27:34 +00:00
|
|
|
// Reports the approximate memory footprint of the TableReader backing
// `fd`. Returns 0 when the table is not already open: the cache lookup
// is done with no_io set, so this never opens the file itself.
size_t TableCache::GetMemoryUsageByTableReader(
    const EnvOptions& env_options,
    const InternalKeyComparator& internal_comparator,
    const FileDescriptor& fd) {
  // A pre-loaded reader can be queried directly.
  if (fd.table_reader != nullptr) {
    return fd.table_reader->ApproximateMemoryUsage();
  }

  Cache::Handle* pinned = nullptr;
  Status status = FindTable(env_options, internal_comparator, fd, &pinned,
                            true /* no_io */);
  if (!status.ok()) {
    return 0;
  }
  assert(pinned);
  size_t usage = GetTableReaderFromHandle(pinned)->ApproximateMemoryUsage();
  ReleaseHandle(pinned);
  return usage;
}
|
|
|
|
|
[CF] Rethink table cache
Summary:
Adapting table cache to column families is interesting. We want table cache to be global LRU, so if some column families are use not as often as others, we want them to be evicted from cache. However, current TableCache object also constructs tables on its own. If table is not found in the cache, TableCache automatically creates new table. We want each column family to be able to specify different table factory.
To solve the problem, we still have a single LRU, but we provide the LRUCache object to TableCache on construction. We have one TableCache per column family, but the underyling cache is shared by all TableCache objects.
This allows us to have a global LRU, but still be able to support different table factories for different column families. Also, in the future it will also be able to support different directories for different column families.
Test Plan: make check
Reviewers: dhruba, haobo, kailiu, sdong
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15915
2014-02-05 17:07:55 +00:00
|
|
|
// Removes the entry for file_number from `cache`; the cached
// TableReader is freed once all outstanding references are released.
void TableCache::Evict(Cache* cache, uint64_t file_number) {
  const Slice cache_key = GetSliceForFileNumber(&file_number);
  cache->Erase(cache_key);
}
|
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
} // namespace rocksdb
|