move block based table related options to BlockBasedTableOptions

Summary:
I will move compression related options in a separate diff since this
diff is already pretty lengthy.
I guess I will also need to change JNI accordingly :(

Test Plan: make all check

Reviewers: yhchiang, igor, sdong

Reviewed By: igor

Subscribers: leveldb

Differential Revision: https://reviews.facebook.net/D21915
Lei Jin 2014-08-25 14:22:05 -07:00
parent 17b54aea53
commit 384400128f
36 changed files with 465 additions and 442 deletions
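
For callers, the change boils down to moving these settings onto a BlockBasedTableOptions object and installing it through a table factory. A minimal before/after sketch (values are illustrative and this snippet is not part of the commit; the affected options are those listed in the HISTORY.md hunk below):

#include "rocksdb/cache.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

rocksdb::Options MakeOptions() {
  rocksdb::Options options;
  // Before: block options lived directly on Options and filter_policy was a
  // raw pointer the caller had to delete:
  //   options.block_cache = rocksdb::NewLRUCache(8 << 20);
  //   options.block_size = 16 * 1024;
  //   options.filter_policy = rocksdb::NewBloomFilterPolicy(10);
  // After: they live on BlockBasedTableOptions, installed via a table factory.
  rocksdb::BlockBasedTableOptions table_options;
  table_options.block_cache = rocksdb::NewLRUCache(8 << 20);
  table_options.block_size = 16 * 1024;
  // filter_policy is now a shared_ptr, so the table options own the policy.
  table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
  options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(table_options));
  return options;
}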


@ -20,6 +20,10 @@
* Statistics APIs now take uint32_t as type instead of Tickers. Also make two access functions getTickerCount and histogramData const
* Add DB property rocksdb.estimate-num-keys, estimated number of live keys in DB.
* Add DB::GetIntProperty(), which returns DB properties that are integer as uint64_t.
* The Prefix Extractor used with V2 compaction filters is now passed the user key to SliceTransform::Transform instead of the unparsed RocksDB key.
* Move BlockBasedTable-related options from Options to BlockBasedTableOptions, and change the corresponding JNI interface. Options affected include:
no_block_cache, block_cache, block_cache_compressed, block_size, block_size_deviation, block_restart_interval, filter_policy, whole_key_filtering. filter_policy is changed from a raw pointer to a shared_ptr.
* Remove deprecated options: disable_seek_compaction and db_stats_log_interval
## 3.3.0 (7/10/2014)
### New Features

db/c.cc

@ -55,6 +55,7 @@ using rocksdb::MergeOperator;
using rocksdb::NewBloomFilterPolicy;
using rocksdb::NewLRUCache;
using rocksdb::Options;
using rocksdb::BlockBasedTableOptions;
using rocksdb::RandomAccessFile;
using rocksdb::Range;
using rocksdb::ReadOptions;
@ -81,6 +82,7 @@ struct rocksdb_fifo_compaction_options_t { CompactionOptionsFIFO rep; };
struct rocksdb_readoptions_t { ReadOptions rep; };
struct rocksdb_writeoptions_t { WriteOptions rep; };
struct rocksdb_options_t { Options rep; };
struct rocksdb_block_based_table_options_t { BlockBasedTableOptions rep; };
struct rocksdb_seqfile_t { SequentialFile* rep; };
struct rocksdb_randomfile_t { RandomAccessFile* rep; };
struct rocksdb_writablefile_t { WritableFile* rep; };
@ -1053,6 +1055,74 @@ const char* rocksdb_writebatch_data(rocksdb_writebatch_t* b, size_t* size) {
return b->rep.Data().c_str();
}
rocksdb_block_based_table_options_t*
rocksdb_block_based_options_create() {
return new rocksdb_block_based_table_options_t;
}
void rocksdb_block_based_options_destroy(
rocksdb_block_based_table_options_t* options) {
delete options;
}
void rocksdb_block_based_options_set_block_size(
rocksdb_block_based_table_options_t* options, size_t block_size) {
options->rep.block_size = block_size;
}
void rocksdb_block_based_options_set_block_size_deviation(
rocksdb_block_based_table_options_t* options, int block_size_deviation) {
options->rep.block_size_deviation = block_size_deviation;
}
void rocksdb_block_based_options_set_block_restart_interval(
rocksdb_block_based_table_options_t* options, int block_restart_interval) {
options->rep.block_restart_interval = block_restart_interval;
}
void rocksdb_block_based_options_set_filter_policy(
rocksdb_block_based_table_options_t* options,
rocksdb_filterpolicy_t* filter_policy) {
options->rep.filter_policy.reset(filter_policy);
}
void rocksdb_block_based_options_set_no_block_cache(
rocksdb_block_based_table_options_t* options,
unsigned char no_block_cache) {
options->rep.no_block_cache = no_block_cache;
}
void rocksdb_block_based_options_set_block_cache(
rocksdb_block_based_table_options_t* options,
rocksdb_cache_t* block_cache) {
if (block_cache) {
options->rep.block_cache = block_cache->rep;
}
}
void rocksdb_block_based_options_set_block_cache_compressed(
rocksdb_block_based_table_options_t* options,
rocksdb_cache_t* block_cache_compressed) {
if (block_cache_compressed) {
options->rep.block_cache_compressed = block_cache_compressed->rep;
}
}
void rocksdb_block_based_options_set_whole_key_filtering(
rocksdb_block_based_table_options_t* options, unsigned char v) {
options->rep.whole_key_filtering = v;
}
void rocksdb_options_set_block_based_table_factory(
rocksdb_options_t *opt,
rocksdb_block_based_table_options_t* table_options) {
if (table_options) {
opt->rep.table_factory.reset(
rocksdb::NewBlockBasedTableFactory(table_options->rep));
}
}
rocksdb_options_t* rocksdb_options_create() {
return new rocksdb_options_t;
}
@ -1111,12 +1181,6 @@ void rocksdb_options_set_compaction_filter_factory_v2(
opt->rep.compaction_filter_factory_v2 = std::shared_ptr<CompactionFilterFactoryV2>(compaction_filter_factory_v2);
}
void rocksdb_options_set_filter_policy(
rocksdb_options_t* opt,
rocksdb_filterpolicy_t* policy) {
opt->rep.filter_policy = policy;
}
void rocksdb_options_set_create_if_missing(
rocksdb_options_t* opt, unsigned char v) {
opt->rep.create_if_missing = v;
@ -1160,26 +1224,6 @@ void rocksdb_options_set_max_open_files(rocksdb_options_t* opt, int n) {
opt->rep.max_open_files = n;
}
void rocksdb_options_set_cache(rocksdb_options_t* opt, rocksdb_cache_t* c) {
if (c) {
opt->rep.block_cache = c->rep;
}
}
void rocksdb_options_set_cache_compressed(rocksdb_options_t* opt, rocksdb_cache_t* c) {
if (c) {
opt->rep.block_cache_compressed = c->rep;
}
}
void rocksdb_options_set_block_size(rocksdb_options_t* opt, size_t s) {
opt->rep.block_size = s;
}
void rocksdb_options_set_block_restart_interval(rocksdb_options_t* opt, int n) {
opt->rep.block_restart_interval = n;
}
void rocksdb_options_set_target_file_size_base(
rocksdb_options_t* opt, uint64_t n) {
opt->rep.target_file_size_base = n;
@ -1272,11 +1316,6 @@ void rocksdb_options_set_prefix_extractor(
opt->rep.prefix_extractor.reset(prefix_extractor);
}
void rocksdb_options_set_whole_key_filtering(
rocksdb_options_t* opt, unsigned char v) {
opt->rep.whole_key_filtering = v;
}
void rocksdb_options_set_disable_data_sync(
rocksdb_options_t* opt, int disable_data_sync) {
opt->rep.disableDataSync = disable_data_sync;
@ -1287,11 +1326,6 @@ void rocksdb_options_set_use_fsync(
opt->rep.use_fsync = use_fsync;
}
void rocksdb_options_set_db_stats_log_interval(
rocksdb_options_t* opt, int db_stats_log_interval) {
opt->rep.db_stats_log_interval = db_stats_log_interval;
}
void rocksdb_options_set_db_log_dir(
rocksdb_options_t* opt, const char* db_log_dir) {
opt->rep.db_log_dir = db_log_dir;
@ -1351,11 +1385,6 @@ void rocksdb_options_set_stats_dump_period_sec(
opt->rep.stats_dump_period_sec = v;
}
void rocksdb_options_set_block_size_deviation(
rocksdb_options_t* opt, int v) {
opt->rep.block_size_deviation = v;
}
void rocksdb_options_set_advise_random_on_open(
rocksdb_options_t* opt, unsigned char v) {
opt->rep.advise_random_on_open = v;
@ -1450,11 +1479,6 @@ void rocksdb_options_set_max_manifest_file_size(
opt->rep.max_manifest_file_size = v;
}
void rocksdb_options_set_no_block_cache(
rocksdb_options_t* opt, unsigned char v) {
opt->rep.no_block_cache = v;
}
void rocksdb_options_set_table_cache_numshardbits(
rocksdb_options_t* opt, int v) {
opt->rep.table_cache_numshardbits = v;
@ -1474,10 +1498,6 @@ void rocksdb_options_set_disable_auto_compactions(rocksdb_options_t* opt, int di
opt->rep.disable_auto_compactions = disable;
}
void rocksdb_options_set_disable_seek_compaction(rocksdb_options_t* opt, int disable) {
opt->rep.disable_seek_compaction = disable;
}
void rocksdb_options_set_delete_obsolete_files_period_micros(
rocksdb_options_t* opt, uint64_t v) {
opt->rep.delete_obsolete_files_period_micros = v;
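
A minimal usage sketch of the C API added above (the same flow appears in the c_test.c changes further down; the sizes are illustrative and error handling/cleanup is omitted):

rocksdb_options_t* options = rocksdb_options_create();
rocksdb_cache_t* cache = rocksdb_cache_create_lru(8 << 20);
rocksdb_block_based_table_options_t* table_options =
    rocksdb_block_based_options_create();
rocksdb_block_based_options_set_block_size(table_options, 4096);
rocksdb_block_based_options_set_block_cache(table_options, cache);
rocksdb_block_based_options_set_filter_policy(
    table_options, rocksdb_filterpolicy_create_bloom(10));
// rocksdb_options_set_block_based_table_factory copies the table options into
// a new BlockBasedTableFactory owned by the Options (see the implementation
// above).
rocksdb_options_set_block_based_table_factory(options, table_options);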


@ -335,6 +335,7 @@ int main(int argc, char** argv) {
rocksdb_cache_t* cache;
rocksdb_env_t* env;
rocksdb_options_t* options;
rocksdb_block_based_table_options_t* table_options;
rocksdb_readoptions_t* roptions;
rocksdb_writeoptions_t* woptions;
char* err = NULL;
@ -353,14 +354,15 @@ int main(int argc, char** argv) {
options = rocksdb_options_create();
rocksdb_options_set_comparator(options, cmp);
rocksdb_options_set_error_if_exists(options, 1);
rocksdb_options_set_cache(options, cache);
rocksdb_options_set_env(options, env);
rocksdb_options_set_info_log(options, NULL);
rocksdb_options_set_write_buffer_size(options, 100000);
rocksdb_options_set_paranoid_checks(options, 1);
rocksdb_options_set_max_open_files(options, 10);
rocksdb_options_set_block_size(options, 1024);
rocksdb_options_set_block_restart_interval(options, 8);
table_options = rocksdb_block_based_options_create();
rocksdb_block_based_options_set_block_cache(table_options, cache);
rocksdb_options_set_block_based_table_factory(options, table_options);
rocksdb_options_set_compression(options, rocksdb_no_compression);
rocksdb_options_set_compression_options(options, -14, -1, 0);
int compression_levels[] = {rocksdb_no_compression, rocksdb_no_compression,
@ -540,10 +542,13 @@ int main(int argc, char** argv) {
policy = rocksdb_filterpolicy_create_bloom(10);
}
table_options = rocksdb_block_based_options_create();
rocksdb_block_based_options_set_filter_policy(table_options, policy);
// Create new database
rocksdb_close(db);
rocksdb_destroy_db(options, dbname, &err);
rocksdb_options_set_filter_policy(options, policy);
rocksdb_options_set_block_based_table_factory(options, table_options);
db = rocksdb_open(options, dbname, &err);
CheckNoError(err);
rocksdb_put(db, woptions, "foo", 3, "foovalue", 8, &err);
@ -565,8 +570,9 @@ int main(int argc, char** argv) {
CheckGet(db, roptions, "foo", "foovalue");
CheckGet(db, roptions, "bar", "barvalue");
}
rocksdb_options_set_filter_policy(options, NULL);
rocksdb_filterpolicy_destroy(policy);
// Reset the policy
rocksdb_block_based_options_set_filter_policy(table_options, NULL);
rocksdb_options_set_block_based_table_factory(options, table_options);
}
StartPhase("compaction_filter");
@ -757,9 +763,7 @@ int main(int argc, char** argv) {
StartPhase("prefix");
{
// Create new database
rocksdb_filterpolicy_t* policy = rocksdb_filterpolicy_create_bloom(10);
rocksdb_options_set_allow_mmap_reads(options, 1);
rocksdb_options_set_filter_policy(options, policy);
rocksdb_options_set_prefix_extractor(options, rocksdb_slicetransform_create_fixed_prefix(3));
rocksdb_options_set_hash_skip_list_rep(options, 5000, 4, 4);
rocksdb_options_set_plain_table_factory(options, 4, 10, 0.75, 16);
@ -796,13 +800,13 @@ int main(int argc, char** argv) {
rocksdb_iter_get_error(iter, &err);
CheckNoError(err);
rocksdb_iter_destroy(iter);
rocksdb_filterpolicy_destroy(policy);
}
StartPhase("cleanup");
rocksdb_close(db);
rocksdb_options_destroy(options);
rocksdb_block_based_options_destroy(table_options);
rocksdb_readoptions_destroy(roptions);
rocksdb_writeoptions_destroy(woptions);
rocksdb_cache_destroy(cache);


@ -50,11 +50,9 @@ ColumnFamilyHandleImpl::~ColumnFamilyHandleImpl() {
uint32_t ColumnFamilyHandleImpl::GetID() const { return cfd()->GetID(); }
ColumnFamilyOptions SanitizeOptions(const InternalKeyComparator* icmp,
const InternalFilterPolicy* ipolicy,
const ColumnFamilyOptions& src) {
ColumnFamilyOptions result = src;
result.comparator = icmp;
result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
#ifdef OS_MACOSX
// TODO(icanadi) make write_buffer_size uint64_t instead of size_t
ClipToRange(&result.write_buffer_size, ((size_t)64) << 10, ((size_t)1) << 30);
@ -70,13 +68,7 @@ ColumnFamilyOptions SanitizeOptions(const InternalKeyComparator* icmp,
result.min_write_buffer_number_to_merge =
std::min(result.min_write_buffer_number_to_merge,
result.max_write_buffer_number - 1);
if (result.block_cache == nullptr && !result.no_block_cache) {
result.block_cache = NewLRUCache(8 << 20);
}
result.compression_per_level = src.compression_per_level;
if (result.block_size_deviation < 0 || result.block_size_deviation > 100) {
result.block_size_deviation = 0;
}
if (result.max_mem_compaction_level >= result.num_levels) {
result.max_mem_compaction_level = result.num_levels - 1;
}
@ -195,9 +187,7 @@ ColumnFamilyData::ColumnFamilyData(uint32_t id, const std::string& name,
refs_(0),
dropped_(false),
internal_comparator_(options.comparator),
internal_filter_policy_(options.filter_policy),
options_(*db_options, SanitizeOptions(&internal_comparator_,
&internal_filter_policy_, options)),
options_(*db_options, SanitizeOptions(&internal_comparator_, options)),
mem_(nullptr),
imm_(options_.min_write_buffer_number_to_merge),
super_version_(nullptr),


@ -113,7 +113,6 @@ struct SuperVersion {
};
extern ColumnFamilyOptions SanitizeOptions(const InternalKeyComparator* icmp,
const InternalFilterPolicy* ipolicy,
const ColumnFamilyOptions& src);
class ColumnFamilySet;
@ -272,7 +271,6 @@ class ColumnFamilyData {
bool dropped_; // true if client dropped it
const InternalKeyComparator internal_comparator_;
const InternalFilterPolicy internal_filter_policy_;
Options const options_;


@ -746,9 +746,10 @@ TEST(ColumnFamilyTest, DifferentCompactionStyles) {
default_cf.num_levels = 3;
default_cf.write_buffer_size = 64 << 10; // 64KB
default_cf.target_file_size_base = 30 << 10;
default_cf.filter_policy = nullptr;
default_cf.no_block_cache = true;
default_cf.source_compaction_factor = 100;
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
one.compaction_style = kCompactionStyleUniversal;
// trigger compaction if there are >= 4 files


@ -45,7 +45,9 @@ class CorruptionTest {
db_ = nullptr;
options_.create_if_missing = true;
options_.block_size_deviation = 0; // make unit test pass for now
BlockBasedTableOptions table_options;
table_options.block_size_deviation = 0; // make unit test pass for now
options_.table_factory.reset(NewBlockBasedTableFactory(table_options));
Reopen();
options_.create_if_missing = false;
}
@ -60,9 +62,11 @@ class CorruptionTest {
db_ = nullptr;
Options opt = (options ? *options : options_);
opt.env = &env_;
opt.block_cache = tiny_cache_;
opt.block_size_deviation = 0;
opt.arena_block_size = 4096;
BlockBasedTableOptions table_options;
table_options.block_cache = tiny_cache_;
table_options.block_size_deviation = 0;
opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
return DB::Open(opt, dbname_, &db_);
}


@ -239,10 +239,11 @@ DEFINE_int32(universal_compression_size_percent, -1,
DEFINE_int64(cache_size, -1, "Number of bytes to use as a cache of uncompressed"
"data. Negative means use default settings.");
DEFINE_int32(block_size, rocksdb::Options().block_size,
DEFINE_int32(block_size, rocksdb::BlockBasedTableOptions().block_size,
"Number of bytes in a block.");
DEFINE_int32(block_restart_interval, rocksdb::Options().block_restart_interval,
DEFINE_int32(block_restart_interval,
rocksdb::BlockBasedTableOptions().block_restart_interval,
"Number of keys between restart points "
"for delta encoding of keys.");
@ -844,9 +845,9 @@ class Duration {
class Benchmark {
private:
shared_ptr<Cache> cache_;
shared_ptr<Cache> compressed_cache_;
const FilterPolicy* filter_policy_;
std::shared_ptr<Cache> cache_;
std::shared_ptr<Cache> compressed_cache_;
std::shared_ptr<const FilterPolicy> filter_policy_;
const SliceTransform* prefix_extractor_;
struct DBWithColumnFamilies {
std::vector<ColumnFamilyHandle*> cfh;
@ -1107,7 +1108,6 @@ class Benchmark {
~Benchmark() {
delete db_.db;
delete filter_policy_;
delete prefix_extractor_;
}
@ -1509,7 +1509,7 @@ class Benchmark {
void Compress(ThreadState *thread) {
RandomGenerator gen;
Slice input = gen.Generate(Options().block_size);
Slice input = gen.Generate(FLAGS_block_size);
int64_t bytes = 0;
int64_t produced = 0;
bool ok = true;
@ -1559,7 +1559,7 @@ class Benchmark {
void Uncompress(ThreadState *thread) {
RandomGenerator gen;
Slice input = gen.Generate(Options().block_size);
Slice input = gen.Generate(FLAGS_block_size);
std::string compressed;
bool ok;
@ -1639,11 +1639,6 @@ class Benchmark {
Options options;
options.create_if_missing = !FLAGS_use_existing_db;
options.create_missing_column_families = FLAGS_num_column_families > 1;
options.block_cache = cache_;
options.block_cache_compressed = compressed_cache_;
if (cache_ == nullptr) {
options.no_block_cache = true;
}
options.write_buffer_size = FLAGS_write_buffer_size;
options.max_write_buffer_number = FLAGS_max_write_buffer_number;
options.min_write_buffer_number_to_merge =
@ -1651,9 +1646,6 @@ class Benchmark {
options.max_background_compactions = FLAGS_max_background_compactions;
options.max_background_flushes = FLAGS_max_background_flushes;
options.compaction_style = FLAGS_compaction_style_e;
options.block_size = FLAGS_block_size;
options.block_restart_interval = FLAGS_block_restart_interval;
options.filter_policy = filter_policy_;
if (FLAGS_prefix_size != 0) {
options.prefix_extractor.reset(
NewFixedPrefixTransform(FLAGS_prefix_size));
@ -1745,6 +1737,14 @@ class Benchmark {
} else {
block_based_options.index_type = BlockBasedTableOptions::kBinarySearch;
}
if (cache_ == nullptr) {
block_based_options.no_block_cache = true;
}
block_based_options.block_cache = cache_;
block_based_options.block_cache_compressed = compressed_cache_;
block_based_options.block_size = FLAGS_block_size;
block_based_options.block_restart_interval = FLAGS_block_restart_interval;
block_based_options.filter_policy = filter_policy_;
options.table_factory.reset(
NewBlockBasedTableFactory(block_based_options));
}


@ -246,10 +246,9 @@ struct DBImpl::CompactionState {
Options SanitizeOptions(const std::string& dbname,
const InternalKeyComparator* icmp,
const InternalFilterPolicy* ipolicy,
const Options& src) {
auto db_options = SanitizeOptions(dbname, DBOptions(src));
auto cf_options = SanitizeOptions(icmp, ipolicy, ColumnFamilyOptions(src));
auto cf_options = SanitizeOptions(icmp, ColumnFamilyOptions(src));
return Options(db_options, cf_options);
}
@ -3608,7 +3607,7 @@ bool DBImpl::KeyMayExist(const ReadOptions& options,
roptions.read_tier = kBlockCacheTier; // read from block cache only
auto s = GetImpl(roptions, column_family, key, value, value_found);
// If options.block_cache != nullptr and the index block of the table didn't
// If block_cache is enabled and the index block of the table is not
// present in block_cache, the return value will be Status::Incomplete.
// In this case, key may still exist in the table.
return s.ok() || s.IsIncomplete();
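
The comment above describes the mechanism behind DB::KeyMayExist; a short caller-side sketch of how the result is usually interpreted (a sketch only, not part of this diff):

std::string value;
bool value_found = false;
if (!db->KeyMayExist(rocksdb::ReadOptions(), "foo", &value, &value_found)) {
  // The key is definitely not in the DB.
} else if (value_found) {
  // Found in memory or in the block cache; "value" is already populated.
} else {
  // May exist on disk (the Status::Incomplete path above); call Get() to
  // confirm.
}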
@ -4795,10 +4794,6 @@ Status DB::Open(const DBOptions& db_options, const std::string& dbname,
for (auto cf : column_families) {
max_write_buffer_size =
std::max(max_write_buffer_size, cf.options.write_buffer_size);
if (cf.options.block_cache != nullptr && cf.options.no_block_cache) {
return Status::InvalidArgument(
"no_block_cache is true while block_cache is not nullptr");
}
}
DBImpl* impl = new DBImpl(db_options, dbname);
@ -4928,9 +4923,7 @@ Snapshot::~Snapshot() {
Status DestroyDB(const std::string& dbname, const Options& options) {
const InternalKeyComparator comparator(options.comparator);
const InternalFilterPolicy filter_policy(options.filter_policy);
const Options& soptions(SanitizeOptions(
dbname, &comparator, &filter_policy, options));
const Options& soptions(SanitizeOptions(dbname, &comparator, options));
Env* env = soptions.env;
std::vector<std::string> filenames;
std::vector<std::string> archiveFiles;


@ -669,7 +669,6 @@ class DBImpl : public DB {
// it is not equal to src.info_log.
extern Options SanitizeOptions(const std::string& db,
const InternalKeyComparator* icmp,
const InternalFilterPolicy* ipolicy,
const Options& src);
extern DBOptions SanitizeOptions(const std::string& db, const DBOptions& src);


@ -102,6 +102,11 @@ class AtomicCounter {
count_ = 0;
}
};
struct OptionsOverride {
std::shared_ptr<const FilterPolicy> filter_policy = nullptr;
};
} // namespace anon
static std::string Key(int i) {
@ -304,9 +309,6 @@ class SpecialEnv : public EnvWrapper {
};
class DBTest {
private:
const FilterPolicy* filter_policy_;
protected:
// Sequence of option configurations to try
enum OptionConfig {
@ -360,9 +362,9 @@ class DBTest {
kSkipFIFOCompaction = 128,
};
DBTest() : option_config_(kDefault),
env_(new SpecialEnv(Env::Default())) {
filter_policy_ = NewBloomFilterPolicy(10);
dbname_ = test::TmpDir() + "/db_test";
ASSERT_OK(DestroyDB(dbname_, Options()));
db_ = nullptr;
@ -378,7 +380,6 @@ class DBTest {
options.db_paths.emplace_back(dbname_ + "_4", 0);
ASSERT_OK(DestroyDB(dbname_, options));
delete env_;
delete filter_policy_;
}
// Switch to a fresh database with the next option configuration to
@ -446,14 +447,19 @@ class DBTest {
}
// Return the current option configuration.
Options CurrentOptions() {
Options CurrentOptions(
const anon::OptionsOverride& options_override = anon::OptionsOverride()) {
Options options;
return CurrentOptions(options);
return CurrentOptions(options, options_override);
}
Options CurrentOptions(const Options& defaultOptions) {
Options CurrentOptions(
const Options& defaultOptions,
const anon::OptionsOverride& options_override = anon::OptionsOverride()) {
// this redundant copy is to minimize code change w/o having lint error.
Options options = defaultOptions;
BlockBasedTableOptions table_options;
bool set_block_based_table_factory = true;
switch (option_config_) {
case kHashSkipList:
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
@ -465,18 +471,20 @@ class DBTest {
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
options.allow_mmap_reads = true;
options.max_sequential_skip_in_iterations = 999999;
set_block_based_table_factory = false;
break;
case kPlainTableAllBytesPrefix:
options.table_factory.reset(new PlainTableFactory());
options.prefix_extractor.reset(NewNoopTransform());
options.allow_mmap_reads = true;
options.max_sequential_skip_in_iterations = 999999;
set_block_based_table_factory = false;
break;
case kMergePut:
options.merge_operator = MergeOperators::CreatePutOperator();
break;
case kFilter:
options.filter_policy = filter_policy_;
table_options.filter_policy.reset(NewBloomFilterPolicy(10));
break;
case kUncompressed:
options.compression = kNoCompression;
@ -521,15 +529,13 @@ class DBTest {
break;
case kCompressedBlockCache:
options.allow_mmap_writes = true;
options.block_cache_compressed = NewLRUCache(8*1024*1024);
table_options.block_cache_compressed = NewLRUCache(8*1024*1024);
break;
case kInfiniteMaxOpenFiles:
options.max_open_files = -1;
break;
case kxxHashChecksum: {
BlockBasedTableOptions table_options;
table_options.checksum = kxxHash;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
break;
}
case kFIFOCompaction: {
@ -537,22 +543,25 @@ class DBTest {
break;
}
case kBlockBasedTableWithPrefixHashIndex: {
BlockBasedTableOptions table_options;
table_options.index_type = BlockBasedTableOptions::kHashSearch;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
break;
}
case kBlockBasedTableWithWholeKeyHashIndex: {
BlockBasedTableOptions table_options;
table_options.index_type = BlockBasedTableOptions::kHashSearch;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.prefix_extractor.reset(NewNoopTransform());
break;
}
default:
break;
}
if (options_override.filter_policy) {
table_options.filter_policy = options_override.filter_policy;
}
if (set_block_based_table_factory) {
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
}
return options;
}
@ -652,7 +661,6 @@ class DBTest {
opts.create_if_missing = true;
}
last_options_ = opts;
return DB::Open(opts, dbname_, &db_);
}
@ -1168,12 +1176,11 @@ TEST(DBTest, ReadOnlyDB) {
// created its index/filter blocks are added to block cache.
TEST(DBTest, IndexAndFilterBlocksOfNewTableAddedToCache) {
Options options = CurrentOptions();
std::unique_ptr<const FilterPolicy> filter_policy(NewBloomFilterPolicy(20));
options.filter_policy = filter_policy.get();
options.create_if_missing = true;
options.statistics = rocksdb::CreateDBStatistics();
BlockBasedTableOptions table_options;
table_options.cache_index_and_filter_blocks = true;
table_options.filter_policy.reset(NewBloomFilterPolicy(20));
options.table_factory.reset(new BlockBasedTableFactory(table_options));
CreateAndReopenWithCF({"pikachu"}, &options);
@ -1459,8 +1466,9 @@ TEST(DBTest, KeyMayExist) {
do {
ReadOptions ropts;
std::string value;
Options options = CurrentOptions();
options.filter_policy = NewBloomFilterPolicy(20);
anon::OptionsOverride options_override;
options_override.filter_policy.reset(NewBloomFilterPolicy(20));
Options options = CurrentOptions(options_override);
options.statistics = rocksdb::CreateDBStatistics();
CreateAndReopenWithCF({"pikachu"}, &options);
@ -1511,8 +1519,6 @@ TEST(DBTest, KeyMayExist) {
ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
delete options.filter_policy;
// KeyMayExist function only checks data in block caches, which is not used
// by plain table format.
} while (
@ -1587,8 +1593,9 @@ TEST(DBTest, NonBlockingIteration) {
// Tests Writebatch consistency and proper delete behaviour
TEST(DBTest, FilterDeletes) {
do {
Options options = CurrentOptions();
options.filter_policy = NewBloomFilterPolicy(20);
anon::OptionsOverride options_override;
options_override.filter_policy.reset(NewBloomFilterPolicy(20));
Options options = CurrentOptions(options_override);
options.filter_deletes = true;
CreateAndReopenWithCF({"pikachu"}, &options);
WriteBatch batch;
@ -1618,8 +1625,6 @@ TEST(DBTest, FilterDeletes) {
dbfull()->Write(WriteOptions(), &batch);
ASSERT_EQ(AllEntriesFor("c", 1), "[ DEL, d ]"); // Delete issued
batch.Clear();
delete options.filter_policy;
} while (ChangeCompactOptions());
}
@ -3333,32 +3338,37 @@ TEST(DBTest, CompressedCache) {
// Iteration 4: both block cache and compressed cache, but DB is not
// compressed
for (int iter = 0; iter < 4; iter++) {
Options options = CurrentOptions();
Options options;
options.write_buffer_size = 64*1024; // small write buffer
options.statistics = rocksdb::CreateDBStatistics();
BlockBasedTableOptions table_options;
switch (iter) {
case 0:
// only uncompressed block cache
options.block_cache = NewLRUCache(8*1024);
options.block_cache_compressed = nullptr;
table_options.block_cache = NewLRUCache(8*1024);
table_options.block_cache_compressed = nullptr;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
break;
case 1:
// no block cache, only compressed cache
options.no_block_cache = true;
options.block_cache = nullptr;
options.block_cache_compressed = NewLRUCache(8*1024);
table_options.no_block_cache = true;
table_options.block_cache = nullptr;
table_options.block_cache_compressed = NewLRUCache(8*1024);
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
break;
case 2:
// both compressed and uncompressed block cache
options.block_cache = NewLRUCache(1024);
options.block_cache_compressed = NewLRUCache(8*1024);
table_options.block_cache = NewLRUCache(1024);
table_options.block_cache_compressed = NewLRUCache(8*1024);
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
break;
case 3:
// both block cache and compressed cache, but DB is not compressed
// also, make block cache sizes bigger, to trigger block cache hits
options.block_cache = NewLRUCache(1024 * 1024);
options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
table_options.block_cache = NewLRUCache(1024 * 1024);
table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.compression = kNoCompression;
break;
default:
@ -3367,9 +3377,11 @@ TEST(DBTest, CompressedCache) {
CreateAndReopenWithCF({"pikachu"}, &options);
// default column family doesn't have block cache
Options no_block_cache_opts;
no_block_cache_opts.no_block_cache = true;
no_block_cache_opts.statistics = options.statistics;
options = CurrentOptions(options);
BlockBasedTableOptions table_options_no_bc;
table_options_no_bc.no_block_cache = true;
no_block_cache_opts.table_factory.reset(
NewBlockBasedTableFactory(table_options_no_bc));
ReopenWithColumnFamilies({"default", "pikachu"},
{&no_block_cache_opts, &options});
@ -5229,7 +5241,6 @@ TEST(DBTest, CustomComparator) {
new_options = CurrentOptions();
new_options.create_if_missing = true;
new_options.comparator = &cmp;
new_options.filter_policy = nullptr; // Cannot use bloom filters
new_options.write_buffer_size = 1000; // Compact more often
new_options = CurrentOptions(new_options);
DestroyAndReopen(&new_options);
@ -5637,11 +5648,16 @@ TEST(DBTest, FilesDeletedAfterCompaction) {
TEST(DBTest, BloomFilter) {
do {
env_->count_random_reads_ = true;
Options options = CurrentOptions();
env_->count_random_reads_ = true;
options.env = env_;
options.no_block_cache = true;
options.filter_policy = NewBloomFilterPolicy(10);
// ChangeCompactOptions() only changes compaction style, which does not
// trigger reset of table_factory
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
table_options.filter_policy.reset(NewBloomFilterPolicy(10));
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
CreateAndReopenWithCF({"pikachu"}, &options);
// Populate multiple layers
@ -5679,7 +5695,6 @@ TEST(DBTest, BloomFilter) {
env_->delay_sstable_sync_.Release_Store(nullptr);
Close();
delete options.filter_policy;
} while (ChangeCompactOptions());
}
@ -7145,15 +7160,18 @@ TEST(DBTest, PrefixScan) {
env_->count_random_reads_ = true;
Options options = CurrentOptions();
options.env = env_;
options.no_block_cache = true;
options.filter_policy = NewBloomFilterPolicy(10);
options.prefix_extractor.reset(NewFixedPrefixTransform(8));
options.whole_key_filtering = false;
options.disable_auto_compactions = true;
options.max_background_compactions = 2;
options.create_if_missing = true;
options.memtable_factory.reset(NewHashSkipListRepFactory(16));
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
table_options.filter_policy.reset(NewBloomFilterPolicy(10));
table_options.whole_key_filtering = false;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
// 11 RAND I/Os
DestroyAndReopen(&options);
PrefixScanInit(this);
@ -7171,7 +7189,6 @@ TEST(DBTest, PrefixScan) {
ASSERT_EQ(count, 2);
ASSERT_EQ(env_->random_read_counter_.Read(), 2);
Close();
delete options.filter_policy;
}
TEST(DBTest, TailingIteratorSingle) {

View File

@ -127,9 +127,11 @@ class InternalKeyComparator : public Comparator {
// Filter policy wrapper that converts from internal keys to user keys
class InternalFilterPolicy : public FilterPolicy {
private:
std::shared_ptr<const FilterPolicy> shared_ptr_;
const FilterPolicy* const user_policy_;
public:
explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) { }
explicit InternalFilterPolicy(std::shared_ptr<const FilterPolicy> p)
: shared_ptr_(p), user_policy_(p.get()) {}
virtual const char* Name() const;
virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const;
virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const;


@ -57,8 +57,7 @@ class Repairer {
: dbname_(dbname),
env_(options.env),
icmp_(options.comparator),
ipolicy_(options.filter_policy),
options_(SanitizeOptions(dbname, &icmp_, &ipolicy_, options)),
options_(SanitizeOptions(dbname, &icmp_, options)),
raw_table_cache_(
// TableCache can be small since we expect each table to be opened
// once.
@ -109,7 +108,6 @@ class Repairer {
std::string const dbname_;
Env* const env_;
InternalKeyComparator const icmp_;
InternalFilterPolicy const ipolicy_;
Options const options_;
std::shared_ptr<Cache> raw_table_cache_;
TableCache* table_cache_;


@ -257,7 +257,7 @@ void TestInternalKeyPropertiesCollector(
// SanitizeOptions().
options.info_log = std::make_shared<DumbLogger>();
options = SanitizeOptions("db", // just a place holder
&pikc, nullptr, // don't care filter policy
&pikc,
options);
options.comparator = comparator;
} else {


@ -75,6 +75,8 @@ typedef struct rocksdb_iterator_t rocksdb_iterator_t;
typedef struct rocksdb_logger_t rocksdb_logger_t;
typedef struct rocksdb_mergeoperator_t rocksdb_mergeoperator_t;
typedef struct rocksdb_options_t rocksdb_options_t;
typedef struct rocksdb_block_based_table_options_t
rocksdb_block_based_table_options_t;
typedef struct rocksdb_randomfile_t rocksdb_randomfile_t;
typedef struct rocksdb_readoptions_t rocksdb_readoptions_t;
typedef struct rocksdb_seqfile_t rocksdb_seqfile_t;
@ -346,6 +348,34 @@ extern void rocksdb_writebatch_iterate(
void (*deleted)(void*, const char* k, size_t klen));
extern const char* rocksdb_writebatch_data(rocksdb_writebatch_t*, size_t *size);
/* Block based table options */
extern rocksdb_block_based_table_options_t*
rocksdb_block_based_options_create();
extern void rocksdb_block_based_options_destroy(
rocksdb_block_based_table_options_t* options);
extern void rocksdb_block_based_options_set_block_size(
rocksdb_block_based_table_options_t* options, size_t block_size);
extern void rocksdb_block_based_options_set_block_size_deviation(
rocksdb_block_based_table_options_t* options, int block_size_deviation);
extern void rocksdb_block_based_options_set_block_restart_interval(
rocksdb_block_based_table_options_t* options, int block_restart_interval);
extern void rocksdb_block_based_options_set_filter_policy(
rocksdb_block_based_table_options_t* options,
rocksdb_filterpolicy_t* filter_policy);
extern void rocksdb_block_based_options_set_no_block_cache(
rocksdb_block_based_table_options_t* options,
unsigned char no_block_cache);
extern void rocksdb_block_based_options_set_block_cache(
rocksdb_block_based_table_options_t* options, rocksdb_cache_t* block_cache);
extern void rocksdb_block_based_options_set_block_cache_compressed(
rocksdb_block_based_table_options_t* options,
rocksdb_cache_t* block_cache_compressed);
extern void rocksdb_block_based_options_set_whole_key_filtering(
rocksdb_block_based_table_options_t*, unsigned char);
extern void rocksdb_options_set_block_based_table_factory(
rocksdb_options_t *opt, rocksdb_block_based_table_options_t* table_options);
/* Options */
extern rocksdb_options_t* rocksdb_options_create();
@ -376,9 +406,6 @@ extern void rocksdb_options_set_compression_per_level(
rocksdb_options_t* opt,
int* level_values,
size_t num_levels);
extern void rocksdb_options_set_filter_policy(
rocksdb_options_t*,
rocksdb_filterpolicy_t*);
extern void rocksdb_options_set_create_if_missing(
rocksdb_options_t*, unsigned char);
extern void rocksdb_options_set_create_missing_column_families(
@ -392,13 +419,8 @@ extern void rocksdb_options_set_info_log(rocksdb_options_t*, rocksdb_logger_t*);
extern void rocksdb_options_set_info_log_level(rocksdb_options_t*, int);
extern void rocksdb_options_set_write_buffer_size(rocksdb_options_t*, size_t);
extern void rocksdb_options_set_max_open_files(rocksdb_options_t*, int);
extern void rocksdb_options_set_cache(rocksdb_options_t*, rocksdb_cache_t*);
extern void rocksdb_options_set_cache_compressed(rocksdb_options_t*, rocksdb_cache_t*);
extern void rocksdb_options_set_block_size(rocksdb_options_t*, size_t);
extern void rocksdb_options_set_block_restart_interval(rocksdb_options_t*, int);
extern void rocksdb_options_set_compression_options(
rocksdb_options_t*, int, int, int);
extern void rocksdb_options_set_whole_key_filtering(rocksdb_options_t*, unsigned char);
extern void rocksdb_options_set_prefix_extractor(
rocksdb_options_t*, rocksdb_slicetransform_t*);
extern void rocksdb_options_set_num_levels(rocksdb_options_t*, int);
@ -449,8 +471,6 @@ extern void rocksdb_options_set_arena_block_size(
rocksdb_options_t*, size_t);
extern void rocksdb_options_set_use_fsync(
rocksdb_options_t*, int);
extern void rocksdb_options_set_db_stats_log_interval(
rocksdb_options_t*, int);
extern void rocksdb_options_set_db_log_dir(
rocksdb_options_t*, const char*);
extern void rocksdb_options_set_wal_dir(
@ -493,7 +513,6 @@ extern void rocksdb_options_set_max_sequential_skip_in_iterations(
rocksdb_options_t*, uint64_t);
extern void rocksdb_options_set_disable_data_sync(rocksdb_options_t*, int);
extern void rocksdb_options_set_disable_auto_compactions(rocksdb_options_t*, int);
extern void rocksdb_options_set_disable_seek_compaction(rocksdb_options_t*, int);
extern void rocksdb_options_set_delete_obsolete_files_period_micros(
rocksdb_options_t*, uint64_t);
extern void rocksdb_options_set_source_compaction_factor(rocksdb_options_t*, int);


@ -6,6 +6,7 @@
#pragma once
#include <string>
#include "rocksdb/table.h"
namespace rocksdb {
@ -37,7 +38,8 @@ class FlushBlockPolicyFactory {
// Callers must delete the result after any database that is using the
// result has been closed.
virtual FlushBlockPolicy* NewFlushBlockPolicy(
const Options& options, const BlockBuilder& data_block_builder) const = 0;
const BlockBasedTableOptions& table_options,
const BlockBuilder& data_block_builder) const = 0;
virtual ~FlushBlockPolicyFactory() { }
};
@ -51,7 +53,7 @@ class FlushBlockBySizePolicyFactory : public FlushBlockPolicyFactory {
}
virtual FlushBlockPolicy* NewFlushBlockPolicy(
const Options& options,
const BlockBasedTableOptions& table_options,
const BlockBuilder& data_block_builder) const override;
};
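
A custom FlushBlockPolicyFactory now receives BlockBasedTableOptions instead of Options. A minimal sketch of a factory conforming to the new signature (hypothetical, and it assumes the factory's Name() method is otherwise unchanged by this diff), delegating to the built-in size-based policy:

#include "rocksdb/flush_block_policy.h"

class DelegatingFlushPolicyFactory : public rocksdb::FlushBlockPolicyFactory {
 public:
  const char* Name() const override { return "DelegatingFlushPolicyFactory"; }

  rocksdb::FlushBlockPolicy* NewFlushBlockPolicy(
      const rocksdb::BlockBasedTableOptions& table_options,
      const rocksdb::BlockBuilder& data_block_builder) const override {
    // block_size and block_size_deviation are now read from table_options,
    // so simply reuse the built-in size-based policy under the new signature.
    return rocksdb::FlushBlockBySizePolicyFactory().NewFlushBlockPolicy(
        table_options, data_block_builder);
  }
};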


@ -206,34 +206,6 @@ struct ColumnFamilyOptions {
// individual write buffers. Default: 1
int min_write_buffer_number_to_merge;
// Control over blocks (user data is stored in a set of blocks, and
// a block is the unit of reading from disk).
// If non-NULL use the specified cache for blocks.
// If NULL, rocksdb will automatically create and use an 8MB internal cache.
// Default: nullptr
std::shared_ptr<Cache> block_cache;
// If non-NULL use the specified cache for compressed blocks.
// If NULL, rocksdb will not use a compressed block cache.
// Default: nullptr
std::shared_ptr<Cache> block_cache_compressed;
// Approximate size of user data packed per block. Note that the
// block size specified here corresponds to uncompressed data. The
// actual size of the unit read from disk may be smaller if
// compression is enabled. This parameter can be changed dynamically.
//
// Default: 4K
size_t block_size;
// Number of keys between restart points for delta encoding of keys.
// This parameter can be changed dynamically. Most clients should
// leave this parameter alone.
//
// Default: 16
int block_restart_interval;
// Compress blocks using the specified compression algorithm. This
// parameter can be changed dynamically.
//
@ -267,13 +239,6 @@ struct ColumnFamilyOptions {
// different options for compression algorithms
CompressionOptions compression_opts;
// If non-nullptr, use the specified filter policy to reduce disk reads.
// Many applications will benefit from passing the result of
// NewBloomFilterPolicy() here.
//
// Default: nullptr
const FilterPolicy* filter_policy;
// If non-nullptr, use the specified function to determine the
// prefixes for keys. These prefixes will be placed in the filter.
// Depending on the workload, this can reduce the number of read-IOP
@ -290,12 +255,6 @@ struct ColumnFamilyOptions {
// Default: nullptr
std::shared_ptr<const SliceTransform> prefix_extractor;
// If true, place whole keys in the filter (not just prefixes).
// This must generally be true for gets to be efficient.
//
// Default: true
bool whole_key_filtering;
// Number of levels for this database
int num_levels;
@ -375,18 +334,6 @@ struct ColumnFamilyOptions {
// stop building a single file in a level->level+1 compaction.
int max_grandparent_overlap_factor;
// We decided to remove seek compaction from RocksDB because:
// 1) It makes more sense for spinning disk workloads, while RocksDB is
// primarily designed for flash and memory,
// 2) It added some complexity to the important code-paths,
// 3) None of our internal customers were really using it.
//
// Since we removed seek compaction, this option is now obsolete.
// We left it here for backwards compatibility (otherwise it would break the
// build), but we'll remove it at some point.
// Default: true
bool disable_seek_compaction;
// Puts are delayed 0-1 ms when any level has a compaction score that exceeds
// soft_rate_limit. This is ignored when == 0.0.
// CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not
@ -404,12 +351,6 @@ struct ColumnFamilyOptions {
// Default: 1000
unsigned int rate_limit_delay_max_milliseconds;
// Disable block cache. If this is set to true,
// then no block cache should be used, and the block_cache should
// point to a nullptr object.
// Default: false
bool no_block_cache;
// size of one block in arena memory allocation.
// If <= 0, a proper value is automatically calculated (usually 1/10 of
// write_buffer_size).
@ -433,14 +374,6 @@ struct ColumnFamilyOptions {
// Default: true
bool purge_redundant_kvs_while_flush;
// This is used to close a block before it reaches the configured
// 'block_size'. If the percentage of free space in the current block is less
// than this specified number and adding a new record to the block will
// exceed the configured block size, then this block will be closed and the
// new record will be written to the next block.
// Default is 10.
int block_size_deviation;
// The compaction style. Default: kCompactionStyleLevel
CompactionStyle compaction_style;
@ -684,9 +617,6 @@ struct DBOptions {
// Default: false
bool use_fsync;
// This option is not used!!
int db_stats_log_interval;
// A list of paths where SST files can be put into, with its target size.
// Newer data is placed into paths specified earlier in the vector while
// older data gradually moves to paths specified later in the vector.


@ -84,6 +84,46 @@ struct BlockBasedTableOptions {
// protected with this checksum type. Old table files will still be readable,
// even though they have different checksum type.
ChecksumType checksum = kCRC32c;
// Disable block cache. If this is set to true,
// then no block cache should be used, and block_cache should
// be set to nullptr.
bool no_block_cache = false;
// If non-NULL use the specified cache for blocks.
// If NULL, rocksdb will automatically create and use an 8MB internal cache.
std::shared_ptr<Cache> block_cache = nullptr;
// If non-NULL use the specified cache for compressed blocks.
// If NULL, rocksdb will not use a compressed block cache.
std::shared_ptr<Cache> block_cache_compressed = nullptr;
// Approximate size of user data packed per block. Note that the
// block size specified here corresponds to uncompressed data. The
// actual size of the unit read from disk may be smaller if
// compression is enabled. This parameter can be changed dynamically.
size_t block_size = 4 * 1024;
// This is used to close a block before it reaches the configured
// 'block_size'. If the percentage of free space in the current block is less
// than this specified number and adding a new record to the block will
// exceed the configured block size, then this block will be closed and the
// new record will be written to the next block.
int block_size_deviation = 10;
// Number of keys between restart points for delta encoding of keys.
// This parameter can be changed dynamically. Most clients should
// leave this parameter alone.
int block_restart_interval = 16;
// If non-nullptr, use the specified filter policy to reduce disk reads.
// Many applications will benefit from passing the result of
// NewBloomFilterPolicy() here.
std::shared_ptr<const FilterPolicy> filter_policy = nullptr;
// If true, place whole keys in the filter (not just prefixes).
// This must generally be true for gets to be efficient.
bool whole_key_filtering = true;
};
// Table Properties that are specific to block-based table properties.
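
The block_size / block_size_deviation interaction documented above is easy to misread; here is a small standalone illustration of the stated rule (a sketch, not RocksDB's actual FlushBlockBySizePolicy code):

#include <cstddef>
#include "rocksdb/table.h"

// Should the current data block be closed before appending an entry of
// next_entry_size bytes?
bool ShouldCloseBlock(size_t curr_block_size, size_t next_entry_size,
                      const rocksdb::BlockBasedTableOptions& opts) {
  if (curr_block_size + next_entry_size <= opts.block_size) {
    return false;  // the new record still fits within block_size
  }
  // The record would push the block past block_size. Close early only if the
  // free space is already below block_size_deviation percent of block_size.
  size_t free_space =
      opts.block_size > curr_block_size ? opts.block_size - curr_block_size : 0;
  return free_space * 100 <
         static_cast<size_t>(opts.block_size_deviation) * opts.block_size;
}
// With the defaults (block_size = 4K, block_size_deviation = 10), a block
// holding 3800 bytes is closed before a 1500-byte record (free space ~7%),
// while a block holding 3000 bytes accepts it and grows to 4500 bytes.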


@ -287,27 +287,6 @@ void Java_org_rocksdb_Options_setUseFsync(
static_cast<bool>(use_fsync);
}
/*
* Class: org_rocksdb_Options
* Method: dbStatsLogInterval
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_dbStatsLogInterval(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->db_stats_log_interval;
}
/*
* Class: org_rocksdb_Options
* Method: setDbStatsLogInterval
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setDbStatsLogInterval(
JNIEnv* env, jobject jobj, jlong jhandle, jint db_stats_log_interval) {
reinterpret_cast<rocksdb::Options*>(jhandle)->db_stats_log_interval =
static_cast<int>(db_stats_log_interval);
}
/*
* Class: org_rocksdb_Options
* Method: dbLogDir


@ -385,7 +385,8 @@ class BlockBasedTableBuilder::BlockBasedTablePropertiesCollector
};
struct BlockBasedTableBuilder::Rep {
Options options;
const Options options;
const BlockBasedTableOptions table_options;
const InternalKeyComparator& internal_comparator;
WritableFile* file;
uint64_t offset = 0;
@ -397,7 +398,6 @@ struct BlockBasedTableBuilder::Rep {
std::string last_key;
CompressionType compression_type;
ChecksumType checksum_type;
TableProperties props;
bool closed = false; // Either Finish() or Abandon() has been called.
@ -413,31 +413,32 @@ struct BlockBasedTableBuilder::Rep {
std::vector<std::unique_ptr<TablePropertiesCollector>>
table_properties_collectors;
Rep(const Options& opt, const InternalKeyComparator& icomparator,
WritableFile* f, FlushBlockPolicyFactory* flush_block_policy_factory,
CompressionType compression_type, IndexType index_block_type,
ChecksumType checksum_type)
Rep(const Options& opt, const BlockBasedTableOptions& table_opt,
const InternalKeyComparator& icomparator,
WritableFile* f, CompressionType compression_type)
: options(opt),
table_options(table_opt),
internal_comparator(icomparator),
file(f),
data_block(options, &internal_comparator),
data_block(table_options.block_restart_interval, &internal_comparator),
internal_prefix_transform(options.prefix_extractor.get()),
index_builder(CreateIndexBuilder(index_block_type, &internal_comparator,
&this->internal_prefix_transform)),
index_builder(CreateIndexBuilder(
table_options.index_type, &internal_comparator,
&this->internal_prefix_transform)),
compression_type(compression_type),
checksum_type(checksum_type),
filter_block(opt.filter_policy == nullptr
? nullptr
: new FilterBlockBuilder(opt, &internal_comparator)),
flush_block_policy(flush_block_policy_factory->NewFlushBlockPolicy(
options, data_block)) {
filter_block(table_options.filter_policy == nullptr ?
nullptr :
new FilterBlockBuilder(opt, table_options, &internal_comparator)),
flush_block_policy(
table_options.flush_block_policy_factory->NewFlushBlockPolicy(
table_options, data_block)) {
for (auto& collector_factories :
options.table_properties_collector_factories) {
table_properties_collectors.emplace_back(
collector_factories->CreateTablePropertiesCollector());
}
table_properties_collectors.emplace_back(
new BlockBasedTablePropertiesCollector(index_block_type));
new BlockBasedTablePropertiesCollector(table_options.index_type));
}
};
@ -445,16 +446,14 @@ BlockBasedTableBuilder::BlockBasedTableBuilder(
const Options& options, const BlockBasedTableOptions& table_options,
const InternalKeyComparator& internal_comparator, WritableFile* file,
CompressionType compression_type)
: rep_(new Rep(options, internal_comparator, file,
table_options.flush_block_policy_factory.get(),
compression_type, table_options.index_type,
table_options.checksum)) {
: rep_(new Rep(options, table_options, internal_comparator,
file, compression_type)) {
if (rep_->filter_block != nullptr) {
rep_->filter_block->StartBlock(0);
}
if (options.block_cache_compressed.get() != nullptr) {
if (table_options.block_cache_compressed.get() != nullptr) {
BlockBasedTable::GenerateCachePrefix(
options.block_cache_compressed.get(), file,
table_options.block_cache_compressed.get(), file,
&rep_->compressed_cache_key_prefix[0],
&rep_->compressed_cache_key_prefix_size);
}
@ -566,7 +565,7 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
char trailer[kBlockTrailerSize];
trailer[0] = type;
char* trailer_without_type = trailer + 1;
switch (r->checksum_type) {
switch (r->table_options.checksum) {
case kNoChecksum:
// we don't support no checksum yet
assert(false);
@ -612,7 +611,7 @@ Status BlockBasedTableBuilder::InsertBlockInCache(const Slice& block_contents,
const CompressionType type,
const BlockHandle* handle) {
Rep* r = rep_;
Cache* block_cache_compressed = r->options.block_cache_compressed.get();
Cache* block_cache_compressed = r->table_options.block_cache_compressed.get();
if (type != kNoCompression && block_cache_compressed != nullptr) {
@ -701,7 +700,7 @@ Status BlockBasedTableBuilder::Finish() {
// Add mapping from "<filter_block_prefix>.Name" to location
// of filter data.
std::string key = BlockBasedTable::kFilterBlockPrefix;
key.append(r->options.filter_policy->Name());
key.append(r->table_options.filter_policy->Name());
meta_index_builder.Add(key, filter_block_handle);
}
@ -709,8 +708,8 @@ Status BlockBasedTableBuilder::Finish() {
{
PropertyBlockBuilder property_block_builder;
std::vector<std::string> failed_user_prop_collectors;
r->props.filter_policy_name = r->options.filter_policy != nullptr ?
r->options.filter_policy->Name() : "";
r->props.filter_policy_name = r->table_options.filter_policy != nullptr ?
r->table_options.filter_policy->Name() : "";
r->props.index_size =
r->index_builder->EstimatedSize() + kBlockTrailerSize;
@ -750,12 +749,12 @@ Status BlockBasedTableBuilder::Finish() {
// TODO(icanadi) at some point in the future, when we're absolutely sure
// nobody will roll back to RocksDB 2.x versions, retire the legacy magic
// number and always write new table files with new magic number
bool legacy = (r->checksum_type == kCRC32c);
bool legacy = (r->table_options.checksum == kCRC32c);
Footer footer(legacy ? kLegacyBlockBasedTableMagicNumber
: kBlockBasedTableMagicNumber);
footer.set_metaindex_handle(metaindex_block_handle);
footer.set_index_handle(index_block_handle);
footer.set_checksum(r->checksum_type);
footer.set_checksum(r->table_options.checksum);
std::string footer_encoding;
footer.EncodeTo(&footer_encoding);
r->status = r->file->Append(footer_encoding);


@ -15,6 +15,7 @@
#include <stdint.h>
#include "rocksdb/flush_block_policy.h"
#include "rocksdb/cache.h"
#include "table/block_based_table_builder.h"
#include "table/block_based_table_reader.h"
#include "port/port.h"
@ -28,6 +29,19 @@ BlockBasedTableFactory::BlockBasedTableFactory(
table_options_.flush_block_policy_factory.reset(
new FlushBlockBySizePolicyFactory());
}
if (table_options_.no_block_cache) {
table_options_.block_cache.reset();
} else if (table_options_.block_cache == nullptr) {
table_options_.block_cache = NewLRUCache(8 << 20);
}
if (table_options_.block_size_deviation < 0 ||
table_options_.block_size_deviation > 100) {
table_options_.block_size_deviation = 0;
}
if (table_options_.filter_policy) {
auto* p = new InternalFilterPolicy(table_options_.filter_policy);
table_options_.filter_policy.reset(p);
}
}
Status BlockBasedTableFactory::NewTableReader(
@ -43,6 +57,7 @@ Status BlockBasedTableFactory::NewTableReader(
TableBuilder* BlockBasedTableFactory::NewTableBuilder(
const Options& options, const InternalKeyComparator& internal_comparator,
WritableFile* file, CompressionType compression_type) const {
auto table_builder = new BlockBasedTableBuilder(
options, table_options_, internal_comparator, file, compression_type);


@ -16,6 +16,7 @@
#include "rocksdb/flush_block_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"
#include "db/dbformat.h"
namespace rocksdb {


@ -334,11 +334,16 @@ class HashIndexReader : public IndexReader {
struct BlockBasedTable::Rep {
Rep(const EnvOptions& storage_options,
const BlockBasedTableOptions& table_opt,
const InternalKeyComparator& internal_comparator)
: soptions(storage_options), internal_comparator(internal_comparator) {}
: soptions(storage_options), table_options(table_opt),
filter_policy(table_opt.filter_policy.get()),
internal_comparator(internal_comparator) {}
Options options;
const EnvOptions& soptions;
const BlockBasedTableOptions& table_options;
const FilterPolicy* const filter_policy;
const InternalKeyComparator& internal_comparator;
Status status;
unique_ptr<RandomAccessFile> file;
@ -398,13 +403,13 @@ void BlockBasedTable::SetupCacheKeyPrefix(Rep* rep) {
assert(kMaxCacheKeyPrefixSize >= 10);
rep->cache_key_prefix_size = 0;
rep->compressed_cache_key_prefix_size = 0;
if (rep->options.block_cache != nullptr) {
GenerateCachePrefix(rep->options.block_cache.get(), rep->file.get(),
if (rep->table_options.block_cache != nullptr) {
GenerateCachePrefix(rep->table_options.block_cache.get(), rep->file.get(),
&rep->cache_key_prefix[0],
&rep->cache_key_prefix_size);
}
if (rep->options.block_cache_compressed != nullptr) {
GenerateCachePrefix(rep->options.block_cache_compressed.get(),
if (rep->table_options.block_cache_compressed != nullptr) {
GenerateCachePrefix(rep->table_options.block_cache_compressed.get(),
rep->file.get(), &rep->compressed_cache_key_prefix[0],
&rep->compressed_cache_key_prefix_size);
}
@ -452,7 +457,8 @@ Status BlockBasedTable::Open(const Options& options, const EnvOptions& soptions,
// We've successfully read the footer and the index block: we're
// ready to serve requests.
Rep* rep = new BlockBasedTable::Rep(soptions, internal_comparator);
Rep* rep = new BlockBasedTable::Rep(
soptions, table_options, internal_comparator);
rep->options = options;
rep->file = std::move(file);
rep->footer = footer;
@ -493,7 +499,8 @@ Status BlockBasedTable::Open(const Options& options, const EnvOptions& soptions,
}
// Will use block cache for index/filter blocks access?
if (options.block_cache && table_options.cache_index_and_filter_blocks) {
if (table_options.block_cache &&
table_options.cache_index_and_filter_blocks) {
// Hack: Call NewIndexIterator() to implicitly add index to the block_cache
unique_ptr<Iterator> iter(new_table->NewIndexIterator(ReadOptions()));
s = iter->status();
@ -501,7 +508,7 @@ Status BlockBasedTable::Open(const Options& options, const EnvOptions& soptions,
if (s.ok()) {
// Hack: Call GetFilter() to implicitly add filter to the block_cache
auto filter_entry = new_table->GetFilter();
filter_entry.Release(options.block_cache.get());
filter_entry.Release(table_options.block_cache.get());
}
} else {
// If we don't use block cache for index/filter blocks access, we'll
@ -515,9 +522,9 @@ Status BlockBasedTable::Open(const Options& options, const EnvOptions& soptions,
rep->index_reader.reset(index_reader);
// Set filter block
if (rep->options.filter_policy) {
if (rep->filter_policy) {
std::string key = kFilterBlockPrefix;
key.append(rep->options.filter_policy->Name());
key.append(rep->filter_policy->Name());
BlockHandle handle;
if (FindMetaBlock(meta_iter.get(), key, &handle).ok()) {
rep->filter.reset(ReadFilter(handle, rep));
@ -745,7 +752,7 @@ FilterBlockReader* BlockBasedTable::ReadFilter(const BlockHandle& filter_handle,
}
return new FilterBlockReader(
rep->options, block.data, block.heap_allocated);
rep->options, rep->table_options, block.data, block.heap_allocated);
}
BlockBasedTable::CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter(
@ -755,13 +762,13 @@ BlockBasedTable::CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter(
return {rep_->filter.get(), nullptr /* cache handle */};
}
if (rep_->options.filter_policy == nullptr /* do not use filter at all */ ||
rep_->options.block_cache == nullptr /* no block cache at all */) {
Cache* block_cache = rep_->table_options.block_cache.get();
if (rep_->filter_policy == nullptr /* do not use filter */ ||
block_cache == nullptr /* no block cache at all */) {
return {nullptr /* filter */, nullptr /* cache handle */};
}
// Fetching from the cache
Cache* block_cache = rep_->options.block_cache.get();
char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
auto key = GetCacheKey(
rep_->cache_key_prefix,
@ -790,7 +797,7 @@ BlockBasedTable::CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter(
if (s.ok()) {
std::string filter_block_key = kFilterBlockPrefix;
filter_block_key.append(rep_->options.filter_policy->Name());
filter_block_key.append(rep_->filter_policy->Name());
BlockHandle handle;
if (FindMetaBlock(iter.get(), filter_block_key, &handle).ok()) {
filter = ReadFilter(handle, rep_, &filter_size);
@ -815,7 +822,7 @@ Iterator* BlockBasedTable::NewIndexIterator(const ReadOptions& read_options,
}
bool no_io = read_options.read_tier == kBlockCacheTier;
Cache* block_cache = rep_->options.block_cache.get();
Cache* block_cache = rep_->table_options.block_cache.get();
char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
auto key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
rep_->footer.index_handle(), cache_key);
@ -874,9 +881,9 @@ Iterator* BlockBasedTable::NewDataBlockIterator(Rep* rep,
const ReadOptions& ro, const Slice& index_value,
BlockIter* input_iter) {
const bool no_io = (ro.read_tier == kBlockCacheTier);
Cache* block_cache = rep->options.block_cache.get();
Cache* block_cache_compressed = rep->options.
block_cache_compressed.get();
Cache* block_cache = rep->table_options.block_cache.get();
Cache* block_cache_compressed =
rep->table_options.block_cache_compressed.get();
CachableEntry<Block> block;
BlockHandle handle;
@ -992,8 +999,8 @@ class BlockBasedTable::BlockEntryIteratorState : public TwoLevelIteratorState {
// This will be broken if the user specifies an unusual implementation
// of Options.comparator, or if the user specifies an unusual
// definition of prefixes in Options.filter_policy. In particular, we
// require the following three properties:
// definition of prefixes in BlockBasedTableOptions.filter_policy.
// In particular, we require the following three properties:
//
// 1) key.starts_with(prefix(key))
// 2) Compare(prefix(key), key) <= 0.
@ -1003,7 +1010,7 @@ class BlockBasedTable::BlockEntryIteratorState : public TwoLevelIteratorState {
//
// REQUIRES: this method shouldn't be called while the DB lock is held.
bool BlockBasedTable::PrefixMayMatch(const Slice& internal_key) {
if (!rep_->options.filter_policy) {
if (!rep_->filter_policy) {
return true;
}
@ -1057,7 +1064,7 @@ bool BlockBasedTable::PrefixMayMatch(const Slice& internal_key) {
may_match =
filter_entry.value == nullptr ||
filter_entry.value->PrefixMayMatch(handle.offset(), internal_prefix);
filter_entry.Release(rep_->options.block_cache.get());
filter_entry.Release(rep_->table_options.block_cache.get());
}
Statistics* statistics = rep_->options.statistics.get();
@ -1135,7 +1142,7 @@ Status BlockBasedTable::Get(
}
}
filter_entry.Release(rep_->options.block_cache.get());
filter_entry.Release(rep_->table_options.block_cache.get());
if (s.ok()) {
s = iiter.status();
}
@ -1154,7 +1161,7 @@ bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
Slice input = iiter->value();
Status s = handle.DecodeFrom(&input);
assert(s.ok());
Cache* block_cache = rep_->options.block_cache.get();
Cache* block_cache = rep_->table_options.block_cache.get();
assert(block_cache != nullptr);
char cache_key_storage[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
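For illustration only, not part of the diff: a minimal sketch of how a caller wires the relocated options together after this change, using the built-in fixed-prefix transform, which satisfies the three prefix properties listed in the PrefixMayMatch comment above. NewFixedPrefixTransform, NewBloomFilterPolicy, NewLRUCache, and NewBlockBasedTableFactory are existing RocksDB functions; the prefix length and cache size are made-up values.

    #include "rocksdb/cache.h"
    #include "rocksdb/filter_policy.h"
    #include "rocksdb/options.h"
    #include "rocksdb/slice_transform.h"
    #include "rocksdb/table.h"

    rocksdb::Options MakeOptions() {
      rocksdb::Options options;
      rocksdb::BlockBasedTableOptions table_options;
      // Prefix handling stays on Options: a 3-byte fixed prefix maps "bar123"
      // to "bar", so key.starts_with(prefix(key)) and prefix(key) <= key hold
      // under the default bytewise comparator.
      options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(3));
      // Everything block-based-table specific now lives on the table options;
      // filter_policy is a shared_ptr rather than a raw pointer.
      table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
      table_options.block_cache = rocksdb::NewLRUCache(8 * 1024 * 1024);
      table_options.cache_index_and_filter_blocks = true;
      options.table_factory.reset(
          rocksdb::NewBlockBasedTableFactory(table_options));
      return options;
    }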

View File

@ -52,9 +52,6 @@ BlockBuilder::BlockBuilder(int block_restart_interval,
restarts_.push_back(0); // First restart point is at offset 0
}
BlockBuilder::BlockBuilder(const Options& options, const Comparator* comparator)
: BlockBuilder(options.block_restart_interval, comparator) {}
void BlockBuilder::Reset() {
buffer_.clear();
restarts_.clear();

View File

@ -21,7 +21,6 @@ class Comparator;
class BlockBuilder {
public:
BlockBuilder(int block_restart_interval, const Comparator* comparator);
explicit BlockBuilder(const Options& options, const Comparator* comparator);
// Reset the contents as if the BlockBuilder was just constructed.
void Reset();

View File

@ -76,7 +76,7 @@ TEST(BlockTest, SimpleTest) {
std::vector<std::string> keys;
std::vector<std::string> values;
BlockBuilder builder(options, ic.get());
BlockBuilder builder(16, ic.get());
int num_records = 100000;
GenerateRandomKVs(&keys, &values, 0, num_records);

View File

@ -22,10 +22,11 @@ static const size_t kFilterBaseLg = 11;
static const size_t kFilterBase = 1 << kFilterBaseLg;
FilterBlockBuilder::FilterBlockBuilder(const Options& opt,
const BlockBasedTableOptions& table_opt,
const Comparator* internal_comparator)
: policy_(opt.filter_policy),
: policy_(table_opt.filter_policy.get()),
prefix_extractor_(opt.prefix_extractor.get()),
whole_key_filtering_(opt.whole_key_filtering),
whole_key_filtering_(table_opt.whole_key_filtering),
comparator_(internal_comparator) {}
void FilterBlockBuilder::StartBlock(uint64_t block_offset) {
@ -131,10 +132,11 @@ void FilterBlockBuilder::GenerateFilter() {
}
FilterBlockReader::FilterBlockReader(
const Options& opt, const Slice& contents, bool delete_contents_after_use)
: policy_(opt.filter_policy),
const Options& opt, const BlockBasedTableOptions& table_opt,
const Slice& contents, bool delete_contents_after_use)
: policy_(table_opt.filter_policy.get()),
prefix_extractor_(opt.prefix_extractor.get()),
whole_key_filtering_(opt.whole_key_filtering),
whole_key_filtering_(table_opt.whole_key_filtering),
data_(nullptr),
offset_(nullptr),
num_(0),

View File

@ -21,6 +21,7 @@
#include "rocksdb/options.h"
#include "rocksdb/slice.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/table.h"
#include "util/hash.h"
namespace rocksdb {
@ -36,6 +37,7 @@ class FilterPolicy;
class FilterBlockBuilder {
public:
explicit FilterBlockBuilder(const Options& opt,
const BlockBasedTableOptions& table_opt,
const Comparator* internal_comparator);
void StartBlock(uint64_t block_offset);
@ -70,6 +72,7 @@ class FilterBlockReader {
// REQUIRES: "contents" and *policy must stay live while *this is live.
FilterBlockReader(
const Options& opt,
const BlockBasedTableOptions& table_opt,
const Slice& contents,
bool delete_contents_after_use = false);
bool KeyMayMatch(uint64_t block_offset, const Slice& key);

View File

@ -45,26 +45,26 @@ class TestHashFilter : public FilterPolicy {
class FilterBlockTest {
public:
TestHashFilter policy_;
Options options_;
BlockBasedTableOptions table_options_;
FilterBlockTest() {
options_ = Options();
options_.filter_policy = &policy_;
table_options_.filter_policy.reset(new TestHashFilter());
}
};
TEST(FilterBlockTest, EmptyBuilder) {
FilterBlockBuilder builder(options_, options_.comparator);
FilterBlockBuilder builder(options_, table_options_, options_.comparator);
Slice block = builder.Finish();
ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block));
FilterBlockReader reader(options_, block);
FilterBlockReader reader(options_, table_options_, block);
ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
ASSERT_TRUE(reader.KeyMayMatch(100000, "foo"));
}
TEST(FilterBlockTest, SingleChunk) {
FilterBlockBuilder builder(options_, options_.comparator);
FilterBlockBuilder builder(options_, table_options_, options_.comparator);
builder.StartBlock(100);
builder.AddKey("foo");
builder.AddKey("bar");
@ -74,7 +74,7 @@ TEST(FilterBlockTest, SingleChunk) {
builder.StartBlock(300);
builder.AddKey("hello");
Slice block = builder.Finish();
FilterBlockReader reader(options_, block);
FilterBlockReader reader(options_, table_options_, block);
ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
ASSERT_TRUE(reader.KeyMayMatch(100, "bar"));
ASSERT_TRUE(reader.KeyMayMatch(100, "box"));
@ -85,7 +85,7 @@ TEST(FilterBlockTest, SingleChunk) {
}
TEST(FilterBlockTest, MultiChunk) {
FilterBlockBuilder builder(options_, options_.comparator);
FilterBlockBuilder builder(options_, table_options_, options_.comparator);
// First filter
builder.StartBlock(0);
@ -105,7 +105,7 @@ TEST(FilterBlockTest, MultiChunk) {
builder.AddKey("hello");
Slice block = builder.Finish();
FilterBlockReader reader(options_, block);
FilterBlockReader reader(options_, table_options_, block);
// Check first filter
ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
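For illustration, not from the diff: a sketch mirroring the updated tests above, showing why the builder and reader now take both structs -- prefix_extractor is still read from Options, while filter_policy and whole_key_filtering are read from BlockBasedTableOptions. FilterBlockBuilder and FilterBlockReader are the internal filter-block classes changed above, so this fragment assumes their header is available.

    #include <cassert>

    void FilterBlockSketch() {
      rocksdb::Options options;
      rocksdb::BlockBasedTableOptions table_options;
      table_options.whole_key_filtering = true;
      table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));

      rocksdb::FilterBlockBuilder builder(options, table_options,
                                          options.comparator);
      builder.StartBlock(0);
      builder.AddKey("foo");
      rocksdb::Slice block = builder.Finish();

      rocksdb::FilterBlockReader reader(options, table_options, block);
      // Bloom filters never produce false negatives, so "foo" must match.
      assert(reader.KeyMayMatch(0, "foo"));
    }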

View File

@ -62,9 +62,11 @@ class FlushBlockBySizePolicy : public FlushBlockPolicy {
};
FlushBlockPolicy* FlushBlockBySizePolicyFactory::NewFlushBlockPolicy(
const Options& options, const BlockBuilder& data_block_builder) const {
const BlockBasedTableOptions& table_options,
const BlockBuilder& data_block_builder) const {
return new FlushBlockBySizePolicy(
options.block_size, options.block_size_deviation, data_block_builder);
table_options.block_size, table_options.block_size_deviation,
data_block_builder);
}
} // namespace rocksdb
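For illustration, not part of the diff: block_size and block_size_deviation, now read from BlockBasedTableOptions, drive when the by-size policy cuts a data block. Below is a hedged sketch of that decision, not the actual FlushBlockBySizePolicy code; curr_size and size_after_append stand in for the block builder's internal size estimates.

    // Flush the current data block before appending the next key/value if
    //  (a) it has already reached block_size, or
    //  (b) appending would overflow block_size and the block is within
    //      block_size_deviation percent of full.
    bool ShouldFlushBlock(size_t curr_size, size_t size_after_append,
                          size_t block_size, int block_size_deviation) {
      if (curr_size == 0) {
        return false;  // never cut an empty block
      }
      if (curr_size >= block_size) {
        return true;
      }
      return size_after_append > block_size && block_size_deviation > 0 &&
             curr_size * 100 > block_size * (100 - block_size_deviation);
    }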

View File

@ -194,6 +194,7 @@ class Constructor {
// been added so far. Returns the keys in sorted order in "*keys"
// and stores the key/value pairs in "*kvmap"
void Finish(const Options& options,
const BlockBasedTableOptions& table_options,
const InternalKeyComparator& internal_comparator,
std::vector<std::string>* keys, KVMap* kvmap) {
last_internal_key_ = &internal_comparator;
@ -205,12 +206,13 @@ class Constructor {
keys->push_back(it->first);
}
data_.clear();
Status s = FinishImpl(options, internal_comparator, *kvmap);
Status s = FinishImpl(options, table_options, internal_comparator, *kvmap);
ASSERT_TRUE(s.ok()) << s.ToString();
}
// Construct the data structure from the data in "data"
virtual Status FinishImpl(const Options& options,
const BlockBasedTableOptions& table_options,
const InternalKeyComparator& internal_comparator,
const KVMap& data) = 0;
@ -237,11 +239,13 @@ class BlockConstructor: public Constructor {
delete block_;
}
virtual Status FinishImpl(const Options& options,
const BlockBasedTableOptions& table_options,
const InternalKeyComparator& internal_comparator,
const KVMap& data) {
delete block_;
block_ = nullptr;
BlockBuilder builder(options, &internal_comparator);
BlockBuilder builder(table_options.block_restart_interval,
&internal_comparator);
for (KVMap::const_iterator it = data.begin();
it != data.end();
@ -319,6 +323,7 @@ class TableConstructor: public Constructor {
~TableConstructor() { Reset(); }
virtual Status FinishImpl(const Options& options,
const BlockBasedTableOptions& table_options,
const InternalKeyComparator& internal_comparator,
const KVMap& data) {
Reset();
@ -417,6 +422,7 @@ class MemTableConstructor: public Constructor {
delete memtable_->Unref();
}
virtual Status FinishImpl(const Options& options,
const BlockBasedTableOptions& table_options,
const InternalKeyComparator& internal_comparator,
const KVMap& data) {
delete memtable_->Unref();
@ -455,6 +461,7 @@ class DBConstructor: public Constructor {
delete db_;
}
virtual Status FinishImpl(const Options& options,
const BlockBasedTableOptions& table_options,
const InternalKeyComparator& internal_comparator,
const KVMap& data) {
delete db_;
@ -670,12 +677,9 @@ class Harness {
delete constructor_;
constructor_ = nullptr;
options_ = Options();
options_.block_restart_interval = args.restart_interval;
options_.compression = args.compression;
// Use shorter block size for tests to exercise block boundary
// conditions more.
options_.block_size = 256;
if (args.reverse_compare) {
options_.comparator = &reverse_key_comparator;
}
@ -685,12 +689,14 @@ class Harness {
support_prev_ = true;
only_support_prefix_seek_ = false;
BlockBasedTableOptions table_options;
switch (args.type) {
case BLOCK_BASED_TABLE_TEST:
table_options.flush_block_policy_factory.reset(
table_options_.flush_block_policy_factory.reset(
new FlushBlockBySizePolicyFactory());
options_.table_factory.reset(new BlockBasedTableFactory(table_options));
table_options_.block_size = 256;
table_options_.block_restart_interval = args.restart_interval;
options_.table_factory.reset(
new BlockBasedTableFactory(table_options_));
constructor_ = new TableConstructor(options_.comparator);
break;
case PLAIN_TABLE_SEMI_FIXED_PREFIX:
@ -733,12 +739,21 @@ class Harness {
new InternalKeyComparator(options_.comparator));
break;
case BLOCK_TEST:
table_options_.block_size = 256;
options_.table_factory.reset(
new BlockBasedTableFactory(table_options_));
constructor_ = new BlockConstructor(options_.comparator);
break;
case MEMTABLE_TEST:
table_options_.block_size = 256;
options_.table_factory.reset(
new BlockBasedTableFactory(table_options_));
constructor_ = new MemTableConstructor(options_.comparator);
break;
case DB_TEST:
table_options_.block_size = 256;
options_.table_factory.reset(
new BlockBasedTableFactory(table_options_));
constructor_ = new DBConstructor(options_.comparator);
break;
}
@ -755,7 +770,8 @@ class Harness {
void Test(Random* rnd) {
std::vector<std::string> keys;
KVMap data;
constructor_->Finish(options_, *internal_comparator_, &keys, &data);
constructor_->Finish(options_, table_options_, *internal_comparator_,
&keys, &data);
TestForwardScan(keys, data);
if (support_prev_) {
@ -924,6 +940,7 @@ class Harness {
private:
Options options_ = Options();
BlockBasedTableOptions table_options_ = BlockBasedTableOptions();
Constructor* constructor_;
bool support_prev_;
bool only_support_prefix_seek_;
@ -1018,10 +1035,12 @@ TEST(BlockBasedTableTest, BasicBlockBasedTableProperties) {
KVMap kvmap;
Options options;
options.compression = kNoCompression;
options.block_restart_interval = 1;
BlockBasedTableOptions table_options;
table_options.block_restart_interval = 1;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
c.Finish(options, GetPlainInternalComparator(options.comparator), &keys,
&kvmap);
c.Finish(options, table_options,
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
auto& props = *c.table_reader()->GetTableProperties();
ASSERT_EQ(kvmap.size(), props.num_entries);
@ -1035,7 +1054,7 @@ TEST(BlockBasedTableTest, BasicBlockBasedTableProperties) {
ASSERT_EQ("", props.filter_policy_name); // no filter policy is used
// Verify data size.
BlockBuilder block_builder(options, options.comparator);
BlockBuilder block_builder(1, options.comparator);
for (const auto& item : kvmap) {
block_builder.Add(item.first, item.second);
}
@ -1044,16 +1063,17 @@ TEST(BlockBasedTableTest, BasicBlockBasedTableProperties) {
}
TEST(BlockBasedTableTest, FilterPolicyNameProperties) {
TableConstructor c(BytewiseComparator());
TableConstructor c(BytewiseComparator(), true);
c.Add("a1", "val1");
std::vector<std::string> keys;
KVMap kvmap;
BlockBasedTableOptions table_options;
table_options.filter_policy.reset(NewBloomFilterPolicy(10));
Options options;
std::unique_ptr<const FilterPolicy> filter_policy(NewBloomFilterPolicy(10));
options.filter_policy = filter_policy.get();
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
c.Finish(options, GetPlainInternalComparator(options.comparator), &keys,
&kvmap);
c.Finish(options, table_options,
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
auto& props = *c.table_reader()->GetTableProperties();
ASSERT_EQ("rocksdb.BuiltinBloomFilter", props.filter_policy_name);
}
@ -1094,18 +1114,17 @@ TEST(TableTest, HashIndexTest) {
std::vector<std::string> keys;
KVMap kvmap;
Options options;
options.prefix_extractor.reset(NewFixedPrefixTransform(3));
BlockBasedTableOptions table_options;
table_options.index_type = BlockBasedTableOptions::kHashSearch;
table_options.hash_index_allow_collision = true;
options.table_factory.reset(new BlockBasedTableFactory(table_options));
options.prefix_extractor.reset(NewFixedPrefixTransform(3));
options.block_cache = NewLRUCache(1024);
options.block_size = 1700;
table_options.block_size = 1700;
table_options.block_cache = NewLRUCache(1024);
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
std::unique_ptr<InternalKeyComparator> comparator(
new InternalKeyComparator(BytewiseComparator()));
c.Finish(options, *comparator, &keys, &kvmap);
c.Finish(options, table_options, *comparator, &keys, &kvmap);
auto reader = c.table_reader();
auto props = c.table_reader()->GetTableProperties();
@ -1209,10 +1228,12 @@ TEST(BlockBasedTableTest, IndexSizeStat) {
KVMap kvmap;
Options options;
options.compression = kNoCompression;
options.block_restart_interval = 1;
BlockBasedTableOptions table_options;
table_options.block_restart_interval = 1;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
c.Finish(options, GetPlainInternalComparator(options.comparator), &ks,
&kvmap);
c.Finish(options, table_options,
GetPlainInternalComparator(options.comparator), &ks, &kvmap);
auto index_size = c.table_reader()->GetTableProperties()->index_size;
ASSERT_GT(index_size, last_index_size);
last_index_size = index_size;
@ -1224,8 +1245,10 @@ TEST(BlockBasedTableTest, NumBlockStat) {
TableConstructor c(BytewiseComparator());
Options options;
options.compression = kNoCompression;
options.block_restart_interval = 1;
options.block_size = 1000;
BlockBasedTableOptions table_options;
table_options.block_restart_interval = 1;
table_options.block_size = 1000;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
for (int i = 0; i < 10; ++i) {
// the key/val are slightly smaller than block size, so that each block
@ -1235,8 +1258,8 @@ TEST(BlockBasedTableTest, NumBlockStat) {
std::vector<std::string> ks;
KVMap kvmap;
c.Finish(options, GetPlainInternalComparator(options.comparator), &ks,
&kvmap);
c.Finish(options, table_options,
GetPlainInternalComparator(options.comparator), &ks, &kvmap);
ASSERT_EQ(kvmap.size(),
c.table_reader()->GetTableProperties()->num_data_blocks);
}
@ -1300,20 +1323,19 @@ TEST(BlockBasedTableTest, BlockCacheDisabledTest) {
Options options;
options.create_if_missing = true;
options.statistics = CreateDBStatistics();
options.block_cache = NewLRUCache(1024);
std::unique_ptr<const FilterPolicy> filter_policy(NewBloomFilterPolicy(10));
options.filter_policy = filter_policy.get();
BlockBasedTableOptions table_options;
// Intentionally commented out: table_options.cache_index_and_filter_blocks =
// true;
table_options.block_cache = NewLRUCache(1024);
table_options.filter_policy.reset(NewBloomFilterPolicy(10));
options.table_factory.reset(new BlockBasedTableFactory(table_options));
std::vector<std::string> keys;
KVMap kvmap;
TableConstructor c(BytewiseComparator());
TableConstructor c(BytewiseComparator(), true);
c.Add("key", "value");
c.Finish(options, GetPlainInternalComparator(options.comparator), &keys,
&kvmap);
c.Finish(options, table_options,
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
// preloading filter/index blocks is enabled.
auto reader = dynamic_cast<BlockBasedTable*>(c.table_reader());
@ -1343,10 +1365,10 @@ TEST(BlockBasedTableTest, FilterBlockInBlockCache) {
Options options;
options.create_if_missing = true;
options.statistics = CreateDBStatistics();
options.block_cache = NewLRUCache(1024);
// Enable the cache for index/filter blocks
BlockBasedTableOptions table_options;
table_options.block_cache = NewLRUCache(1024);
table_options.cache_index_and_filter_blocks = true;
options.table_factory.reset(new BlockBasedTableFactory(table_options));
std::vector<std::string> keys;
@ -1354,8 +1376,8 @@ TEST(BlockBasedTableTest, FilterBlockInBlockCache) {
TableConstructor c(BytewiseComparator());
c.Add("key", "value");
c.Finish(options, GetPlainInternalComparator(options.comparator), &keys,
&kvmap);
c.Finish(options, table_options,
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
// preloading filter/index blocks is prohibited.
auto reader = dynamic_cast<BlockBasedTable*>(c.table_reader());
ASSERT_TRUE(!reader->TEST_filter_block_preloaded());
@ -1404,9 +1426,12 @@ TEST(BlockBasedTableTest, FilterBlockInBlockCache) {
iter.reset();
// -- PART 2: Open without block cache
options.block_cache.reset();
table_options.no_block_cache = true;
table_options.block_cache.reset();
options.table_factory.reset(new BlockBasedTableFactory(table_options));
options.statistics = CreateDBStatistics(); // reset the stats
c.Reopen(options);
table_options.no_block_cache = false;
{
iter.reset(c.NewIterator());
@ -1420,7 +1445,8 @@ TEST(BlockBasedTableTest, FilterBlockInBlockCache) {
// -- PART 3: Open with very small block cache
// In this test, no block will ever get hit since the block cache is
// too small to fit even one entry.
options.block_cache = NewLRUCache(1);
table_options.block_cache = NewLRUCache(1);
options.table_factory.reset(new BlockBasedTableFactory(table_options));
c.Reopen(options);
{
BlockCachePropertiesSnapshot props(options.statistics.get());
@ -1458,11 +1484,12 @@ TEST(BlockBasedTableTest, BlockCacheLeak) {
Options opt;
unique_ptr<InternalKeyComparator> ikc;
ikc.reset(new test::PlainInternalKeyComparator(opt.comparator));
opt.block_size = 1024;
opt.compression = kNoCompression;
opt.block_cache =
NewLRUCache(16 * 1024 * 1024); // big enough so we don't ever
// lose cached values.
BlockBasedTableOptions table_options;
table_options.block_size = 1024;
// big enough so we don't ever lose cached values.
table_options.block_cache = NewLRUCache(16 * 1024 * 1024);
opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
TableConstructor c(BytewiseComparator());
c.Add("k01", "hello");
@ -1474,7 +1501,7 @@ TEST(BlockBasedTableTest, BlockCacheLeak) {
c.Add("k07", std::string(100000, 'x'));
std::vector<std::string> keys;
KVMap kvmap;
c.Finish(opt, *ikc, &keys, &kvmap);
c.Finish(opt, table_options, *ikc, &keys, &kvmap);
unique_ptr<Iterator> iter(c.NewIterator());
iter->SeekToFirst();
@ -1492,7 +1519,8 @@ TEST(BlockBasedTableTest, BlockCacheLeak) {
}
// rerun with different block cache
opt.block_cache = NewLRUCache(16 * 1024 * 1024);
table_options.block_cache = NewLRUCache(16 * 1024 * 1024);
opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
ASSERT_OK(c.Reopen(opt));
table_reader = dynamic_cast<BlockBasedTable*>(c.table_reader());
for (const std::string& key : keys) {
@ -1551,9 +1579,10 @@ TEST(GeneralTableTest, ApproximateOffsetOfPlain) {
KVMap kvmap;
Options options;
test::PlainInternalKeyComparator internal_comparator(options.comparator);
options.block_size = 1024;
options.compression = kNoCompression;
c.Finish(options, internal_comparator, &keys, &kvmap);
BlockBasedTableOptions table_options;
table_options.block_size = 1024;
c.Finish(options, table_options, internal_comparator, &keys, &kvmap);
ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
@ -1580,9 +1609,10 @@ static void DoCompressionTest(CompressionType comp) {
KVMap kvmap;
Options options;
test::PlainInternalKeyComparator ikc(options.comparator);
options.block_size = 1024;
options.compression = comp;
c.Finish(options, ikc, &keys, &kvmap);
BlockBasedTableOptions table_options;
table_options.block_size = 1024;
c.Finish(options, table_options, ikc, &keys, &kvmap);
ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
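For illustration, not from the diff: the pattern the updated table tests now follow, with the block-based knobs set on BlockBasedTableOptions and passed both to the table factory and to the test harness. TableConstructor, KVMap, and GetPlainInternalComparator are helpers defined in this test file, and the snippet assumes it sits inside namespace rocksdb as the tests do.

    Options options;
    BlockBasedTableOptions table_options;
    options.compression = kNoCompression;
    table_options.block_size = 1024;
    table_options.block_restart_interval = 1;
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));

    std::vector<std::string> keys;
    KVMap kvmap;
    TableConstructor c(BytewiseComparator());
    c.Add("key", "value");
    c.Finish(options, table_options,
             GetPlainInternalComparator(options.comparator), &keys, &kvmap);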

View File

@ -41,7 +41,6 @@ int main() {
#include "rocksdb/write_batch.h"
#include "rocksdb/slice.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/statistics.h"
#include "port/port.h"
#include "util/coding.h"
#include "util/crc32c.h"
@ -154,7 +153,7 @@ DEFINE_int32(level0_stop_writes_trigger,
rocksdb::Options().level0_stop_writes_trigger,
"Number of files in level-0 that will trigger put stop.");
DEFINE_int32(block_size, rocksdb::Options().block_size,
DEFINE_int32(block_size, rocksdb::BlockBasedTableOptions().block_size,
"Number of bytes in a block.");
DEFINE_int32(max_background_compactions,
@ -780,7 +779,6 @@ class StressTest {
}
column_families_.clear();
delete db_;
delete filter_policy_;
}
bool Run() {
@ -1550,8 +1548,13 @@ class StressTest {
void Open() {
assert(db_ == nullptr);
options_.block_cache = cache_;
options_.block_cache_compressed = compressed_cache_;
BlockBasedTableOptions block_based_options;
block_based_options.block_cache = cache_;
block_based_options.block_cache_compressed = compressed_cache_;
block_based_options.block_size = FLAGS_block_size;
block_based_options.filter_policy = filter_policy_;
options_.table_factory.reset(
NewBlockBasedTableFactory(block_based_options));
options_.write_buffer_size = FLAGS_write_buffer_size;
options_.max_write_buffer_number = FLAGS_max_write_buffer_number;
options_.min_write_buffer_number_to_merge =
@ -1560,8 +1563,6 @@ class StressTest {
options_.max_background_flushes = FLAGS_max_background_flushes;
options_.compaction_style =
static_cast<rocksdb::CompactionStyle>(FLAGS_compaction_style);
options_.block_size = FLAGS_block_size;
options_.filter_policy = filter_policy_;
options_.prefix_extractor.reset(NewFixedPrefixTransform(FLAGS_prefix_size));
options_.max_open_files = FLAGS_open_files;
options_.statistics = dbstats;
@ -1718,9 +1719,9 @@ class StressTest {
}
private:
shared_ptr<Cache> cache_;
shared_ptr<Cache> compressed_cache_;
const FilterPolicy* filter_policy_;
std::shared_ptr<Cache> cache_;
std::shared_ptr<Cache> compressed_cache_;
std::shared_ptr<const FilterPolicy> filter_policy_;
DB* db_;
Options options_;
std::vector<ColumnFamilyHandle*> column_families_;
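For illustration, not part of the diff: with filter_policy_ now a std::shared_ptr<const FilterPolicy>, the explicit delete removed above is no longer needed, and the same pointer can be handed straight to the table options in Open(). The bits-per-key value below is illustrative, not the tool's actual flag default.

    // Ownership moves to the shared_ptr; ~StressTest no longer deletes it.
    filter_policy_.reset(rocksdb::NewBloomFilterPolicy(10));
    // Later, in Open(), the same shared_ptr is copied into the table options:
    //   block_based_options.filter_policy = filter_policy_;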

View File

@ -219,10 +219,11 @@ Options LDBCommand::PrepareOptionsForOpenDB() {
map<string, string>::const_iterator itr;
BlockBasedTableOptions table_options;
int bits;
if (ParseIntOption(option_map_, ARG_BLOOM_BITS, bits, exec_state_)) {
if (bits > 0) {
opt.filter_policy = NewBloomFilterPolicy(bits);
table_options.filter_policy.reset(NewBloomFilterPolicy(bits));
} else {
exec_state_ = LDBCommandExecuteResult::FAILED(ARG_BLOOM_BITS +
" must be > 0.");
@ -232,7 +233,8 @@ Options LDBCommand::PrepareOptionsForOpenDB() {
int block_size;
if (ParseIntOption(option_map_, ARG_BLOCK_SIZE, block_size, exec_state_)) {
if (block_size > 0) {
opt.block_size = block_size;
table_options.block_size = block_size;
opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
} else {
exec_state_ = LDBCommandExecuteResult::FAILED(ARG_BLOCK_SIZE +
" must be > 0.");

View File

@ -17,7 +17,6 @@
#include "rocksdb/compaction_filter.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/slice.h"
@ -39,14 +38,8 @@ ColumnFamilyOptions::ColumnFamilyOptions()
write_buffer_size(4 << 20),
max_write_buffer_number(2),
min_write_buffer_number_to_merge(1),
block_cache(nullptr),
block_cache_compressed(nullptr),
block_size(4096),
block_restart_interval(16),
compression(kSnappyCompression),
filter_policy(nullptr),
prefix_extractor(nullptr),
whole_key_filtering(true),
num_levels(7),
level0_file_num_compaction_trigger(4),
level0_slowdown_writes_trigger(20),
@ -60,15 +53,12 @@ ColumnFamilyOptions::ColumnFamilyOptions()
expanded_compaction_factor(25),
source_compaction_factor(1),
max_grandparent_overlap_factor(10),
disable_seek_compaction(true),
soft_rate_limit(0.0),
hard_rate_limit(0.0),
rate_limit_delay_max_milliseconds(1000),
no_block_cache(false),
arena_block_size(0),
disable_auto_compactions(false),
purge_redundant_kvs_while_flush(true),
block_size_deviation(10),
compaction_style(kCompactionStyleLevel),
verify_checksums_in_compaction(true),
filter_deletes(false),
@ -98,16 +88,10 @@ ColumnFamilyOptions::ColumnFamilyOptions(const Options& options)
max_write_buffer_number(options.max_write_buffer_number),
min_write_buffer_number_to_merge(
options.min_write_buffer_number_to_merge),
block_cache(options.block_cache),
block_cache_compressed(options.block_cache_compressed),
block_size(options.block_size),
block_restart_interval(options.block_restart_interval),
compression(options.compression),
compression_per_level(options.compression_per_level),
compression_opts(options.compression_opts),
filter_policy(options.filter_policy),
prefix_extractor(options.prefix_extractor),
whole_key_filtering(options.whole_key_filtering),
num_levels(options.num_levels),
level0_file_num_compaction_trigger(
options.level0_file_num_compaction_trigger),
@ -123,16 +107,13 @@ ColumnFamilyOptions::ColumnFamilyOptions(const Options& options)
expanded_compaction_factor(options.expanded_compaction_factor),
source_compaction_factor(options.source_compaction_factor),
max_grandparent_overlap_factor(options.max_grandparent_overlap_factor),
disable_seek_compaction(options.disable_seek_compaction),
soft_rate_limit(options.soft_rate_limit),
hard_rate_limit(options.hard_rate_limit),
rate_limit_delay_max_milliseconds(
options.rate_limit_delay_max_milliseconds),
no_block_cache(options.no_block_cache),
arena_block_size(options.arena_block_size),
disable_auto_compactions(options.disable_auto_compactions),
purge_redundant_kvs_while_flush(options.purge_redundant_kvs_while_flush),
block_size_deviation(options.block_size_deviation),
compaction_style(options.compaction_style),
verify_checksums_in_compaction(options.verify_checksums_in_compaction),
compaction_options_universal(options.compaction_options_universal),
@ -175,7 +156,6 @@ DBOptions::DBOptions()
statistics(nullptr),
disableDataSync(false),
use_fsync(false),
db_stats_log_interval(1800),
db_log_dir(""),
wal_dir(""),
delete_obsolete_files_period_micros(6 * 60 * 60 * 1000000UL),
@ -216,7 +196,6 @@ DBOptions::DBOptions(const Options& options)
statistics(options.statistics),
disableDataSync(options.disableDataSync),
use_fsync(options.use_fsync),
db_stats_log_interval(options.db_stats_log_interval),
db_paths(options.db_paths),
db_log_dir(options.db_log_dir),
wal_dir(options.wal_dir),
@ -328,19 +307,6 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
Log(log, " Options.table_factory: %s", table_factory->Name());
Log(log, " Options.write_buffer_size: %zd", write_buffer_size);
Log(log, " Options.max_write_buffer_number: %d", max_write_buffer_number);
Log(log," Options.block_cache: %p", block_cache.get());
Log(log," Options.block_cache_compressed: %p",
block_cache_compressed.get());
if (block_cache) {
Log(log," Options.block_cache_size: %zd",
block_cache->GetCapacity());
}
if (block_cache_compressed) {
Log(log,"Options.block_cache_compressed_size: %zd",
block_cache_compressed->GetCapacity());
}
Log(log," Options.block_size: %zd", block_size);
Log(log," Options.block_restart_interval: %d", block_restart_interval);
if (!compression_per_level.empty()) {
for (unsigned int i = 0; i < compression_per_level.size(); i++) {
Log(log," Options.compression[%d]: %d",
@ -349,11 +315,8 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
} else {
Log(log," Options.compression: %d", compression);
}
Log(log," Options.filter_policy: %s",
filter_policy == nullptr ? "nullptr" : filter_policy->Name());
Log(log," Options.prefix_extractor: %s",
prefix_extractor == nullptr ? "nullptr" : prefix_extractor->Name());
Log(log," Options.whole_key_filtering: %d", whole_key_filtering);
Log(log," Options.num_levels: %d", num_levels);
Log(log," Options.min_write_buffer_number_to_merge: %d",
min_write_buffer_number_to_merge);
@ -393,8 +356,6 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
source_compaction_factor);
Log(log," Options.max_grandparent_overlap_factor: %d",
max_grandparent_overlap_factor);
Log(log," Options.no_block_cache: %d",
no_block_cache);
Log(log," Options.arena_block_size: %zu",
arena_block_size);
Log(log," Options.soft_rate_limit: %.2f",
@ -407,8 +368,6 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
disable_auto_compactions);
Log(log," Options.purge_redundant_kvs_while_flush: %d",
purge_redundant_kvs_while_flush);
Log(log," Options.block_size_deviation: %d",
block_size_deviation);
Log(log," Options.filter_deletes: %d",
filter_deletes);
Log(log, " Options.verify_checksums_in_compaction: %d",

View File

@ -8,6 +8,7 @@
#include "rocksdb/utilities/document_db.h"
#include "rocksdb/cache.h"
#include "rocksdb/table.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/comparator.h"
#include "rocksdb/db.h"
@ -1100,7 +1101,9 @@ Options GetRocksDBOptionsFromOptions(const DocumentDBOptions& options) {
rocksdb_options.max_background_flushes = 1;
rocksdb_options.write_buffer_size = options.memtable_size;
rocksdb_options.max_write_buffer_number = 6;
rocksdb_options.block_cache = NewLRUCache(options.cache_size);
BlockBasedTableOptions table_options;
table_options.block_cache = NewLRUCache(options.cache_size);
rocksdb_options.table_factory.reset(NewBlockBasedTableFactory(table_options));
return rocksdb_options;
}
} // namespace

View File

@ -16,6 +16,7 @@
#include <unordered_set>
#include "rocksdb/cache.h"
#include "rocksdb/table.h"
#include "rocksdb/db.h"
#include "rocksdb/utilities/stackable_db.h"
#include "util/coding.h"
@ -564,7 +565,9 @@ Options GetRocksDBOptionsFromOptions(const SpatialDBOptions& options) {
rocksdb_options.compression_per_level[i] = kLZ4Compression;
}
}
rocksdb_options.block_cache = NewLRUCache(options.cache_size);
BlockBasedTableOptions table_options;
table_options.block_cache = NewLRUCache(options.cache_size);
rocksdb_options.table_factory.reset(NewBlockBasedTableFactory(table_options));
if (options.bulk_load) {
rocksdb_options.PrepareForBulkLoad();
}