// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef ROCKSDB_LITE

#include "rocksdb/c.h"

#include <cstdlib>
#include <map>
#include <unordered_set>
#include <vector>

#include "port/port.h"
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/cache.h"
|
2014-06-18 02:23:47 +00:00
|
|
|
#include "rocksdb/compaction_filter.h"
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/comparator.h"
|
2015-07-15 21:51:51 +00:00
|
|
|
#include "rocksdb/convenience.h"
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/db.h"
|
|
|
|
#include "rocksdb/env.h"
|
2022-06-23 23:25:25 +00:00
|
|
|
#include "rocksdb/experimental.h"
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/filter_policy.h"
|
|
|
|
#include "rocksdb/iterator.h"
|
2017-09-13 18:56:19 +00:00
|
|
|
#include "rocksdb/memtablerep.h"
|
2014-02-12 21:49:00 +00:00
|
|
|
#include "rocksdb/merge_operator.h"
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/options.h"
|
2021-03-19 19:08:09 +00:00
|
|
|
#include "rocksdb/perf_context.h"
|
2017-09-13 18:56:19 +00:00
|
|
|
#include "rocksdb/rate_limiter.h"
|
2014-02-12 21:49:00 +00:00
|
|
|
#include "rocksdb/slice_transform.h"
|
2017-09-13 18:56:19 +00:00
|
|
|
#include "rocksdb/statistics.h"
|
|
|
|
#include "rocksdb/status.h"
|
2014-04-03 06:59:01 +00:00
|
|
|
#include "rocksdb/table.h"
|
2017-09-13 18:56:19 +00:00
|
|
|
#include "rocksdb/universal_compaction.h"
|
2022-01-27 23:44:23 +00:00
|
|
|
#include "rocksdb/utilities/backup_engine.h"
|
2017-09-13 18:56:19 +00:00
|
|
|
#include "rocksdb/utilities/checkpoint.h"
|
2018-03-07 04:51:30 +00:00
|
|
|
#include "rocksdb/utilities/db_ttl.h"
|
2018-09-13 21:12:44 +00:00
|
|
|
#include "rocksdb/utilities/memory_util.h"
|
2017-09-13 18:56:19 +00:00
|
|
|
#include "rocksdb/utilities/optimistic_transaction_db.h"
|
2022-06-30 18:03:52 +00:00
|
|
|
#include "rocksdb/utilities/options_util.h"
|
2021-04-27 17:12:55 +00:00
|
|
|
#include "rocksdb/utilities/table_properties_collectors.h"
|
2017-05-17 05:57:05 +00:00
|
|
|
#include "rocksdb/utilities/transaction.h"
|
|
|
|
#include "rocksdb/utilities/transaction_db.h"
|
2017-09-13 18:56:19 +00:00
|
|
|
#include "rocksdb/utilities/write_batch_with_index.h"
|
|
|
|
#include "rocksdb/write_batch.h"
|
|
|
|
#include "utilities/merge_operators.h"
|
2011-08-05 20:40:49 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
using ROCKSDB_NAMESPACE::BackupEngine;
using ROCKSDB_NAMESPACE::BackupEngineOptions;
using ROCKSDB_NAMESPACE::BackupID;
using ROCKSDB_NAMESPACE::BackupInfo;
using ROCKSDB_NAMESPACE::BatchResult;
using ROCKSDB_NAMESPACE::BlockBasedTableOptions;
using ROCKSDB_NAMESPACE::BottommostLevelCompaction;
using ROCKSDB_NAMESPACE::BytewiseComparator;
using ROCKSDB_NAMESPACE::Cache;
using ROCKSDB_NAMESPACE::Checkpoint;
using ROCKSDB_NAMESPACE::ColumnFamilyDescriptor;
using ROCKSDB_NAMESPACE::ColumnFamilyHandle;
using ROCKSDB_NAMESPACE::ColumnFamilyMetaData;
using ROCKSDB_NAMESPACE::ColumnFamilyOptions;
using ROCKSDB_NAMESPACE::CompactionFilter;
using ROCKSDB_NAMESPACE::CompactionFilterFactory;
using ROCKSDB_NAMESPACE::CompactionOptionsFIFO;
using ROCKSDB_NAMESPACE::CompactRangeOptions;
using ROCKSDB_NAMESPACE::Comparator;
using ROCKSDB_NAMESPACE::CompressionType;
using ROCKSDB_NAMESPACE::CuckooTableOptions;
using ROCKSDB_NAMESPACE::DB;
using ROCKSDB_NAMESPACE::DBOptions;
using ROCKSDB_NAMESPACE::DbPath;
using ROCKSDB_NAMESPACE::Env;
using ROCKSDB_NAMESPACE::EnvOptions;
using ROCKSDB_NAMESPACE::FileLock;
using ROCKSDB_NAMESPACE::FilterPolicy;
using ROCKSDB_NAMESPACE::FlushOptions;
using ROCKSDB_NAMESPACE::InfoLogLevel;
using ROCKSDB_NAMESPACE::IngestExternalFileOptions;
using ROCKSDB_NAMESPACE::Iterator;
using ROCKSDB_NAMESPACE::LevelMetaData;
using ROCKSDB_NAMESPACE::LiveFileMetaData;
using ROCKSDB_NAMESPACE::Logger;
using ROCKSDB_NAMESPACE::LRUCacheOptions;
using ROCKSDB_NAMESPACE::MemoryAllocator;
using ROCKSDB_NAMESPACE::MemoryUtil;
using ROCKSDB_NAMESPACE::MergeOperator;
using ROCKSDB_NAMESPACE::NewBloomFilterPolicy;
using ROCKSDB_NAMESPACE::NewCompactOnDeletionCollectorFactory;
using ROCKSDB_NAMESPACE::NewGenericRateLimiter;
using ROCKSDB_NAMESPACE::NewLRUCache;
using ROCKSDB_NAMESPACE::NewRibbonFilterPolicy;
using ROCKSDB_NAMESPACE::OptimisticTransactionDB;
using ROCKSDB_NAMESPACE::OptimisticTransactionOptions;
using ROCKSDB_NAMESPACE::Options;
using ROCKSDB_NAMESPACE::PerfContext;
using ROCKSDB_NAMESPACE::PerfLevel;
using ROCKSDB_NAMESPACE::PinnableSlice;
using ROCKSDB_NAMESPACE::PrepopulateBlobCache;
using ROCKSDB_NAMESPACE::RandomAccessFile;
using ROCKSDB_NAMESPACE::Range;
using ROCKSDB_NAMESPACE::RateLimiter;
using ROCKSDB_NAMESPACE::ReadOptions;
using ROCKSDB_NAMESPACE::RestoreOptions;
using ROCKSDB_NAMESPACE::SequentialFile;
using ROCKSDB_NAMESPACE::Slice;
using ROCKSDB_NAMESPACE::SliceParts;
using ROCKSDB_NAMESPACE::SliceTransform;
using ROCKSDB_NAMESPACE::Snapshot;
using ROCKSDB_NAMESPACE::SstFileMetaData;
using ROCKSDB_NAMESPACE::SstFileWriter;
using ROCKSDB_NAMESPACE::Status;
using ROCKSDB_NAMESPACE::TablePropertiesCollectorFactory;
using ROCKSDB_NAMESPACE::Transaction;
using ROCKSDB_NAMESPACE::TransactionDB;
using ROCKSDB_NAMESPACE::TransactionDBOptions;
using ROCKSDB_NAMESPACE::TransactionLogIterator;
using ROCKSDB_NAMESPACE::TransactionOptions;
using ROCKSDB_NAMESPACE::WALRecoveryMode;
using ROCKSDB_NAMESPACE::WritableFile;
using ROCKSDB_NAMESPACE::WriteBatch;
using ROCKSDB_NAMESPACE::WriteBatchWithIndex;
using ROCKSDB_NAMESPACE::WriteOptions;

using std::vector;
using std::unordered_set;

extern "C" {


struct rocksdb_t { DB* rep; };
struct rocksdb_backup_engine_t { BackupEngine* rep; };
struct rocksdb_backup_engine_info_t { std::vector<BackupInfo> rep; };
struct rocksdb_restore_options_t { RestoreOptions rep; };
struct rocksdb_iterator_t { Iterator* rep; };
struct rocksdb_writebatch_t { WriteBatch rep; };
struct rocksdb_writebatch_wi_t { WriteBatchWithIndex* rep; };
struct rocksdb_snapshot_t { const Snapshot* rep; };
struct rocksdb_flushoptions_t { FlushOptions rep; };
struct rocksdb_fifo_compaction_options_t { CompactionOptionsFIFO rep; };
struct rocksdb_readoptions_t {
  ReadOptions rep;
  // stack variables to set pointers to in ReadOptions
  Slice upper_bound;
  Slice lower_bound;
  Slice timestamp;
  Slice iter_start_ts;
};
struct rocksdb_writeoptions_t {
  WriteOptions rep;
};
struct rocksdb_options_t {
  Options rep;
};
struct rocksdb_compactoptions_t {
  CompactRangeOptions rep;
  Slice full_history_ts_low;
};
struct rocksdb_block_based_table_options_t {
  BlockBasedTableOptions rep;
};
struct rocksdb_cuckoo_table_options_t {
  CuckooTableOptions rep;
};
struct rocksdb_seqfile_t { SequentialFile* rep; };
struct rocksdb_randomfile_t { RandomAccessFile* rep; };
struct rocksdb_writablefile_t { WritableFile* rep; };
struct rocksdb_wal_iterator_t { TransactionLogIterator* rep; };
struct rocksdb_wal_readoptions_t { TransactionLogIterator::ReadOptions rep; };
struct rocksdb_filelock_t { FileLock* rep; };
struct rocksdb_logger_t {
  std::shared_ptr<Logger> rep;
};
struct rocksdb_lru_cache_options_t {
  LRUCacheOptions rep;
};
struct rocksdb_memory_allocator_t {
  std::shared_ptr<MemoryAllocator> rep;
};
struct rocksdb_cache_t {
  std::shared_ptr<Cache> rep;
};
struct rocksdb_livefiles_t { std::vector<LiveFileMetaData> rep; };
struct rocksdb_column_family_handle_t { ColumnFamilyHandle* rep; };
struct rocksdb_column_family_metadata_t {
  ColumnFamilyMetaData rep;
};
struct rocksdb_level_metadata_t {
  const LevelMetaData* rep;
};
struct rocksdb_sst_file_metadata_t {
  const SstFileMetaData* rep;
};
struct rocksdb_envoptions_t { EnvOptions rep; };
struct rocksdb_ingestexternalfileoptions_t { IngestExternalFileOptions rep; };
struct rocksdb_sstfilewriter_t { SstFileWriter* rep; };
struct rocksdb_ratelimiter_t {
  std::shared_ptr<RateLimiter> rep;
};
struct rocksdb_perfcontext_t { PerfContext* rep; };
struct rocksdb_pinnableslice_t {
  PinnableSlice rep;
};
struct rocksdb_transactiondb_options_t {
  TransactionDBOptions rep;
};
struct rocksdb_transactiondb_t {
  TransactionDB* rep;
};
struct rocksdb_transaction_options_t {
  TransactionOptions rep;
};
struct rocksdb_transaction_t {
  Transaction* rep;
};
struct rocksdb_backup_engine_options_t {
  BackupEngineOptions rep;
};
struct rocksdb_checkpoint_t {
  Checkpoint* rep;
};
struct rocksdb_optimistictransactiondb_t {
  OptimisticTransactionDB* rep;
};
struct rocksdb_optimistictransaction_options_t {
  OptimisticTransactionOptions rep;
};

struct rocksdb_compactionfiltercontext_t {
  CompactionFilter::Context rep;
};

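// The adapter structs below bridge user-supplied C callbacks to the
// corresponding C++ interfaces: each stores an opaque state_ pointer plus C
// function pointers, forwards the virtual methods to those callbacks, and
// invokes destructor_ from its own destructor.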
struct rocksdb_compactionfilter_t : public CompactionFilter {
  void* state_;
  void (*destructor_)(void*);
  unsigned char (*filter_)(
      void*,
      int level,
      const char* key, size_t key_length,
      const char* existing_value, size_t value_length,
      char** new_value, size_t* new_value_length,
      unsigned char* value_changed);
  const char* (*name_)(void*);
  unsigned char ignore_snapshots_;

  ~rocksdb_compactionfilter_t() override { (*destructor_)(state_); }

  bool Filter(int level, const Slice& key, const Slice& existing_value,
              std::string* new_value, bool* value_changed) const override {
    char* c_new_value = nullptr;
    size_t new_value_length = 0;
    unsigned char c_value_changed = 0;
    unsigned char result = (*filter_)(
        state_,
        level,
        key.data(), key.size(),
        existing_value.data(), existing_value.size(),
        &c_new_value, &new_value_length, &c_value_changed);
    if (c_value_changed) {
      new_value->assign(c_new_value, new_value_length);
      *value_changed = true;
    }
    return result;
  }

  const char* Name() const override { return (*name_)(state_); }

  bool IgnoreSnapshots() const override { return ignore_snapshots_; }
};

struct rocksdb_compactionfilterfactory_t : public CompactionFilterFactory {
  void* state_;
  void (*destructor_)(void*);
  rocksdb_compactionfilter_t* (*create_compaction_filter_)(
      void*, rocksdb_compactionfiltercontext_t* context);
  const char* (*name_)(void*);

  ~rocksdb_compactionfilterfactory_t() override { (*destructor_)(state_); }

  std::unique_ptr<CompactionFilter> CreateCompactionFilter(
      const CompactionFilter::Context& context) override {
    rocksdb_compactionfiltercontext_t ccontext;
    ccontext.rep = context;
    CompactionFilter* cf = (*create_compaction_filter_)(state_, &ccontext);
    return std::unique_ptr<CompactionFilter>(cf);
  }

  const char* Name() const override { return (*name_)(state_); }
};

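// Comparator adapter. The timestamp-aware callbacks (compare_ts_,
// compare_without_ts_) may be left null, in which case CompareTimestamp()
// returns 0 and CompareWithoutTimestamp() falls back to Compare().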
struct rocksdb_comparator_t : public Comparator {
  void* state_;
  void (*destructor_)(void*);
  int (*compare_)(void*, const char* a, size_t alen, const char* b,
                  size_t blen);
  const char* (*name_)(void*);
  int (*compare_ts_)(void*, const char* a_ts, size_t a_tslen, const char* b_ts,
                     size_t b_tslen);
  int (*compare_without_ts_)(void*, const char* a, size_t alen,
                             unsigned char a_has_ts, const char* b,
                             size_t blen, unsigned char b_has_ts);

  rocksdb_comparator_t() : Comparator() {}

  rocksdb_comparator_t(size_t ts_size) : Comparator(ts_size) {}

  ~rocksdb_comparator_t() override { (*destructor_)(state_); }

  int Compare(const Slice& a, const Slice& b) const override {
    return (*compare_)(state_, a.data(), a.size(), b.data(), b.size());
  }

  int CompareTimestamp(const Slice& a_ts, const Slice& b_ts) const override {
    if (compare_ts_ == nullptr) {
      return 0;
    }
    return (*compare_ts_)(state_, a_ts.data(), a_ts.size(), b_ts.data(),
                          b_ts.size());
  }

  int CompareWithoutTimestamp(const Slice& a, bool a_has_ts, const Slice& b,
                              bool b_has_ts) const override {
    if (compare_without_ts_ == nullptr) {
      return Compare(a, b);
    }
    return (*compare_without_ts_)(state_, a.data(), a.size(), a_has_ts,
                                  b.data(), b.size(), b_has_ts);
  }

  const char* Name() const override { return (*name_)(state_); }

  // No-ops since the C binding does not support key shortening methods.
  void FindShortestSeparator(std::string*, const Slice&) const override {}
  void FindShortSuccessor(std::string* /*key*/) const override {}
};

struct rocksdb_filterpolicy_t : public FilterPolicy {
  void* state_;
  void (*destructor_)(void*);
  const char* (*name_)(void*);

  ~rocksdb_filterpolicy_t() override { (*destructor_)(state_); }

  const char* Name() const override { return (*name_)(state_); }
};

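// Merge-operator adapter. FullMergeV2() and PartialMergeMulti() flatten the
// operand Slices into parallel pointer/length arrays before invoking the C
// callback; the buffer the callback returns is released through
// delete_value_ when it is set, otherwise with free().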
struct rocksdb_mergeoperator_t : public MergeOperator {
  void* state_;
  void (*destructor_)(void*);
  const char* (*name_)(void*);
  char* (*full_merge_)(
      void*,
      const char* key, size_t key_length,
      const char* existing_value, size_t existing_value_length,
      const char* const* operands_list, const size_t* operands_list_length,
      int num_operands,
      unsigned char* success, size_t* new_value_length);
  char* (*partial_merge_)(void*, const char* key, size_t key_length,
                          const char* const* operands_list,
                          const size_t* operands_list_length, int num_operands,
                          unsigned char* success, size_t* new_value_length);
  void (*delete_value_)(
      void*,
      const char* value, size_t value_length);

  ~rocksdb_mergeoperator_t() override { (*destructor_)(state_); }

  const char* Name() const override { return (*name_)(state_); }

  bool FullMergeV2(const MergeOperationInput& merge_in,
                   MergeOperationOutput* merge_out) const override {
    size_t n = merge_in.operand_list.size();
    std::vector<const char*> operand_pointers(n);
    std::vector<size_t> operand_sizes(n);
    for (size_t i = 0; i < n; i++) {
      Slice operand(merge_in.operand_list[i]);
      operand_pointers[i] = operand.data();
      operand_sizes[i] = operand.size();
    }

    const char* existing_value_data = nullptr;
    size_t existing_value_len = 0;
    if (merge_in.existing_value != nullptr) {
      existing_value_data = merge_in.existing_value->data();
      existing_value_len = merge_in.existing_value->size();
    }

    unsigned char success;
    size_t new_value_len;
    char* tmp_new_value = (*full_merge_)(
        state_, merge_in.key.data(), merge_in.key.size(), existing_value_data,
        existing_value_len, &operand_pointers[0], &operand_sizes[0],
        static_cast<int>(n), &success, &new_value_len);
    merge_out->new_value.assign(tmp_new_value, new_value_len);

    if (delete_value_ != nullptr) {
      (*delete_value_)(state_, tmp_new_value, new_value_len);
    } else {
      free(tmp_new_value);
    }

    return success;
  }

  bool PartialMergeMulti(const Slice& key,
                         const std::deque<Slice>& operand_list,
                         std::string* new_value,
                         Logger* /*logger*/) const override {
    size_t operand_count = operand_list.size();
    std::vector<const char*> operand_pointers(operand_count);
    std::vector<size_t> operand_sizes(operand_count);
    for (size_t i = 0; i < operand_count; ++i) {
      Slice operand(operand_list[i]);
      operand_pointers[i] = operand.data();
      operand_sizes[i] = operand.size();
    }

    unsigned char success;
    size_t new_value_len;
    char* tmp_new_value = (*partial_merge_)(
        state_, key.data(), key.size(), &operand_pointers[0],
        &operand_sizes[0], static_cast<int>(operand_count), &success,
        &new_value_len);
    new_value->assign(tmp_new_value, new_value_len);

    if (delete_value_ != nullptr) {
      (*delete_value_)(state_, tmp_new_value, new_value_len);
    } else {
      free(tmp_new_value);
    }

    return success;
  }
};

struct rocksdb_dbpath_t {
  DbPath rep;
};

struct rocksdb_env_t {
  Env* rep;
  bool is_default;
};

struct rocksdb_slicetransform_t : public SliceTransform {
  void* state_;
  void (*destructor_)(void*);
  const char* (*name_)(void*);
  char* (*transform_)(
      void*,
      const char* key, size_t length,
      size_t* dst_length);
  unsigned char (*in_domain_)(
      void*,
      const char* key, size_t length);
  unsigned char (*in_range_)(
      void*,
      const char* key, size_t length);

  ~rocksdb_slicetransform_t() override { (*destructor_)(state_); }

  const char* Name() const override { return (*name_)(state_); }

  Slice Transform(const Slice& src) const override {
    size_t len;
    char* dst = (*transform_)(state_, src.data(), src.size(), &len);
    return Slice(dst, len);
  }

  bool InDomain(const Slice& src) const override {
    return (*in_domain_)(state_, src.data(), src.size());
  }

  bool InRange(const Slice& src) const override {
    return (*in_range_)(state_, src.data(), src.size());
  }
};

struct rocksdb_universal_compaction_options_t {
  ROCKSDB_NAMESPACE::CompactionOptionsUniversal* rep;
};

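// SaveError() copies a non-OK Status message into *errptr via strdup(), so
// callers of the C API release error strings with free(); it returns true
// exactly when an error was recorded.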
static bool SaveError(char** errptr, const Status& s) {
  assert(errptr != nullptr);
  if (s.ok()) {
    return false;
  } else if (*errptr == nullptr) {
    *errptr = strdup(s.ToString().c_str());
  } else {
    // TODO(sanjay): Merge with existing error?
    // This is a bug if *errptr is not created by malloc()
    free(*errptr);
    *errptr = strdup(s.ToString().c_str());
  }
  return true;
}

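// CopyString() returns a malloc'd copy of `str` that is not NUL-terminated;
// callers receive the length through a separate out-parameter.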
static char* CopyString(const std::string& str) {
  char* result = reinterpret_cast<char*>(malloc(sizeof(char) * str.size()));
  memcpy(result, str.data(), sizeof(char) * str.size());
  return result;
}

rocksdb_t* rocksdb_open(
    const rocksdb_options_t* options,
    const char* name,
    char** errptr) {
  DB* db;
  if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
    return nullptr;
  }
  rocksdb_t* result = new rocksdb_t;
  result->rep = db;
  return result;
}

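// A minimal caller-side sketch of this entry point (assuming the
// corresponding declarations in "rocksdb/c.h"; the path is illustrative, and
// the error string handed back through errptr comes from SaveError() and must
// be free()'d):
//
//   rocksdb_options_t* opts = rocksdb_options_create();
//   rocksdb_options_set_create_if_missing(opts, 1);
//   char* err = NULL;
//   rocksdb_t* db = rocksdb_open(opts, "/tmp/rocksdb_c_example", &err);
//   if (err != NULL) {
//     fprintf(stderr, "open failed: %s\n", err);
//     free(err);
//   } else {
//     rocksdb_close(db);
//   }
//   rocksdb_options_destroy(opts);
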
rocksdb_t* rocksdb_open_with_ttl(
    const rocksdb_options_t* options,
    const char* name,
    int ttl,
    char** errptr) {
  ROCKSDB_NAMESPACE::DBWithTTL* db;
  if (SaveError(errptr, ROCKSDB_NAMESPACE::DBWithTTL::Open(
                            options->rep, std::string(name), &db, ttl))) {
    return nullptr;
  }
  rocksdb_t* result = new rocksdb_t;
  result->rep = db;
  return result;
}

rocksdb_t* rocksdb_open_for_read_only(const rocksdb_options_t* options,
                                      const char* name,
                                      unsigned char error_if_wal_file_exists,
                                      char** errptr) {
  DB* db;
  if (SaveError(errptr, DB::OpenForReadOnly(options->rep, std::string(name),
                                            &db, error_if_wal_file_exists))) {
    return nullptr;
  }
  rocksdb_t* result = new rocksdb_t;
  result->rep = db;
  return result;
}

rocksdb_t* rocksdb_open_as_secondary(const rocksdb_options_t* options,
                                     const char* name,
                                     const char* secondary_path,
                                     char** errptr) {
  DB* db;
  if (SaveError(errptr,
                DB::OpenAsSecondary(options->rep, std::string(name),
                                    std::string(secondary_path), &db))) {
    return nullptr;
  }
  rocksdb_t* result = new rocksdb_t;
  result->rep = db;
  return result;
}

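// BackupEngine bindings. rocksdb_backup_engine_open() builds its
// BackupEngineOptions from the given db options (env and info_log); handles
// returned by either open variant are released with
// rocksdb_backup_engine_close().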
rocksdb_backup_engine_t* rocksdb_backup_engine_open(
    const rocksdb_options_t* options, const char* path, char** errptr) {
  BackupEngine* be;
  if (SaveError(errptr, BackupEngine::Open(
                            options->rep.env,
                            BackupEngineOptions(path, nullptr, true,
                                                options->rep.info_log.get()),
                            &be))) {
    return nullptr;
  }
  rocksdb_backup_engine_t* result = new rocksdb_backup_engine_t;
  result->rep = be;
  return result;
}

rocksdb_backup_engine_t* rocksdb_backup_engine_open_opts(
    const rocksdb_backup_engine_options_t* options, rocksdb_env_t* env,
    char** errptr) {
  BackupEngine* be;
  if (SaveError(errptr, BackupEngine::Open(options->rep, env->rep, &be))) {
    return nullptr;
  }
  rocksdb_backup_engine_t* result = new rocksdb_backup_engine_t;
  result->rep = be;
  return result;
}

void rocksdb_backup_engine_create_new_backup(rocksdb_backup_engine_t* be,
                                             rocksdb_t* db,
                                             char** errptr) {
  SaveError(errptr, be->rep->CreateNewBackup(db->rep));
}

void rocksdb_backup_engine_create_new_backup_flush(
    rocksdb_backup_engine_t* be, rocksdb_t* db,
    unsigned char flush_before_backup, char** errptr) {
  SaveError(errptr, be->rep->CreateNewBackup(db->rep, flush_before_backup));
}

void rocksdb_backup_engine_purge_old_backups(rocksdb_backup_engine_t* be,
                                             uint32_t num_backups_to_keep,
                                             char** errptr) {
  SaveError(errptr, be->rep->PurgeOldBackups(num_backups_to_keep));
}

rocksdb_restore_options_t* rocksdb_restore_options_create() {
  return new rocksdb_restore_options_t;
}

void rocksdb_restore_options_destroy(rocksdb_restore_options_t* opt) {
  delete opt;
}

void rocksdb_restore_options_set_keep_log_files(rocksdb_restore_options_t* opt,
                                                int v) {
  opt->rep.keep_log_files = v;
}

void rocksdb_backup_engine_verify_backup(rocksdb_backup_engine_t* be,
                                         uint32_t backup_id, char** errptr) {
  SaveError(errptr, be->rep->VerifyBackup(static_cast<BackupID>(backup_id)));
}

void rocksdb_backup_engine_restore_db_from_latest_backup(
    rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir,
    const rocksdb_restore_options_t* restore_options, char** errptr) {
  SaveError(errptr, be->rep->RestoreDBFromLatestBackup(std::string(db_dir),
                                                       std::string(wal_dir),
                                                       restore_options->rep));
}

void rocksdb_backup_engine_restore_db_from_backup(
    rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir,
    const rocksdb_restore_options_t* restore_options, const uint32_t backup_id,
    char** errptr) {
  SaveError(errptr, be->rep->RestoreDBFromBackup(backup_id, std::string(db_dir),
                                                 std::string(wal_dir),
                                                 restore_options->rep));
}

const rocksdb_backup_engine_info_t* rocksdb_backup_engine_get_backup_info(
    rocksdb_backup_engine_t* be) {
  rocksdb_backup_engine_info_t* result = new rocksdb_backup_engine_info_t;
  be->rep->GetBackupInfo(&result->rep);
  return result;
}

int rocksdb_backup_engine_info_count(
    const rocksdb_backup_engine_info_t* info) {
  return static_cast<int>(info->rep.size());
}

int64_t rocksdb_backup_engine_info_timestamp(
    const rocksdb_backup_engine_info_t* info, int index) {
  return info->rep[index].timestamp;
}

uint32_t rocksdb_backup_engine_info_backup_id(
    const rocksdb_backup_engine_info_t* info, int index) {
  return info->rep[index].backup_id;
}

uint64_t rocksdb_backup_engine_info_size(
    const rocksdb_backup_engine_info_t* info, int index) {
  return info->rep[index].size;
}

uint32_t rocksdb_backup_engine_info_number_files(
    const rocksdb_backup_engine_info_t* info, int index) {
  return info->rep[index].number_files;
}

void rocksdb_backup_engine_info_destroy(
    const rocksdb_backup_engine_info_t* info) {
  delete info;
}

void rocksdb_backup_engine_close(rocksdb_backup_engine_t* be) {
  delete be->rep;
  delete be;
}

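// Plain setters/getters over the BackupEngineOptions fields wrapped by
// rocksdb_backup_engine_options_t.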
rocksdb_backup_engine_options_t* rocksdb_backup_engine_options_create(
|
2020-10-15 00:50:06 +00:00
|
|
|
const char* backup_dir) {
|
2022-01-27 23:44:23 +00:00
|
|
|
return new rocksdb_backup_engine_options_t{
|
|
|
|
BackupEngineOptions(std::string(backup_dir))};
|
2020-10-15 00:50:06 +00:00
|
|
|
}
|
|
|
|
|
2022-01-27 23:44:23 +00:00
|
|
|
void rocksdb_backup_engine_options_set_backup_dir(
|
|
|
|
rocksdb_backup_engine_options_t* options, const char* backup_dir) {
|
2020-10-15 00:50:06 +00:00
|
|
|
options->rep.backup_dir = std::string(backup_dir);
|
|
|
|
}
|
|
|
|
|
2022-01-27 23:44:23 +00:00
|
|
|
void rocksdb_backup_engine_options_set_env(
|
|
|
|
rocksdb_backup_engine_options_t* options, rocksdb_env_t* env) {
|
2020-10-15 00:50:06 +00:00
|
|
|
options->rep.backup_env = (env ? env->rep : nullptr);
|
|
|
|
}
|
|
|
|
|
2022-01-27 23:44:23 +00:00
|
|
|
void rocksdb_backup_engine_options_set_share_table_files(
|
|
|
|
rocksdb_backup_engine_options_t* options, unsigned char val) {
|
2020-10-15 00:50:06 +00:00
|
|
|
options->rep.share_table_files = val;
|
|
|
|
}
|
|
|
|
|
2022-01-27 23:44:23 +00:00
|
|
|
unsigned char rocksdb_backup_engine_options_get_share_table_files(
|
|
|
|
rocksdb_backup_engine_options_t* options) {
|
2020-10-15 00:50:06 +00:00
|
|
|
return options->rep.share_table_files;
|
|
|
|
}
|
|
|
|
|
2022-01-27 23:44:23 +00:00
|
|
|
void rocksdb_backup_engine_options_set_sync(
|
|
|
|
rocksdb_backup_engine_options_t* options, unsigned char val) {
|
2020-10-15 00:50:06 +00:00
|
|
|
options->rep.sync = val;
|
|
|
|
}
|
|
|
|
|
2022-01-27 23:44:23 +00:00
|
|
|
unsigned char rocksdb_backup_engine_options_get_sync(
|
|
|
|
rocksdb_backup_engine_options_t* options) {
|
2020-10-15 00:50:06 +00:00
|
|
|
return options->rep.sync;
|
|
|
|
}
|
|
|
|
|
2022-01-27 23:44:23 +00:00
|
|
|
void rocksdb_backup_engine_options_set_destroy_old_data(
|
|
|
|
rocksdb_backup_engine_options_t* options, unsigned char val) {
|
2020-10-15 00:50:06 +00:00
|
|
|
options->rep.destroy_old_data = val;
|
|
|
|
}
|
|
|
|
|
2022-01-27 23:44:23 +00:00
|
|
|
unsigned char rocksdb_backup_engine_options_get_destroy_old_data(
|
|
|
|
rocksdb_backup_engine_options_t* options) {
|
2020-10-15 00:50:06 +00:00
|
|
|
return options->rep.destroy_old_data;
|
|
|
|
}
|
|
|
|
|
2022-01-27 23:44:23 +00:00
|
|
|
void rocksdb_backup_engine_options_set_backup_log_files(
    rocksdb_backup_engine_options_t* options, unsigned char val) {
  options->rep.backup_log_files = val;
}

unsigned char rocksdb_backup_engine_options_get_backup_log_files(
    rocksdb_backup_engine_options_t* options) {
  return options->rep.backup_log_files;
}

void rocksdb_backup_engine_options_set_backup_rate_limit(
    rocksdb_backup_engine_options_t* options, uint64_t limit) {
  options->rep.backup_rate_limit = limit;
}

uint64_t rocksdb_backup_engine_options_get_backup_rate_limit(
    rocksdb_backup_engine_options_t* options) {
  return options->rep.backup_rate_limit;
}

void rocksdb_backup_engine_options_set_restore_rate_limit(
    rocksdb_backup_engine_options_t* options, uint64_t limit) {
  options->rep.restore_rate_limit = limit;
}

uint64_t rocksdb_backup_engine_options_get_restore_rate_limit(
    rocksdb_backup_engine_options_t* options) {
  return options->rep.restore_rate_limit;
}

void rocksdb_backup_engine_options_set_max_background_operations(
    rocksdb_backup_engine_options_t* options, int val) {
  options->rep.max_background_operations = val;
}

int rocksdb_backup_engine_options_get_max_background_operations(
    rocksdb_backup_engine_options_t* options) {
  return options->rep.max_background_operations;
}

void rocksdb_backup_engine_options_set_callback_trigger_interval_size(
    rocksdb_backup_engine_options_t* options, uint64_t size) {
  options->rep.callback_trigger_interval_size = size;
}

uint64_t rocksdb_backup_engine_options_get_callback_trigger_interval_size(
    rocksdb_backup_engine_options_t* options) {
  return options->rep.callback_trigger_interval_size;
}

void rocksdb_backup_engine_options_set_max_valid_backups_to_open(
    rocksdb_backup_engine_options_t* options, int val) {
  options->rep.max_valid_backups_to_open = val;
}

int rocksdb_backup_engine_options_get_max_valid_backups_to_open(
    rocksdb_backup_engine_options_t* options) {
  return options->rep.max_valid_backups_to_open;
}

void rocksdb_backup_engine_options_set_share_files_with_checksum_naming(
    rocksdb_backup_engine_options_t* options, int val) {
  options->rep.share_files_with_checksum_naming =
      static_cast<BackupEngineOptions::ShareFilesNaming>(val);
}

int rocksdb_backup_engine_options_get_share_files_with_checksum_naming(
    rocksdb_backup_engine_options_t* options) {
  return static_cast<int>(options->rep.share_files_with_checksum_naming);
}

void rocksdb_backup_engine_options_destroy(
    rocksdb_backup_engine_options_t* options) {
  delete options;
}
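
// Illustrative usage sketch (not part of the library): configuring backup
// behaviour from C through the setters above. It assumes the
// rocksdb_backup_engine_options_create() constructor declared in rocksdb/c.h
// and a hypothetical destination path.
//
//   rocksdb_backup_engine_options_t* bopts =
//       rocksdb_backup_engine_options_create("/path/to/backups");
//   rocksdb_backup_engine_options_set_backup_log_files(bopts, 1);
//   rocksdb_backup_engine_options_set_backup_rate_limit(bopts, 0);
//   rocksdb_backup_engine_options_set_max_background_operations(bopts, 2);
//   /* ... open a backup engine with these options, then ... */
//   rocksdb_backup_engine_options_destroy(bopts);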

rocksdb_checkpoint_t* rocksdb_checkpoint_object_create(rocksdb_t* db,
                                                       char** errptr) {
  Checkpoint* checkpoint;
  if (SaveError(errptr, Checkpoint::Create(db->rep, &checkpoint))) {
    return nullptr;
  }
  rocksdb_checkpoint_t* result = new rocksdb_checkpoint_t;
  result->rep = checkpoint;
  return result;
}

void rocksdb_checkpoint_create(rocksdb_checkpoint_t* checkpoint,
                               const char* checkpoint_dir,
                               uint64_t log_size_for_flush, char** errptr) {
  SaveError(errptr, checkpoint->rep->CreateCheckpoint(
                        std::string(checkpoint_dir), log_size_for_flush));
}

void rocksdb_checkpoint_object_destroy(rocksdb_checkpoint_t* checkpoint) {
  delete checkpoint->rep;
  delete checkpoint;
}
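
// Illustrative usage sketch (not part of the library): taking an on-disk
// checkpoint of an open database. `db` is a previously opened rocksdb_t* and
// the target directory is hypothetical.
//
//   char* err = NULL;
//   rocksdb_checkpoint_t* cp = rocksdb_checkpoint_object_create(db, &err);
//   if (err == NULL) {
//     rocksdb_checkpoint_create(cp, "/tmp/db_checkpoint",
//                               0 /* log_size_for_flush */, &err);
//     rocksdb_checkpoint_object_destroy(cp);
//   }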

void rocksdb_close(rocksdb_t* db) {
  delete db->rep;
  delete db;
}

void rocksdb_options_set_uint64add_merge_operator(rocksdb_options_t* opt) {
  opt->rep.merge_operator =
      ROCKSDB_NAMESPACE::MergeOperators::CreateUInt64AddOperator();
}

rocksdb_t* rocksdb_open_and_trim_history(
    const rocksdb_options_t* db_options, const char* name,
    int num_column_families, const char* const* column_family_names,
    const rocksdb_options_t* const* column_family_options,
    rocksdb_column_family_handle_t** column_family_handles, char* trim_ts,
    size_t trim_tslen, char** errptr) {
  std::vector<ColumnFamilyDescriptor> column_families;
  for (int i = 0; i < num_column_families; i++) {
    column_families.push_back(ColumnFamilyDescriptor(
        std::string(column_family_names[i]),
        ColumnFamilyOptions(column_family_options[i]->rep)));
  }

  std::string trim_ts_(trim_ts, trim_tslen);

  DB* db;
  std::vector<ColumnFamilyHandle*> handles;
  if (SaveError(errptr, DB::OpenAndTrimHistory(
                            DBOptions(db_options->rep), std::string(name),
                            column_families, &handles, &db, trim_ts_))) {
    return nullptr;
  }

  for (size_t i = 0; i < handles.size(); i++) {
    rocksdb_column_family_handle_t* c_handle =
        new rocksdb_column_family_handle_t;
    c_handle->rep = handles[i];
    column_family_handles[i] = c_handle;
  }
  rocksdb_t* result = new rocksdb_t;
  result->rep = db;
  return result;
}

rocksdb_t* rocksdb_open_column_families(
    const rocksdb_options_t* db_options, const char* name,
    int num_column_families, const char* const* column_family_names,
    const rocksdb_options_t* const* column_family_options,
    rocksdb_column_family_handle_t** column_family_handles, char** errptr) {
  std::vector<ColumnFamilyDescriptor> column_families;
  for (int i = 0; i < num_column_families; i++) {
    column_families.push_back(ColumnFamilyDescriptor(
        std::string(column_family_names[i]),
        ColumnFamilyOptions(column_family_options[i]->rep)));
  }

  DB* db;
  std::vector<ColumnFamilyHandle*> handles;
  if (SaveError(errptr, DB::Open(DBOptions(db_options->rep), std::string(name),
                                 column_families, &handles, &db))) {
    return nullptr;
  }

  for (size_t i = 0; i < handles.size(); i++) {
    rocksdb_column_family_handle_t* c_handle =
        new rocksdb_column_family_handle_t;
    c_handle->rep = handles[i];
    column_family_handles[i] = c_handle;
  }
  rocksdb_t* result = new rocksdb_t;
  result->rep = db;
  return result;
}
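
// Illustrative usage sketch (not part of the library): opening a database with
// its default and one extra column family. The option objects come from
// rocksdb_options_create() (declared in rocksdb/c.h); the path and column
// family name are hypothetical.
//
//   const char* cf_names[2] = {"default", "metadata"};
//   rocksdb_options_t* opts = rocksdb_options_create();
//   const rocksdb_options_t* cf_opts[2] = {opts, opts};
//   rocksdb_column_family_handle_t* cf_handles[2];
//   char* err = NULL;
//   rocksdb_t* db = rocksdb_open_column_families(
//       opts, "/tmp/example_db", 2, cf_names, cf_opts, cf_handles, &err);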

rocksdb_t* rocksdb_open_column_families_with_ttl(
    const rocksdb_options_t* db_options, const char* name,
    int num_column_families, const char* const* column_family_names,
    const rocksdb_options_t* const* column_family_options,
    rocksdb_column_family_handle_t** column_family_handles, const int* ttls,
    char** errptr) {
  std::vector<int32_t> ttls_vec;
  std::vector<ColumnFamilyDescriptor> column_families;
  for (int i = 0; i < num_column_families; i++) {
    ttls_vec.push_back(ttls[i]);

    column_families.push_back(ColumnFamilyDescriptor(
        std::string(column_family_names[i]),
        ColumnFamilyOptions(column_family_options[i]->rep)));
  }

  ROCKSDB_NAMESPACE::DBWithTTL* db;
  std::vector<ColumnFamilyHandle*> handles;
  if (SaveError(errptr, ROCKSDB_NAMESPACE::DBWithTTL::Open(
                            DBOptions(db_options->rep), std::string(name),
                            column_families, &handles, &db, ttls_vec))) {
    return nullptr;
  }

  for (size_t i = 0; i < handles.size(); i++) {
    rocksdb_column_family_handle_t* c_handle =
        new rocksdb_column_family_handle_t;
    c_handle->rep = handles[i];
    column_family_handles[i] = c_handle;
  }
  rocksdb_t* result = new rocksdb_t;
  result->rep = db;
  return result;
}

rocksdb_t* rocksdb_open_for_read_only_column_families(
    const rocksdb_options_t* db_options, const char* name,
    int num_column_families, const char* const* column_family_names,
    const rocksdb_options_t* const* column_family_options,
    rocksdb_column_family_handle_t** column_family_handles,
    unsigned char error_if_wal_file_exists, char** errptr) {
  std::vector<ColumnFamilyDescriptor> column_families;
  for (int i = 0; i < num_column_families; i++) {
    column_families.push_back(ColumnFamilyDescriptor(
        std::string(column_family_names[i]),
        ColumnFamilyOptions(column_family_options[i]->rep)));
  }

  DB* db;
  std::vector<ColumnFamilyHandle*> handles;
  if (SaveError(errptr,
                DB::OpenForReadOnly(DBOptions(db_options->rep),
                                    std::string(name), column_families,
                                    &handles, &db, error_if_wal_file_exists))) {
    return nullptr;
  }

  for (size_t i = 0; i < handles.size(); i++) {
    rocksdb_column_family_handle_t* c_handle =
        new rocksdb_column_family_handle_t;
    c_handle->rep = handles[i];
    column_family_handles[i] = c_handle;
  }
  rocksdb_t* result = new rocksdb_t;
  result->rep = db;
  return result;
}

rocksdb_t* rocksdb_open_as_secondary_column_families(
    const rocksdb_options_t* db_options, const char* name,
    const char* secondary_path, int num_column_families,
    const char* const* column_family_names,
    const rocksdb_options_t* const* column_family_options,
    rocksdb_column_family_handle_t** column_family_handles, char** errptr) {
  std::vector<ColumnFamilyDescriptor> column_families;
  for (int i = 0; i != num_column_families; ++i) {
    column_families.emplace_back(
        std::string(column_family_names[i]),
        ColumnFamilyOptions(column_family_options[i]->rep));
  }
  DB* db;
  std::vector<ColumnFamilyHandle*> handles;
  if (SaveError(errptr, DB::OpenAsSecondary(DBOptions(db_options->rep),
                                            std::string(name),
                                            std::string(secondary_path),
                                            column_families, &handles, &db))) {
    return nullptr;
  }
  for (size_t i = 0; i != handles.size(); ++i) {
    rocksdb_column_family_handle_t* c_handle =
        new rocksdb_column_family_handle_t;
    c_handle->rep = handles[i];
    column_family_handles[i] = c_handle;
  }
  rocksdb_t* result = new rocksdb_t;
  result->rep = db;
  return result;
}

char** rocksdb_list_column_families(const rocksdb_options_t* options,
                                    const char* name, size_t* lencfs,
                                    char** errptr) {
  std::vector<std::string> fams;
  SaveError(errptr, DB::ListColumnFamilies(DBOptions(options->rep),
                                           std::string(name), &fams));

  *lencfs = fams.size();
  char** column_families =
      static_cast<char**>(malloc(sizeof(char*) * fams.size()));
  for (size_t i = 0; i < fams.size(); i++) {
    column_families[i] = strdup(fams[i].c_str());
  }
  return column_families;
}

void rocksdb_list_column_families_destroy(char** list, size_t len) {
  for (size_t i = 0; i < len; ++i) {
    free(list[i]);
  }
  free(list);
}
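
// Illustrative usage sketch (not part of the library): enumerating the column
// families of an existing database and releasing the returned list, assuming
// an options object `opts` and a hypothetical path.
//
//   size_t cf_count = 0;
//   char* err = NULL;
//   char** cf_names =
//       rocksdb_list_column_families(opts, "/tmp/example_db", &cf_count, &err);
//   if (err == NULL) {
//     for (size_t i = 0; i < cf_count; i++) {
//       printf("column family: %s\n", cf_names[i]);
//     }
//     rocksdb_list_column_families_destroy(cf_names, cf_count);
//   }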

rocksdb_column_family_handle_t* rocksdb_create_column_family(
    rocksdb_t* db, const rocksdb_options_t* column_family_options,
    const char* column_family_name, char** errptr) {
  rocksdb_column_family_handle_t* handle = new rocksdb_column_family_handle_t;
  SaveError(errptr, db->rep->CreateColumnFamily(
                        ColumnFamilyOptions(column_family_options->rep),
                        std::string(column_family_name), &(handle->rep)));
  return handle;
}

rocksdb_column_family_handle_t* rocksdb_create_column_family_with_ttl(
    rocksdb_t* db, const rocksdb_options_t* column_family_options,
    const char* column_family_name, int ttl, char** errptr) {
  ROCKSDB_NAMESPACE::DBWithTTL* db_with_ttl =
      static_cast<ROCKSDB_NAMESPACE::DBWithTTL*>(db->rep);
  rocksdb_column_family_handle_t* handle = new rocksdb_column_family_handle_t;
  SaveError(errptr, db_with_ttl->CreateColumnFamilyWithTtl(
                        ColumnFamilyOptions(column_family_options->rep),
                        std::string(column_family_name), &(handle->rep), ttl));
  return handle;
}

void rocksdb_drop_column_family(rocksdb_t* db,
                                rocksdb_column_family_handle_t* handle,
                                char** errptr) {
  SaveError(errptr, db->rep->DropColumnFamily(handle->rep));
}

uint32_t rocksdb_column_family_handle_get_id(
    rocksdb_column_family_handle_t* handle) {
  return handle->rep->GetID();
}

char* rocksdb_column_family_handle_get_name(
    rocksdb_column_family_handle_t* handle, size_t* name_len) {
  auto name = handle->rep->GetName();
  *name_len = name.size();
  return CopyString(name);
}

void rocksdb_column_family_handle_destroy(
    rocksdb_column_family_handle_t* handle) {
  delete handle->rep;
  delete handle;
}
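
// Illustrative usage sketch (not part of the library): creating a column
// family at runtime, inspecting its handle, and dropping it again. `db` and
// `opts` are assumed from the earlier sketches; the name is hypothetical.
//
//   char* err = NULL;
//   rocksdb_column_family_handle_t* cf =
//       rocksdb_create_column_family(db, opts, "scratch", &err);
//   if (err == NULL) {
//     size_t name_len = 0;
//     char* name = rocksdb_column_family_handle_get_name(cf, &name_len);
//     free(name);  // buffer comes from CopyString() and is not NUL-terminated
//     rocksdb_drop_column_family(db, cf, &err);
//     rocksdb_column_family_handle_destroy(cf);
//   }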

void rocksdb_put(rocksdb_t* db, const rocksdb_writeoptions_t* options,
                 const char* key, size_t keylen, const char* val, size_t vallen,
                 char** errptr) {
  SaveError(errptr,
            db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
}

void rocksdb_put_cf(rocksdb_t* db, const rocksdb_writeoptions_t* options,
                    rocksdb_column_family_handle_t* column_family,
                    const char* key, size_t keylen, const char* val,
                    size_t vallen, char** errptr) {
  SaveError(errptr, db->rep->Put(options->rep, column_family->rep,
                                 Slice(key, keylen), Slice(val, vallen)));
}

void rocksdb_put_with_ts(rocksdb_t* db, const rocksdb_writeoptions_t* options,
                         const char* key, size_t keylen, const char* ts,
                         size_t tslen, const char* val, size_t vallen,
                         char** errptr) {
  SaveError(errptr, db->rep->Put(options->rep, Slice(key, keylen),
                                 Slice(ts, tslen), Slice(val, vallen)));
}

void rocksdb_put_cf_with_ts(rocksdb_t* db,
                            const rocksdb_writeoptions_t* options,
                            rocksdb_column_family_handle_t* column_family,
                            const char* key, size_t keylen, const char* ts,
                            size_t tslen, const char* val, size_t vallen,
                            char** errptr) {
  SaveError(errptr,
            db->rep->Put(options->rep, column_family->rep, Slice(key, keylen),
                         Slice(ts, tslen), Slice(val, vallen)));
}
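
// Illustrative usage sketch (not part of the library): writing a single
// key/value pair, assuming an open rocksdb_t* `db`.
//
//   rocksdb_writeoptions_t* wopts = rocksdb_writeoptions_create();
//   char* err = NULL;
//   rocksdb_put(db, wopts, "key", 3, "value", 5, &err);
//   rocksdb_writeoptions_destroy(wopts);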

void rocksdb_delete(rocksdb_t* db, const rocksdb_writeoptions_t* options,
                    const char* key, size_t keylen, char** errptr) {
  SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
}

void rocksdb_delete_cf(rocksdb_t* db, const rocksdb_writeoptions_t* options,
                       rocksdb_column_family_handle_t* column_family,
                       const char* key, size_t keylen, char** errptr) {
  SaveError(errptr, db->rep->Delete(options->rep, column_family->rep,
                                    Slice(key, keylen)));
}

void rocksdb_delete_with_ts(rocksdb_t* db,
                            const rocksdb_writeoptions_t* options,
                            const char* key, size_t keylen, const char* ts,
                            size_t tslen, char** errptr) {
  SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen),
                                    Slice(ts, tslen)));
}

void rocksdb_delete_cf_with_ts(rocksdb_t* db,
                               const rocksdb_writeoptions_t* options,
                               rocksdb_column_family_handle_t* column_family,
                               const char* key, size_t keylen, const char* ts,
                               size_t tslen, char** errptr) {
  SaveError(errptr, db->rep->Delete(options->rep, column_family->rep,
                                    Slice(key, keylen), Slice(ts, tslen)));
}

void rocksdb_singledelete(rocksdb_t* db, const rocksdb_writeoptions_t* options,
                          const char* key, size_t keylen, char** errptr) {
  SaveError(errptr, db->rep->SingleDelete(options->rep, Slice(key, keylen)));
}

void rocksdb_singledelete_cf(rocksdb_t* db,
                             const rocksdb_writeoptions_t* options,
                             rocksdb_column_family_handle_t* column_family,
                             const char* key, size_t keylen, char** errptr) {
  SaveError(errptr, db->rep->SingleDelete(options->rep, column_family->rep,
                                          Slice(key, keylen)));
}

void rocksdb_singledelete_with_ts(rocksdb_t* db,
                                  const rocksdb_writeoptions_t* options,
                                  const char* key, size_t keylen,
                                  const char* ts, size_t tslen, char** errptr) {
  SaveError(errptr, db->rep->SingleDelete(options->rep, Slice(key, keylen),
                                          Slice(ts, tslen)));
}

void rocksdb_singledelete_cf_with_ts(
    rocksdb_t* db, const rocksdb_writeoptions_t* options,
    rocksdb_column_family_handle_t* column_family, const char* key,
    size_t keylen, const char* ts, size_t tslen, char** errptr) {
  SaveError(errptr,
            db->rep->SingleDelete(options->rep, column_family->rep,
                                  Slice(key, keylen), Slice(ts, tslen)));
}

void rocksdb_increase_full_history_ts_low(
    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
    const char* ts_low, size_t ts_lowlen, char** errptr) {
  std::string ts(ts_low, ts_lowlen);
  SaveError(errptr, db->rep->IncreaseFullHistoryTsLow(column_family->rep, ts));
}

char* rocksdb_get_full_history_ts_low(
    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
    size_t* ts_len, char** errptr) {
  char* result = nullptr;
  std::string tmp;
  Status s = db->rep->GetFullHistoryTsLow(column_family->rep, &tmp);
  if (s.ok()) {
    *ts_len = tmp.size();
    result = CopyString(tmp);
  } else {
    *ts_len = 0;
    SaveError(errptr, s);
  }
  return result;
}

void rocksdb_delete_range_cf(rocksdb_t* db,
                             const rocksdb_writeoptions_t* options,
                             rocksdb_column_family_handle_t* column_family,
                             const char* start_key, size_t start_key_len,
                             const char* end_key, size_t end_key_len,
                             char** errptr) {
  SaveError(errptr, db->rep->DeleteRange(options->rep, column_family->rep,
                                         Slice(start_key, start_key_len),
                                         Slice(end_key, end_key_len)));
}

void rocksdb_merge(rocksdb_t* db, const rocksdb_writeoptions_t* options,
                   const char* key, size_t keylen, const char* val,
                   size_t vallen, char** errptr) {
  SaveError(errptr, db->rep->Merge(options->rep, Slice(key, keylen),
                                   Slice(val, vallen)));
}

void rocksdb_merge_cf(rocksdb_t* db, const rocksdb_writeoptions_t* options,
                      rocksdb_column_family_handle_t* column_family,
                      const char* key, size_t keylen, const char* val,
                      size_t vallen, char** errptr) {
  SaveError(errptr,
            db->rep->Merge(options->rep, column_family->rep,
                           Slice(key, keylen), Slice(val, vallen)));
}

void rocksdb_write(rocksdb_t* db, const rocksdb_writeoptions_t* options,
                   rocksdb_writebatch_t* batch, char** errptr) {
  SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
}
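
// Illustrative usage sketch (not part of the library): applying several
// updates atomically with a write batch (the batch functions appear later in
// this file and are declared in rocksdb/c.h). `db` and `wopts` are assumed
// from the earlier sketches.
//
//   rocksdb_writebatch_t* batch = rocksdb_writebatch_create();
//   rocksdb_writebatch_put(batch, "a", 1, "1", 1);
//   rocksdb_writebatch_delete(batch, "b", 1);
//   char* err = NULL;
//   rocksdb_write(db, wopts, batch, &err);
//   rocksdb_writebatch_destroy(batch);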

char* rocksdb_get(rocksdb_t* db, const rocksdb_readoptions_t* options,
                  const char* key, size_t keylen, size_t* vallen,
                  char** errptr) {
  char* result = nullptr;
  std::string tmp;
  Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
  if (s.ok()) {
    *vallen = tmp.size();
    result = CopyString(tmp);
  } else {
    *vallen = 0;
    if (!s.IsNotFound()) {
      SaveError(errptr, s);
    }
  }
  return result;
}
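
// Illustrative usage sketch (not part of the library): reading a value and
// distinguishing "not found" (NULL result, no error) from a real error.
//
//   rocksdb_readoptions_t* ropts = rocksdb_readoptions_create();
//   size_t val_len = 0;
//   char* err = NULL;
//   char* val = rocksdb_get(db, ropts, "key", 3, &val_len, &err);
//   if (err != NULL) {
//     /* hard failure */
//   } else if (val == NULL) {
//     /* key not present */
//   } else {
//     /* use val[0..val_len); the buffer is malloc'd and not NUL-terminated */
//     free(val);
//   }
//   rocksdb_readoptions_destroy(ropts);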

char* rocksdb_get_cf(rocksdb_t* db, const rocksdb_readoptions_t* options,
                     rocksdb_column_family_handle_t* column_family,
                     const char* key, size_t keylen, size_t* vallen,
                     char** errptr) {
  char* result = nullptr;
  std::string tmp;
  Status s = db->rep->Get(options->rep, column_family->rep, Slice(key, keylen),
                          &tmp);
  if (s.ok()) {
    *vallen = tmp.size();
    result = CopyString(tmp);
  } else {
    *vallen = 0;
    if (!s.IsNotFound()) {
      SaveError(errptr, s);
    }
  }
  return result;
}

char* rocksdb_get_with_ts(rocksdb_t* db, const rocksdb_readoptions_t* options,
                          const char* key, size_t keylen, size_t* vallen,
                          char** ts, size_t* tslen, char** errptr) {
  char* result = nullptr;
  std::string tmp_val;
  std::string tmp_ts;
  Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp_val, &tmp_ts);
  if (s.ok()) {
    *vallen = tmp_val.size();
    result = CopyString(tmp_val);
    *tslen = tmp_ts.size();
    *ts = CopyString(tmp_ts);
  } else {
    *vallen = 0;
    *tslen = 0;
    if (!s.IsNotFound()) {
      SaveError(errptr, s);
    }
  }
  return result;
}

char* rocksdb_get_cf_with_ts(rocksdb_t* db,
                             const rocksdb_readoptions_t* options,
                             rocksdb_column_family_handle_t* column_family,
                             const char* key, size_t keylen, size_t* vallen,
                             char** ts, size_t* tslen, char** errptr) {
  char* result = nullptr;
  std::string tmp;
  std::string tmp_ts;
  Status s = db->rep->Get(options->rep, column_family->rep, Slice(key, keylen),
                          &tmp, &tmp_ts);
  if (s.ok()) {
    *vallen = tmp.size();
    result = CopyString(tmp);
    *tslen = tmp_ts.size();
    *ts = CopyString(tmp_ts);
  } else {
    *vallen = 0;
    *tslen = 0;
    if (!s.IsNotFound()) {
      SaveError(errptr, s);
    }
  }
  return result;
}

void rocksdb_multi_get(rocksdb_t* db, const rocksdb_readoptions_t* options,
                       size_t num_keys, const char* const* keys_list,
                       const size_t* keys_list_sizes, char** values_list,
                       size_t* values_list_sizes, char** errs) {
  std::vector<Slice> keys(num_keys);
  for (size_t i = 0; i < num_keys; i++) {
    keys[i] = Slice(keys_list[i], keys_list_sizes[i]);
  }
  std::vector<std::string> values(num_keys);
  std::vector<Status> statuses = db->rep->MultiGet(options->rep, keys, &values);
  for (size_t i = 0; i < num_keys; i++) {
    if (statuses[i].ok()) {
      values_list[i] = CopyString(values[i]);
      values_list_sizes[i] = values[i].size();
      errs[i] = nullptr;
    } else {
      values_list[i] = nullptr;
      values_list_sizes[i] = 0;
      if (!statuses[i].IsNotFound()) {
        errs[i] = strdup(statuses[i].ToString().c_str());
      } else {
        errs[i] = nullptr;
      }
    }
  }
}
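
// Illustrative usage sketch (not part of the library): fetching several keys
// in one call. Each output slot is either a malloc'd value, or NULL with an
// optional per-key error string. `db` and `ropts` are assumed as above.
//
//   const char* keys[2] = {"a", "b"};
//   const size_t key_sizes[2] = {1, 1};
//   char* values[2];
//   size_t value_sizes[2];
//   char* errs[2];
//   rocksdb_multi_get(db, ropts, 2, keys, key_sizes, values, value_sizes, errs);
//   for (int i = 0; i < 2; i++) {
//     free(values[i]);  // free(NULL) is a no-op for missing keys
//     free(errs[i]);
//   }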

void rocksdb_multi_get_with_ts(rocksdb_t* db,
                               const rocksdb_readoptions_t* options,
                               size_t num_keys, const char* const* keys_list,
                               const size_t* keys_list_sizes,
                               char** values_list, size_t* values_list_sizes,
                               char** timestamp_list,
                               size_t* timestamp_list_sizes, char** errs) {
  std::vector<Slice> keys(num_keys);
  for (size_t i = 0; i < num_keys; i++) {
    keys[i] = Slice(keys_list[i], keys_list_sizes[i]);
  }
  std::vector<std::string> values(num_keys);
  std::vector<std::string> timestamps(num_keys);
  std::vector<Status> statuses =
      db->rep->MultiGet(options->rep, keys, &values, &timestamps);
  for (size_t i = 0; i < num_keys; i++) {
    if (statuses[i].ok()) {
      values_list[i] = CopyString(values[i]);
      values_list_sizes[i] = values[i].size();
      timestamp_list[i] = CopyString(timestamps[i]);
      timestamp_list_sizes[i] = timestamps[i].size();
      errs[i] = nullptr;
    } else {
      values_list[i] = nullptr;
      values_list_sizes[i] = 0;
      timestamp_list[i] = nullptr;
      timestamp_list_sizes[i] = 0;
      if (!statuses[i].IsNotFound()) {
        errs[i] = strdup(statuses[i].ToString().c_str());
      } else {
        errs[i] = nullptr;
      }
    }
  }
}

void rocksdb_multi_get_cf(
    rocksdb_t* db, const rocksdb_readoptions_t* options,
    const rocksdb_column_family_handle_t* const* column_families,
    size_t num_keys, const char* const* keys_list,
    const size_t* keys_list_sizes, char** values_list,
    size_t* values_list_sizes, char** errs) {
  std::vector<Slice> keys(num_keys);
  std::vector<ColumnFamilyHandle*> cfs(num_keys);
  for (size_t i = 0; i < num_keys; i++) {
    keys[i] = Slice(keys_list[i], keys_list_sizes[i]);
    cfs[i] = column_families[i]->rep;
  }
  std::vector<std::string> values(num_keys);
  std::vector<Status> statuses =
      db->rep->MultiGet(options->rep, cfs, keys, &values);
  for (size_t i = 0; i < num_keys; i++) {
    if (statuses[i].ok()) {
      values_list[i] = CopyString(values[i]);
      values_list_sizes[i] = values[i].size();
      errs[i] = nullptr;
    } else {
      values_list[i] = nullptr;
      values_list_sizes[i] = 0;
      if (!statuses[i].IsNotFound()) {
        errs[i] = strdup(statuses[i].ToString().c_str());
      } else {
        errs[i] = nullptr;
      }
    }
  }
}

void rocksdb_multi_get_cf_with_ts(
    rocksdb_t* db, const rocksdb_readoptions_t* options,
    const rocksdb_column_family_handle_t* const* column_families,
    size_t num_keys, const char* const* keys_list,
    const size_t* keys_list_sizes, char** values_list,
    size_t* values_list_sizes, char** timestamps_list,
    size_t* timestamps_list_sizes, char** errs) {
  std::vector<Slice> keys(num_keys);
  std::vector<ColumnFamilyHandle*> cfs(num_keys);
  for (size_t i = 0; i < num_keys; i++) {
    keys[i] = Slice(keys_list[i], keys_list_sizes[i]);
    cfs[i] = column_families[i]->rep;
  }
  std::vector<std::string> values(num_keys);
  std::vector<std::string> timestamps(num_keys);
  std::vector<Status> statuses =
      db->rep->MultiGet(options->rep, cfs, keys, &values, &timestamps);
  for (size_t i = 0; i < num_keys; i++) {
    if (statuses[i].ok()) {
      values_list[i] = CopyString(values[i]);
      values_list_sizes[i] = values[i].size();
      timestamps_list[i] = CopyString(timestamps[i]);
      timestamps_list_sizes[i] = timestamps[i].size();
      errs[i] = nullptr;
    } else {
      values_list[i] = nullptr;
      values_list_sizes[i] = 0;
      timestamps_list[i] = nullptr;
      timestamps_list_sizes[i] = 0;
      if (!statuses[i].IsNotFound()) {
        errs[i] = strdup(statuses[i].ToString().c_str());
      } else {
        errs[i] = nullptr;
      }
    }
  }
}

void rocksdb_batched_multi_get_cf(rocksdb_t* db,
                                  const rocksdb_readoptions_t* options,
                                  rocksdb_column_family_handle_t* column_family,
                                  size_t num_keys, const char* const* keys_list,
                                  const size_t* keys_list_sizes,
                                  rocksdb_pinnableslice_t** values, char** errs,
                                  const bool sorted_input) {
  Slice* key_slices = new Slice[num_keys];
  PinnableSlice* value_slices = new PinnableSlice[num_keys];
  Status* statuses = new Status[num_keys];
  for (size_t i = 0; i < num_keys; ++i) {
    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
  }

  db->rep->MultiGet(options->rep, column_family->rep, num_keys, key_slices,
                    value_slices, statuses, sorted_input);

  for (size_t i = 0; i < num_keys; ++i) {
    if (statuses[i].ok()) {
      values[i] = new (rocksdb_pinnableslice_t);
      values[i]->rep = std::move(value_slices[i]);
      errs[i] = nullptr;
    } else {
      values[i] = nullptr;
      if (!statuses[i].IsNotFound()) {
        errs[i] = strdup(statuses[i].ToString().c_str());
      } else {
        errs[i] = nullptr;
      }
    }
  }

  delete[] key_slices;
  delete[] value_slices;
  delete[] statuses;
}
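
// Illustrative usage sketch (not part of the library): the batched MultiGet
// hands back pinnable slices, which avoid one copy per value. This assumes the
// rocksdb_pinnableslice_value() and rocksdb_pinnableslice_destroy() helpers
// declared in rocksdb/c.h, plus `keys`/`key_sizes` from the sketch above.
//
//   rocksdb_pinnableslice_t* vals[2];
//   char* errs[2];
//   rocksdb_batched_multi_get_cf(db, ropts, cf_handles[0], 2, keys, key_sizes,
//                                vals, errs, false /* sorted_input */);
//   for (int i = 0; i < 2; i++) {
//     if (vals[i] != NULL) {
//       size_t len = 0;
//       const char* v = rocksdb_pinnableslice_value(vals[i], &len);
//       (void)v;
//       rocksdb_pinnableslice_destroy(vals[i]);
//     }
//     free(errs[i]);
//   }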

unsigned char rocksdb_key_may_exist(rocksdb_t* db,
                                    const rocksdb_readoptions_t* options,
                                    const char* key, size_t key_len,
                                    char** value, size_t* val_len,
                                    const char* timestamp, size_t timestamp_len,
                                    unsigned char* value_found) {
  std::string tmp;
  std::string time;
  if (timestamp) {
    time.assign(timestamp, timestamp_len);
  }
  bool found = false;
  const bool result = db->rep->KeyMayExist(options->rep, Slice(key, key_len),
                                           &tmp, timestamp ? &time : nullptr,
                                           value_found ? &found : nullptr);
  if (value_found) {
    *value_found = found;
    if (found) {
      *val_len = tmp.size();
      *value = CopyString(tmp);
    }
  }
  return result;
}

unsigned char rocksdb_key_may_exist_cf(
    rocksdb_t* db, const rocksdb_readoptions_t* options,
    rocksdb_column_family_handle_t* column_family, const char* key,
    size_t key_len, char** value, size_t* val_len, const char* timestamp,
    size_t timestamp_len, unsigned char* value_found) {
  std::string tmp;
  std::string time;
  if (timestamp) {
    time.assign(timestamp, timestamp_len);
  }
  bool found = false;
  const bool result = db->rep->KeyMayExist(
      options->rep, column_family->rep, Slice(key, key_len), &tmp,
      timestamp ? &time : nullptr, value_found ? &found : nullptr);
  if (value_found) {
    *value_found = found;
    if (found) {
      *val_len = tmp.size();
      *value = CopyString(tmp);
    }
  }
  return result;
}
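
// Illustrative usage sketch (not part of the library): KeyMayExist is a
// filter-style pre-check. A zero return means the key is definitely absent; a
// non-zero return only means it may be present, with the value copied out only
// when it was found in memory.
//
//   char* maybe_val = NULL;
//   size_t maybe_len = 0;
//   unsigned char found_in_memory = 0;
//   if (!rocksdb_key_may_exist(db, ropts, "key", 3, &maybe_val, &maybe_len,
//                              NULL, 0, &found_in_memory)) {
//     /* definitely not in the database */
//   } else if (found_in_memory) {
//     free(maybe_val);
//   }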

rocksdb_iterator_t* rocksdb_create_iterator(
    rocksdb_t* db, const rocksdb_readoptions_t* options) {
  rocksdb_iterator_t* result = new rocksdb_iterator_t;
  result->rep = db->rep->NewIterator(options->rep);
  return result;
}

rocksdb_wal_iterator_t* rocksdb_get_updates_since(
    rocksdb_t* db, uint64_t seq_number,
    const rocksdb_wal_readoptions_t* options, char** errptr) {
  std::unique_ptr<TransactionLogIterator> iter;
  TransactionLogIterator::ReadOptions ro;
  if (options != nullptr) {
    ro = options->rep;
  }
  if (SaveError(errptr, db->rep->GetUpdatesSince(seq_number, &iter, ro))) {
    return nullptr;
  }
  rocksdb_wal_iterator_t* result = new rocksdb_wal_iterator_t;
  result->rep = iter.release();
  return result;
}

void rocksdb_wal_iter_next(rocksdb_wal_iterator_t* iter) {
  iter->rep->Next();
}

unsigned char rocksdb_wal_iter_valid(const rocksdb_wal_iterator_t* iter) {
  return iter->rep->Valid();
}

void rocksdb_wal_iter_status(const rocksdb_wal_iterator_t* iter,
                             char** errptr) {
  SaveError(errptr, iter->rep->status());
}

void rocksdb_wal_iter_destroy(const rocksdb_wal_iterator_t* iter) {
  delete iter->rep;
  delete iter;
}

rocksdb_writebatch_t* rocksdb_wal_iter_get_batch(
    const rocksdb_wal_iterator_t* iter, uint64_t* seq) {
  rocksdb_writebatch_t* result = rocksdb_writebatch_create();
  BatchResult wal_batch = iter->rep->GetBatch();
  result->rep = std::move(*wal_batch.writeBatchPtr);
  if (seq != nullptr) {
    *seq = wal_batch.sequence;
  }
  return result;
}

uint64_t rocksdb_get_latest_sequence_number(rocksdb_t* db) {
  return db->rep->GetLatestSequenceNumber();
}
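
// Illustrative usage sketch (not part of the library): replaying write batches
// from the WAL starting at a caller-chosen sequence number, with
// rocksdb_get_latest_sequence_number() giving the current upper bound.
//
//   uint64_t since_seq = 0;  /* resume point chosen by the caller */
//   char* err = NULL;
//   rocksdb_wal_iterator_t* wal_iter =
//       rocksdb_get_updates_since(db, since_seq, NULL, &err);
//   if (err == NULL) {
//     while (rocksdb_wal_iter_valid(wal_iter)) {
//       uint64_t batch_seq = 0;
//       rocksdb_writebatch_t* batch =
//           rocksdb_wal_iter_get_batch(wal_iter, &batch_seq);
//       /* inspect or re-apply the batch here */
//       rocksdb_writebatch_destroy(batch);
//       rocksdb_wal_iter_next(wal_iter);
//     }
//     rocksdb_wal_iter_destroy(wal_iter);
//   }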

rocksdb_iterator_t* rocksdb_create_iterator_cf(
    rocksdb_t* db, const rocksdb_readoptions_t* options,
    rocksdb_column_family_handle_t* column_family) {
  rocksdb_iterator_t* result = new rocksdb_iterator_t;
  result->rep = db->rep->NewIterator(options->rep, column_family->rep);
  return result;
}

void rocksdb_create_iterators(rocksdb_t* db, rocksdb_readoptions_t* opts,
                              rocksdb_column_family_handle_t** column_families,
                              rocksdb_iterator_t** iterators, size_t size,
                              char** errptr) {
  std::vector<ColumnFamilyHandle*> column_families_vec;
  for (size_t i = 0; i < size; i++) {
    column_families_vec.push_back(column_families[i]->rep);
  }

  std::vector<Iterator*> res;
  Status status = db->rep->NewIterators(opts->rep, column_families_vec, &res);
  assert(res.size() == size);
  if (SaveError(errptr, status)) {
    return;
  }

  for (size_t i = 0; i < size; i++) {
    iterators[i] = new rocksdb_iterator_t;
    iterators[i]->rep = res[i];
  }
}

const rocksdb_snapshot_t* rocksdb_create_snapshot(rocksdb_t* db) {
  rocksdb_snapshot_t* result = new rocksdb_snapshot_t;
  result->rep = db->rep->GetSnapshot();
  return result;
}

void rocksdb_release_snapshot(rocksdb_t* db,
                              const rocksdb_snapshot_t* snapshot) {
  db->rep->ReleaseSnapshot(snapshot->rep);
  delete snapshot;
}
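
// Illustrative usage sketch (not part of the library): pinning a read view
// with a snapshot. rocksdb_readoptions_set_snapshot() is declared in
// rocksdb/c.h; `db` and `ropts` are assumed from earlier sketches.
//
//   const rocksdb_snapshot_t* snap = rocksdb_create_snapshot(db);
//   rocksdb_readoptions_set_snapshot(ropts, snap);
//   /* reads through ropts now ignore writes made after the snapshot */
//   rocksdb_readoptions_set_snapshot(ropts, NULL);
//   rocksdb_release_snapshot(db, snap);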

char* rocksdb_property_value(rocksdb_t* db, const char* propname) {
  std::string tmp;
  if (db->rep->GetProperty(Slice(propname), &tmp)) {
    // We use strdup() since we expect human readable output.
    return strdup(tmp.c_str());
  } else {
    return nullptr;
  }
}

int rocksdb_property_int(rocksdb_t* db, const char* propname,
                         uint64_t* out_val) {
  if (db->rep->GetIntProperty(Slice(propname), out_val)) {
    return 0;
  } else {
    return -1;
  }
}

int rocksdb_property_int_cf(rocksdb_t* db,
                            rocksdb_column_family_handle_t* column_family,
                            const char* propname, uint64_t* out_val) {
  if (db->rep->GetIntProperty(column_family->rep, Slice(propname), out_val)) {
    return 0;
  } else {
    return -1;
  }
}

char* rocksdb_property_value_cf(rocksdb_t* db,
                                rocksdb_column_family_handle_t* column_family,
                                const char* propname) {
  std::string tmp;
  if (db->rep->GetProperty(column_family->rep, Slice(propname), &tmp)) {
    // We use strdup() since we expect human readable output.
    return strdup(tmp.c_str());
  } else {
    return nullptr;
  }
}
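
// Illustrative usage sketch (not part of the library): querying built-in DB
// properties. String properties are strdup'd and must be freed by the caller;
// integer properties report success through the return code.
//
//   char* stats = rocksdb_property_value(db, "rocksdb.stats");
//   if (stats != NULL) {
//     /* print or log, then */
//     free(stats);
//   }
//   uint64_t num_keys = 0;
//   if (rocksdb_property_int(db, "rocksdb.estimate-num-keys", &num_keys) == 0) {
//     /* num_keys is valid */
//   }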

void rocksdb_approximate_sizes(rocksdb_t* db, int num_ranges,
                               const char* const* range_start_key,
                               const size_t* range_start_key_len,
                               const char* const* range_limit_key,
                               const size_t* range_limit_key_len,
                               uint64_t* sizes, char** errptr) {
  Range* ranges = new Range[num_ranges];
  for (int i = 0; i < num_ranges; i++) {
    ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
    ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]);
  }
  Status s = db->rep->GetApproximateSizes(ranges, num_ranges, sizes);
  if (!s.ok()) {
    SaveError(errptr, s);
  }
  delete[] ranges;
}

void rocksdb_approximate_sizes_cf(
    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
    int num_ranges, const char* const* range_start_key,
    const size_t* range_start_key_len, const char* const* range_limit_key,
    const size_t* range_limit_key_len, uint64_t* sizes, char** errptr) {
  Range* ranges = new Range[num_ranges];
  for (int i = 0; i < num_ranges; i++) {
    ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
    ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]);
  }
  Status s = db->rep->GetApproximateSizes(column_family->rep, ranges,
                                          num_ranges, sizes);
  if (!s.ok()) {
    SaveError(errptr, s);
  }
  delete[] ranges;
}

void rocksdb_delete_file(rocksdb_t* db, const char* name) {
  db->rep->DeleteFile(name);
}

const rocksdb_livefiles_t* rocksdb_livefiles(rocksdb_t* db) {
  rocksdb_livefiles_t* result = new rocksdb_livefiles_t;
  db->rep->GetLiveFilesMetaData(&result->rep);
  return result;
}

void rocksdb_compact_range(rocksdb_t* db, const char* start_key,
                           size_t start_key_len, const char* limit_key,
                           size_t limit_key_len) {
  Slice a, b;
  db->rep->CompactRange(
      CompactRangeOptions(),
      // Pass nullptr Slice if corresponding "const char*" is nullptr
      (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
      (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
}

void rocksdb_compact_range_cf(rocksdb_t* db,
                              rocksdb_column_family_handle_t* column_family,
                              const char* start_key, size_t start_key_len,
                              const char* limit_key, size_t limit_key_len) {
  Slice a, b;
  db->rep->CompactRange(
      CompactRangeOptions(), column_family->rep,
      // Pass nullptr Slice if corresponding "const char*" is nullptr
      (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
      (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
}
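
// Illustrative usage sketch (not part of the library): passing NULL for both
// range endpoints compacts the entire key space of the default column family.
//
//   rocksdb_compact_range(db, NULL, 0, NULL, 0);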

void rocksdb_suggest_compact_range(rocksdb_t* db, const char* start_key,
                                   size_t start_key_len, const char* limit_key,
                                   size_t limit_key_len, char** errptr) {
  Slice a, b;
  Status s = ROCKSDB_NAMESPACE::experimental::SuggestCompactRange(
      db->rep,
      (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
      (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
  SaveError(errptr, s);
}

void rocksdb_suggest_compact_range_cf(
    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
    const char* start_key, size_t start_key_len, const char* limit_key,
    size_t limit_key_len, char** errptr) {
  Slice a, b;
  Status s = db->rep->SuggestCompactRange(
      column_family->rep,
      (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
      (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
  SaveError(errptr, s);
}

void rocksdb_compact_range_opt(rocksdb_t* db, rocksdb_compactoptions_t* opt,
                               const char* start_key, size_t start_key_len,
                               const char* limit_key, size_t limit_key_len) {
  Slice a, b;
  db->rep->CompactRange(
      opt->rep,
      // Pass nullptr Slice if corresponding "const char*" is nullptr
      (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
      (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
}

void rocksdb_compact_range_cf_opt(rocksdb_t* db,
                                  rocksdb_column_family_handle_t* column_family,
                                  rocksdb_compactoptions_t* opt,
                                  const char* start_key, size_t start_key_len,
                                  const char* limit_key, size_t limit_key_len) {
  Slice a, b;
  db->rep->CompactRange(
      opt->rep, column_family->rep,
      // Pass nullptr Slice if corresponding "const char*" is nullptr
      (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
      (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
}

void rocksdb_flush(rocksdb_t* db, const rocksdb_flushoptions_t* options,
                   char** errptr) {
  SaveError(errptr, db->rep->Flush(options->rep));
}

void rocksdb_flush_cf(rocksdb_t* db, const rocksdb_flushoptions_t* options,
                      rocksdb_column_family_handle_t* column_family,
                      char** errptr) {
  SaveError(errptr, db->rep->Flush(options->rep, column_family->rep));
}

void rocksdb_flush_wal(rocksdb_t* db, unsigned char sync, char** errptr) {
  SaveError(errptr, db->rep->FlushWAL(sync));
}

void rocksdb_disable_file_deletions(rocksdb_t* db, char** errptr) {
  SaveError(errptr, db->rep->DisableFileDeletions());
}

void rocksdb_enable_file_deletions(rocksdb_t* db, unsigned char force,
                                   char** errptr) {
  SaveError(errptr, db->rep->EnableFileDeletions(force));
}

void rocksdb_destroy_db(const rocksdb_options_t* options, const char* name,
                        char** errptr) {
  SaveError(errptr, DestroyDB(name, options->rep));
}

void rocksdb_repair_db(const rocksdb_options_t* options, const char* name,
                       char** errptr) {
  SaveError(errptr, RepairDB(name, options->rep));
}

void rocksdb_iter_destroy(rocksdb_iterator_t* iter) {
  delete iter->rep;
  delete iter;
}

unsigned char rocksdb_iter_valid(const rocksdb_iterator_t* iter) {
  return iter->rep->Valid();
}

void rocksdb_iter_seek_to_first(rocksdb_iterator_t* iter) {
  iter->rep->SeekToFirst();
}

void rocksdb_iter_seek_to_last(rocksdb_iterator_t* iter) {
  iter->rep->SeekToLast();
}

void rocksdb_iter_seek(rocksdb_iterator_t* iter, const char* k, size_t klen) {
  iter->rep->Seek(Slice(k, klen));
}

void rocksdb_iter_seek_for_prev(rocksdb_iterator_t* iter, const char* k,
                                size_t klen) {
  iter->rep->SeekForPrev(Slice(k, klen));
}

void rocksdb_iter_next(rocksdb_iterator_t* iter) {
  iter->rep->Next();
}

void rocksdb_iter_prev(rocksdb_iterator_t* iter) {
  iter->rep->Prev();
}

const char* rocksdb_iter_key(const rocksdb_iterator_t* iter, size_t* klen) {
  Slice s = iter->rep->key();
  *klen = s.size();
  return s.data();
}

const char* rocksdb_iter_value(const rocksdb_iterator_t* iter, size_t* vlen) {
  Slice s = iter->rep->value();
  *vlen = s.size();
  return s.data();
}

const char* rocksdb_iter_timestamp(const rocksdb_iterator_t* iter,
                                   size_t* tslen) {
  Slice s = iter->rep->timestamp();
  *tslen = s.size();
  return s.data();
}

void rocksdb_iter_get_error(const rocksdb_iterator_t* iter, char** errptr) {
  SaveError(errptr, iter->rep->status());
}
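
// Illustrative usage sketch (not part of the library): a full forward scan.
// The key and value pointers returned by the iterator are only valid until the
// next call that moves or destroys it.
//
//   rocksdb_iterator_t* it = rocksdb_create_iterator(db, ropts);
//   for (rocksdb_iter_seek_to_first(it); rocksdb_iter_valid(it);
//        rocksdb_iter_next(it)) {
//     size_t klen = 0, vlen = 0;
//     const char* k = rocksdb_iter_key(it, &klen);
//     const char* v = rocksdb_iter_value(it, &vlen);
//     (void)k;
//     (void)v;
//   }
//   char* err = NULL;
//   rocksdb_iter_get_error(it, &err);
//   rocksdb_iter_destroy(it);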
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
rocksdb_writebatch_t* rocksdb_writebatch_create() {
|
|
|
|
return new rocksdb_writebatch_t;
|
2011-08-05 20:40:49 +00:00
|
|
|
}
|
|
|
|
|
2014-07-05 22:35:13 +00:00
|
|
|
rocksdb_writebatch_t* rocksdb_writebatch_create_from(const char* rep,
|
|
|
|
size_t size) {
|
|
|
|
rocksdb_writebatch_t* b = new rocksdb_writebatch_t;
|
|
|
|
b->rep = WriteBatch(std::string(rep, size));
|
|
|
|
return b;
|
|
|
|
}
|
|
|
|
|
2022-05-26 16:40:10 +00:00
|
|
|
void rocksdb_writebatch_destroy(rocksdb_writebatch_t* b) { delete b; }
|
2011-08-05 20:40:49 +00:00
|
|
|
|
2022-05-26 16:40:10 +00:00
|
|
|
void rocksdb_writebatch_clear(rocksdb_writebatch_t* b) { b->rep.Clear(); }
|
2011-08-05 20:40:49 +00:00
|
|
|
|
2022-05-26 16:40:10 +00:00
|
|
|
int rocksdb_writebatch_count(rocksdb_writebatch_t* b) { return b->rep.Count(); }
|
2014-02-25 18:32:28 +00:00
|
|
|
|
2022-05-26 16:40:10 +00:00
|
|
|
void rocksdb_writebatch_put(rocksdb_writebatch_t* b, const char* key,
|
|
|
|
size_t klen, const char* val, size_t vlen) {
|
2011-08-05 20:40:49 +00:00
|
|
|
b->rep.Put(Slice(key, klen), Slice(val, vlen));
|
|
|
|
}
|
|
|
|
|
2022-05-26 16:40:10 +00:00
|
|
|
void rocksdb_writebatch_put_cf(rocksdb_writebatch_t* b,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen, const char* val,
|
|
|
|
size_t vlen) {
|
2014-07-07 08:18:52 +00:00
|
|
|
b->rep.Put(column_family->rep, Slice(key, klen), Slice(val, vlen));
|
|
|
|
}
|
|
|
|
|
2022-05-26 16:40:10 +00:00
|
|
|
void rocksdb_writebatch_put_cf_with_ts(
|
|
|
|
rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen, const char* ts, size_t tslen, const char* val,
|
|
|
|
size_t vlen) {
|
|
|
|
b->rep.Put(column_family->rep, Slice(key, klen), Slice(ts, tslen),
|
|
|
|
Slice(val, vlen));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_putv(rocksdb_writebatch_t* b, int num_keys,
|
|
|
|
const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes, int num_values,
|
|
|
|
const char* const* values_list,
|
|
|
|
const size_t* values_list_sizes) {
|
2015-06-04 00:07:36 +00:00
|
|
|
std::vector<Slice> key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
std::vector<Slice> value_slices(num_values);
|
|
|
|
for (int i = 0; i < num_values; i++) {
|
|
|
|
value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep.Put(SliceParts(key_slices.data(), num_keys),
|
|
|
|
SliceParts(value_slices.data(), num_values));
|
|
|
|
}
|
|
|
|
|
2022-05-26 16:40:10 +00:00
|
|
|
void rocksdb_writebatch_putv_cf(rocksdb_writebatch_t* b,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
int num_keys, const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes, int num_values,
|
|
|
|
const char* const* values_list,
|
|
|
|
const size_t* values_list_sizes) {
|
2015-06-04 00:07:36 +00:00
|
|
|
std::vector<Slice> key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
std::vector<Slice> value_slices(num_values);
|
|
|
|
for (int i = 0; i < num_values; i++) {
|
|
|
|
value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep.Put(column_family->rep, SliceParts(key_slices.data(), num_keys),
|
|
|
|
SliceParts(value_slices.data(), num_values));
|
|
|
|
}
|
|
|
|
|
2022-05-26 16:40:10 +00:00
|
|
|
void rocksdb_writebatch_merge(rocksdb_writebatch_t* b, const char* key,
|
|
|
|
size_t klen, const char* val, size_t vlen) {
|
2014-02-12 21:49:00 +00:00
|
|
|
b->rep.Merge(Slice(key, klen), Slice(val, vlen));
|
|
|
|
}
|
|
|
|
|
2014-07-07 08:18:52 +00:00
|
|
|
void rocksdb_writebatch_merge_cf(
|
|
|
|
rocksdb_writebatch_t* b,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen,
|
|
|
|
const char* val, size_t vlen) {
|
|
|
|
b->rep.Merge(column_family->rep, Slice(key, klen), Slice(val, vlen));
|
|
|
|
}
|
|
|
|
|
2015-06-04 00:07:36 +00:00
|
|
|
void rocksdb_writebatch_mergev(
|
|
|
|
rocksdb_writebatch_t* b,
|
|
|
|
int num_keys, const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes,
|
|
|
|
int num_values, const char* const* values_list,
|
|
|
|
const size_t* values_list_sizes) {
|
|
|
|
std::vector<Slice> key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
std::vector<Slice> value_slices(num_values);
|
|
|
|
for (int i = 0; i < num_values; i++) {
|
|
|
|
value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep.Merge(SliceParts(key_slices.data(), num_keys),
|
|
|
|
SliceParts(value_slices.data(), num_values));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_mergev_cf(
|
|
|
|
rocksdb_writebatch_t* b,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
int num_keys, const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes,
|
|
|
|
int num_values, const char* const* values_list,
|
|
|
|
const size_t* values_list_sizes) {
|
|
|
|
std::vector<Slice> key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
std::vector<Slice> value_slices(num_values);
|
|
|
|
for (int i = 0; i < num_values; i++) {
|
|
|
|
value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep.Merge(column_family->rep, SliceParts(key_slices.data(), num_keys),
|
|
|
|
SliceParts(value_slices.data(), num_values));
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_writebatch_delete(
|
|
|
|
rocksdb_writebatch_t* b,
|
2011-08-05 20:40:49 +00:00
|
|
|
const char* key, size_t klen) {
|
|
|
|
b->rep.Delete(Slice(key, klen));
|
|
|
|
}
|
|
|
|
|
2020-06-03 19:22:29 +00:00
|
|
|
void rocksdb_writebatch_singledelete(rocksdb_writebatch_t* b, const char* key,
|
|
|
|
size_t klen) {
|
|
|
|
b->rep.SingleDelete(Slice(key, klen));
|
|
|
|
}
|
|
|
|
|
2014-07-07 08:18:52 +00:00
|
|
|
void rocksdb_writebatch_delete_cf(
|
|
|
|
rocksdb_writebatch_t* b,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen) {
|
|
|
|
b->rep.Delete(column_family->rep, Slice(key, klen));
|
|
|
|
}
|
|
|
|
|
2022-05-26 16:40:10 +00:00
|
|
|
void rocksdb_writebatch_delete_cf_with_ts(
|
|
|
|
rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen, const char* ts, size_t tslen) {
|
|
|
|
b->rep.Delete(column_family->rep, Slice(key, klen), Slice(ts, tslen));
|
|
|
|
}
|
|
|
|
|
2020-06-03 19:22:29 +00:00
|
|
|
void rocksdb_writebatch_singledelete_cf(
|
|
|
|
rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen) {
|
|
|
|
b->rep.SingleDelete(column_family->rep, Slice(key, klen));
|
|
|
|
}
|
|
|
|
|
2022-05-26 16:40:10 +00:00
|
|
|
void rocksdb_writebatch_singledelete_cf_with_ts(
|
|
|
|
rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen, const char* ts, size_t tslen) {
|
|
|
|
b->rep.SingleDelete(column_family->rep, Slice(key, klen), Slice(ts, tslen));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_deletev(rocksdb_writebatch_t* b, int num_keys,
|
|
|
|
const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes) {
|
2015-06-04 00:07:36 +00:00
|
|
|
std::vector<Slice> key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep.Delete(SliceParts(key_slices.data(), num_keys));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_deletev_cf(
|
2022-05-26 16:40:10 +00:00
|
|
|
rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
|
|
|
|
int num_keys, const char* const* keys_list, const size_t* keys_list_sizes) {
|
2015-06-04 00:07:36 +00:00
|
|
|
std::vector<Slice> key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep.Delete(column_family->rep, SliceParts(key_slices.data(), num_keys));
|
|
|
|
}
|
|
|
|
|
2016-12-13 19:08:45 +00:00
|
|
|
void rocksdb_writebatch_delete_range(rocksdb_writebatch_t* b,
|
|
|
|
const char* start_key,
|
|
|
|
size_t start_key_len, const char* end_key,
|
|
|
|
size_t end_key_len) {
|
|
|
|
b->rep.DeleteRange(Slice(start_key, start_key_len),
|
|
|
|
Slice(end_key, end_key_len));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_delete_range_cf(
|
|
|
|
rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* start_key, size_t start_key_len, const char* end_key,
|
|
|
|
size_t end_key_len) {
|
|
|
|
b->rep.DeleteRange(column_family->rep, Slice(start_key, start_key_len),
|
|
|
|
Slice(end_key, end_key_len));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_delete_rangev(rocksdb_writebatch_t* b, int num_keys,
|
|
|
|
const char* const* start_keys_list,
|
|
|
|
const size_t* start_keys_list_sizes,
|
|
|
|
const char* const* end_keys_list,
|
|
|
|
const size_t* end_keys_list_sizes) {
|
|
|
|
std::vector<Slice> start_key_slices(num_keys);
|
|
|
|
std::vector<Slice> end_key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
start_key_slices[i] = Slice(start_keys_list[i], start_keys_list_sizes[i]);
|
|
|
|
end_key_slices[i] = Slice(end_keys_list[i], end_keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep.DeleteRange(SliceParts(start_key_slices.data(), num_keys),
|
|
|
|
SliceParts(end_key_slices.data(), num_keys));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_delete_rangev_cf(
|
|
|
|
rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
|
|
|
|
int num_keys, const char* const* start_keys_list,
|
|
|
|
const size_t* start_keys_list_sizes, const char* const* end_keys_list,
|
|
|
|
const size_t* end_keys_list_sizes) {
|
|
|
|
std::vector<Slice> start_key_slices(num_keys);
|
|
|
|
std::vector<Slice> end_key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
start_key_slices[i] = Slice(start_keys_list[i], start_keys_list_sizes[i]);
|
|
|
|
end_key_slices[i] = Slice(end_keys_list[i], end_keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep.DeleteRange(column_family->rep,
|
|
|
|
SliceParts(start_key_slices.data(), num_keys),
|
|
|
|
SliceParts(end_key_slices.data(), num_keys));
|
|
|
|
}
|
|
|
|
|
2015-06-10 07:12:33 +00:00
|
|
|
void rocksdb_writebatch_put_log_data(
|
|
|
|
rocksdb_writebatch_t* b,
|
|
|
|
const char* blob, size_t len) {
|
|
|
|
b->rep.PutLogData(Slice(blob, len));
|
|
|
|
}
|
|
|
|
|
2018-04-20 20:28:05 +00:00
|
|
|
class H : public WriteBatch::Handler {
|
|
|
|
public:
|
|
|
|
void* state_;
|
|
|
|
void (*put_)(void*, const char* k, size_t klen, const char* v, size_t vlen);
|
|
|
|
void (*deleted_)(void*, const char* k, size_t klen);
|
2019-02-14 21:52:47 +00:00
|
|
|
void Put(const Slice& key, const Slice& value) override {
|
2018-04-20 20:28:05 +00:00
|
|
|
(*put_)(state_, key.data(), key.size(), value.data(), value.size());
|
|
|
|
}
|
2019-02-14 21:52:47 +00:00
|
|
|
void Delete(const Slice& key) override {
|
2018-04-20 20:28:05 +00:00
|
|
|
(*deleted_)(state_, key.data(), key.size());
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_writebatch_iterate(
|
|
|
|
rocksdb_writebatch_t* b,
|
2011-08-05 20:40:49 +00:00
|
|
|
void* state,
|
|
|
|
void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
|
|
|
|
void (*deleted)(void*, const char* k, size_t klen)) {
|
|
|
|
H handler;
|
|
|
|
handler.state_ = state;
|
|
|
|
handler.put_ = put;
|
|
|
|
handler.deleted_ = deleted;
|
|
|
|
b->rep.Iterate(&handler);
|
|
|
|
}
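// Illustrative client-side sketch (C), not part of this translation unit:
// replaying a batch through rocksdb_writebatch_iterate() with plain function
// pointers. The callback names and the count_state struct are hypothetical.
//
//   struct count_state { int puts; int deletes; };
//
//   static void on_put(void* state, const char* k, size_t klen,
//                      const char* v, size_t vlen) {
//     (void)k; (void)klen; (void)v; (void)vlen;
//     ((struct count_state*)state)->puts++;
//   }
//
//   static void on_delete(void* state, const char* k, size_t klen) {
//     (void)k; (void)klen;
//     ((struct count_state*)state)->deletes++;
//   }
//
//   struct count_state st = {0, 0};
//   rocksdb_writebatch_t* wb = rocksdb_writebatch_create();
//   rocksdb_writebatch_put(wb, "key", 3, "value", 5);
//   rocksdb_writebatch_delete(wb, "old", 3);
//   rocksdb_writebatch_iterate(wb, &st, on_put, on_delete);
//   // st.puts == 1, st.deletes == 1
//   rocksdb_writebatch_destroy(wb);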
|
|
|
|
|
2014-02-25 18:32:28 +00:00
|
|
|
const char* rocksdb_writebatch_data(rocksdb_writebatch_t* b, size_t* size) {
|
|
|
|
*size = b->rep.GetDataSize();
|
|
|
|
return b->rep.Data().c_str();
|
|
|
|
}
|
|
|
|
|
2017-01-20 21:15:09 +00:00
|
|
|
void rocksdb_writebatch_set_save_point(rocksdb_writebatch_t* b) {
|
|
|
|
b->rep.SetSavePoint();
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_rollback_to_save_point(rocksdb_writebatch_t* b,
|
|
|
|
char** errptr) {
|
|
|
|
SaveError(errptr, b->rep.RollbackToSavePoint());
|
|
|
|
}
|
|
|
|
|
2017-05-03 17:54:07 +00:00
|
|
|
void rocksdb_writebatch_pop_save_point(rocksdb_writebatch_t* b, char** errptr) {
|
|
|
|
SaveError(errptr, b->rep.PopSavePoint());
|
|
|
|
}
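// Illustrative sketch (C), not part of this translation unit: save points let
// a caller speculatively extend a batch and undo just that tail. Assumes an
// existing batch `wb`; `validation_failed` is a placeholder condition.
//
//   char* err = NULL;
//   rocksdb_writebatch_set_save_point(wb);
//   rocksdb_writebatch_put(wb, "tentative", 9, "v", 1);
//   if (validation_failed) {
//     // Drops everything added since the matching SetSavePoint().
//     rocksdb_writebatch_rollback_to_save_point(wb, &err);
//   } else {
//     // Keeps the writes but discards the save point marker.
//     rocksdb_writebatch_pop_save_point(wb, &err);
//   }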
|
|
|
|
|
2017-03-23 22:50:51 +00:00
|
|
|
rocksdb_writebatch_wi_t* rocksdb_writebatch_wi_create(size_t reserved_bytes, unsigned char overwrite_key) {
|
|
|
|
rocksdb_writebatch_wi_t* b = new rocksdb_writebatch_wi_t;
|
|
|
|
b->rep = new WriteBatchWithIndex(BytewiseComparator(), reserved_bytes, overwrite_key);
|
|
|
|
return b;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_destroy(rocksdb_writebatch_wi_t* b) {
|
|
|
|
if (b->rep) {
|
|
|
|
delete b->rep;
|
|
|
|
}
|
|
|
|
delete b;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_clear(rocksdb_writebatch_wi_t* b) {
|
|
|
|
b->rep->Clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
int rocksdb_writebatch_wi_count(rocksdb_writebatch_wi_t* b) {
|
|
|
|
return b->rep->GetWriteBatch()->Count();
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_put(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
const char* key, size_t klen,
|
|
|
|
const char* val, size_t vlen) {
|
|
|
|
b->rep->Put(Slice(key, klen), Slice(val, vlen));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_put_cf(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen,
|
|
|
|
const char* val, size_t vlen) {
|
|
|
|
b->rep->Put(column_family->rep, Slice(key, klen), Slice(val, vlen));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_putv(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
int num_keys, const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes,
|
|
|
|
int num_values, const char* const* values_list,
|
|
|
|
const size_t* values_list_sizes) {
|
|
|
|
std::vector<Slice> key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
std::vector<Slice> value_slices(num_values);
|
|
|
|
for (int i = 0; i < num_values; i++) {
|
|
|
|
value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep->Put(SliceParts(key_slices.data(), num_keys),
|
|
|
|
SliceParts(value_slices.data(), num_values));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_putv_cf(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
int num_keys, const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes,
|
|
|
|
int num_values, const char* const* values_list,
|
|
|
|
const size_t* values_list_sizes) {
|
|
|
|
std::vector<Slice> key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
std::vector<Slice> value_slices(num_values);
|
|
|
|
for (int i = 0; i < num_values; i++) {
|
|
|
|
value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep->Put(column_family->rep, SliceParts(key_slices.data(), num_keys),
|
|
|
|
SliceParts(value_slices.data(), num_values));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_merge(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
const char* key, size_t klen,
|
|
|
|
const char* val, size_t vlen) {
|
|
|
|
b->rep->Merge(Slice(key, klen), Slice(val, vlen));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_merge_cf(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen,
|
|
|
|
const char* val, size_t vlen) {
|
|
|
|
b->rep->Merge(column_family->rep, Slice(key, klen), Slice(val, vlen));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_mergev(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
int num_keys, const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes,
|
|
|
|
int num_values, const char* const* values_list,
|
|
|
|
const size_t* values_list_sizes) {
|
|
|
|
std::vector<Slice> key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
std::vector<Slice> value_slices(num_values);
|
|
|
|
for (int i = 0; i < num_values; i++) {
|
|
|
|
value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep->Merge(SliceParts(key_slices.data(), num_keys),
|
|
|
|
SliceParts(value_slices.data(), num_values));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_mergev_cf(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
int num_keys, const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes,
|
|
|
|
int num_values, const char* const* values_list,
|
|
|
|
const size_t* values_list_sizes) {
|
|
|
|
std::vector<Slice> key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
std::vector<Slice> value_slices(num_values);
|
|
|
|
for (int i = 0; i < num_values; i++) {
|
|
|
|
value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep->Merge(column_family->rep, SliceParts(key_slices.data(), num_keys),
|
|
|
|
SliceParts(value_slices.data(), num_values));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_delete(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
const char* key, size_t klen) {
|
|
|
|
b->rep->Delete(Slice(key, klen));
|
|
|
|
}
|
|
|
|
|
2020-06-03 19:22:29 +00:00
|
|
|
void rocksdb_writebatch_wi_singledelete(rocksdb_writebatch_wi_t* b,
|
|
|
|
const char* key, size_t klen) {
|
|
|
|
b->rep->SingleDelete(Slice(key, klen));
|
|
|
|
}
|
|
|
|
|
2017-03-23 22:50:51 +00:00
|
|
|
void rocksdb_writebatch_wi_delete_cf(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen) {
|
|
|
|
b->rep->Delete(column_family->rep, Slice(key, klen));
|
|
|
|
}
|
|
|
|
|
2020-06-03 19:22:29 +00:00
|
|
|
void rocksdb_writebatch_wi_singledelete_cf(
|
|
|
|
rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen) {
|
|
|
|
b->rep->SingleDelete(column_family->rep, Slice(key, klen));
|
|
|
|
}
|
|
|
|
|
2017-03-23 22:50:51 +00:00
|
|
|
void rocksdb_writebatch_wi_deletev(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
int num_keys, const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes) {
|
|
|
|
std::vector<Slice> key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep->Delete(SliceParts(key_slices.data(), num_keys));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_deletev_cf(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
int num_keys, const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes) {
|
|
|
|
std::vector<Slice> key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep->Delete(column_family->rep, SliceParts(key_slices.data(), num_keys));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_delete_range(rocksdb_writebatch_wi_t* b,
|
|
|
|
const char* start_key,
|
|
|
|
size_t start_key_len, const char* end_key,
|
|
|
|
size_t end_key_len) {
|
|
|
|
b->rep->DeleteRange(Slice(start_key, start_key_len),
|
|
|
|
Slice(end_key, end_key_len));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_delete_range_cf(
|
|
|
|
rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* start_key, size_t start_key_len, const char* end_key,
|
|
|
|
size_t end_key_len) {
|
|
|
|
b->rep->DeleteRange(column_family->rep, Slice(start_key, start_key_len),
|
|
|
|
Slice(end_key, end_key_len));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_delete_rangev(rocksdb_writebatch_wi_t* b, int num_keys,
|
|
|
|
const char* const* start_keys_list,
|
|
|
|
const size_t* start_keys_list_sizes,
|
|
|
|
const char* const* end_keys_list,
|
|
|
|
const size_t* end_keys_list_sizes) {
|
|
|
|
std::vector<Slice> start_key_slices(num_keys);
|
|
|
|
std::vector<Slice> end_key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
start_key_slices[i] = Slice(start_keys_list[i], start_keys_list_sizes[i]);
|
|
|
|
end_key_slices[i] = Slice(end_keys_list[i], end_keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep->DeleteRange(SliceParts(start_key_slices.data(), num_keys),
|
|
|
|
SliceParts(end_key_slices.data(), num_keys));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_delete_rangev_cf(
|
|
|
|
rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
|
|
|
|
int num_keys, const char* const* start_keys_list,
|
|
|
|
const size_t* start_keys_list_sizes, const char* const* end_keys_list,
|
|
|
|
const size_t* end_keys_list_sizes) {
|
|
|
|
std::vector<Slice> start_key_slices(num_keys);
|
|
|
|
std::vector<Slice> end_key_slices(num_keys);
|
|
|
|
for (int i = 0; i < num_keys; i++) {
|
|
|
|
start_key_slices[i] = Slice(start_keys_list[i], start_keys_list_sizes[i]);
|
|
|
|
end_key_slices[i] = Slice(end_keys_list[i], end_keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
b->rep->DeleteRange(column_family->rep,
|
|
|
|
SliceParts(start_key_slices.data(), num_keys),
|
|
|
|
SliceParts(end_key_slices.data(), num_keys));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_put_log_data(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
const char* blob, size_t len) {
|
|
|
|
b->rep->PutLogData(Slice(blob, len));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_iterate(
|
|
|
|
rocksdb_writebatch_wi_t* b,
|
|
|
|
void* state,
|
|
|
|
void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
|
|
|
|
void (*deleted)(void*, const char* k, size_t klen)) {
|
|
|
|
H handler;
|
|
|
|
handler.state_ = state;
|
|
|
|
handler.put_ = put;
|
|
|
|
handler.deleted_ = deleted;
|
|
|
|
b->rep->GetWriteBatch()->Iterate(&handler);
|
|
|
|
}
|
|
|
|
|
|
|
|
const char* rocksdb_writebatch_wi_data(rocksdb_writebatch_wi_t* b, size_t* size) {
|
|
|
|
WriteBatch* wb = b->rep->GetWriteBatch();
|
|
|
|
*size = wb->GetDataSize();
|
|
|
|
return wb->Data().c_str();
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_set_save_point(rocksdb_writebatch_wi_t* b) {
|
|
|
|
b->rep->SetSavePoint();
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_writebatch_wi_rollback_to_save_point(rocksdb_writebatch_wi_t* b,
|
|
|
|
char** errptr) {
|
|
|
|
SaveError(errptr, b->rep->RollbackToSavePoint());
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base(
|
|
|
|
rocksdb_writebatch_wi_t* wbwi,
|
|
|
|
rocksdb_iterator_t* base_iterator) {
|
|
|
|
rocksdb_iterator_t* result = new rocksdb_iterator_t;
|
2018-12-04 07:36:32 +00:00
|
|
|
result->rep = wbwi->rep->NewIteratorWithBase(base_iterator->rep);
|
2017-03-23 22:50:51 +00:00
|
|
|
delete base_iterator;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2018-12-04 07:36:32 +00:00
|
|
|
rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base_cf(
|
2018-12-13 22:12:02 +00:00
|
|
|
rocksdb_writebatch_wi_t* wbwi, rocksdb_iterator_t* base_iterator,
|
2017-03-23 22:50:51 +00:00
|
|
|
rocksdb_column_family_handle_t* column_family) {
|
|
|
|
rocksdb_iterator_t* result = new rocksdb_iterator_t;
|
2018-12-13 22:12:02 +00:00
|
|
|
result->rep =
|
|
|
|
wbwi->rep->NewIteratorWithBase(column_family->rep, base_iterator->rep);
|
2017-03-23 22:50:51 +00:00
|
|
|
delete base_iterator;
|
|
|
|
return result;
|
|
|
|
}
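// Illustrative sketch (C), not part of this translation unit: layering a
// WriteBatchWithIndex over a DB iterator so not-yet-written batch entries show
// up during iteration. Assumes an open `db`; note that the base iterator is
// consumed by the call, as implemented above.
//
//   rocksdb_writebatch_wi_t* wbwi = rocksdb_writebatch_wi_create(0, 1);
//   rocksdb_writebatch_wi_put(wbwi, "pending", 7, "v", 1);
//   rocksdb_readoptions_t* ro = rocksdb_readoptions_create();
//   rocksdb_iterator_t* base = rocksdb_create_iterator(db, ro);
//   rocksdb_iterator_t* it =
//       rocksdb_writebatch_wi_create_iterator_with_base(wbwi, base);
//   for (rocksdb_iter_seek_to_first(it); rocksdb_iter_valid(it);
//        rocksdb_iter_next(it)) {
//     size_t klen;
//     const char* k = rocksdb_iter_key(it, &klen);  // also sees "pending"
//     (void)k; (void)klen;
//   }
//   rocksdb_iter_destroy(it);
//   rocksdb_readoptions_destroy(ro);
//   rocksdb_writebatch_wi_destroy(wbwi);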
|
|
|
|
|
|
|
|
char* rocksdb_writebatch_wi_get_from_batch(
|
|
|
|
rocksdb_writebatch_wi_t* wbwi,
|
|
|
|
const rocksdb_options_t* options,
|
|
|
|
const char* key, size_t keylen,
|
|
|
|
size_t* vallen,
|
|
|
|
char** errptr) {
|
|
|
|
char* result = nullptr;
|
|
|
|
std::string tmp;
|
|
|
|
Status s = wbwi->rep->GetFromBatch(options->rep, Slice(key, keylen), &tmp);
|
|
|
|
if (s.ok()) {
|
|
|
|
*vallen = tmp.size();
|
|
|
|
result = CopyString(tmp);
|
|
|
|
} else {
|
|
|
|
*vallen = 0;
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
char* rocksdb_writebatch_wi_get_from_batch_cf(
|
|
|
|
rocksdb_writebatch_wi_t* wbwi,
|
|
|
|
const rocksdb_options_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t keylen,
|
|
|
|
size_t* vallen,
|
|
|
|
char** errptr) {
|
|
|
|
char* result = nullptr;
|
|
|
|
std::string tmp;
|
|
|
|
Status s = wbwi->rep->GetFromBatch(column_family->rep, options->rep,
|
|
|
|
Slice(key, keylen), &tmp);
|
|
|
|
if (s.ok()) {
|
|
|
|
*vallen = tmp.size();
|
|
|
|
result = CopyString(tmp);
|
|
|
|
} else {
|
|
|
|
*vallen = 0;
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
char* rocksdb_writebatch_wi_get_from_batch_and_db(
|
|
|
|
rocksdb_writebatch_wi_t* wbwi,
|
|
|
|
rocksdb_t* db,
|
|
|
|
const rocksdb_readoptions_t* options,
|
|
|
|
const char* key, size_t keylen,
|
|
|
|
size_t* vallen,
|
|
|
|
char** errptr) {
|
|
|
|
char* result = nullptr;
|
|
|
|
std::string tmp;
|
|
|
|
Status s = wbwi->rep->GetFromBatchAndDB(db->rep, options->rep, Slice(key, keylen), &tmp);
|
|
|
|
if (s.ok()) {
|
|
|
|
*vallen = tmp.size();
|
|
|
|
result = CopyString(tmp);
|
|
|
|
} else {
|
|
|
|
*vallen = 0;
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
char* rocksdb_writebatch_wi_get_from_batch_and_db_cf(
|
|
|
|
rocksdb_writebatch_wi_t* wbwi,
|
|
|
|
rocksdb_t* db,
|
|
|
|
const rocksdb_readoptions_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t keylen,
|
|
|
|
size_t* vallen,
|
|
|
|
char** errptr) {
|
|
|
|
char* result = nullptr;
|
|
|
|
std::string tmp;
|
|
|
|
Status s = wbwi->rep->GetFromBatchAndDB(db->rep, options->rep, column_family->rep,
|
|
|
|
Slice(key, keylen), &tmp);
|
|
|
|
if (s.ok()) {
|
|
|
|
*vallen = tmp.size();
|
|
|
|
result = CopyString(tmp);
|
|
|
|
} else {
|
|
|
|
*vallen = 0;
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_write_writebatch_wi(
|
|
|
|
rocksdb_t* db,
|
|
|
|
const rocksdb_writeoptions_t* options,
|
|
|
|
rocksdb_writebatch_wi_t* wbwi,
|
|
|
|
char** errptr) {
|
|
|
|
WriteBatch* wb = wbwi->rep->GetWriteBatch();
|
|
|
|
SaveError(errptr, db->rep->Write(options->rep, wb));
|
|
|
|
}
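// Illustrative sketch (C), not part of this translation unit: reading your own
// uncommitted writes through a WriteBatchWithIndex and then committing it with
// rocksdb_write_writebatch_wi(). Assumes an open `db`; error checks elided.
//
//   char* err = NULL;
//   size_t vlen = 0;
//   rocksdb_readoptions_t* ro = rocksdb_readoptions_create();
//   rocksdb_writeoptions_t* wo = rocksdb_writeoptions_create();
//   rocksdb_writebatch_wi_t* wbwi = rocksdb_writebatch_wi_create(0, 1);
//   rocksdb_writebatch_wi_put(wbwi, "k", 1, "v", 1);
//   // Returns the batched "v" even though it is not in the DB yet; keys not
//   // present in the batch fall through to the DB.
//   char* val = rocksdb_writebatch_wi_get_from_batch_and_db(
//       wbwi, db, ro, "k", 1, &vlen, &err);
//   rocksdb_free(val);
//   rocksdb_write_writebatch_wi(db, wo, wbwi, &err);  // commit the batch
//   rocksdb_writebatch_wi_destroy(wbwi);
//   rocksdb_readoptions_destroy(ro);
//   rocksdb_writeoptions_destroy(wo);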
|
|
|
|
|
2022-06-30 18:03:52 +00:00
|
|
|
void rocksdb_load_latest_options(
|
|
|
|
const char* db_path, rocksdb_env_t* env, bool ignore_unknown_options,
|
|
|
|
rocksdb_cache_t* cache, rocksdb_options_t** db_options,
|
|
|
|
size_t* num_column_families, char*** list_column_family_names,
|
|
|
|
rocksdb_options_t*** list_column_family_options, char** errptr) {
|
|
|
|
DBOptions db_opt;
|
|
|
|
std::vector<ColumnFamilyDescriptor> cf_descs;
|
|
|
|
Status s = LoadLatestOptions(std::string(db_path), env->rep, &db_opt,
|
|
|
|
&cf_descs, ignore_unknown_options, &cache->rep);
|
|
|
|
if (s.ok()) {
|
|
|
|
char** cf_names = (char**)malloc(cf_descs.size() * sizeof(char*));
|
|
|
|
rocksdb_options_t** cf_options = (rocksdb_options_t**)malloc(
|
|
|
|
cf_descs.size() * sizeof(rocksdb_options_t*));
|
|
|
|
for (size_t i = 0; i < cf_descs.size(); ++i) {
|
|
|
|
cf_names[i] = strdup(cf_descs[i].name.c_str());
|
|
|
|
cf_options[i] = new rocksdb_options_t{
|
|
|
|
Options(DBOptions(), std::move(cf_descs[i].options))};
|
|
|
|
}
|
|
|
|
*num_column_families = cf_descs.size();
|
|
|
|
*db_options = new rocksdb_options_t{
|
|
|
|
Options(std::move(db_opt), ColumnFamilyOptions())};
|
|
|
|
*list_column_family_names = cf_names;
|
|
|
|
*list_column_family_options = cf_options;
|
|
|
|
} else {
|
|
|
|
*num_column_families = 0;
|
|
|
|
*db_options = nullptr;
|
|
|
|
*list_column_family_names = nullptr;
|
|
|
|
*list_column_family_options = nullptr;
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_load_latest_options_destroy(
|
|
|
|
rocksdb_options_t* db_options, char** list_column_family_names,
|
|
|
|
rocksdb_options_t** list_column_family_options, size_t len) {
|
|
|
|
rocksdb_options_destroy(db_options);
|
|
|
|
if (list_column_family_names) {
|
|
|
|
for (size_t i = 0; i < len; ++i) {
|
|
|
|
free(list_column_family_names[i]);
|
|
|
|
}
|
|
|
|
free(list_column_family_names);
|
|
|
|
}
|
|
|
|
if (list_column_family_options) {
|
|
|
|
for (size_t i = 0; i < len; ++i) {
|
|
|
|
rocksdb_options_destroy(list_column_family_options[i]);
|
|
|
|
}
|
|
|
|
free(list_column_family_options);
|
|
|
|
}
|
|
|
|
}
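// Illustrative sketch (C), not part of this translation unit: recovering the
// persisted options of an existing database and releasing them with the
// matching destroy call. "/path/to/db" is a placeholder path.
//
//   char* err = NULL;
//   rocksdb_env_t* env = rocksdb_create_default_env();
//   rocksdb_cache_t* cache = rocksdb_cache_create_lru(8 * 1024 * 1024);
//   rocksdb_options_t* db_opts = NULL;
//   size_t cf_count = 0;
//   char** cf_names = NULL;
//   rocksdb_options_t** cf_opts = NULL;
//   rocksdb_load_latest_options("/path/to/db", env, 0 /* ignore_unknown */,
//                               cache, &db_opts, &cf_count, &cf_names,
//                               &cf_opts, &err);
//   if (err == NULL) {
//     // ... inspect db_opts and cf_opts here ...
//     rocksdb_load_latest_options_destroy(db_opts, cf_names, cf_opts,
//                                         cf_count);
//   }
//   rocksdb_cache_destroy(cache);
//   rocksdb_env_destroy(env);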
|
|
|
|
|
2014-08-25 21:22:05 +00:00
|
|
|
rocksdb_block_based_table_options_t*
|
|
|
|
rocksdb_block_based_options_create() {
|
|
|
|
return new rocksdb_block_based_table_options_t;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_destroy(
|
|
|
|
rocksdb_block_based_table_options_t* options) {
|
|
|
|
delete options;
|
2022-08-23 21:59:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_set_checksum(
|
|
|
|
rocksdb_block_based_table_options_t* opt, char v) {
|
|
|
|
opt->rep.checksum = static_cast<ROCKSDB_NAMESPACE::ChecksumType>(v);
|
2014-08-25 21:22:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_set_block_size(
|
|
|
|
rocksdb_block_based_table_options_t* options, size_t block_size) {
|
|
|
|
options->rep.block_size = block_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_set_block_size_deviation(
|
|
|
|
rocksdb_block_based_table_options_t* options, int block_size_deviation) {
|
|
|
|
options->rep.block_size_deviation = block_size_deviation;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_set_block_restart_interval(
|
|
|
|
rocksdb_block_based_table_options_t* options, int block_restart_interval) {
|
|
|
|
options->rep.block_restart_interval = block_restart_interval;
|
|
|
|
}
|
|
|
|
|
2017-11-28 22:09:17 +00:00
|
|
|
void rocksdb_block_based_options_set_index_block_restart_interval(
|
|
|
|
rocksdb_block_based_table_options_t* options, int index_block_restart_interval) {
|
|
|
|
options->rep.index_block_restart_interval = index_block_restart_interval;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_set_metadata_block_size(
|
|
|
|
rocksdb_block_based_table_options_t* options, uint64_t metadata_block_size) {
|
|
|
|
options->rep.metadata_block_size = metadata_block_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_set_partition_filters(
|
|
|
|
rocksdb_block_based_table_options_t* options, unsigned char partition_filters) {
|
|
|
|
options->rep.partition_filters = partition_filters;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_set_use_delta_encoding(
|
|
|
|
rocksdb_block_based_table_options_t* options, unsigned char use_delta_encoding) {
|
|
|
|
options->rep.use_delta_encoding = use_delta_encoding;
|
|
|
|
}
|
|
|
|
|
2014-08-25 21:22:05 +00:00
|
|
|
void rocksdb_block_based_options_set_filter_policy(
|
|
|
|
rocksdb_block_based_table_options_t* options,
|
|
|
|
rocksdb_filterpolicy_t* filter_policy) {
|
|
|
|
options->rep.filter_policy.reset(filter_policy);
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_set_no_block_cache(
|
|
|
|
rocksdb_block_based_table_options_t* options,
|
|
|
|
unsigned char no_block_cache) {
|
|
|
|
options->rep.no_block_cache = no_block_cache;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_set_block_cache(
|
|
|
|
rocksdb_block_based_table_options_t* options,
|
|
|
|
rocksdb_cache_t* block_cache) {
|
|
|
|
if (block_cache) {
|
|
|
|
options->rep.block_cache = block_cache->rep;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_set_block_cache_compressed(
|
|
|
|
rocksdb_block_based_table_options_t* options,
|
|
|
|
rocksdb_cache_t* block_cache_compressed) {
|
|
|
|
if (block_cache_compressed) {
|
|
|
|
options->rep.block_cache_compressed = block_cache_compressed->rep;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_set_whole_key_filtering(
|
|
|
|
rocksdb_block_based_table_options_t* options, unsigned char v) {
|
|
|
|
options->rep.whole_key_filtering = v;
|
|
|
|
}
|
|
|
|
|
2015-07-03 00:23:41 +00:00
|
|
|
void rocksdb_block_based_options_set_format_version(
|
|
|
|
rocksdb_block_based_table_options_t* options, int v) {
|
|
|
|
options->rep.format_version = v;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_set_index_type(
|
|
|
|
rocksdb_block_based_table_options_t* options, int v) {
|
|
|
|
options->rep.index_type = static_cast<BlockBasedTableOptions::IndexType>(v);
|
|
|
|
}
|
|
|
|
|
2019-12-02 18:58:14 +00:00
|
|
|
void rocksdb_block_based_options_set_data_block_index_type(
|
|
|
|
rocksdb_block_based_table_options_t* options, int v) {
|
|
|
|
options->rep.data_block_index_type =
|
|
|
|
static_cast<BlockBasedTableOptions::DataBlockIndexType>(v);
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_block_based_options_set_data_block_hash_ratio(
|
|
|
|
rocksdb_block_based_table_options_t* options, double v) {
|
|
|
|
options->rep.data_block_hash_table_util_ratio = v;
|
|
|
|
}
|
|
|
|
|
2015-07-03 00:23:41 +00:00
|
|
|
void rocksdb_block_based_options_set_cache_index_and_filter_blocks(
|
|
|
|
rocksdb_block_based_table_options_t* options, unsigned char v) {
|
|
|
|
options->rep.cache_index_and_filter_blocks = v;
|
|
|
|
}
|
|
|
|
|
2017-11-28 22:09:17 +00:00
|
|
|
void rocksdb_block_based_options_set_cache_index_and_filter_blocks_with_high_priority(
|
|
|
|
rocksdb_block_based_table_options_t* options, unsigned char v) {
|
|
|
|
options->rep.cache_index_and_filter_blocks_with_high_priority = v;
|
|
|
|
}
|
|
|
|
|
// pin_l0_filter_and_index_blocks_in_cache: when set (and index/filter
// prefetching is enabled), the table readers for L0 files keep ownership of
// their filter and index blocks after BlockBasedTableReader::Open(), so the
// LRU block cache never evicts them and later accesses skip the block cache,
// avoiding its lock contention.
2016-04-01 17:42:39 +00:00
|
|
|
void rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
|
|
|
|
rocksdb_block_based_table_options_t* options, unsigned char v) {
|
|
|
|
options->rep.pin_l0_filter_and_index_blocks_in_cache = v;
|
|
|
|
}
|
|
|
|
|
2018-06-22 22:14:05 +00:00
|
|
|
void rocksdb_block_based_options_set_pin_top_level_index_and_filter(
|
|
|
|
rocksdb_block_based_table_options_t* options, unsigned char v) {
|
|
|
|
options->rep.pin_top_level_index_and_filter = v;
|
|
|
|
}
|
|
|
|
|
2014-08-25 21:22:05 +00:00
|
|
|
void rocksdb_options_set_block_based_table_factory(
|
|
|
|
rocksdb_options_t* opt,
|
|
|
|
rocksdb_block_based_table_options_t* table_options) {
|
|
|
|
if (table_options) {
|
|
|
|
opt->rep.table_factory.reset(
|
2020-02-20 20:07:53 +00:00
|
|
|
ROCKSDB_NAMESPACE::NewBlockBasedTableFactory(table_options->rep));
|
2014-08-25 21:22:05 +00:00
|
|
|
}
|
|
|
|
}
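// Illustrative sketch (C), not part of this translation unit: wiring a shared
// LRU block cache into block-based table options and installing the factory on
// the DB options. Sizes are arbitrary example values; the factory takes a copy
// of the table options, so the handle can be destroyed afterwards.
//
//   rocksdb_cache_t* cache = rocksdb_cache_create_lru(64 * 1024 * 1024);
//   rocksdb_block_based_table_options_t* bbto =
//       rocksdb_block_based_options_create();
//   rocksdb_block_based_options_set_block_size(bbto, 16 * 1024);
//   rocksdb_block_based_options_set_block_cache(bbto, cache);
//   rocksdb_block_based_options_set_cache_index_and_filter_blocks(bbto, 1);
//
//   rocksdb_options_t* opts = rocksdb_options_create();
//   rocksdb_options_set_block_based_table_factory(opts, bbto);
//   rocksdb_block_based_options_destroy(bbto);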
|
|
|
|
|
2014-11-14 00:57:01 +00:00
|
|
|
rocksdb_cuckoo_table_options_t*
|
|
|
|
rocksdb_cuckoo_options_create() {
|
|
|
|
return new rocksdb_cuckoo_table_options_t;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_cuckoo_options_destroy(
|
|
|
|
rocksdb_cuckoo_table_options_t* options) {
|
|
|
|
delete options;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_cuckoo_options_set_hash_ratio(
|
|
|
|
rocksdb_cuckoo_table_options_t* options, double v) {
|
|
|
|
options->rep.hash_table_ratio = v;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_cuckoo_options_set_max_search_depth(
|
|
|
|
rocksdb_cuckoo_table_options_t* options, uint32_t v) {
|
|
|
|
options->rep.max_search_depth = v;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_cuckoo_options_set_cuckoo_block_size(
|
|
|
|
rocksdb_cuckoo_table_options_t* options, uint32_t v) {
|
|
|
|
options->rep.cuckoo_block_size = v;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_cuckoo_options_set_identity_as_first_hash(
|
|
|
|
rocksdb_cuckoo_table_options_t* options, unsigned char v) {
|
|
|
|
options->rep.identity_as_first_hash = v;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_cuckoo_options_set_use_module_hash(
|
|
|
|
rocksdb_cuckoo_table_options_t* options, unsigned char v) {
|
|
|
|
options->rep.use_module_hash = v;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_options_set_cuckoo_table_factory(
|
|
|
|
rocksdb_options_t* opt,
|
|
|
|
rocksdb_cuckoo_table_options_t* table_options) {
|
|
|
|
if (table_options) {
|
|
|
|
opt->rep.table_factory.reset(
|
2020-02-20 20:07:53 +00:00
|
|
|
ROCKSDB_NAMESPACE::NewCuckooTableFactory(table_options->rep));
|
2014-11-14 00:57:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-28 01:31:33 +00:00
|
|
|
void rocksdb_set_options(
|
|
|
|
rocksdb_t* db, int count, const char* const keys[],
const char* const values[], char** errptr) {
|
|
|
|
std::unordered_map<std::string, std::string> options_map;
|
|
|
|
for (int i = 0; i < count; i++)
|
|
|
|
options_map[keys[i]] = values[i];
|
|
|
|
SaveError(errptr,
|
|
|
|
db->rep->SetOptions(options_map));
|
|
|
|
}
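// Illustrative sketch (C), not part of this translation unit: changing mutable
// options on a live DB. Option names are passed as strings and values are
// parsed by RocksDB; assumes an open `db`.
//
//   char* err = NULL;
//   const char* keys[] = {"max_write_buffer_number",
//                         "level0_slowdown_writes_trigger"};
//   const char* vals[] = {"4", "30"};
//   rocksdb_set_options(db, 2, keys, vals, &err);
//   if (err != NULL) {
//     // e.g. the option is not dynamically changeable or the value is invalid
//     rocksdb_free(err);
//   }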
|
2014-11-14 00:57:01 +00:00
|
|
|
|
2018-12-17 21:48:53 +00:00
|
|
|
void rocksdb_set_options_cf(
|
|
|
|
rocksdb_t* db, rocksdb_column_family_handle_t* handle, int count,
const char* const keys[], const char* const values[], char** errptr) {
|
|
|
|
std::unordered_map<std::string, std::string> options_map;
|
|
|
|
for (int i = 0; i < count; i++)
|
|
|
|
options_map[keys[i]] = values[i];
|
|
|
|
SaveError(errptr,
|
|
|
|
db->rep->SetOptions(handle->rep, options_map));
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
rocksdb_options_t* rocksdb_options_create() {
|
|
|
|
return new rocksdb_options_t;
|
2011-08-05 20:40:49 +00:00
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_destroy(rocksdb_options_t* options) {
|
2011-08-05 20:40:49 +00:00
|
|
|
delete options;
|
|
|
|
}
|
|
|
|
|
2020-06-02 20:44:53 +00:00
|
|
|
rocksdb_options_t* rocksdb_options_create_copy(rocksdb_options_t* options) {
|
|
|
|
return new rocksdb_options_t(*options);
|
|
|
|
}
|
|
|
|
|
2014-07-08 04:12:25 +00:00
|
|
|
void rocksdb_options_increase_parallelism(
|
|
|
|
rocksdb_options_t* opt, int total_threads) {
|
|
|
|
opt->rep.IncreaseParallelism(total_threads);
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_options_optimize_for_point_lookup(
|
2014-08-26 21:15:00 +00:00
|
|
|
rocksdb_options_t* opt, uint64_t block_cache_size_mb) {
|
|
|
|
opt->rep.OptimizeForPointLookup(block_cache_size_mb);
|
2014-07-08 04:12:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_options_optimize_level_style_compaction(
|
|
|
|
rocksdb_options_t* opt, uint64_t memtable_memory_budget) {
|
|
|
|
opt->rep.OptimizeLevelStyleCompaction(memtable_memory_budget);
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_options_optimize_universal_style_compaction(
|
|
|
|
rocksdb_options_t* opt, uint64_t memtable_memory_budget) {
|
|
|
|
opt->rep.OptimizeUniversalStyleCompaction(memtable_memory_budget);
|
|
|
|
}
|
|
|
|
|
2018-01-09 01:16:22 +00:00
|
|
|
void rocksdb_options_set_allow_ingest_behind(
|
|
|
|
rocksdb_options_t* opt, unsigned char v) {
|
|
|
|
opt->rep.allow_ingest_behind = v;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
unsigned char rocksdb_options_get_allow_ingest_behind(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.allow_ingest_behind;
|
|
|
|
}
|
|
|
|
|
2014-06-18 02:28:11 +00:00
|
|
|
void rocksdb_options_set_compaction_filter(
|
|
|
|
rocksdb_options_t* opt,
|
|
|
|
rocksdb_compactionfilter_t* filter) {
|
|
|
|
opt->rep.compaction_filter = filter;
|
|
|
|
}
|
|
|
|
|
2014-07-04 04:04:55 +00:00
|
|
|
void rocksdb_options_set_compaction_filter_factory(
|
|
|
|
rocksdb_options_t* opt, rocksdb_compactionfilterfactory_t* factory) {
|
|
|
|
opt->rep.compaction_filter_factory =
|
|
|
|
std::shared_ptr<CompactionFilterFactory>(factory);
|
|
|
|
}
|
|
|
|
|
2016-04-22 20:24:09 +00:00
|
|
|
void rocksdb_options_compaction_readahead_size(
|
|
|
|
rocksdb_options_t* opt, size_t s) {
|
|
|
|
opt->rep.compaction_readahead_size = s;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
size_t rocksdb_options_get_compaction_readahead_size(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.compaction_readahead_size;
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_comparator(
|
|
|
|
rocksdb_options_t* opt,
|
|
|
|
rocksdb_comparator_t* cmp) {
|
2011-08-05 20:40:49 +00:00
|
|
|
opt->rep.comparator = cmp;
|
|
|
|
}
|
|
|
|
|
2014-02-24 23:15:34 +00:00
|
|
|
void rocksdb_options_set_merge_operator(
|
2014-02-12 21:49:00 +00:00
|
|
|
rocksdb_options_t* opt,
|
|
|
|
rocksdb_mergeoperator_t* merge_operator) {
|
|
|
|
opt->rep.merge_operator = std::shared_ptr<MergeOperator>(merge_operator);
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_create_if_missing(
|
|
|
|
rocksdb_options_t* opt, unsigned char v) {
|
2011-08-05 20:40:49 +00:00
|
|
|
opt->rep.create_if_missing = v;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
unsigned char rocksdb_options_get_create_if_missing(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.create_if_missing;
|
|
|
|
}
|
|
|
|
|
2014-07-10 19:53:46 +00:00
|
|
|
void rocksdb_options_set_create_missing_column_families(
|
|
|
|
rocksdb_options_t* opt, unsigned char v) {
|
|
|
|
opt->rep.create_missing_column_families = v;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
unsigned char rocksdb_options_get_create_missing_column_families(
|
|
|
|
rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.create_missing_column_families;
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_error_if_exists(
|
|
|
|
rocksdb_options_t* opt, unsigned char v) {
|
2011-08-05 20:40:49 +00:00
|
|
|
opt->rep.error_if_exists = v;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
unsigned char rocksdb_options_get_error_if_exists(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.error_if_exists;
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_paranoid_checks(
|
|
|
|
rocksdb_options_t* opt, unsigned char v) {
|
2011-08-05 20:40:49 +00:00
|
|
|
opt->rep.paranoid_checks = v;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
unsigned char rocksdb_options_get_paranoid_checks(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.paranoid_checks;
|
|
|
|
}
|
|
|
|
|
2017-09-28 00:37:08 +00:00
|
|
|
void rocksdb_options_set_db_paths(rocksdb_options_t* opt,
|
|
|
|
const rocksdb_dbpath_t** dbpath_values,
|
2017-07-24 18:47:34 +00:00
|
|
|
size_t num_paths) {
|
|
|
|
std::vector<DbPath> db_paths(num_paths);
|
|
|
|
for (size_t i = 0; i < num_paths; ++i) {
|
|
|
|
db_paths[i] = dbpath_values[i]->rep;
|
|
|
|
}
|
|
|
|
opt->rep.db_paths = db_paths;
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_env(rocksdb_options_t* opt, rocksdb_env_t* env) {
|
2014-03-10 19:56:46 +00:00
|
|
|
opt->rep.env = (env ? env->rep : nullptr);
|
2011-08-05 20:40:49 +00:00
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_info_log(rocksdb_options_t* opt, rocksdb_logger_t* l) {
|
2013-01-20 10:07:13 +00:00
|
|
|
if (l) {
|
|
|
|
opt->rep.info_log = l->rep;
|
|
|
|
}
|
2011-08-05 20:40:49 +00:00
|
|
|
}
|
|
|
|
|
2014-04-03 08:47:07 +00:00
|
|
|
void rocksdb_options_set_info_log_level(
|
|
|
|
rocksdb_options_t* opt, int v) {
|
|
|
|
opt->rep.info_log_level = static_cast<InfoLogLevel>(v);
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
int rocksdb_options_get_info_log_level(rocksdb_options_t* opt) {
|
|
|
|
return static_cast<int>(opt->rep.info_log_level);
|
|
|
|
}
|
|
|
|
|
2014-12-02 20:09:20 +00:00
|
|
|
void rocksdb_options_set_db_write_buffer_size(rocksdb_options_t* opt,
|
|
|
|
size_t s) {
|
|
|
|
opt->rep.db_write_buffer_size = s;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
size_t rocksdb_options_get_db_write_buffer_size(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.db_write_buffer_size;
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_write_buffer_size(rocksdb_options_t* opt, size_t s) {
|
2011-08-05 20:40:49 +00:00
|
|
|
opt->rep.write_buffer_size = s;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
size_t rocksdb_options_get_write_buffer_size(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.write_buffer_size;
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_max_open_files(rocksdb_options_t* opt, int n) {
|
2011-08-05 20:40:49 +00:00
|
|
|
opt->rep.max_open_files = n;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
int rocksdb_options_get_max_open_files(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.max_open_files;
|
|
|
|
}
|
|
|
|
|
2017-05-09 05:09:06 +00:00
|
|
|
void rocksdb_options_set_max_file_opening_threads(rocksdb_options_t* opt, int n) {
|
|
|
|
opt->rep.max_file_opening_threads = n;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
int rocksdb_options_get_max_file_opening_threads(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.max_file_opening_threads;
|
|
|
|
}
|
|
|
|
|
2014-11-25 06:00:29 +00:00
|
|
|
void rocksdb_options_set_max_total_wal_size(rocksdb_options_t* opt, uint64_t n) {
|
|
|
|
opt->rep.max_total_wal_size = n;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
uint64_t rocksdb_options_get_max_total_wal_size(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.max_total_wal_size;
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_target_file_size_base(
|
|
|
|
rocksdb_options_t* opt, uint64_t n) {
|
2012-06-23 02:30:03 +00:00
|
|
|
opt->rep.target_file_size_base = n;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
uint64_t rocksdb_options_get_target_file_size_base(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.target_file_size_base;
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_target_file_size_multiplier(
|
|
|
|
rocksdb_options_t* opt, int n) {
|
2012-06-23 02:30:03 +00:00
|
|
|
opt->rep.target_file_size_multiplier = n;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
int rocksdb_options_get_target_file_size_multiplier(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.target_file_size_multiplier;
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_max_bytes_for_level_base(
|
|
|
|
rocksdb_options_t* opt, uint64_t n) {
|
2012-06-23 02:30:03 +00:00
|
|
|
opt->rep.max_bytes_for_level_base = n;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
uint64_t rocksdb_options_get_max_bytes_for_level_base(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.max_bytes_for_level_base;
|
|
|
|
}
|
|
|
|
|
2016-11-30 19:05:46 +00:00
|
|
|
void rocksdb_options_set_level_compaction_dynamic_level_bytes(
|
|
|
|
rocksdb_options_t* opt, unsigned char v) {
|
|
|
|
opt->rep.level_compaction_dynamic_level_bytes = v;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
unsigned char rocksdb_options_get_level_compaction_dynamic_level_bytes(
|
|
|
|
rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.level_compaction_dynamic_level_bytes;
|
|
|
|
}
|
|
|
|
|
2016-11-02 04:05:32 +00:00
|
|
|
void rocksdb_options_set_max_bytes_for_level_multiplier(rocksdb_options_t* opt,
|
|
|
|
double n) {
|
2012-06-23 02:30:03 +00:00
|
|
|
opt->rep.max_bytes_for_level_multiplier = n;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
double rocksdb_options_get_max_bytes_for_level_multiplier(
|
|
|
|
rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.max_bytes_for_level_multiplier;
|
|
|
|
}
|
|
|
|
|
2016-06-16 23:02:52 +00:00
|
|
|
void rocksdb_options_set_max_compaction_bytes(rocksdb_options_t* opt,
|
|
|
|
uint64_t n) {
|
|
|
|
opt->rep.max_compaction_bytes = n;
|
2012-06-23 02:30:03 +00:00
|
|
|
}
|
|
|
|
|
2020-07-10 21:27:57 +00:00
|
|
|
uint64_t rocksdb_options_get_max_compaction_bytes(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.max_compaction_bytes;
|
|
|
|
}
|
|
|
|
|
2014-04-03 08:47:07 +00:00
|
|
|
void rocksdb_options_set_max_bytes_for_level_multiplier_additional(
|
|
|
|
rocksdb_options_t* opt, int* level_values, size_t num_levels) {
|
|
|
|
opt->rep.max_bytes_for_level_multiplier_additional.resize(num_levels);
|
|
|
|
for (size_t i = 0; i < num_levels; ++i) {
|
|
|
|
opt->rep.max_bytes_for_level_multiplier_additional[i] = level_values[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-02-12 21:49:00 +00:00
|
|
|
void rocksdb_options_enable_statistics(rocksdb_options_t* opt) {
|
2020-02-20 20:07:53 +00:00
|
|
|
opt->rep.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
|
2014-02-12 21:49:00 +00:00
|
|
|
}
|
|
|
|
|
2017-09-13 18:56:19 +00:00
|
|
|
void rocksdb_options_set_skip_stats_update_on_db_open(rocksdb_options_t* opt,
|
|
|
|
unsigned char val) {
|
2017-08-11 19:13:46 +00:00
|
|
|
opt->rep.skip_stats_update_on_db_open = val;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
unsigned char rocksdb_options_get_skip_stats_update_on_db_open(
|
|
|
|
rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.skip_stats_update_on_db_open;
|
|
|
|
}
|
|
|
|
|
// skip_checking_sst_file_sizes_on_db_open: with paranoid_checks enabled,
// DBImpl::CheckConsistency() normally calls Env::GetFileSize() on every SST
// file. Setting this option makes open only verify, via GetChildren() on the
// parent directory, that the expected files are present, which avoids a slow
// size scan when there are many files or the Env is backed by remote storage.
2020-02-04 09:24:29 +00:00
|
|
|
void rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open(
|
|
|
|
rocksdb_options_t* opt, unsigned char val) {
|
|
|
|
opt->rep.skip_checking_sst_file_sizes_on_db_open = val;
|
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
unsigned char rocksdb_options_get_skip_checking_sst_file_sizes_on_db_open(
|
|
|
|
rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.skip_checking_sst_file_sizes_on_db_open;
|
|
|
|
}
|
|
|
|
|
2021-04-16 12:55:08 +00:00
|
|
|
/* Blob Options Settings */
|
|
|
|
void rocksdb_options_set_enable_blob_files(rocksdb_options_t* opt,
|
|
|
|
unsigned char val) {
|
|
|
|
opt->rep.enable_blob_files = val;
|
|
|
|
}
|
|
|
|
unsigned char rocksdb_options_get_enable_blob_files(
|
|
|
|
rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.enable_blob_files;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_options_set_min_blob_size(rocksdb_options_t* opt, uint64_t val) {
|
|
|
|
opt->rep.min_blob_size = val;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t rocksdb_options_get_min_blob_size(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.min_blob_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_options_set_blob_file_size(rocksdb_options_t* opt, uint64_t val) {
|
|
|
|
opt->rep.blob_file_size = val;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t rocksdb_options_get_blob_file_size(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.blob_file_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_options_set_blob_compression_type(rocksdb_options_t* opt,
|
|
|
|
int val) {
|
|
|
|
opt->rep.blob_compression_type = static_cast<CompressionType>(val);
|
|
|
|
}
|
|
|
|
|
|
|
|
int rocksdb_options_get_blob_compression_type(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.blob_compression_type;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_options_set_enable_blob_gc(rocksdb_options_t* opt,
|
|
|
|
unsigned char val) {
|
|
|
|
opt->rep.enable_blob_garbage_collection = val;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned char rocksdb_options_get_enable_blob_gc(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.enable_blob_garbage_collection;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_options_set_blob_gc_age_cutoff(rocksdb_options_t* opt,
|
|
|
|
double val) {
|
|
|
|
opt->rep.blob_garbage_collection_age_cutoff = val;
|
|
|
|
}
|
|
|
|
|
|
|
|
double rocksdb_options_get_blob_gc_age_cutoff(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.blob_garbage_collection_age_cutoff;
|
|
|
|
}
|
|
|
|
|
// blob_garbage_collection_force_threshold: schedules targeted compactions for
// the SST files that keep the oldest blob files alive once the overall ratio
// of garbage in those blob files meets the threshold and they are past
// blob_garbage_collection_age_cutoff, so heavily skewed workloads cannot
// starve blob garbage collection. Leveled compaction only; the default of 1.0
// leaves the feature inactive.
2021-10-12 01:00:44 +00:00
|
|
|
void rocksdb_options_set_blob_gc_force_threshold(rocksdb_options_t* opt,
|
|
|
|
double val) {
|
|
|
|
opt->rep.blob_garbage_collection_force_threshold = val;
|
|
|
|
}
|
|
|
|
|
|
|
|
double rocksdb_options_get_blob_gc_force_threshold(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.blob_garbage_collection_force_threshold;
|
|
|
|
}
|
|
|
|
|
2021-11-20 01:52:42 +00:00
|
|
|
void rocksdb_options_set_blob_compaction_readahead_size(rocksdb_options_t* opt,
|
|
|
|
uint64_t val) {
|
|
|
|
opt->rep.blob_compaction_readahead_size = val;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t rocksdb_options_get_blob_compaction_readahead_size(
|
|
|
|
rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.blob_compaction_readahead_size;
|
|
|
|
}
|
|
|
|
|
2022-06-03 03:04:33 +00:00
|
|
|
void rocksdb_options_set_blob_file_starting_level(rocksdb_options_t* opt,
|
|
|
|
int val) {
|
|
|
|
opt->rep.blob_file_starting_level = val;
|
|
|
|
}
|
|
|
|
|
|
|
|
int rocksdb_options_get_blob_file_starting_level(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.blob_file_starting_level;
|
|
|
|
}
|
|
|
|
|
2022-06-14 21:19:26 +00:00
|
|
|
void rocksdb_options_set_blob_cache(rocksdb_options_t* opt,
|
|
|
|
rocksdb_cache_t* blob_cache) {
|
|
|
|
opt->rep.blob_cache = blob_cache->rep;
|
|
|
|
}
|
|
|
|
|
2022-07-17 14:13:59 +00:00
|
|
|
void rocksdb_options_set_prepopulate_blob_cache(rocksdb_options_t* opt, int t) {
|
|
|
|
opt->rep.prepopulate_blob_cache = static_cast<PrepopulateBlobCache>(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
int rocksdb_options_get_prepopulate_blob_cache(rocksdb_options_t* opt) {
|
|
|
|
return static_cast<int>(opt->rep.prepopulate_blob_cache);
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_num_levels(rocksdb_options_t* opt, int n) {
|
2012-11-29 00:42:36 +00:00
|
|
|
opt->rep.num_levels = n;
|
2012-06-23 02:30:03 +00:00
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
int rocksdb_options_get_num_levels(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.num_levels;
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_level0_file_num_compaction_trigger(
|
|
|
|
rocksdb_options_t* opt, int n) {
|
2012-11-29 00:42:36 +00:00
|
|
|
opt->rep.level0_file_num_compaction_trigger = n;
|
2012-06-23 02:30:03 +00:00
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
int rocksdb_options_get_level0_file_num_compaction_trigger(
|
|
|
|
rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.level0_file_num_compaction_trigger;
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_level0_slowdown_writes_trigger(
|
|
|
|
rocksdb_options_t* opt, int n) {
|
2012-11-29 00:42:36 +00:00
|
|
|
opt->rep.level0_slowdown_writes_trigger = n;
|
2012-06-23 02:30:03 +00:00
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
int rocksdb_options_get_level0_slowdown_writes_trigger(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.level0_slowdown_writes_trigger;
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_level0_stop_writes_trigger(
|
|
|
|
rocksdb_options_t* opt, int n) {
|
2012-11-29 00:42:36 +00:00
|
|
|
opt->rep.level0_stop_writes_trigger = n;
|
2012-06-23 02:30:03 +00:00
|
|
|
}
|
|
|
|
|
2020-06-04 00:06:44 +00:00
|
|
|
int rocksdb_options_get_level0_stop_writes_trigger(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.level0_stop_writes_trigger;
|
|
|
|
}
|
|
|
|
|
2016-09-09 17:11:30 +00:00
|
|
|
void rocksdb_options_set_wal_recovery_mode(rocksdb_options_t* opt, int mode) {
|
|
|
|
opt->rep.wal_recovery_mode = static_cast<WALRecoveryMode>(mode);
|
|
|
|
}
|
|
|
|
|
2020-07-10 21:27:57 +00:00
|
|
|
int rocksdb_options_get_wal_recovery_mode(rocksdb_options_t* opt) {
|
|
|
|
return static_cast<int>(opt->rep.wal_recovery_mode);
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_compression(rocksdb_options_t* opt, int t) {
|
2011-08-05 20:40:49 +00:00
|
|
|
opt->rep.compression = static_cast<CompressionType>(t);
|
|
|
|
}
|
|
|
|
|
2020-07-10 21:27:57 +00:00
|
|
|
int rocksdb_options_get_compression(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.compression;
|
|
|
|
}
|
|
|
|
|
2020-06-03 19:22:29 +00:00
|
|
|
void rocksdb_options_set_bottommost_compression(rocksdb_options_t* opt, int t) {
|
|
|
|
opt->rep.bottommost_compression = static_cast<CompressionType>(t);
|
|
|
|
}
|
|
|
|
|
2020-07-10 21:27:57 +00:00
|
|
|
int rocksdb_options_get_bottommost_compression(rocksdb_options_t* opt) {
|
|
|
|
return opt->rep.bottommost_compression;
|
|
|
|
}
|
|
|
|
|
2013-12-10 08:45:07 +00:00
|
|
|
void rocksdb_options_set_compression_per_level(rocksdb_options_t* opt,
|
2022-01-12 16:33:37 +00:00
|
|
|
const int* level_values,
|
2013-01-24 18:54:26 +00:00
|
|
|
size_t num_levels) {
|
|
|
|
opt->rep.compression_per_level.resize(num_levels);
|
|
|
|
for (size_t i = 0; i < num_levels; ++i) {
|
|
|
|
opt->rep.compression_per_level[i] =
|
|
|
|
static_cast<CompressionType>(level_values[i]);
|
|
|
|
}
|
|
|
|
}
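// Illustrative sketch (C), not part of this translation unit: leaving the
// upper levels uncompressed and compressing the lower ones, using the
// compression enum constants from rocksdb/c.h.
//
//   int per_level[7] = {
//       rocksdb_no_compression,     rocksdb_no_compression,
//       rocksdb_snappy_compression, rocksdb_snappy_compression,
//       rocksdb_snappy_compression, rocksdb_snappy_compression,
//       rocksdb_snappy_compression};
//   rocksdb_options_t* opts = rocksdb_options_create();
//   rocksdb_options_set_num_levels(opts, 7);
//   rocksdb_options_set_compression_per_level(opts, per_level, 7);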
|
|
|
|
|
2018-06-28 00:34:07 +00:00
|
|
|
void rocksdb_options_set_bottommost_compression_options(rocksdb_options_t* opt,
|
|
|
|
int w_bits, int level,
|
|
|
|
int strategy,
|
|
|
|
int max_dict_bytes,
|
2020-06-03 19:22:29 +00:00
|
|
|
unsigned char enabled) {
|
2018-06-28 00:34:07 +00:00
|
|
|
opt->rep.bottommost_compression_opts.window_bits = w_bits;
|
|
|
|
opt->rep.bottommost_compression_opts.level = level;
|
|
|
|
opt->rep.bottommost_compression_opts.strategy = strategy;
|
|
|
|
opt->rep.bottommost_compression_opts.max_dict_bytes = max_dict_bytes;
|
|
|
|
opt->rep.bottommost_compression_opts.enabled = enabled;
|
|
|
|
}
|
|
|
|
|
2020-06-03 19:22:29 +00:00
|
|
|
void rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes(
|
|
|
|
rocksdb_options_t* opt, int zstd_max_train_bytes, unsigned char enabled) {
|
|
|
|
opt->rep.bottommost_compression_opts.zstd_max_train_bytes =
|
|
|
|
zstd_max_train_bytes;
|
|
|
|
opt->rep.bottommost_compression_opts.enabled = enabled;
|
|
|
|
}
|
|
|
|
|
// use_zstd_dict_trainer: when true (the default), compression dictionaries are
// generated by ZSTD's dictionary trainer from the buffered samples; when
// false, ZDICT_finalizeDictionary() is used instead, which in the original
// benchmarks (#9857) saved somewhat less space but used less CPU and memory
// than training.
2022-05-20 19:09:09 +00:00
void rocksdb_options_set_bottommost_compression_options_use_zstd_dict_trainer(
    rocksdb_options_t* opt, unsigned char use_zstd_dict_trainer,
    unsigned char enabled) {
  opt->rep.bottommost_compression_opts.use_zstd_dict_trainer =
      use_zstd_dict_trainer;
  opt->rep.bottommost_compression_opts.enabled = enabled;
}

unsigned char
rocksdb_options_get_bottommost_compression_options_use_zstd_dict_trainer(
    rocksdb_options_t* opt) {
  return opt->rep.bottommost_compression_opts.use_zstd_dict_trainer;
}

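// Usage sketch (not part of the upstream file; values are illustrative
// assumptions): turning the ZSTD trainer on for the bottommost level via the
// setter above. The trailing argument mirrors CompressionOptions::enabled.
//
//   rocksdb_options_set_bottommost_compression_options_use_zstd_dict_trainer(
//       opt, 1 /* use_zstd_dict_trainer */, 1 /* enabled */);
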
void rocksdb_options_set_bottommost_compression_options_max_dict_buffer_bytes(
    rocksdb_options_t* opt, uint64_t max_dict_buffer_bytes,
    unsigned char enabled) {
  opt->rep.bottommost_compression_opts.max_dict_buffer_bytes =
      max_dict_buffer_bytes;
  opt->rep.bottommost_compression_opts.enabled = enabled;
}

void rocksdb_options_set_compression_options(rocksdb_options_t* opt, int w_bits,
                                             int level, int strategy,
                                             int max_dict_bytes) {
  opt->rep.compression_opts.window_bits = w_bits;
  opt->rep.compression_opts.level = level;
  opt->rep.compression_opts.strategy = strategy;
  opt->rep.compression_opts.max_dict_bytes = max_dict_bytes;
}

void rocksdb_options_set_compression_options_zstd_max_train_bytes(
    rocksdb_options_t* opt, int zstd_max_train_bytes) {
  opt->rep.compression_opts.zstd_max_train_bytes = zstd_max_train_bytes;
}

int rocksdb_options_get_compression_options_zstd_max_train_bytes(
    rocksdb_options_t* opt) {
  return opt->rep.compression_opts.zstd_max_train_bytes;
}

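// Usage sketch (illustrative assumptions, not a recommendation): a 16 KiB
// ZSTD dictionary trained from up to 1 MiB of sampled blocks, assuming the
// rocksdb_zstd_compression constant from c.h is available.
//
//   rocksdb_options_set_compression(opt, rocksdb_zstd_compression);
//   rocksdb_options_set_compression_options(opt, -14 /* window_bits */,
//                                           3 /* level */, 0 /* strategy */,
//                                           16384 /* max_dict_bytes */);
//   rocksdb_options_set_compression_options_zstd_max_train_bytes(opt, 1048576);
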
void rocksdb_options_set_compression_options_use_zstd_dict_trainer(
    rocksdb_options_t* opt, unsigned char use_zstd_dict_trainer) {
  opt->rep.compression_opts.use_zstd_dict_trainer = use_zstd_dict_trainer;
}

unsigned char rocksdb_options_get_compression_options_use_zstd_dict_trainer(
    rocksdb_options_t* opt) {
  return opt->rep.compression_opts.use_zstd_dict_trainer;
}

void rocksdb_options_set_compression_options_parallel_threads(
    rocksdb_options_t* opt, int value) {
  opt->rep.compression_opts.parallel_threads = value;
}

int rocksdb_options_get_compression_options_parallel_threads(
    rocksdb_options_t* opt) {
  return opt->rep.compression_opts.parallel_threads;
}

void rocksdb_options_set_compression_options_max_dict_buffer_bytes(
    rocksdb_options_t* opt, uint64_t max_dict_buffer_bytes) {
  opt->rep.compression_opts.max_dict_buffer_bytes = max_dict_buffer_bytes;
}

uint64_t rocksdb_options_get_compression_options_max_dict_buffer_bytes(
    rocksdb_options_t* opt) {
  return opt->rep.compression_opts.max_dict_buffer_bytes;
}

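// Usage sketch (assumed value): cap how much data is buffered while
// collecting dictionary samples, trading some compression ratio for more
// predictable memory usage during flush/compaction.
//
//   rocksdb_options_set_compression_options_max_dict_buffer_bytes(
//       opt, 64 * 1024 * 1024 /* 64 MiB */);
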
void rocksdb_options_set_prefix_extractor(
    rocksdb_options_t* opt, rocksdb_slicetransform_t* prefix_extractor) {
  opt->rep.prefix_extractor.reset(prefix_extractor);
}

void rocksdb_options_set_use_fsync(
    rocksdb_options_t* opt, int use_fsync) {
  opt->rep.use_fsync = use_fsync;
}

int rocksdb_options_get_use_fsync(rocksdb_options_t* opt) {
  return opt->rep.use_fsync;
}

void rocksdb_options_set_db_log_dir(
    rocksdb_options_t* opt, const char* db_log_dir) {
  opt->rep.db_log_dir = db_log_dir;
}

void rocksdb_options_set_wal_dir(
    rocksdb_options_t* opt, const char* v) {
  opt->rep.wal_dir = v;
}

void rocksdb_options_set_WAL_ttl_seconds(rocksdb_options_t* opt, uint64_t ttl) {
  opt->rep.WAL_ttl_seconds = ttl;
}

uint64_t rocksdb_options_get_WAL_ttl_seconds(rocksdb_options_t* opt) {
  return opt->rep.WAL_ttl_seconds;
}

void rocksdb_options_set_WAL_size_limit_MB(
    rocksdb_options_t* opt, uint64_t limit) {
  opt->rep.WAL_size_limit_MB = limit;
}

uint64_t rocksdb_options_get_WAL_size_limit_MB(rocksdb_options_t* opt) {
  return opt->rep.WAL_size_limit_MB;
}

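// Usage sketch (assumed values): keep archived WAL files for up to an hour,
// or until the archive exceeds 1 GiB, whichever limit is hit first.
//
//   rocksdb_options_set_WAL_ttl_seconds(opt, 3600);
//   rocksdb_options_set_WAL_size_limit_MB(opt, 1024);
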
void rocksdb_options_set_manifest_preallocation_size(
    rocksdb_options_t* opt, size_t v) {
  opt->rep.manifest_preallocation_size = v;
}

size_t rocksdb_options_get_manifest_preallocation_size(rocksdb_options_t* opt) {
  return opt->rep.manifest_preallocation_size;
}

void rocksdb_options_set_use_direct_reads(rocksdb_options_t* opt,
                                          unsigned char v) {
  opt->rep.use_direct_reads = v;
}

unsigned char rocksdb_options_get_use_direct_reads(rocksdb_options_t* opt) {
  return opt->rep.use_direct_reads;
}

void rocksdb_options_set_use_direct_io_for_flush_and_compaction(
    rocksdb_options_t* opt, unsigned char v) {
  opt->rep.use_direct_io_for_flush_and_compaction = v;
}

unsigned char rocksdb_options_get_use_direct_io_for_flush_and_compaction(
    rocksdb_options_t* opt) {
  return opt->rep.use_direct_io_for_flush_and_compaction;
}

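// Usage sketch: opt in to O_DIRECT for user reads and for flush/compaction
// I/O; whether this helps is workload and filesystem dependent.
//
//   rocksdb_options_set_use_direct_reads(opt, 1);
//   rocksdb_options_set_use_direct_io_for_flush_and_compaction(opt, 1);
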
void rocksdb_options_set_allow_mmap_reads(
    rocksdb_options_t* opt, unsigned char v) {
  opt->rep.allow_mmap_reads = v;
}

unsigned char rocksdb_options_get_allow_mmap_reads(rocksdb_options_t* opt) {
  return opt->rep.allow_mmap_reads;
}

void rocksdb_options_set_allow_mmap_writes(
    rocksdb_options_t* opt, unsigned char v) {
  opt->rep.allow_mmap_writes = v;
}

unsigned char rocksdb_options_get_allow_mmap_writes(rocksdb_options_t* opt) {
  return opt->rep.allow_mmap_writes;
}

void rocksdb_options_set_is_fd_close_on_exec(
    rocksdb_options_t* opt, unsigned char v) {
  opt->rep.is_fd_close_on_exec = v;
}

unsigned char rocksdb_options_get_is_fd_close_on_exec(rocksdb_options_t* opt) {
  return opt->rep.is_fd_close_on_exec;
}

void rocksdb_options_set_stats_dump_period_sec(
    rocksdb_options_t* opt, unsigned int v) {
  opt->rep.stats_dump_period_sec = v;
}

unsigned int rocksdb_options_get_stats_dump_period_sec(rocksdb_options_t* opt) {
  return opt->rep.stats_dump_period_sec;
}

void rocksdb_options_set_stats_persist_period_sec(rocksdb_options_t* opt,
                                                  unsigned int v) {
  opt->rep.stats_persist_period_sec = v;
}

unsigned int rocksdb_options_get_stats_persist_period_sec(
    rocksdb_options_t* opt) {
  return opt->rep.stats_persist_period_sec;
}

void rocksdb_options_set_advise_random_on_open(
    rocksdb_options_t* opt, unsigned char v) {
  opt->rep.advise_random_on_open = v;
}

unsigned char rocksdb_options_get_advise_random_on_open(
    rocksdb_options_t* opt) {
  return opt->rep.advise_random_on_open;
}

void rocksdb_options_set_access_hint_on_compaction_start(
    rocksdb_options_t* opt, int v) {
  switch (v) {
    case 0:
      opt->rep.access_hint_on_compaction_start =
          ROCKSDB_NAMESPACE::Options::NONE;
      break;
    case 1:
      opt->rep.access_hint_on_compaction_start =
          ROCKSDB_NAMESPACE::Options::NORMAL;
      break;
    case 2:
      opt->rep.access_hint_on_compaction_start =
          ROCKSDB_NAMESPACE::Options::SEQUENTIAL;
      break;
    case 3:
      opt->rep.access_hint_on_compaction_start =
          ROCKSDB_NAMESPACE::Options::WILLNEED;
      break;
    default:
      assert(0);
  }
}

int rocksdb_options_get_access_hint_on_compaction_start(
    rocksdb_options_t* opt) {
  return opt->rep.access_hint_on_compaction_start;
}

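// The integer argument maps onto Options::AccessHint: 0 = NONE, 1 = NORMAL,
// 2 = SEQUENTIAL, 3 = WILLNEED; anything else trips the assert above.
// Sketch: rocksdb_options_set_access_hint_on_compaction_start(opt, 2);
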
void rocksdb_options_set_use_adaptive_mutex(
    rocksdb_options_t* opt, unsigned char v) {
  opt->rep.use_adaptive_mutex = v;
}

unsigned char rocksdb_options_get_use_adaptive_mutex(rocksdb_options_t* opt) {
  return opt->rep.use_adaptive_mutex;
}

void rocksdb_options_set_wal_bytes_per_sync(
    rocksdb_options_t* opt, uint64_t v) {
  opt->rep.wal_bytes_per_sync = v;
}

uint64_t rocksdb_options_get_wal_bytes_per_sync(rocksdb_options_t* opt) {
  return opt->rep.wal_bytes_per_sync;
}

void rocksdb_options_set_bytes_per_sync(
    rocksdb_options_t* opt, uint64_t v) {
  opt->rep.bytes_per_sync = v;
}

uint64_t rocksdb_options_get_bytes_per_sync(rocksdb_options_t* opt) {
  return opt->rep.bytes_per_sync;
}

void rocksdb_options_set_writable_file_max_buffer_size(rocksdb_options_t* opt,
                                                       uint64_t v) {
  opt->rep.writable_file_max_buffer_size = static_cast<size_t>(v);
}

uint64_t rocksdb_options_get_writable_file_max_buffer_size(
    rocksdb_options_t* opt) {
  return opt->rep.writable_file_max_buffer_size;
}

void rocksdb_options_set_allow_concurrent_memtable_write(rocksdb_options_t* opt,
                                                         unsigned char v) {
  opt->rep.allow_concurrent_memtable_write = v;
}

unsigned char rocksdb_options_get_allow_concurrent_memtable_write(
    rocksdb_options_t* opt) {
  return opt->rep.allow_concurrent_memtable_write;
}

void rocksdb_options_set_enable_write_thread_adaptive_yield(
    rocksdb_options_t* opt, unsigned char v) {
  opt->rep.enable_write_thread_adaptive_yield = v;
}

unsigned char rocksdb_options_get_enable_write_thread_adaptive_yield(
    rocksdb_options_t* opt) {
  return opt->rep.enable_write_thread_adaptive_yield;
}

void rocksdb_options_set_max_sequential_skip_in_iterations(
    rocksdb_options_t* opt, uint64_t v) {
  opt->rep.max_sequential_skip_in_iterations = v;
}

uint64_t rocksdb_options_get_max_sequential_skip_in_iterations(
    rocksdb_options_t* opt) {
  return opt->rep.max_sequential_skip_in_iterations;
}

void rocksdb_options_set_max_write_buffer_number(rocksdb_options_t* opt, int n) {
  opt->rep.max_write_buffer_number = n;
}

int rocksdb_options_get_max_write_buffer_number(rocksdb_options_t* opt) {
  return opt->rep.max_write_buffer_number;
}

void rocksdb_options_set_min_write_buffer_number_to_merge(rocksdb_options_t* opt, int n) {
  opt->rep.min_write_buffer_number_to_merge = n;
}

int rocksdb_options_get_min_write_buffer_number_to_merge(
    rocksdb_options_t* opt) {
  return opt->rep.min_write_buffer_number_to_merge;
}

void rocksdb_options_set_max_write_buffer_number_to_maintain(
    rocksdb_options_t* opt, int n) {
  opt->rep.max_write_buffer_number_to_maintain = n;
}

int rocksdb_options_get_max_write_buffer_number_to_maintain(
    rocksdb_options_t* opt) {
  return opt->rep.max_write_buffer_number_to_maintain;
}

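// Usage sketch (assumed counts): allow up to four memtables in memory and
// merge at least two of them before a flush is written out.
//
//   rocksdb_options_set_max_write_buffer_number(opt, 4);
//   rocksdb_options_set_min_write_buffer_number_to_merge(opt, 2);
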
void rocksdb_options_set_max_write_buffer_size_to_maintain(
    rocksdb_options_t* opt, int64_t n) {
  opt->rep.max_write_buffer_size_to_maintain = n;
}

int64_t rocksdb_options_get_max_write_buffer_size_to_maintain(
    rocksdb_options_t* opt) {
  return opt->rep.max_write_buffer_size_to_maintain;
}

void rocksdb_options_set_enable_pipelined_write(rocksdb_options_t* opt,
                                                unsigned char v) {
  opt->rep.enable_pipelined_write = v;
}

unsigned char rocksdb_options_get_enable_pipelined_write(
    rocksdb_options_t* opt) {
  return opt->rep.enable_pipelined_write;
}

void rocksdb_options_set_unordered_write(rocksdb_options_t* opt,
                                         unsigned char v) {
  opt->rep.unordered_write = v;
}

unsigned char rocksdb_options_get_unordered_write(rocksdb_options_t* opt) {
  return opt->rep.unordered_write;
}

void rocksdb_options_set_max_subcompactions(rocksdb_options_t* opt,
                                            uint32_t n) {
  opt->rep.max_subcompactions = n;
}

uint32_t rocksdb_options_get_max_subcompactions(rocksdb_options_t* opt) {
  return opt->rep.max_subcompactions;
}

void rocksdb_options_set_max_background_jobs(rocksdb_options_t* opt, int n) {
  opt->rep.max_background_jobs = n;
}

int rocksdb_options_get_max_background_jobs(rocksdb_options_t* opt) {
  return opt->rep.max_background_jobs;
}

void rocksdb_options_set_max_background_compactions(rocksdb_options_t* opt, int n) {
  opt->rep.max_background_compactions = n;
}

int rocksdb_options_get_max_background_compactions(rocksdb_options_t* opt) {
  return opt->rep.max_background_compactions;
}

void rocksdb_options_set_max_background_flushes(rocksdb_options_t* opt, int n) {
  opt->rep.max_background_flushes = n;
}

int rocksdb_options_get_max_background_flushes(rocksdb_options_t* opt) {
  return opt->rep.max_background_flushes;
}

void rocksdb_options_set_experimental_mempurge_threshold(rocksdb_options_t* opt,
                                                         double v) {
  opt->rep.experimental_mempurge_threshold = v;
}

double rocksdb_options_get_experimental_mempurge_threshold(
    rocksdb_options_t* opt) {
  return opt->rep.experimental_mempurge_threshold;
}

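// Usage sketch (assumed value): max_background_jobs is the single knob that
// newer releases prefer; the per-compaction/per-flush setters above are the
// older, finer-grained controls.
//
//   rocksdb_options_set_max_background_jobs(opt, 4);
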
void rocksdb_options_set_max_log_file_size(rocksdb_options_t* opt, size_t v) {
  opt->rep.max_log_file_size = v;
}

size_t rocksdb_options_get_max_log_file_size(rocksdb_options_t* opt) {
  return opt->rep.max_log_file_size;
}

void rocksdb_options_set_log_file_time_to_roll(rocksdb_options_t* opt, size_t v) {
  opt->rep.log_file_time_to_roll = v;
}

size_t rocksdb_options_get_log_file_time_to_roll(rocksdb_options_t* opt) {
  return opt->rep.log_file_time_to_roll;
}

void rocksdb_options_set_keep_log_file_num(rocksdb_options_t* opt, size_t v) {
  opt->rep.keep_log_file_num = v;
}

size_t rocksdb_options_get_keep_log_file_num(rocksdb_options_t* opt) {
  return opt->rep.keep_log_file_num;
}

void rocksdb_options_set_recycle_log_file_num(rocksdb_options_t* opt,
                                              size_t v) {
  opt->rep.recycle_log_file_num = v;
}

size_t rocksdb_options_get_recycle_log_file_num(rocksdb_options_t* opt) {
  return opt->rep.recycle_log_file_num;
}

void rocksdb_options_set_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt, size_t v) {
  opt->rep.soft_pending_compaction_bytes_limit = v;
}

size_t rocksdb_options_get_soft_pending_compaction_bytes_limit(
    rocksdb_options_t* opt) {
  return opt->rep.soft_pending_compaction_bytes_limit;
}

void rocksdb_options_set_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt, size_t v) {
  opt->rep.hard_pending_compaction_bytes_limit = v;
}

size_t rocksdb_options_get_hard_pending_compaction_bytes_limit(
    rocksdb_options_t* opt) {
  return opt->rep.hard_pending_compaction_bytes_limit;
}

void rocksdb_options_set_max_manifest_file_size(
    rocksdb_options_t* opt, size_t v) {
  opt->rep.max_manifest_file_size = v;
}

size_t rocksdb_options_get_max_manifest_file_size(rocksdb_options_t* opt) {
  return opt->rep.max_manifest_file_size;
}

void rocksdb_options_set_table_cache_numshardbits(
    rocksdb_options_t* opt, int v) {
  opt->rep.table_cache_numshardbits = v;
}

int rocksdb_options_get_table_cache_numshardbits(rocksdb_options_t* opt) {
  return opt->rep.table_cache_numshardbits;
}

void rocksdb_options_set_arena_block_size(
    rocksdb_options_t* opt, size_t v) {
  opt->rep.arena_block_size = v;
}

size_t rocksdb_options_get_arena_block_size(rocksdb_options_t* opt) {
  return opt->rep.arena_block_size;
}

void rocksdb_options_set_disable_auto_compactions(rocksdb_options_t* opt, int disable) {
  opt->rep.disable_auto_compactions = disable;
}

unsigned char rocksdb_options_get_disable_auto_compactions(
    rocksdb_options_t* opt) {
  return opt->rep.disable_auto_compactions;
}

void rocksdb_options_set_optimize_filters_for_hits(rocksdb_options_t* opt, int v) {
  opt->rep.optimize_filters_for_hits = v;
}

unsigned char rocksdb_options_get_optimize_filters_for_hits(
    rocksdb_options_t* opt) {
  return opt->rep.optimize_filters_for_hits;
}

void rocksdb_options_set_delete_obsolete_files_period_micros(
    rocksdb_options_t* opt, uint64_t v) {
  opt->rep.delete_obsolete_files_period_micros = v;
}

uint64_t rocksdb_options_get_delete_obsolete_files_period_micros(
    rocksdb_options_t* opt) {
  return opt->rep.delete_obsolete_files_period_micros;
}

void rocksdb_options_prepare_for_bulk_load(rocksdb_options_t* opt) {
  opt->rep.PrepareForBulkLoad();
}

void rocksdb_options_set_memtable_vector_rep(rocksdb_options_t* opt) {
  opt->rep.memtable_factory.reset(new ROCKSDB_NAMESPACE::VectorRepFactory);
}

void rocksdb_options_set_memtable_prefix_bloom_size_ratio(
    rocksdb_options_t* opt, double v) {
  opt->rep.memtable_prefix_bloom_size_ratio = v;
}

double rocksdb_options_get_memtable_prefix_bloom_size_ratio(
    rocksdb_options_t* opt) {
  return opt->rep.memtable_prefix_bloom_size_ratio;
}

void rocksdb_options_set_memtable_huge_page_size(rocksdb_options_t* opt,
                                                 size_t v) {
  opt->rep.memtable_huge_page_size = v;
}

size_t rocksdb_options_get_memtable_huge_page_size(rocksdb_options_t* opt) {
  return opt->rep.memtable_huge_page_size;
}

void rocksdb_options_set_hash_skip_list_rep(
    rocksdb_options_t* opt, size_t bucket_count,
    int32_t skiplist_height, int32_t skiplist_branching_factor) {
  ROCKSDB_NAMESPACE::MemTableRepFactory* factory =
      ROCKSDB_NAMESPACE::NewHashSkipListRepFactory(
          bucket_count, skiplist_height, skiplist_branching_factor);
  opt->rep.memtable_factory.reset(factory);
}

void rocksdb_options_set_hash_link_list_rep(
    rocksdb_options_t* opt, size_t bucket_count) {
  opt->rep.memtable_factory.reset(
      ROCKSDB_NAMESPACE::NewHashLinkListRepFactory(bucket_count));
}

void rocksdb_options_set_plain_table_factory(
    rocksdb_options_t* opt, uint32_t user_key_len, int bloom_bits_per_key,
    double hash_table_ratio, size_t index_sparseness) {
  ROCKSDB_NAMESPACE::PlainTableOptions options;
  options.user_key_len = user_key_len;
  options.bloom_bits_per_key = bloom_bits_per_key;
  options.hash_table_ratio = hash_table_ratio;
  options.index_sparseness = index_sparseness;

  ROCKSDB_NAMESPACE::TableFactory* factory =
      ROCKSDB_NAMESPACE::NewPlainTableFactory(options);
  opt->rep.table_factory.reset(factory);
}

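// Usage sketch (assumed parameters): plain table needs a prefix extractor;
// rocksdb_slicetransform_create_fixed_prefix is assumed to be available from
// the C API, and a user_key_len of 0 requests variable-length keys.
//
//   rocksdb_options_set_prefix_extractor(
//       opt, rocksdb_slicetransform_create_fixed_prefix(8));
//   rocksdb_options_set_plain_table_factory(
//       opt, 0 /* user_key_len */, 10 /* bloom_bits_per_key */,
//       0.75 /* hash_table_ratio */, 16 /* index_sparseness */);
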
void rocksdb_options_set_max_successive_merges(
    rocksdb_options_t* opt, size_t v) {
  opt->rep.max_successive_merges = v;
}

size_t rocksdb_options_get_max_successive_merges(rocksdb_options_t* opt) {
  return opt->rep.max_successive_merges;
}

void rocksdb_options_set_bloom_locality(
    rocksdb_options_t* opt, uint32_t v) {
  opt->rep.bloom_locality = v;
}

uint32_t rocksdb_options_get_bloom_locality(rocksdb_options_t* opt) {
  return opt->rep.bloom_locality;
}

void rocksdb_options_set_inplace_update_support(
    rocksdb_options_t* opt, unsigned char v) {
  opt->rep.inplace_update_support = v;
}

unsigned char rocksdb_options_get_inplace_update_support(
    rocksdb_options_t* opt) {
  return opt->rep.inplace_update_support;
}

void rocksdb_options_set_inplace_update_num_locks(
    rocksdb_options_t* opt, size_t v) {
  opt->rep.inplace_update_num_locks = v;
}

size_t rocksdb_options_get_inplace_update_num_locks(rocksdb_options_t* opt) {
  return opt->rep.inplace_update_num_locks;
}

void rocksdb_options_set_report_bg_io_stats(
    rocksdb_options_t* opt, int v) {
  opt->rep.report_bg_io_stats = v;
}

unsigned char rocksdb_options_get_report_bg_io_stats(rocksdb_options_t* opt) {
  return opt->rep.report_bg_io_stats;
}

void rocksdb_options_set_compaction_style(rocksdb_options_t* opt, int style) {
  opt->rep.compaction_style =
      static_cast<ROCKSDB_NAMESPACE::CompactionStyle>(style);
}

int rocksdb_options_get_compaction_style(rocksdb_options_t* opt) {
  return opt->rep.compaction_style;
}

void rocksdb_options_set_universal_compaction_options(rocksdb_options_t* opt, rocksdb_universal_compaction_options_t* uco) {
  opt->rep.compaction_options_universal = *(uco->rep);
}

void rocksdb_options_set_fifo_compaction_options(
    rocksdb_options_t* opt,
    rocksdb_fifo_compaction_options_t* fifo) {
  opt->rep.compaction_options_fifo = fifo->rep;
}

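// Usage sketch (assumptions: the rocksdb_universal_compaction_options_create
// and _destroy helpers exist in c.h): style 1 corresponds to
// kCompactionStyleUniversal, and the options struct is copied above, so the
// wrapper can be destroyed once it has been applied.
//
//   rocksdb_options_set_compaction_style(opt, 1);
//   rocksdb_universal_compaction_options_t* uco =
//       rocksdb_universal_compaction_options_create();
//   rocksdb_options_set_universal_compaction_options(opt, uco);
//   rocksdb_universal_compaction_options_destroy(uco);
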
char* rocksdb_options_statistics_get_string(rocksdb_options_t* opt) {
  ROCKSDB_NAMESPACE::Statistics* statistics = opt->rep.statistics.get();
  if (statistics) {
    return strdup(statistics->ToString().c_str());
  }
  return nullptr;
}

void rocksdb_options_set_ratelimiter(rocksdb_options_t* opt, rocksdb_ratelimiter_t* limiter) {
  if (limiter) {
    opt->rep.rate_limiter = limiter->rep;
  }
}

void rocksdb_options_set_atomic_flush(rocksdb_options_t* opt,
                                      unsigned char atomic_flush) {
  opt->rep.atomic_flush = atomic_flush;
}

unsigned char rocksdb_options_get_atomic_flush(rocksdb_options_t* opt) {
  return opt->rep.atomic_flush;
}

void rocksdb_options_set_manual_wal_flush(rocksdb_options_t* opt,
                                          unsigned char manual_wal_flush) {
  opt->rep.manual_wal_flush = manual_wal_flush;
}

unsigned char rocksdb_options_get_manual_wal_flush(rocksdb_options_t* opt) {
  return opt->rep.manual_wal_flush;
}

void rocksdb_options_set_wal_compression(rocksdb_options_t* opt, int val) {
  opt->rep.wal_compression = static_cast<CompressionType>(val);
}

int rocksdb_options_get_wal_compression(rocksdb_options_t* opt) {
  return opt->rep.wal_compression;
}

rocksdb_ratelimiter_t* rocksdb_ratelimiter_create(
    int64_t rate_bytes_per_sec,
    int64_t refill_period_us,
    int32_t fairness) {
  rocksdb_ratelimiter_t* rate_limiter = new rocksdb_ratelimiter_t;
  rate_limiter->rep.reset(
      NewGenericRateLimiter(rate_bytes_per_sec,
                            refill_period_us, fairness));
  return rate_limiter;
}

void rocksdb_ratelimiter_destroy(rocksdb_ratelimiter_t* limiter) {
  delete limiter;
}

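// Usage sketch (assumed numbers): a 16 MiB/s rate limiter with a 100 ms
// refill period and fairness 10, attached through the setter above. The
// option holds its own reference, so the wrapper can be destroyed afterwards.
//
//   rocksdb_ratelimiter_t* rl =
//       rocksdb_ratelimiter_create(16 * 1024 * 1024, 100 * 1000, 10);
//   rocksdb_options_set_ratelimiter(opt, rl);
//   rocksdb_ratelimiter_destroy(rl);
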
void rocksdb_options_set_row_cache(rocksdb_options_t* opt, rocksdb_cache_t* cache) {
  if (cache) {
    opt->rep.row_cache = cache->rep;
  }
}

void rocksdb_options_add_compact_on_deletion_collector_factory(
    rocksdb_options_t* opt, size_t window_size, size_t num_dels_trigger) {
  std::shared_ptr<ROCKSDB_NAMESPACE::TablePropertiesCollectorFactory>
      compact_on_del =
          NewCompactOnDeletionCollectorFactory(window_size, num_dels_trigger);
  opt->rep.table_properties_collector_factories.emplace_back(compact_on_del);
}

void rocksdb_set_perf_level(int v) {
  PerfLevel level = static_cast<PerfLevel>(v);
  SetPerfLevel(level);
}

rocksdb_perfcontext_t* rocksdb_perfcontext_create() {
  rocksdb_perfcontext_t* context = new rocksdb_perfcontext_t;
  context->rep = ROCKSDB_NAMESPACE::get_perf_context();
  return context;
}

void rocksdb_perfcontext_reset(rocksdb_perfcontext_t* context) {
  context->rep->Reset();
}

char* rocksdb_perfcontext_report(rocksdb_perfcontext_t* context,
                                 unsigned char exclude_zero_counters) {
  return strdup(context->rep->ToString(exclude_zero_counters).c_str());
}

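// Usage sketch of the perf-context helpers defined here (assumes
// rocksdb_perfcontext_destroy and the metric/level constants from c.h):
//
//   rocksdb_set_perf_level(level);  /* a perf level constant from c.h */
//   rocksdb_perfcontext_t* ctx = rocksdb_perfcontext_create();
//   rocksdb_perfcontext_reset(ctx);
//   /* ... run some reads ... */
//   uint64_t hits = rocksdb_perfcontext_metric(ctx, rocksdb_block_cache_hit_count);
//   char* report = rocksdb_perfcontext_report(ctx, 1 /* exclude zero counters */);
//   free(report);
//   rocksdb_perfcontext_destroy(ctx);
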
uint64_t rocksdb_perfcontext_metric(rocksdb_perfcontext_t* context,
                                    int metric) {
  PerfContext* rep = context->rep;
  switch (metric) {
    case rocksdb_user_key_comparison_count:
      return rep->user_key_comparison_count;
    case rocksdb_block_cache_hit_count:
      return rep->block_cache_hit_count;
    case rocksdb_block_read_count:
      return rep->block_read_count;
    case rocksdb_block_read_byte:
      return rep->block_read_byte;
    case rocksdb_block_read_time:
      return rep->block_read_time;
    case rocksdb_block_checksum_time:
      return rep->block_checksum_time;
    case rocksdb_block_decompress_time:
      return rep->block_decompress_time;
    case rocksdb_get_read_bytes:
      return rep->get_read_bytes;
    case rocksdb_multiget_read_bytes:
      return rep->multiget_read_bytes;
    case rocksdb_iter_read_bytes:
      return rep->iter_read_bytes;
    case rocksdb_internal_key_skipped_count:
      return rep->internal_key_skipped_count;
    case rocksdb_internal_delete_skipped_count:
      return rep->internal_delete_skipped_count;
    case rocksdb_internal_recent_skipped_count:
      return rep->internal_recent_skipped_count;
    case rocksdb_internal_merge_count:
      return rep->internal_merge_count;
    case rocksdb_get_snapshot_time:
      return rep->get_snapshot_time;
    case rocksdb_get_from_memtable_time:
      return rep->get_from_memtable_time;
    case rocksdb_get_from_memtable_count:
      return rep->get_from_memtable_count;
    case rocksdb_get_post_process_time:
      return rep->get_post_process_time;
    case rocksdb_get_from_output_files_time:
      return rep->get_from_output_files_time;
    case rocksdb_seek_on_memtable_time:
      return rep->seek_on_memtable_time;
    case rocksdb_seek_on_memtable_count:
      return rep->seek_on_memtable_count;
    case rocksdb_next_on_memtable_count:
      return rep->next_on_memtable_count;
    case rocksdb_prev_on_memtable_count:
      return rep->prev_on_memtable_count;
    case rocksdb_seek_child_seek_time:
      return rep->seek_child_seek_time;
    case rocksdb_seek_child_seek_count:
      return rep->seek_child_seek_count;
    case rocksdb_seek_min_heap_time:
      return rep->seek_min_heap_time;
    case rocksdb_seek_max_heap_time:
      return rep->seek_max_heap_time;
    case rocksdb_seek_internal_seek_time:
      return rep->seek_internal_seek_time;
    case rocksdb_find_next_user_entry_time:
      return rep->find_next_user_entry_time;
    case rocksdb_write_wal_time:
      return rep->write_wal_time;
    case rocksdb_write_memtable_time:
      return rep->write_memtable_time;
    case rocksdb_write_delay_time:
      return rep->write_delay_time;
    case rocksdb_write_pre_and_post_process_time:
      return rep->write_pre_and_post_process_time;
    case rocksdb_db_mutex_lock_nanos:
      return rep->db_mutex_lock_nanos;
    case rocksdb_db_condition_wait_nanos:
      return rep->db_condition_wait_nanos;
    case rocksdb_merge_operator_time_nanos:
      return rep->merge_operator_time_nanos;
    case rocksdb_read_index_block_nanos:
      return rep->read_index_block_nanos;
    case rocksdb_read_filter_block_nanos:
      return rep->read_filter_block_nanos;
    case rocksdb_new_table_block_iter_nanos:
      return rep->new_table_block_iter_nanos;
    case rocksdb_new_table_iterator_nanos:
      return rep->new_table_iterator_nanos;
    case rocksdb_block_seek_nanos:
      return rep->block_seek_nanos;
    case rocksdb_find_table_nanos:
      return rep->find_table_nanos;
    case rocksdb_bloom_memtable_hit_count:
      return rep->bloom_memtable_hit_count;
    case rocksdb_bloom_memtable_miss_count:
      return rep->bloom_memtable_miss_count;
    case rocksdb_bloom_sst_hit_count:
      return rep->bloom_sst_hit_count;
    case rocksdb_bloom_sst_miss_count:
      return rep->bloom_sst_miss_count;
    case rocksdb_key_lock_wait_time:
      return rep->key_lock_wait_time;
    case rocksdb_key_lock_wait_count:
      return rep->key_lock_wait_count;
    case rocksdb_env_new_sequential_file_nanos:
      return rep->env_new_sequential_file_nanos;
    case rocksdb_env_new_random_access_file_nanos:
      return rep->env_new_random_access_file_nanos;
    case rocksdb_env_new_writable_file_nanos:
      return rep->env_new_writable_file_nanos;
    case rocksdb_env_reuse_writable_file_nanos:
      return rep->env_reuse_writable_file_nanos;
    case rocksdb_env_new_random_rw_file_nanos:
      return rep->env_new_random_rw_file_nanos;
    case rocksdb_env_new_directory_nanos:
      return rep->env_new_directory_nanos;
    case rocksdb_env_file_exists_nanos:
      return rep->env_file_exists_nanos;
    case rocksdb_env_get_children_nanos:
      return rep->env_get_children_nanos;
    case rocksdb_env_get_children_file_attributes_nanos:
      return rep->env_get_children_file_attributes_nanos;
    case rocksdb_env_delete_file_nanos:
      return rep->env_delete_file_nanos;
    case rocksdb_env_create_dir_nanos:
      return rep->env_create_dir_nanos;
    case rocksdb_env_create_dir_if_missing_nanos:
      return rep->env_create_dir_if_missing_nanos;
    case rocksdb_env_delete_dir_nanos:
      return rep->env_delete_dir_nanos;
    case rocksdb_env_get_file_size_nanos:
      return rep->env_get_file_size_nanos;
    case rocksdb_env_get_file_modification_time_nanos:
      return rep->env_get_file_modification_time_nanos;
    case rocksdb_env_rename_file_nanos:
      return rep->env_rename_file_nanos;
    case rocksdb_env_link_file_nanos:
      return rep->env_link_file_nanos;
    case rocksdb_env_lock_file_nanos:
      return rep->env_lock_file_nanos;
    case rocksdb_env_unlock_file_nanos:
      return rep->env_unlock_file_nanos;
    case rocksdb_env_new_logger_nanos:
      return rep->env_new_logger_nanos;
    case rocksdb_number_async_seek:
      return rep->number_async_seek;
    case rocksdb_blob_cache_hit_count:
      return rep->blob_cache_hit_count;
    case rocksdb_blob_read_count:
      return rep->blob_read_count;
    case rocksdb_blob_read_byte:
      return rep->blob_read_byte;
    case rocksdb_blob_read_time:
      return rep->blob_read_time;
    case rocksdb_blob_checksum_time:
      return rep->blob_checksum_time;
    case rocksdb_blob_decompress_time:
      return rep->blob_decompress_time;
    case rocksdb_internal_range_del_reseek_count:
      return rep->internal_range_del_reseek_count;
    default:
      break;
  }
  return 0;
}

void rocksdb_perfcontext_destroy(rocksdb_perfcontext_t* context) {
  delete context;
}
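
// Illustrative usage sketch (not part of the library): how a caller of the C
// API might sample these counters around a read. The perf level value and the
// metric enumerators are assumed to match the definitions in rocksdb/c.h;
// setup of `db` and `read_opts` and error handling are elided.
//
//   rocksdb_set_perf_level(rocksdb_enable_time_except_for_mutex);
//   rocksdb_perfcontext_t* ctx = rocksdb_perfcontext_create();
//   size_t vlen = 0;
//   char* err = NULL;
//   char* v = rocksdb_get(db, read_opts, "key", 3, &vlen, &err);
//   uint64_t memtable_nanos =
//       rocksdb_perfcontext_metric(ctx, rocksdb_get_from_memtable_time);
//   free(v);
//   rocksdb_perfcontext_destroy(ctx);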

/*
TODO:
DB::OpenForReadOnly
DB::KeyMayExist
DB::GetOptions
DB::GetSortedWalFiles
DB::GetLatestSequenceNumber
DB::GetUpdatesSince
DB::GetDbIdentity
DB::RunManualCompaction
custom cache
table_properties_collectors
*/

rocksdb_compactionfilter_t* rocksdb_compactionfilter_create(
    void* state,
    void (*destructor)(void*),
    unsigned char (*filter)(
        void*,
        int level,
        const char* key, size_t key_length,
        const char* existing_value, size_t value_length,
        char** new_value, size_t* new_value_length,
        unsigned char* value_changed),
    const char* (*name)(void*)) {
  rocksdb_compactionfilter_t* result = new rocksdb_compactionfilter_t;
  result->state_ = state;
  result->destructor_ = destructor;
  result->filter_ = filter;
  result->ignore_snapshots_ = true;
  result->name_ = name;
  return result;
}

void rocksdb_compactionfilter_set_ignore_snapshots(
    rocksdb_compactionfilter_t* filter,
    unsigned char whether_ignore) {
  filter->ignore_snapshots_ = whether_ignore;
}

void rocksdb_compactionfilter_destroy(rocksdb_compactionfilter_t* filter) {
  delete filter;
}
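
// Illustrative usage sketch (not part of the library): a client-side filter
// that drops every key whose value is empty. The callback signatures follow
// rocksdb_compactionfilter_create() above; `FilterDestroy` and `FilterName`
// are hypothetical caller callbacks, and error handling is elided.
//
//   static unsigned char DropEmptyValues(void* state, int level,
//                                        const char* key, size_t key_length,
//                                        const char* existing_value,
//                                        size_t value_length, char** new_value,
//                                        size_t* new_value_length,
//                                        unsigned char* value_changed) {
//     (void)state; (void)level; (void)key; (void)key_length;
//     (void)existing_value; (void)new_value; (void)new_value_length;
//     *value_changed = 0;
//     return value_length == 0;  /* 1 drops the entry, 0 keeps it */
//   }
//
//   rocksdb_compactionfilter_t* cf = rocksdb_compactionfilter_create(
//       NULL, FilterDestroy, DropEmptyValues, FilterName);
//   rocksdb_options_set_compaction_filter(options, cf);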

unsigned char rocksdb_compactionfiltercontext_is_full_compaction(
    rocksdb_compactionfiltercontext_t* context) {
  return context->rep.is_full_compaction;
}

unsigned char rocksdb_compactionfiltercontext_is_manual_compaction(
    rocksdb_compactionfiltercontext_t* context) {
  return context->rep.is_manual_compaction;
}

rocksdb_compactionfilterfactory_t* rocksdb_compactionfilterfactory_create(
    void* state, void (*destructor)(void*),
    rocksdb_compactionfilter_t* (*create_compaction_filter)(
        void*, rocksdb_compactionfiltercontext_t* context),
    const char* (*name)(void*)) {
  rocksdb_compactionfilterfactory_t* result =
      new rocksdb_compactionfilterfactory_t;
  result->state_ = state;
  result->destructor_ = destructor;
  result->create_compaction_filter_ = create_compaction_filter;
  result->name_ = name;
  return result;
}

void rocksdb_compactionfilterfactory_destroy(
    rocksdb_compactionfilterfactory_t* factory) {
  delete factory;
}
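
// Illustrative usage sketch (not part of the library): a factory that hands
// out a fresh filter per compaction and inspects the context, reusing the
// hypothetical callbacks from the sketch above. `MakeFilter`, `FactoryDestroy`,
// and `FactoryName` are likewise hypothetical caller code.
//
//   static rocksdb_compactionfilter_t* MakeFilter(
//       void* state, rocksdb_compactionfiltercontext_t* context) {
//     if (rocksdb_compactionfiltercontext_is_manual_compaction(context)) {
//       /* e.g. filter more aggressively during manual compactions */
//     }
//     return rocksdb_compactionfilter_create(state, FilterDestroy,
//                                            DropEmptyValues, FilterName);
//   }
//
//   rocksdb_compactionfilterfactory_t* factory =
//       rocksdb_compactionfilterfactory_create(NULL, FactoryDestroy,
//                                              MakeFilter, FactoryName);
//   rocksdb_options_set_compaction_filter_factory(options, factory);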

rocksdb_comparator_t* rocksdb_comparator_create(
    void* state,
    void (*destructor)(void*),
    int (*compare)(
        void*,
        const char* a, size_t alen,
        const char* b, size_t blen),
    const char* (*name)(void*)) {
  rocksdb_comparator_t* result = new rocksdb_comparator_t;
  result->state_ = state;
  result->destructor_ = destructor;
  result->compare_ = compare;
  result->name_ = name;
  result->compare_ts_ = nullptr;
  result->compare_without_ts_ = nullptr;
  return result;
}

void rocksdb_comparator_destroy(rocksdb_comparator_t* cmp) { delete cmp; }

rocksdb_comparator_t* rocksdb_comparator_with_ts_create(
    void* state, void (*destructor)(void*),
    int (*compare)(void*, const char* a, size_t alen, const char* b,
                   size_t blen),
    int (*compare_ts)(void*, const char* a_ts, size_t a_tslen, const char* b_ts,
                      size_t b_tslen),
    int (*compare_without_ts)(void*, const char* a, size_t alen,
                              unsigned char a_has_ts, const char* b,
                              size_t blen, unsigned char b_has_ts),
    const char* (*name)(void*), size_t timestamp_size) {
  rocksdb_comparator_t* result = new rocksdb_comparator_t(timestamp_size);
  result->state_ = state;
  result->destructor_ = destructor;
  result->compare_ = compare;
  result->compare_ts_ = compare_ts;
  result->compare_without_ts_ = compare_without_ts;
  result->name_ = name;
  return result;
}
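
// Illustrative usage sketch (not part of the library): a reverse bytewise
// comparator supplied from C. `CmpDestroy` and `CmpName` are hypothetical
// caller callbacks; the compare function must impose a total order and the
// reported name must stay stable across DB openings.
//
//   static int ReverseCompare(void* state, const char* a, size_t alen,
//                             const char* b, size_t blen) {
//     (void)state;
//     size_t n = alen < blen ? alen : blen;
//     int r = memcmp(a, b, n);
//     if (r == 0) r = (alen < blen) ? -1 : (alen > blen) ? 1 : 0;
//     return -r;  /* invert to sort keys in descending order */
//   }
//
//   rocksdb_comparator_t* cmp =
//       rocksdb_comparator_create(NULL, CmpDestroy, ReverseCompare, CmpName);
//   rocksdb_options_set_comparator(options, cmp);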

void rocksdb_filterpolicy_destroy(rocksdb_filterpolicy_t* filter) {
  delete filter;
}

rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_bloom_format(
    double bits_per_key, bool original_format) {
  // Make a rocksdb_filterpolicy_t, but override all of its methods so
  // they delegate to a NewBloomFilterPolicy() instead of user
  // supplied C functions.
  struct Wrapper : public rocksdb_filterpolicy_t {
    const FilterPolicy* rep_;
    ~Wrapper() override { delete rep_; }
    const char* Name() const override { return rep_->Name(); }
    const char* CompatibilityName() const override {
      return rep_->CompatibilityName();
    }
    // No need to override GetFilterBitsBuilder if this one is overridden
    ROCKSDB_NAMESPACE::FilterBitsBuilder* GetBuilderWithContext(
        const ROCKSDB_NAMESPACE::FilterBuildingContext& context)
        const override {
      return rep_->GetBuilderWithContext(context);
    }
    ROCKSDB_NAMESPACE::FilterBitsReader* GetFilterBitsReader(
        const Slice& contents) const override {
      return rep_->GetFilterBitsReader(contents);
    }
    static void DoNothing(void*) {}
  };
  Wrapper* wrapper = new Wrapper;
  wrapper->rep_ = NewBloomFilterPolicy(bits_per_key, original_format);
  wrapper->state_ = nullptr;
  wrapper->destructor_ = &Wrapper::DoNothing;
  return wrapper;
}

rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_bloom_full(
    double bits_per_key) {
  return rocksdb_filterpolicy_create_bloom_format(bits_per_key, false);
}

rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_bloom(double bits_per_key) {
  return rocksdb_filterpolicy_create_bloom_format(bits_per_key, true);
}
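
// Illustrative usage sketch (not part of the library): attaching a Bloom
// filter policy to the block-based table factory through the C API. The
// block-based-options calls are assumed to match rocksdb/c.h, and (as in the
// bundled c_test) the table options are assumed to take ownership of the
// policy, so the caller does not destroy it separately.
//
//   rocksdb_block_based_table_options_t* table_opts =
//       rocksdb_block_based_options_create();
//   rocksdb_block_based_options_set_filter_policy(
//       table_opts, rocksdb_filterpolicy_create_bloom(10.0));
//   rocksdb_options_set_block_based_table_factory(options, table_opts);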

rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_ribbon_format(
    double bloom_equivalent_bits_per_key, int bloom_before_level) {
  // Make a rocksdb_filterpolicy_t, but override all of its methods so
  // they delegate to a NewRibbonFilterPolicy() instead of user
  // supplied C functions.
  struct Wrapper : public rocksdb_filterpolicy_t {
    const FilterPolicy* rep_;
    ~Wrapper() override { delete rep_; }
    const char* Name() const override { return rep_->Name(); }
    const char* CompatibilityName() const override {
      return rep_->CompatibilityName();
    }
    ROCKSDB_NAMESPACE::FilterBitsBuilder* GetBuilderWithContext(
        const ROCKSDB_NAMESPACE::FilterBuildingContext& context)
        const override {
      return rep_->GetBuilderWithContext(context);
    }
    ROCKSDB_NAMESPACE::FilterBitsReader* GetFilterBitsReader(
        const Slice& contents) const override {
      return rep_->GetFilterBitsReader(contents);
    }
    static void DoNothing(void*) {}
  };
  Wrapper* wrapper = new Wrapper;
  wrapper->rep_ =
      NewRibbonFilterPolicy(bloom_equivalent_bits_per_key, bloom_before_level);
  wrapper->state_ = nullptr;
  wrapper->destructor_ = &Wrapper::DoNothing;
  return wrapper;
}

rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_ribbon(
    double bloom_equivalent_bits_per_key) {
  return rocksdb_filterpolicy_create_ribbon_format(
      bloom_equivalent_bits_per_key, /*bloom_before_level = disabled*/ -1);
}

rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_ribbon_hybrid(
    double bloom_equivalent_bits_per_key, int bloom_before_level) {
  return rocksdb_filterpolicy_create_ribbon_format(
      bloom_equivalent_bits_per_key, bloom_before_level);
}
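
// Illustrative usage sketch (not part of the library): a hybrid policy that
// uses Bloom for data going to levels below `bloom_before_level` (flushes are
// treated as level -1) and Ribbon elsewhere, per NewRibbonFilterPolicy(). The
// block-based-options wiring is the same as in the Bloom sketch above.
//
//   rocksdb_filterpolicy_t* policy =
//       rocksdb_filterpolicy_create_ribbon_hybrid(
//           /*bloom_equivalent_bits_per_key=*/10.0, /*bloom_before_level=*/1);
//   rocksdb_block_based_options_set_filter_policy(table_opts, policy);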

rocksdb_mergeoperator_t* rocksdb_mergeoperator_create(
    void* state, void (*destructor)(void*),
    char* (*full_merge)(void*, const char* key, size_t key_length,
                        const char* existing_value,
                        size_t existing_value_length,
                        const char* const* operands_list,
                        const size_t* operands_list_length, int num_operands,
                        unsigned char* success, size_t* new_value_length),
    char* (*partial_merge)(void*, const char* key, size_t key_length,
                           const char* const* operands_list,
                           const size_t* operands_list_length,
                           int num_operands, unsigned char* success,
                           size_t* new_value_length),
    void (*delete_value)(void*, const char* value, size_t value_length),
    const char* (*name)(void*)) {
  rocksdb_mergeoperator_t* result = new rocksdb_mergeoperator_t;
  result->state_ = state;
  result->destructor_ = destructor;
  result->full_merge_ = full_merge;
  result->partial_merge_ = partial_merge;
  result->delete_value_ = delete_value;
  result->name_ = name;
  return result;
}

void rocksdb_mergeoperator_destroy(rocksdb_mergeoperator_t* merge_operator) {
  delete merge_operator;
}
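
// Illustrative usage sketch (not part of the library): a "last operand wins"
// merge operator written against the callback signatures above. Passing NULL
// for delete_value is assumed (as in the bundled c_test) to mean the returned
// buffer was allocated with malloc() and is released with free().
// `PartialMergeLastWins`, `MergeDestroy`, and `MergeName` are hypothetical
// caller callbacks; the partial-merge one follows the partial_merge signature.
//
//   static char* FullMergeLastWins(void* state, const char* key,
//                                  size_t key_length,
//                                  const char* existing_value,
//                                  size_t existing_value_length,
//                                  const char* const* operands_list,
//                                  const size_t* operands_list_length,
//                                  int num_operands, unsigned char* success,
//                                  size_t* new_value_length) {
//     size_t n = operands_list_length[num_operands - 1];
//     char* result = (char*)malloc(n);
//     memcpy(result, operands_list[num_operands - 1], n);
//     *new_value_length = n;
//     *success = 1;
//     return result;
//   }
//
//   rocksdb_mergeoperator_t* mo = rocksdb_mergeoperator_create(
//       NULL, MergeDestroy, FullMergeLastWins, PartialMergeLastWins,
//       NULL /*delete_value: use free()*/, MergeName);
//   rocksdb_options_set_merge_operator(options, mo);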

rocksdb_readoptions_t* rocksdb_readoptions_create() {
  return new rocksdb_readoptions_t;
}

void rocksdb_readoptions_destroy(rocksdb_readoptions_t* opt) {
  delete opt;
}

void rocksdb_readoptions_set_verify_checksums(
    rocksdb_readoptions_t* opt,
    unsigned char v) {
  opt->rep.verify_checksums = v;
}

unsigned char rocksdb_readoptions_get_verify_checksums(
    rocksdb_readoptions_t* opt) {
  return opt->rep.verify_checksums;
}

void rocksdb_readoptions_set_fill_cache(
    rocksdb_readoptions_t* opt, unsigned char v) {
  opt->rep.fill_cache = v;
}

unsigned char rocksdb_readoptions_get_fill_cache(rocksdb_readoptions_t* opt) {
  return opt->rep.fill_cache;
}

void rocksdb_readoptions_set_snapshot(
    rocksdb_readoptions_t* opt,
    const rocksdb_snapshot_t* snap) {
  opt->rep.snapshot = (snap ? snap->rep : nullptr);
}

void rocksdb_readoptions_set_iterate_upper_bound(
    rocksdb_readoptions_t* opt,
    const char* key, size_t keylen) {
  if (key == nullptr) {
    opt->upper_bound = Slice();
    opt->rep.iterate_upper_bound = nullptr;
  } else {
    opt->upper_bound = Slice(key, keylen);
    opt->rep.iterate_upper_bound = &opt->upper_bound;
  }
}
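
// Illustrative usage sketch (not part of the library): bounded iteration over
// a key range via the C API. Note that the Slice stored above points into the
// caller's buffer, so `upper` must stay alive for as long as the read options
// are used. Setup of `db` and error handling are elided.
//
//   const char upper[] = "user2000";
//   rocksdb_readoptions_t* ro = rocksdb_readoptions_create();
//   rocksdb_readoptions_set_iterate_upper_bound(ro, upper, sizeof(upper) - 1);
//   rocksdb_iterator_t* it = rocksdb_create_iterator(db, ro);
//   for (rocksdb_iter_seek(it, "user1000", 8); rocksdb_iter_valid(it);
//        rocksdb_iter_next(it)) {
//     size_t klen;
//     const char* k = rocksdb_iter_key(it, &klen);
//     /* keys seen here are all < "user2000" */
//   }
//   rocksdb_iter_destroy(it);
//   rocksdb_readoptions_destroy(ro);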

void rocksdb_readoptions_set_iterate_lower_bound(
    rocksdb_readoptions_t* opt,
    const char* key, size_t keylen) {
  if (key == nullptr) {
    opt->lower_bound = Slice();
    opt->rep.iterate_lower_bound = nullptr;
  } else {
    opt->lower_bound = Slice(key, keylen);
    opt->rep.iterate_lower_bound = &opt->lower_bound;
  }
}

void rocksdb_readoptions_set_read_tier(
    rocksdb_readoptions_t* opt, int v) {
  opt->rep.read_tier = static_cast<ROCKSDB_NAMESPACE::ReadTier>(v);
}

int rocksdb_readoptions_get_read_tier(rocksdb_readoptions_t* opt) {
  return static_cast<int>(opt->rep.read_tier);
}

void rocksdb_readoptions_set_tailing(
    rocksdb_readoptions_t* opt, unsigned char v) {
  opt->rep.tailing = v;
}

unsigned char rocksdb_readoptions_get_tailing(rocksdb_readoptions_t* opt) {
  return opt->rep.tailing;
}

void rocksdb_readoptions_set_managed(
    rocksdb_readoptions_t* opt, unsigned char v) {
  opt->rep.managed = v;
}

void rocksdb_readoptions_set_readahead_size(
    rocksdb_readoptions_t* opt, size_t v) {
  opt->rep.readahead_size = v;
}

size_t rocksdb_readoptions_get_readahead_size(rocksdb_readoptions_t* opt) {
  return opt->rep.readahead_size;
}

void rocksdb_readoptions_set_prefix_same_as_start(
    rocksdb_readoptions_t* opt, unsigned char v) {
  opt->rep.prefix_same_as_start = v;
}

unsigned char rocksdb_readoptions_get_prefix_same_as_start(
    rocksdb_readoptions_t* opt) {
  return opt->rep.prefix_same_as_start;
}

void rocksdb_readoptions_set_pin_data(rocksdb_readoptions_t* opt,
                                      unsigned char v) {
  opt->rep.pin_data = v;
}

unsigned char rocksdb_readoptions_get_pin_data(rocksdb_readoptions_t* opt) {
  return opt->rep.pin_data;
}

void rocksdb_readoptions_set_total_order_seek(rocksdb_readoptions_t* opt,
                                              unsigned char v) {
  opt->rep.total_order_seek = v;
}

unsigned char rocksdb_readoptions_get_total_order_seek(
    rocksdb_readoptions_t* opt) {
  return opt->rep.total_order_seek;
}

void rocksdb_readoptions_set_max_skippable_internal_keys(
    rocksdb_readoptions_t* opt,
    uint64_t v) {
  opt->rep.max_skippable_internal_keys = v;
}

uint64_t rocksdb_readoptions_get_max_skippable_internal_keys(
    rocksdb_readoptions_t* opt) {
  return opt->rep.max_skippable_internal_keys;
}

void rocksdb_readoptions_set_background_purge_on_iterator_cleanup(
    rocksdb_readoptions_t* opt, unsigned char v) {
  opt->rep.background_purge_on_iterator_cleanup = v;
}

unsigned char rocksdb_readoptions_get_background_purge_on_iterator_cleanup(
    rocksdb_readoptions_t* opt) {
  return opt->rep.background_purge_on_iterator_cleanup;
}

void rocksdb_readoptions_set_ignore_range_deletions(
    rocksdb_readoptions_t* opt, unsigned char v) {
  opt->rep.ignore_range_deletions = v;
}

unsigned char rocksdb_readoptions_get_ignore_range_deletions(
    rocksdb_readoptions_t* opt) {
  return opt->rep.ignore_range_deletions;
}

void rocksdb_readoptions_set_deadline(rocksdb_readoptions_t* opt,
                                      uint64_t microseconds) {
  opt->rep.deadline = std::chrono::microseconds(microseconds);
}

uint64_t rocksdb_readoptions_get_deadline(rocksdb_readoptions_t* opt) {
  return opt->rep.deadline.count();
}

void rocksdb_readoptions_set_io_timeout(rocksdb_readoptions_t* opt,
                                        uint64_t microseconds) {
  opt->rep.io_timeout = std::chrono::microseconds(microseconds);
}

extern ROCKSDB_LIBRARY_API uint64_t
rocksdb_readoptions_get_io_timeout(rocksdb_readoptions_t* opt) {
  return opt->rep.io_timeout.count();
}

void rocksdb_readoptions_set_timestamp(rocksdb_readoptions_t* opt,
                                       const char* ts, size_t tslen) {
  if (ts == nullptr) {
    opt->timestamp = Slice();
    opt->rep.timestamp = nullptr;
  } else {
    opt->timestamp = Slice(ts, tslen);
    opt->rep.timestamp = &opt->timestamp;
  }
}

void rocksdb_readoptions_set_iter_start_ts(rocksdb_readoptions_t* opt,
                                           const char* ts, size_t tslen) {
  if (ts == nullptr) {
    opt->iter_start_ts = Slice();
    opt->rep.iter_start_ts = nullptr;
  } else {
    opt->iter_start_ts = Slice(ts, tslen);
    opt->rep.iter_start_ts = &opt->iter_start_ts;
  }
}
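
// Illustrative usage sketch (not part of the library): a point lookup "as of"
// a user-defined timestamp. This assumes the column family was opened with a
// timestamp-aware comparator (e.g. one created with
// rocksdb_comparator_with_ts_create) and that the encoded timestamp width
// matches it; as with the bounds above, the `ts` buffer must outlive any use
// of the read options. Error handling is elided.
//
//   char ts[8];
//   /* encode the timestamp into ts[] in the comparator's format */
//   rocksdb_readoptions_t* ro = rocksdb_readoptions_create();
//   rocksdb_readoptions_set_timestamp(ro, ts, sizeof(ts));
//   size_t vlen = 0;
//   char* err = NULL;
//   char* val = rocksdb_get(db, ro, "key", 3, &vlen, &err);
//   free(val);
//   rocksdb_readoptions_destroy(ro);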

rocksdb_writeoptions_t* rocksdb_writeoptions_create() {
  return new rocksdb_writeoptions_t;
}

void rocksdb_writeoptions_destroy(rocksdb_writeoptions_t* opt) {
  delete opt;
}

void rocksdb_writeoptions_set_sync(
    rocksdb_writeoptions_t* opt, unsigned char v) {
  opt->rep.sync = v;
}

unsigned char rocksdb_writeoptions_get_sync(rocksdb_writeoptions_t* opt) {
  return opt->rep.sync;
}

void rocksdb_writeoptions_disable_WAL(rocksdb_writeoptions_t* opt,
                                      int disable) {
  opt->rep.disableWAL = disable;
}

unsigned char rocksdb_writeoptions_get_disable_WAL(
    rocksdb_writeoptions_t* opt) {
  return opt->rep.disableWAL;
}

void rocksdb_writeoptions_set_ignore_missing_column_families(
    rocksdb_writeoptions_t* opt, unsigned char v) {
  opt->rep.ignore_missing_column_families = v;
}

unsigned char rocksdb_writeoptions_get_ignore_missing_column_families(
    rocksdb_writeoptions_t* opt) {
  return opt->rep.ignore_missing_column_families;
}

void rocksdb_writeoptions_set_no_slowdown(rocksdb_writeoptions_t* opt,
                                          unsigned char v) {
  opt->rep.no_slowdown = v;
}

unsigned char rocksdb_writeoptions_get_no_slowdown(
    rocksdb_writeoptions_t* opt) {
  return opt->rep.no_slowdown;
}

void rocksdb_writeoptions_set_low_pri(rocksdb_writeoptions_t* opt,
                                      unsigned char v) {
  opt->rep.low_pri = v;
}

unsigned char rocksdb_writeoptions_get_low_pri(rocksdb_writeoptions_t* opt) {
  return opt->rep.low_pri;
}

void rocksdb_writeoptions_set_memtable_insert_hint_per_batch(
    rocksdb_writeoptions_t* opt, unsigned char v) {
  opt->rep.memtable_insert_hint_per_batch = v;
}

unsigned char rocksdb_writeoptions_get_memtable_insert_hint_per_batch(
    rocksdb_writeoptions_t* opt) {
  return opt->rep.memtable_insert_hint_per_batch;
}
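
// Illustrative usage sketch (not part of the library): trading durability for
// write throughput from C by skipping the WAL during a bulk load, then issuing
// a synced write at the end. Setup of `db` and error handling are elided.
//
//   rocksdb_writeoptions_t* wo = rocksdb_writeoptions_create();
//   rocksdb_writeoptions_disable_WAL(wo, 1);
//   char* err = NULL;
//   rocksdb_put(db, wo, "k1", 2, "v1", 2, &err);
//   rocksdb_writeoptions_disable_WAL(wo, 0);
//   rocksdb_writeoptions_set_sync(wo, 1);
//   rocksdb_put(db, wo, "k2", 2, "v2", 2, &err);
//   rocksdb_writeoptions_destroy(wo);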

rocksdb_compactoptions_t* rocksdb_compactoptions_create() {
  return new rocksdb_compactoptions_t;
}

void rocksdb_compactoptions_destroy(rocksdb_compactoptions_t* opt) {
  delete opt;
}

void rocksdb_compactoptions_set_bottommost_level_compaction(
    rocksdb_compactoptions_t* opt, unsigned char v) {
  opt->rep.bottommost_level_compaction =
      static_cast<BottommostLevelCompaction>(v);
}

unsigned char rocksdb_compactoptions_get_bottommost_level_compaction(
    rocksdb_compactoptions_t* opt) {
  return static_cast<unsigned char>(opt->rep.bottommost_level_compaction);
}

void rocksdb_compactoptions_set_exclusive_manual_compaction(
    rocksdb_compactoptions_t* opt, unsigned char v) {
  opt->rep.exclusive_manual_compaction = v;
}

unsigned char rocksdb_compactoptions_get_exclusive_manual_compaction(
    rocksdb_compactoptions_t* opt) {
  return opt->rep.exclusive_manual_compaction;
}

void rocksdb_compactoptions_set_change_level(rocksdb_compactoptions_t* opt,
                                             unsigned char v) {
  opt->rep.change_level = v;
}

unsigned char rocksdb_compactoptions_get_change_level(
    rocksdb_compactoptions_t* opt) {
  return opt->rep.change_level;
}

void rocksdb_compactoptions_set_target_level(rocksdb_compactoptions_t* opt,
                                             int n) {
  opt->rep.target_level = n;
}

int rocksdb_compactoptions_get_target_level(rocksdb_compactoptions_t* opt) {
  return opt->rep.target_level;
}

void rocksdb_compactoptions_set_full_history_ts_low(
    rocksdb_compactoptions_t* opt, char* ts, size_t tslen) {
  if (ts == nullptr) {
    opt->full_history_ts_low = Slice();
    opt->rep.full_history_ts_low = nullptr;
  } else {
    opt->full_history_ts_low = Slice(ts, tslen);
    opt->rep.full_history_ts_low = &opt->full_history_ts_low;
  }
}
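
// Illustrative usage sketch (not part of the library): a full manual
// compaction driven through these options. rocksdb_compact_range_opt() is
// assumed to match rocksdb/c.h; a NULL start/limit key means "from the
// beginning" / "to the end" of the key space.
//
//   rocksdb_compactoptions_t* co = rocksdb_compactoptions_create();
//   rocksdb_compactoptions_set_exclusive_manual_compaction(co, 1);
//   rocksdb_compact_range_opt(db, co, NULL, 0, NULL, 0);
//   rocksdb_compactoptions_destroy(co);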

rocksdb_flushoptions_t* rocksdb_flushoptions_create() {
  return new rocksdb_flushoptions_t;
}

void rocksdb_flushoptions_destroy(rocksdb_flushoptions_t* opt) {
  delete opt;
}

void rocksdb_flushoptions_set_wait(
    rocksdb_flushoptions_t* opt, unsigned char v) {
  opt->rep.wait = v;
}

unsigned char rocksdb_flushoptions_get_wait(rocksdb_flushoptions_t* opt) {
  return opt->rep.wait;
}

rocksdb_memory_allocator_t* rocksdb_jemalloc_nodump_allocator_create(
    char** errptr) {
  rocksdb_memory_allocator_t* allocator = new rocksdb_memory_allocator_t;
  ROCKSDB_NAMESPACE::JemallocAllocatorOptions options;
  SaveError(errptr, ROCKSDB_NAMESPACE::NewJemallocNodumpAllocator(
                        options, &allocator->rep));
  return allocator;
}

void rocksdb_memory_allocator_destroy(rocksdb_memory_allocator_t* allocator) {
  delete allocator;
}

rocksdb_lru_cache_options_t* rocksdb_lru_cache_options_create() {
  return new rocksdb_lru_cache_options_t;
}

void rocksdb_lru_cache_options_destroy(rocksdb_lru_cache_options_t* opt) {
  delete opt;
}

void rocksdb_lru_cache_options_set_capacity(rocksdb_lru_cache_options_t* opt,
                                            size_t capacity) {
  opt->rep.capacity = capacity;
}

void rocksdb_lru_cache_options_set_num_shard_bits(
    rocksdb_lru_cache_options_t* opt, int num_shard_bits) {
  opt->rep.num_shard_bits = num_shard_bits;
}

void rocksdb_lru_cache_options_set_memory_allocator(
    rocksdb_lru_cache_options_t* opt, rocksdb_memory_allocator_t* allocator) {
  opt->rep.memory_allocator = allocator->rep;
}

rocksdb_cache_t* rocksdb_cache_create_lru(size_t capacity) {
  rocksdb_cache_t* c = new rocksdb_cache_t;
  c->rep = NewLRUCache(capacity);
  return c;
}

rocksdb_cache_t* rocksdb_cache_create_lru_with_strict_capacity_limit(
    size_t capacity) {
  rocksdb_cache_t* c = new rocksdb_cache_t;
  c->rep = NewLRUCache(capacity);
  c->rep->SetStrictCapacityLimit(true);
  return c;
}

rocksdb_cache_t* rocksdb_cache_create_lru_opts(
    rocksdb_lru_cache_options_t* opt) {
  rocksdb_cache_t* c = new rocksdb_cache_t;
  c->rep = NewLRUCache(opt->rep);
  return c;
}
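
// Illustrative usage sketch (not part of the library): building a sharded LRU
// block cache from the options struct above and plugging it into the
// block-based table factory. The block-based-options calls are assumed to
// match rocksdb/c.h; the cache copies the option struct, so it can be
// destroyed right after creation.
//
//   rocksdb_lru_cache_options_t* co = rocksdb_lru_cache_options_create();
//   rocksdb_lru_cache_options_set_capacity(co, 512 * 1024 * 1024);
//   rocksdb_lru_cache_options_set_num_shard_bits(co, 6);
//   rocksdb_cache_t* cache = rocksdb_cache_create_lru_opts(co);
//   rocksdb_lru_cache_options_destroy(co);
//   rocksdb_block_based_table_options_t* table_opts =
//       rocksdb_block_based_options_create();
//   rocksdb_block_based_options_set_block_cache(table_opts, cache);
//   rocksdb_options_set_block_based_table_factory(options, table_opts);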

void rocksdb_cache_destroy(rocksdb_cache_t* cache) {
  delete cache;
}

void rocksdb_cache_disown_data(rocksdb_cache_t* cache) {
  cache->rep->DisownData();
}

void rocksdb_cache_set_capacity(rocksdb_cache_t* cache, size_t capacity) {
  cache->rep->SetCapacity(capacity);
}

size_t rocksdb_cache_get_capacity(rocksdb_cache_t* cache) {
  return cache->rep->GetCapacity();
}

size_t rocksdb_cache_get_usage(rocksdb_cache_t* cache) {
  return cache->rep->GetUsage();
}

size_t rocksdb_cache_get_pinned_usage(rocksdb_cache_t* cache) {
  return cache->rep->GetPinnedUsage();
}

rocksdb_dbpath_t* rocksdb_dbpath_create(const char* path,
                                        uint64_t target_size) {
  rocksdb_dbpath_t* result = new rocksdb_dbpath_t;
  result->rep.path = std::string(path);
  result->rep.target_size = target_size;
  return result;
}

void rocksdb_dbpath_destroy(rocksdb_dbpath_t* dbpath) {
  delete dbpath;
}

rocksdb_env_t* rocksdb_create_default_env() {
  rocksdb_env_t* result = new rocksdb_env_t;
  result->rep = Env::Default();
  result->is_default = true;
  return result;
}

rocksdb_env_t* rocksdb_create_mem_env() {
  rocksdb_env_t* result = new rocksdb_env_t;
  result->rep = ROCKSDB_NAMESPACE::NewMemEnv(Env::Default());
  result->is_default = false;
  return result;
}

void rocksdb_env_set_background_threads(rocksdb_env_t* env, int n) {
  env->rep->SetBackgroundThreads(n);
}

int rocksdb_env_get_background_threads(rocksdb_env_t* env) {
  return env->rep->GetBackgroundThreads();
}

void rocksdb_env_set_bottom_priority_background_threads(rocksdb_env_t* env,
                                                        int n) {
  env->rep->SetBackgroundThreads(n, Env::BOTTOM);
}

int rocksdb_env_get_bottom_priority_background_threads(rocksdb_env_t* env) {
  return env->rep->GetBackgroundThreads(Env::BOTTOM);
}

void rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env,
                                                      int n) {
  env->rep->SetBackgroundThreads(n, Env::HIGH);
}

int rocksdb_env_get_high_priority_background_threads(rocksdb_env_t* env) {
  return env->rep->GetBackgroundThreads(Env::HIGH);
}

void rocksdb_env_set_low_priority_background_threads(rocksdb_env_t* env,
                                                     int n) {
  env->rep->SetBackgroundThreads(n, Env::LOW);
}

int rocksdb_env_get_low_priority_background_threads(rocksdb_env_t* env) {
  return env->rep->GetBackgroundThreads(Env::LOW);
}

void rocksdb_env_join_all_threads(rocksdb_env_t* env) {
  env->rep->WaitForJoin();
}

void rocksdb_env_lower_thread_pool_io_priority(rocksdb_env_t* env) {
  env->rep->LowerThreadPoolIOPriority();
}

void rocksdb_env_lower_high_priority_thread_pool_io_priority(
    rocksdb_env_t* env) {
  env->rep->LowerThreadPoolIOPriority(Env::HIGH);
}

void rocksdb_env_lower_thread_pool_cpu_priority(rocksdb_env_t* env) {
  env->rep->LowerThreadPoolCPUPriority();
}

void rocksdb_env_lower_high_priority_thread_pool_cpu_priority(
    rocksdb_env_t* env) {
  env->rep->LowerThreadPoolCPUPriority(Env::HIGH);
}

void rocksdb_env_destroy(rocksdb_env_t* env) {
  if (!env->is_default) delete env->rep;
  delete env;
}

rocksdb_envoptions_t* rocksdb_envoptions_create() {
  rocksdb_envoptions_t* opt = new rocksdb_envoptions_t;
  return opt;
}

void rocksdb_envoptions_destroy(rocksdb_envoptions_t* opt) { delete opt; }

rocksdb_sstfilewriter_t* rocksdb_sstfilewriter_create(
    const rocksdb_envoptions_t* env, const rocksdb_options_t* io_options) {
  rocksdb_sstfilewriter_t* writer = new rocksdb_sstfilewriter_t;
  writer->rep = new SstFileWriter(env->rep, io_options->rep);
  return writer;
}

void rocksdb_create_dir_if_missing(rocksdb_env_t* env, const char* path,
                                   char** errptr) {
  SaveError(errptr, env->rep->CreateDirIfMissing(std::string(path)));
}

rocksdb_sstfilewriter_t* rocksdb_sstfilewriter_create_with_comparator(
    const rocksdb_envoptions_t* env, const rocksdb_options_t* io_options,
    const rocksdb_comparator_t* /*comparator*/) {
  rocksdb_sstfilewriter_t* writer = new rocksdb_sstfilewriter_t;
  writer->rep = new SstFileWriter(env->rep, io_options->rep);
  return writer;
}

void rocksdb_sstfilewriter_open(rocksdb_sstfilewriter_t* writer,
                                const char* name, char** errptr) {
  SaveError(errptr, writer->rep->Open(std::string(name)));
}

void rocksdb_sstfilewriter_add(rocksdb_sstfilewriter_t* writer,
                               const char* key, size_t keylen, const char* val,
                               size_t vallen, char** errptr) {
  SaveError(errptr, writer->rep->Put(Slice(key, keylen), Slice(val, vallen)));
}

void rocksdb_sstfilewriter_put(rocksdb_sstfilewriter_t* writer,
                               const char* key, size_t keylen, const char* val,
                               size_t vallen, char** errptr) {
  SaveError(errptr, writer->rep->Put(Slice(key, keylen), Slice(val, vallen)));
}

void rocksdb_sstfilewriter_put_with_ts(rocksdb_sstfilewriter_t* writer,
                                       const char* key, size_t keylen,
                                       const char* ts, size_t tslen,
                                       const char* val, size_t vallen,
                                       char** errptr) {
  SaveError(errptr, writer->rep->Put(Slice(key, keylen), Slice(ts, tslen),
                                     Slice(val, vallen)));
}

void rocksdb_sstfilewriter_merge(rocksdb_sstfilewriter_t* writer,
                                 const char* key, size_t keylen,
                                 const char* val, size_t vallen,
                                 char** errptr) {
  SaveError(errptr, writer->rep->Merge(Slice(key, keylen), Slice(val, vallen)));
}

void rocksdb_sstfilewriter_delete(rocksdb_sstfilewriter_t* writer,
                                  const char* key, size_t keylen,
                                  char** errptr) {
  SaveError(errptr, writer->rep->Delete(Slice(key, keylen)));
}

void rocksdb_sstfilewriter_delete_with_ts(rocksdb_sstfilewriter_t* writer,
                                          const char* key, size_t keylen,
                                          const char* ts, size_t tslen,
                                          char** errptr) {
  SaveError(errptr, writer->rep->Delete(Slice(key, keylen), Slice(ts, tslen)));
}

void rocksdb_sstfilewriter_delete_range(rocksdb_sstfilewriter_t* writer,
                                        const char* begin_key,
                                        size_t begin_keylen,
                                        const char* end_key, size_t end_keylen,
                                        char** errptr) {
  SaveError(errptr, writer->rep->DeleteRange(Slice(begin_key, begin_keylen),
                                             Slice(end_key, end_keylen)));
}
2016-11-02 00:02:38 +00:00
|
|
|
void rocksdb_sstfilewriter_finish(rocksdb_sstfilewriter_t* writer,
|
|
|
|
char** errptr) {
|
2018-03-07 20:39:19 +00:00
|
|
|
SaveError(errptr, writer->rep->Finish(nullptr));
|
2016-11-02 00:02:38 +00:00
|
|
|
}
|
|
|
|
|
2018-06-01 16:33:10 +00:00
|
|
|
void rocksdb_sstfilewriter_file_size(rocksdb_sstfilewriter_t* writer,
|
2022-05-26 16:40:10 +00:00
|
|
|
uint64_t* file_size) {
|
2018-06-01 16:33:10 +00:00
|
|
|
*file_size = writer->rep->FileSize();
|
|
|
|
}
|
|
|
|
|
2016-11-02 00:02:38 +00:00
|
|
|
void rocksdb_sstfilewriter_destroy(rocksdb_sstfilewriter_t* writer) {
|
|
|
|
delete writer->rep;
|
|
|
|
delete writer;
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb_ingestexternalfileoptions_t*
|
|
|
|
rocksdb_ingestexternalfileoptions_create() {
|
|
|
|
rocksdb_ingestexternalfileoptions_t* opt =
|
|
|
|
new rocksdb_ingestexternalfileoptions_t;
|
|
|
|
return opt;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_ingestexternalfileoptions_set_move_files(
|
|
|
|
rocksdb_ingestexternalfileoptions_t* opt, unsigned char move_files) {
|
|
|
|
opt->rep.move_files = move_files;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_ingestexternalfileoptions_set_snapshot_consistency(
|
|
|
|
rocksdb_ingestexternalfileoptions_t* opt,
|
|
|
|
unsigned char snapshot_consistency) {
|
|
|
|
opt->rep.snapshot_consistency = snapshot_consistency;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_ingestexternalfileoptions_set_allow_global_seqno(
|
|
|
|
rocksdb_ingestexternalfileoptions_t* opt,
|
|
|
|
unsigned char allow_global_seqno) {
|
|
|
|
opt->rep.allow_global_seqno = allow_global_seqno;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(
|
|
|
|
rocksdb_ingestexternalfileoptions_t* opt,
|
|
|
|
unsigned char allow_blocking_flush) {
|
|
|
|
opt->rep.allow_blocking_flush = allow_blocking_flush;
|
|
|
|
}
|
|
|
|
|
2017-11-10 02:06:50 +00:00
|
|
|
void rocksdb_ingestexternalfileoptions_set_ingest_behind(
|
2022-05-26 16:40:10 +00:00
|
|
|
rocksdb_ingestexternalfileoptions_t* opt, unsigned char ingest_behind) {
|
2017-11-10 02:06:50 +00:00
|
|
|
opt->rep.ingest_behind = ingest_behind;
|
|
|
|
}
|
|
|
|
|
2016-11-02 00:02:38 +00:00
|
|
|
void rocksdb_ingestexternalfileoptions_destroy(
|
|
|
|
rocksdb_ingestexternalfileoptions_t* opt) {
|
|
|
|
delete opt;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_ingest_external_file(
|
|
|
|
rocksdb_t* db, const char* const* file_list, const size_t list_len,
|
|
|
|
const rocksdb_ingestexternalfileoptions_t* opt, char** errptr) {
|
|
|
|
std::vector<std::string> files(list_len);
|
|
|
|
for (size_t i = 0; i < list_len; ++i) {
|
|
|
|
files[i] = std::string(file_list[i]);
|
|
|
|
}
|
|
|
|
SaveError(errptr, db->rep->IngestExternalFile(files, opt->rep));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_ingest_external_file_cf(
|
|
|
|
rocksdb_t* db, rocksdb_column_family_handle_t* handle,
|
|
|
|
const char* const* file_list, const size_t list_len,
|
|
|
|
const rocksdb_ingestexternalfileoptions_t* opt, char** errptr) {
|
|
|
|
std::vector<std::string> files(list_len);
|
|
|
|
for (size_t i = 0; i < list_len; ++i) {
|
|
|
|
files[i] = std::string(file_list[i]);
|
|
|
|
}
|
|
|
|
SaveError(errptr, db->rep->IngestExternalFile(handle->rep, files, opt->rep));
|
|
|
|
}
|
|
|
|
|
2019-06-27 15:54:28 +00:00
|
|
|
void rocksdb_try_catch_up_with_primary(rocksdb_t* db, char** errptr) {
|
|
|
|
SaveError(errptr, db->rep->TryCatchUpWithPrimary());
|
|
|
|
}
|
|
|
|
|
2014-02-12 21:49:00 +00:00
|
|
|
rocksdb_slicetransform_t* rocksdb_slicetransform_create(
|
|
|
|
void* state,
|
|
|
|
void (*destructor)(void*),
|
|
|
|
char* (*transform)(
|
|
|
|
void*,
|
|
|
|
const char* key, size_t length,
|
|
|
|
size_t* dst_length),
|
|
|
|
unsigned char (*in_domain)(
|
|
|
|
void*,
|
|
|
|
const char* key, size_t length),
|
|
|
|
unsigned char (*in_range)(
|
|
|
|
void*,
|
|
|
|
const char* key, size_t length),
|
|
|
|
const char* (*name)(void*)) {
|
|
|
|
rocksdb_slicetransform_t* result = new rocksdb_slicetransform_t;
|
|
|
|
result->state_ = state;
|
|
|
|
result->destructor_ = destructor;
|
|
|
|
result->transform_ = transform;
|
|
|
|
result->in_domain_ = in_domain;
|
|
|
|
result->in_range_ = in_range;
|
|
|
|
result->name_ = name;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_slicetransform_destroy(rocksdb_slicetransform_t* st) {
|
|
|
|
delete st;
|
|
|
|
}
|
|
|
|
|
2021-12-30 20:46:38 +00:00
|
|
|
struct SliceTransformWrapper : public rocksdb_slicetransform_t {
|
2018-04-20 20:28:05 +00:00
|
|
|
const SliceTransform* rep_;
|
2021-12-30 20:46:38 +00:00
|
|
|
~SliceTransformWrapper() override { delete rep_; }
|
2018-04-20 20:28:05 +00:00
|
|
|
const char* Name() const override { return rep_->Name(); }
|
2021-12-30 20:46:38 +00:00
|
|
|
std::string GetId() const override { return rep_->GetId(); }
|
2018-04-20 20:28:05 +00:00
|
|
|
Slice Transform(const Slice& src) const override {
|
|
|
|
return rep_->Transform(src);
|
|
|
|
}
|
|
|
|
bool InDomain(const Slice& src) const override {
|
|
|
|
return rep_->InDomain(src);
|
|
|
|
}
|
|
|
|
bool InRange(const Slice& src) const override { return rep_->InRange(src); }
|
|
|
|
static void DoNothing(void*) { }
|
|
|
|
};
|
|
|
|
|
2014-02-12 21:49:00 +00:00
|
|
|
rocksdb_slicetransform_t* rocksdb_slicetransform_create_fixed_prefix(size_t prefixLen) {
|
2021-12-30 20:46:38 +00:00
|
|
|
SliceTransformWrapper* wrapper = new SliceTransformWrapper;
|
2020-02-20 20:07:53 +00:00
|
|
|
wrapper->rep_ = ROCKSDB_NAMESPACE::NewFixedPrefixTransform(prefixLen);
|
2014-03-10 19:56:46 +00:00
|
|
|
wrapper->state_ = nullptr;
|
2021-12-30 20:46:38 +00:00
|
|
|
wrapper->destructor_ = &SliceTransformWrapper::DoNothing;
|
2014-02-12 21:49:00 +00:00
|
|
|
return wrapper;
|
|
|
|
}
|
|
|
|
|
2015-07-03 00:23:41 +00:00
|
|
|
rocksdb_slicetransform_t* rocksdb_slicetransform_create_noop() {
|
2021-12-30 20:46:38 +00:00
|
|
|
SliceTransformWrapper* wrapper = new SliceTransformWrapper;
|
2020-02-20 20:07:53 +00:00
|
|
|
wrapper->rep_ = ROCKSDB_NAMESPACE::NewNoopTransform();
|
2015-07-03 00:23:41 +00:00
|
|
|
wrapper->state_ = nullptr;
|
2021-12-30 20:46:38 +00:00
|
|
|
wrapper->destructor_ = &SliceTransformWrapper::DoNothing;
|
2015-07-03 00:23:41 +00:00
|
|
|
return wrapper;
|
|
|
|
}
|
|
|
|
|
2013-12-14 07:58:18 +00:00
|
|
|
rocksdb_universal_compaction_options_t* rocksdb_universal_compaction_options_create() {
|
|
|
|
rocksdb_universal_compaction_options_t* result = new rocksdb_universal_compaction_options_t;
|
2020-02-20 20:07:53 +00:00
|
|
|
result->rep = new ROCKSDB_NAMESPACE::CompactionOptionsUniversal;
|
2013-12-14 07:58:18 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_universal_compaction_options_set_size_ratio(
|
|
|
|
rocksdb_universal_compaction_options_t* uco, int ratio) {
|
|
|
|
uco->rep->size_ratio = ratio;
|
|
|
|
}
|
|
|
|
|
2020-10-16 18:01:50 +00:00
|
|
|
int rocksdb_universal_compaction_options_get_size_ratio(
|
|
|
|
rocksdb_universal_compaction_options_t* uco) {
|
|
|
|
return uco->rep->size_ratio;
|
|
|
|
}
|
|
|
|
|
2013-12-14 07:58:18 +00:00
|
|
|
void rocksdb_universal_compaction_options_set_min_merge_width(
|
|
|
|
rocksdb_universal_compaction_options_t* uco, int w) {
|
|
|
|
uco->rep->min_merge_width = w;
|
|
|
|
}
|
|
|
|
|
2020-10-16 18:01:50 +00:00
|
|
|
int rocksdb_universal_compaction_options_get_min_merge_width(
|
|
|
|
rocksdb_universal_compaction_options_t* uco) {
|
|
|
|
return uco->rep->min_merge_width;
|
|
|
|
}
|
|
|
|
|
2013-12-14 07:58:18 +00:00
|
|
|
void rocksdb_universal_compaction_options_set_max_merge_width(
|
|
|
|
rocksdb_universal_compaction_options_t* uco, int w) {
|
|
|
|
uco->rep->max_merge_width = w;
|
|
|
|
}
|
|
|
|
|
2020-10-16 18:01:50 +00:00
|
|
|
int rocksdb_universal_compaction_options_get_max_merge_width(
|
|
|
|
rocksdb_universal_compaction_options_t* uco) {
|
|
|
|
return uco->rep->max_merge_width;
|
|
|
|
}
|
|
|
|
|
2013-12-14 07:58:18 +00:00
|
|
|
void rocksdb_universal_compaction_options_set_max_size_amplification_percent(
|
|
|
|
rocksdb_universal_compaction_options_t* uco, int p) {
|
|
|
|
uco->rep->max_size_amplification_percent = p;
|
|
|
|
}
|
|
|
|
|
2020-10-16 18:01:50 +00:00
|
|
|
int rocksdb_universal_compaction_options_get_max_size_amplification_percent(
|
|
|
|
rocksdb_universal_compaction_options_t* uco) {
|
|
|
|
return uco->rep->max_size_amplification_percent;
|
|
|
|
}
|
|
|
|
|
2013-12-14 07:58:18 +00:00
|
|
|
void rocksdb_universal_compaction_options_set_compression_size_percent(
|
|
|
|
rocksdb_universal_compaction_options_t* uco, int p) {
|
|
|
|
uco->rep->compression_size_percent = p;
|
|
|
|
}
|
|
|
|
|
2020-10-16 18:01:50 +00:00
|
|
|
int rocksdb_universal_compaction_options_get_compression_size_percent(
|
|
|
|
rocksdb_universal_compaction_options_t* uco) {
|
|
|
|
return uco->rep->compression_size_percent;
|
|
|
|
}
|
|
|
|
|
2013-12-14 07:58:18 +00:00
|
|
|
void rocksdb_universal_compaction_options_set_stop_style(
|
|
|
|
rocksdb_universal_compaction_options_t* uco, int style) {
|
2020-02-20 20:07:53 +00:00
|
|
|
uco->rep->stop_style =
|
|
|
|
static_cast<ROCKSDB_NAMESPACE::CompactionStopStyle>(style);
|
2013-12-14 07:58:18 +00:00
|
|
|
}
|
|
|
|
|
2020-10-16 18:01:50 +00:00
|
|
|
int rocksdb_universal_compaction_options_get_stop_style(
|
|
|
|
rocksdb_universal_compaction_options_t* uco) {
|
|
|
|
return static_cast<int>(uco->rep->stop_style);
|
|
|
|
}
|
|
|
|
|
2013-12-14 07:58:18 +00:00
|
|
|
void rocksdb_universal_compaction_options_destroy(
|
|
|
|
rocksdb_universal_compaction_options_t* uco) {
|
|
|
|
delete uco->rep;
|
|
|
|
delete uco;
|
|
|
|
}
|
|
|
|
|
2014-07-08 04:12:25 +00:00
|
|
|
rocksdb_fifo_compaction_options_t* rocksdb_fifo_compaction_options_create() {
|
|
|
|
rocksdb_fifo_compaction_options_t* result = new rocksdb_fifo_compaction_options_t;
|
|
|
|
result->rep = CompactionOptionsFIFO();
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_fifo_compaction_options_set_max_table_files_size(
|
|
|
|
rocksdb_fifo_compaction_options_t* fifo_opts, uint64_t size) {
|
|
|
|
fifo_opts->rep.max_table_files_size = size;
|
|
|
|
}
|
|
|
|
|
2020-10-16 18:01:50 +00:00
|
|
|
uint64_t rocksdb_fifo_compaction_options_get_max_table_files_size(
|
|
|
|
rocksdb_fifo_compaction_options_t* fifo_opts) {
|
|
|
|
return fifo_opts->rep.max_table_files_size;
|
|
|
|
}
|
|
|
|
|
2014-07-08 04:12:25 +00:00
|
|
|
void rocksdb_fifo_compaction_options_destroy(
|
|
|
|
rocksdb_fifo_compaction_options_t* fifo_opts) {
|
|
|
|
delete fifo_opts;
|
|
|
|
}
|
|
|
|
|
2014-02-25 18:32:28 +00:00
|
|
|
void rocksdb_options_set_min_level_to_compress(rocksdb_options_t* opt, int level) {
|
|
|
|
if (level >= 0) {
|
|
|
|
assert(level <= opt->rep.num_levels);
|
|
|
|
opt->rep.compression_per_level.resize(opt->rep.num_levels);
|
|
|
|
for (int i = 0; i < level; i++) {
|
2020-02-20 20:07:53 +00:00
|
|
|
opt->rep.compression_per_level[i] = ROCKSDB_NAMESPACE::kNoCompression;
|
2014-02-25 18:32:28 +00:00
|
|
|
}
|
|
|
|
for (int i = level; i < opt->rep.num_levels; i++) {
|
|
|
|
opt->rep.compression_per_level[i] = opt->rep.compression;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int rocksdb_livefiles_count(
|
|
|
|
const rocksdb_livefiles_t* lf) {
|
2014-11-11 21:47:22 +00:00
|
|
|
return static_cast<int>(lf->rep.size());
|
2014-02-25 18:32:28 +00:00
|
|
|
}
|
|
|
|
|
2021-12-01 00:53:07 +00:00
|
|
|
const char* rocksdb_livefiles_column_family_name(const rocksdb_livefiles_t* lf,
|
|
|
|
int index) {
|
|
|
|
return lf->rep[index].column_family_name.c_str();
|
|
|
|
}
|
|
|
|
|
2014-02-25 18:32:28 +00:00
|
|
|
const char* rocksdb_livefiles_name(
|
|
|
|
const rocksdb_livefiles_t* lf,
|
|
|
|
int index) {
|
|
|
|
return lf->rep[index].name.c_str();
|
|
|
|
}
|
|
|
|
|
|
|
|
int rocksdb_livefiles_level(
|
|
|
|
const rocksdb_livefiles_t* lf,
|
|
|
|
int index) {
|
|
|
|
return lf->rep[index].level;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t rocksdb_livefiles_size(
|
|
|
|
const rocksdb_livefiles_t* lf,
|
|
|
|
int index) {
|
|
|
|
return lf->rep[index].size;
|
|
|
|
}
|
|
|
|
|
|
|
|
const char* rocksdb_livefiles_smallestkey(
|
|
|
|
const rocksdb_livefiles_t* lf,
|
|
|
|
int index,
|
|
|
|
size_t* size) {
|
|
|
|
*size = lf->rep[index].smallestkey.size();
|
|
|
|
return lf->rep[index].smallestkey.data();
|
|
|
|
}
|
|
|
|
|
|
|
|
const char* rocksdb_livefiles_largestkey(
|
|
|
|
const rocksdb_livefiles_t* lf,
|
|
|
|
int index,
|
|
|
|
size_t* size) {
|
|
|
|
*size = lf->rep[index].largestkey.size();
|
|
|
|
return lf->rep[index].largestkey.data();
|
|
|
|
}
|
|
|
|
|
2018-11-13 19:50:22 +00:00
|
|
|
uint64_t rocksdb_livefiles_entries(
|
|
|
|
const rocksdb_livefiles_t* lf,
|
|
|
|
int index) {
|
|
|
|
return lf->rep[index].num_entries;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t rocksdb_livefiles_deletions(
|
|
|
|
const rocksdb_livefiles_t* lf,
|
|
|
|
int index) {
|
|
|
|
return lf->rep[index].num_deletions;
|
|
|
|
}
|
|
|
|
|
2014-02-25 18:32:28 +00:00
|
|
|
extern void rocksdb_livefiles_destroy(
|
|
|
|
const rocksdb_livefiles_t* lf) {
|
|
|
|
delete lf;
|
|
|
|
}
|
|
|
|
|
2015-07-13 19:11:05 +00:00
|
|
|
void rocksdb_get_options_from_string(const rocksdb_options_t* base_options,
|
|
|
|
const char* opts_str,
|
|
|
|
rocksdb_options_t* new_options,
|
|
|
|
char** errptr) {
|
|
|
|
SaveError(errptr,
|
|
|
|
GetOptionsFromString(base_options->rep, std::string(opts_str),
|
|
|
|
&new_options->rep));
|
2015-07-01 23:13:49 +00:00
|
|
|
}
|
|
|
|
|
2016-05-23 11:19:47 +00:00
|
|
|
void rocksdb_delete_file_in_range(rocksdb_t* db, const char* start_key,
|
|
|
|
size_t start_key_len, const char* limit_key,
|
|
|
|
size_t limit_key_len, char** errptr) {
|
|
|
|
Slice a, b;
|
|
|
|
SaveError(
|
|
|
|
errptr,
|
|
|
|
DeleteFilesInRange(
|
|
|
|
db->rep, db->rep->DefaultColumnFamily(),
|
|
|
|
(start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
|
|
|
|
(limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr)));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_delete_file_in_range_cf(
|
|
|
|
rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* start_key, size_t start_key_len, const char* limit_key,
|
|
|
|
size_t limit_key_len, char** errptr) {
|
|
|
|
Slice a, b;
|
|
|
|
SaveError(
|
|
|
|
errptr,
|
|
|
|
DeleteFilesInRange(
|
|
|
|
db->rep, column_family->rep,
|
|
|
|
(start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
|
|
|
|
(limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr)));
|
|
|
|
}
|
|
|
|
|
2022-06-22 22:00:28 +00:00
|
|
|
/* MetaData */
|
|
|
|
|
|
|
|
rocksdb_column_family_metadata_t* rocksdb_get_column_family_metadata(
|
|
|
|
rocksdb_t* db) {
|
|
|
|
rocksdb_column_family_metadata_t* meta = new rocksdb_column_family_metadata_t;
|
|
|
|
db->rep->GetColumnFamilyMetaData(&meta->rep);
|
|
|
|
return meta;
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb_column_family_metadata_t* rocksdb_get_column_family_metadata_cf(
|
|
|
|
rocksdb_t* db, rocksdb_column_family_handle_t* column_family) {
|
|
|
|
rocksdb_column_family_metadata_t* meta = new rocksdb_column_family_metadata_t;
|
|
|
|
db->rep->GetColumnFamilyMetaData(column_family->rep, &meta->rep);
|
|
|
|
return meta;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_column_family_metadata_destroy(
|
|
|
|
rocksdb_column_family_metadata_t* cf_meta) {
|
|
|
|
delete cf_meta;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t rocksdb_column_family_metadata_get_size(
|
|
|
|
rocksdb_column_family_metadata_t* cf_meta) {
|
|
|
|
return cf_meta->rep.size;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t rocksdb_column_family_metadata_get_file_count(
|
|
|
|
rocksdb_column_family_metadata_t* cf_meta) {
|
|
|
|
return cf_meta->rep.file_count;
|
|
|
|
}
|
|
|
|
|
|
|
|
char* rocksdb_column_family_metadata_get_name(
|
|
|
|
rocksdb_column_family_metadata_t* cf_meta) {
|
|
|
|
return strdup(cf_meta->rep.name.c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t rocksdb_column_family_metadata_get_level_count(
|
|
|
|
rocksdb_column_family_metadata_t* cf_meta) {
|
|
|
|
return cf_meta->rep.levels.size();
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb_level_metadata_t* rocksdb_column_family_metadata_get_level_metadata(
|
|
|
|
rocksdb_column_family_metadata_t* cf_meta, size_t i) {
|
|
|
|
if (i >= cf_meta->rep.levels.size()) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
rocksdb_level_metadata_t* level_meta =
|
|
|
|
(rocksdb_level_metadata_t*)malloc(sizeof(rocksdb_level_metadata_t));
|
|
|
|
level_meta->rep = &cf_meta->rep.levels[i];
|
|
|
|
|
|
|
|
return level_meta;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_level_metadata_destroy(rocksdb_level_metadata_t* level_meta) {
|
|
|
|
// Only free the base pointer as its parent rocksdb_column_family_metadata_t
|
|
|
|
// has the ownership of its rep.
|
|
|
|
free(level_meta);
|
|
|
|
}
|
|
|
|
|
|
|
|
int rocksdb_level_metadata_get_level(rocksdb_level_metadata_t* level_meta) {
|
|
|
|
return level_meta->rep->level;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t rocksdb_level_metadata_get_size(rocksdb_level_metadata_t* level_meta) {
|
|
|
|
return level_meta->rep->size;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t rocksdb_level_metadata_get_file_count(
|
|
|
|
rocksdb_level_metadata_t* level_meta) {
|
|
|
|
return level_meta->rep->files.size();
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb_sst_file_metadata_t* rocksdb_level_metadata_get_sst_file_metadata(
|
|
|
|
rocksdb_level_metadata_t* level_meta, size_t i) {
|
|
|
|
if (i >= level_meta->rep->files.size()) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
rocksdb_sst_file_metadata_t* file_meta =
|
|
|
|
(rocksdb_sst_file_metadata_t*)malloc(sizeof(rocksdb_sst_file_metadata_t));
|
|
|
|
file_meta->rep = &level_meta->rep->files[i];
|
|
|
|
return file_meta;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_sst_file_metadata_destroy(rocksdb_sst_file_metadata_t* file_meta) {
|
|
|
|
// Only free the base pointer as its parent rocksdb_level_metadata_t
|
|
|
|
// has the ownership of its rep.
|
|
|
|
free(file_meta);
|
|
|
|
}
|
|
|
|
|
|
|
|
char* rocksdb_sst_file_metadata_get_relative_filename(
|
|
|
|
rocksdb_sst_file_metadata_t* file_meta) {
|
|
|
|
return strdup(file_meta->rep->relative_filename.c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t rocksdb_sst_file_metadata_get_size(
|
|
|
|
rocksdb_sst_file_metadata_t* file_meta) {
|
|
|
|
return file_meta->rep->size;
|
|
|
|
}
|
|
|
|
|
|
|
|
char* rocksdb_sst_file_metadata_get_smallestkey(
|
|
|
|
rocksdb_sst_file_metadata_t* file_meta, size_t* key_len) {
|
|
|
|
*key_len = file_meta->rep->smallestkey.size();
|
|
|
|
return CopyString(file_meta->rep->smallestkey);
|
|
|
|
}
|
|
|
|
|
|
|
|
char* rocksdb_sst_file_metadata_get_largestkey(
|
|
|
|
rocksdb_sst_file_metadata_t* file_meta, size_t* key_len) {
|
|
|
|
*key_len = file_meta->rep->largestkey.size();
|
|
|
|
return CopyString(file_meta->rep->largestkey);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Transactions */
|
|
|
|
|
2017-05-17 05:57:05 +00:00
|
|
|
rocksdb_transactiondb_options_t* rocksdb_transactiondb_options_create() {
|
|
|
|
return new rocksdb_transactiondb_options_t;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transactiondb_options_destroy(rocksdb_transactiondb_options_t* opt){
|
|
|
|
delete opt;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transactiondb_options_set_max_num_locks(
|
|
|
|
rocksdb_transactiondb_options_t* opt, int64_t max_num_locks) {
|
|
|
|
opt->rep.max_num_locks = max_num_locks;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transactiondb_options_set_num_stripes(
|
|
|
|
rocksdb_transactiondb_options_t* opt, size_t num_stripes) {
|
|
|
|
opt->rep.num_stripes = num_stripes;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transactiondb_options_set_transaction_lock_timeout(
|
|
|
|
rocksdb_transactiondb_options_t* opt, int64_t txn_lock_timeout) {
|
|
|
|
opt->rep.transaction_lock_timeout = txn_lock_timeout;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transactiondb_options_set_default_lock_timeout(
|
|
|
|
rocksdb_transactiondb_options_t* opt, int64_t default_lock_timeout) {
|
|
|
|
opt->rep.default_lock_timeout = default_lock_timeout;
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb_transaction_options_t* rocksdb_transaction_options_create() {
|
|
|
|
return new rocksdb_transaction_options_t;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_options_destroy(rocksdb_transaction_options_t* opt) {
|
|
|
|
delete opt;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_options_set_set_snapshot(
|
|
|
|
rocksdb_transaction_options_t* opt, unsigned char v) {
|
|
|
|
opt->rep.set_snapshot = v;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_options_set_deadlock_detect(
|
|
|
|
rocksdb_transaction_options_t* opt, unsigned char v) {
|
|
|
|
opt->rep.deadlock_detect = v;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_options_set_lock_timeout(
|
|
|
|
rocksdb_transaction_options_t* opt, int64_t lock_timeout) {
|
|
|
|
opt->rep.lock_timeout = lock_timeout;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_options_set_expiration(
|
|
|
|
rocksdb_transaction_options_t* opt, int64_t expiration) {
|
|
|
|
opt->rep.expiration = expiration;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_options_set_deadlock_detect_depth(
|
|
|
|
rocksdb_transaction_options_t* opt, int64_t depth) {
|
|
|
|
opt->rep.deadlock_detect_depth = depth;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_options_set_max_write_batch_size(
|
|
|
|
rocksdb_transaction_options_t* opt, size_t size) {
|
|
|
|
opt->rep.max_write_batch_size = size;
|
|
|
|
}
|
|
|
|
|
2022-05-25 16:38:10 +00:00
|
|
|
void rocksdb_transaction_options_set_skip_prepare(
|
|
|
|
rocksdb_transaction_options_t* opt, unsigned char v) {
|
|
|
|
opt->rep.skip_prepare = v;
|
|
|
|
}
|
|
|
|
|
2017-08-23 19:32:42 +00:00
|
|
|
rocksdb_optimistictransaction_options_t*
|
|
|
|
rocksdb_optimistictransaction_options_create() {
|
|
|
|
return new rocksdb_optimistictransaction_options_t;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_optimistictransaction_options_destroy(
|
|
|
|
rocksdb_optimistictransaction_options_t* opt) {
|
|
|
|
delete opt;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_optimistictransaction_options_set_set_snapshot(
|
|
|
|
rocksdb_optimistictransaction_options_t* opt, unsigned char v) {
|
|
|
|
opt->rep.set_snapshot = v;
|
|
|
|
}
|
|
|
|
|
2021-08-07 02:05:32 +00:00
|
|
|
char* rocksdb_optimistictransactiondb_property_value(
|
|
|
|
rocksdb_optimistictransactiondb_t* db, const char* propname) {
|
|
|
|
std::string tmp;
|
|
|
|
if (db->rep->GetProperty(Slice(propname), &tmp)) {
|
|
|
|
// We use strdup() since we expect human readable output.
|
|
|
|
return strdup(tmp.c_str());
|
|
|
|
} else {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int rocksdb_optimistictransactiondb_property_int(
|
|
|
|
rocksdb_optimistictransactiondb_t* db, const char* propname,
|
|
|
|
uint64_t* out_val) {
|
|
|
|
if (db->rep->GetIntProperty(Slice(propname), out_val)) {
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-10 20:40:57 +00:00
|
|
|
rocksdb_column_family_handle_t* rocksdb_transactiondb_create_column_family(
|
|
|
|
rocksdb_transactiondb_t* txn_db,
|
|
|
|
const rocksdb_options_t* column_family_options,
|
|
|
|
const char* column_family_name, char** errptr) {
|
|
|
|
rocksdb_column_family_handle_t* handle = new rocksdb_column_family_handle_t;
|
|
|
|
SaveError(errptr, txn_db->rep->CreateColumnFamily(
|
|
|
|
ColumnFamilyOptions(column_family_options->rep),
|
|
|
|
std::string(column_family_name), &(handle->rep)));
|
|
|
|
return handle;
|
|
|
|
}
|
|
|
|
|
2017-05-17 05:57:05 +00:00
|
|
|
rocksdb_transactiondb_t* rocksdb_transactiondb_open(
|
|
|
|
const rocksdb_options_t* options,
|
|
|
|
const rocksdb_transactiondb_options_t* txn_db_options, const char* name,
|
|
|
|
char** errptr) {
|
|
|
|
TransactionDB* txn_db;
|
|
|
|
if (SaveError(errptr, TransactionDB::Open(options->rep, txn_db_options->rep,
|
|
|
|
std::string(name), &txn_db))) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
rocksdb_transactiondb_t* result = new rocksdb_transactiondb_t;
|
|
|
|
result->rep = txn_db;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2019-03-19 16:37:58 +00:00
|
|
|
rocksdb_transactiondb_t* rocksdb_transactiondb_open_column_families(
|
|
|
|
const rocksdb_options_t* options,
|
|
|
|
const rocksdb_transactiondb_options_t* txn_db_options, const char* name,
|
2020-01-11 03:25:51 +00:00
|
|
|
int num_column_families, const char* const* column_family_names,
|
|
|
|
const rocksdb_options_t* const* column_family_options,
|
2019-03-19 16:37:58 +00:00
|
|
|
rocksdb_column_family_handle_t** column_family_handles, char** errptr) {
|
|
|
|
std::vector<ColumnFamilyDescriptor> column_families;
|
|
|
|
for (int i = 0; i < num_column_families; i++) {
|
|
|
|
column_families.push_back(ColumnFamilyDescriptor(
|
|
|
|
std::string(column_family_names[i]),
|
|
|
|
ColumnFamilyOptions(column_family_options[i]->rep)));
|
|
|
|
}
|
|
|
|
|
|
|
|
TransactionDB* txn_db;
|
|
|
|
std::vector<ColumnFamilyHandle*> handles;
|
|
|
|
if (SaveError(errptr, TransactionDB::Open(options->rep, txn_db_options->rep,
|
|
|
|
std::string(name), column_families,
|
|
|
|
&handles, &txn_db))) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (size_t i = 0; i < handles.size(); i++) {
|
|
|
|
rocksdb_column_family_handle_t* c_handle =
|
|
|
|
new rocksdb_column_family_handle_t;
|
|
|
|
c_handle->rep = handles[i];
|
|
|
|
column_family_handles[i] = c_handle;
|
|
|
|
}
|
|
|
|
rocksdb_transactiondb_t* result = new rocksdb_transactiondb_t;
|
|
|
|
result->rep = txn_db;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2017-05-17 05:57:05 +00:00
|
|
|
const rocksdb_snapshot_t* rocksdb_transactiondb_create_snapshot(
|
|
|
|
rocksdb_transactiondb_t* txn_db) {
|
|
|
|
rocksdb_snapshot_t* result = new rocksdb_snapshot_t;
|
|
|
|
result->rep = txn_db->rep->GetSnapshot();
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transactiondb_release_snapshot(
|
|
|
|
rocksdb_transactiondb_t* txn_db, const rocksdb_snapshot_t* snapshot) {
|
|
|
|
txn_db->rep->ReleaseSnapshot(snapshot->rep);
|
|
|
|
delete snapshot;
|
|
|
|
}
|
|
|
|
|
2021-08-07 02:05:32 +00:00
|
|
|
char* rocksdb_transactiondb_property_value(rocksdb_transactiondb_t* db,
|
|
|
|
const char* propname) {
|
|
|
|
std::string tmp;
|
|
|
|
if (db->rep->GetProperty(Slice(propname), &tmp)) {
|
|
|
|
// We use strdup() since we expect human readable output.
|
|
|
|
return strdup(tmp.c_str());
|
|
|
|
} else {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int rocksdb_transactiondb_property_int(rocksdb_transactiondb_t* db,
|
|
|
|
const char* propname,
|
|
|
|
uint64_t* out_val) {
|
|
|
|
if (db->rep->GetIntProperty(Slice(propname), out_val)) {
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-17 05:57:05 +00:00
|
|
|
rocksdb_transaction_t* rocksdb_transaction_begin(
|
|
|
|
rocksdb_transactiondb_t* txn_db,
|
|
|
|
const rocksdb_writeoptions_t* write_options,
|
|
|
|
const rocksdb_transaction_options_t* txn_options,
|
|
|
|
rocksdb_transaction_t* old_txn) {
|
|
|
|
if (old_txn == nullptr) {
|
|
|
|
rocksdb_transaction_t* result = new rocksdb_transaction_t;
|
|
|
|
result->rep = txn_db->rep->BeginTransaction(write_options->rep,
|
|
|
|
txn_options->rep, nullptr);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
old_txn->rep = txn_db->rep->BeginTransaction(write_options->rep,
|
|
|
|
txn_options->rep, old_txn->rep);
|
|
|
|
return old_txn;
|
|
|
|
}
|
|
|
|
|
2022-05-25 16:38:10 +00:00
|
|
|
rocksdb_transaction_t** rocksdb_transactiondb_get_prepared_transactions(
|
|
|
|
rocksdb_transactiondb_t* txn_db, size_t* cnt) {
|
|
|
|
std::vector<Transaction*> txns;
|
|
|
|
txn_db->rep->GetAllPreparedTransactions(&txns);
|
|
|
|
*cnt = txns.size();
|
|
|
|
if (txns.empty()) {
|
|
|
|
return nullptr;
|
|
|
|
} else {
|
|
|
|
rocksdb_transaction_t** buf = (rocksdb_transaction_t**)malloc(
|
|
|
|
txns.size() * sizeof(rocksdb_transaction_t*));
|
|
|
|
for (size_t i = 0; i < txns.size(); i++) {
|
|
|
|
buf[i] = new rocksdb_transaction_t;
|
|
|
|
buf[i]->rep = txns[i];
|
|
|
|
}
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_set_name(rocksdb_transaction_t* txn, const char* name,
|
|
|
|
size_t name_len, char** errptr) {
|
|
|
|
std::string str = std::string(name, name_len);
|
|
|
|
SaveError(errptr, txn->rep->SetName(str));
|
|
|
|
}
|
|
|
|
|
|
|
|
char* rocksdb_transaction_get_name(rocksdb_transaction_t* txn,
|
|
|
|
size_t* name_len) {
|
|
|
|
auto name = txn->rep->GetName();
|
|
|
|
*name_len = name.size();
|
|
|
|
return CopyString(name);
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_prepare(rocksdb_transaction_t* txn, char** errptr) {
|
|
|
|
SaveError(errptr, txn->rep->Prepare());
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb_writebatch_wi_t* rocksdb_transaction_get_writebatch_wi(
|
|
|
|
rocksdb_transaction_t* txn) {
|
|
|
|
rocksdb_writebatch_wi_t* wi =
|
|
|
|
(rocksdb_writebatch_wi_t*)malloc(sizeof(rocksdb_writebatch_wi_t));
|
|
|
|
wi->rep = txn->rep->GetWriteBatch();
|
|
|
|
|
|
|
|
return wi;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_rebuild_from_writebatch(
|
|
|
|
rocksdb_transaction_t* txn, rocksdb_writebatch_t* writebatch,
|
|
|
|
char** errptr) {
|
|
|
|
SaveError(errptr, txn->rep->RebuildFromWriteBatch(&writebatch->rep));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_rebuild_from_writebatch_wi(rocksdb_transaction_t* txn,
|
|
|
|
rocksdb_writebatch_wi_t* wi,
|
|
|
|
char** errptr) {
|
|
|
|
SaveError(errptr, txn->rep->RebuildFromWriteBatch(wi->rep->GetWriteBatch()));
|
|
|
|
}
|
|
|
|
|
2017-05-17 05:57:05 +00:00
|
|
|
void rocksdb_transaction_commit(rocksdb_transaction_t* txn, char** errptr) {
|
|
|
|
SaveError(errptr, txn->rep->Commit());
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_rollback(rocksdb_transaction_t* txn, char** errptr) {
|
|
|
|
SaveError(errptr, txn->rep->Rollback());
|
|
|
|
}
|
|
|
|
|
2017-09-14 21:04:40 +00:00
|
|
|
void rocksdb_transaction_set_savepoint(rocksdb_transaction_t* txn) {
|
|
|
|
txn->rep->SetSavePoint();
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_rollback_to_savepoint(rocksdb_transaction_t* txn, char** errptr) {
|
|
|
|
SaveError(errptr, txn->rep->RollbackToSavePoint());
|
|
|
|
}
|
|
|
|
|
2017-05-17 05:57:05 +00:00
|
|
|
void rocksdb_transaction_destroy(rocksdb_transaction_t* txn) {
|
|
|
|
delete txn->rep;
|
|
|
|
delete txn;
|
|
|
|
}
|
|
|
|
|
2017-08-23 19:32:42 +00:00
|
|
|
const rocksdb_snapshot_t* rocksdb_transaction_get_snapshot(
|
|
|
|
rocksdb_transaction_t* txn) {
|
2021-03-26 22:48:56 +00:00
|
|
|
// This will be freed later on using free, so use malloc here to avoid a
|
|
|
|
// mismatch
|
|
|
|
rocksdb_snapshot_t* result =
|
|
|
|
(rocksdb_snapshot_t*)malloc(sizeof(rocksdb_snapshot_t));
|
2017-08-23 19:32:42 +00:00
|
|
|
result->rep = txn->rep->GetSnapshot();
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read a key inside a transaction
|
2017-05-17 05:57:05 +00:00
|
|
|
char* rocksdb_transaction_get(rocksdb_transaction_t* txn,
|
|
|
|
const rocksdb_readoptions_t* options,
|
|
|
|
const char* key, size_t klen, size_t* vlen,
|
|
|
|
char** errptr) {
|
|
|
|
char* result = nullptr;
|
|
|
|
std::string tmp;
|
|
|
|
Status s = txn->rep->Get(options->rep, Slice(key, klen), &tmp);
|
|
|
|
if (s.ok()) {
|
|
|
|
*vlen = tmp.size();
|
|
|
|
result = CopyString(tmp);
|
|
|
|
} else {
|
|
|
|
*vlen = 0;
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2022-05-25 16:38:10 +00:00
|
|
|
rocksdb_pinnableslice_t* rocksdb_transaction_get_pinned(
|
|
|
|
rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
|
|
|
|
const char* key, size_t klen, char** errptr) {
|
|
|
|
rocksdb_pinnableslice_t* v = new (rocksdb_pinnableslice_t);
|
|
|
|
Status s = txn->rep->Get(options->rep, Slice(key, klen), &v->rep);
|
|
|
|
if (!s.ok()) {
|
|
|
|
delete (v);
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
2017-08-10 20:40:57 +00:00
|
|
|
char* rocksdb_transaction_get_cf(rocksdb_transaction_t* txn,
|
|
|
|
const rocksdb_readoptions_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen, size_t* vlen,
|
|
|
|
char** errptr) {
|
|
|
|
char* result = nullptr;
|
|
|
|
std::string tmp;
|
|
|
|
Status s =
|
|
|
|
txn->rep->Get(options->rep, column_family->rep, Slice(key, klen), &tmp);
|
|
|
|
if (s.ok()) {
|
|
|
|
*vlen = tmp.size();
|
|
|
|
result = CopyString(tmp);
|
|
|
|
} else {
|
|
|
|
*vlen = 0;
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2022-05-25 16:38:10 +00:00
|
|
|
rocksdb_pinnableslice_t* rocksdb_transaction_get_pinned_cf(
|
|
|
|
rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family, const char* key, size_t klen,
|
|
|
|
char** errptr) {
|
|
|
|
rocksdb_pinnableslice_t* v = new (rocksdb_pinnableslice_t);
|
|
|
|
Status s = txn->rep->Get(options->rep, column_family->rep, Slice(key, klen),
|
|
|
|
&v->rep);
|
|
|
|
if (!s.ok()) {
|
|
|
|
delete (v);
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
2017-08-23 19:32:42 +00:00
|
|
|
// Read a key inside a transaction
|
|
|
|
char* rocksdb_transaction_get_for_update(rocksdb_transaction_t* txn,
|
|
|
|
const rocksdb_readoptions_t* options,
|
|
|
|
const char* key, size_t klen,
|
|
|
|
size_t* vlen, unsigned char exclusive,
|
|
|
|
char** errptr) {
|
|
|
|
char* result = nullptr;
|
|
|
|
std::string tmp;
|
|
|
|
Status s =
|
|
|
|
txn->rep->GetForUpdate(options->rep, Slice(key, klen), &tmp, exclusive);
|
|
|
|
if (s.ok()) {
|
|
|
|
*vlen = tmp.size();
|
|
|
|
result = CopyString(tmp);
|
|
|
|
} else {
|
|
|
|
*vlen = 0;
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2022-05-25 16:38:10 +00:00
|
|
|
rocksdb_pinnableslice_t* rocksdb_transaction_get_pinned_for_update(
|
|
|
|
rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
|
|
|
|
const char* key, size_t klen, unsigned char exclusive, char** errptr) {
|
|
|
|
rocksdb_pinnableslice_t* v = new (rocksdb_pinnableslice_t);
|
|
|
|
Status s = txn->rep->GetForUpdate(options->rep, Slice(key, klen),
|
|
|
|
v->rep.GetSelf(), exclusive);
|
|
|
|
v->rep.PinSelf();
|
|
|
|
if (!s.ok()) {
|
|
|
|
delete (v);
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
2019-03-19 16:37:58 +00:00
|
|
|
char* rocksdb_transaction_get_for_update_cf(
|
|
|
|
rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family, const char* key, size_t klen,
|
|
|
|
size_t* vlen, unsigned char exclusive, char** errptr) {
|
|
|
|
char* result = nullptr;
|
|
|
|
std::string tmp;
|
|
|
|
Status s = txn->rep->GetForUpdate(options->rep, column_family->rep,
|
|
|
|
Slice(key, klen), &tmp, exclusive);
|
|
|
|
if (s.ok()) {
|
|
|
|
*vlen = tmp.size();
|
|
|
|
result = CopyString(tmp);
|
|
|
|
} else {
|
|
|
|
*vlen = 0;
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2022-05-25 16:38:10 +00:00
|
|
|
rocksdb_pinnableslice_t* rocksdb_transaction_get_pinned_for_update_cf(
|
|
|
|
rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family, const char* key, size_t klen,
|
|
|
|
unsigned char exclusive, char** errptr) {
|
|
|
|
rocksdb_pinnableslice_t* v = new (rocksdb_pinnableslice_t);
|
|
|
|
Status s = txn->rep->GetForUpdate(options->rep, column_family->rep,
|
|
|
|
Slice(key, klen), &v->rep, exclusive);
|
|
|
|
if (!s.ok()) {
|
|
|
|
delete (v);
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_multi_get(rocksdb_transaction_t* txn,
|
|
|
|
const rocksdb_readoptions_t* options,
|
|
|
|
size_t num_keys,
|
|
|
|
const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes,
|
|
|
|
char** values_list,
|
|
|
|
size_t* values_list_sizes, char** errs) {
|
|
|
|
std::vector<Slice> keys(num_keys);
|
|
|
|
for (size_t i = 0; i < num_keys; i++) {
|
|
|
|
keys[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
std::vector<std::string> values(num_keys);
|
|
|
|
std::vector<Status> statuses =
|
|
|
|
txn->rep->MultiGet(options->rep, keys, &values);
|
|
|
|
for (size_t i = 0; i < num_keys; i++) {
|
|
|
|
if (statuses[i].ok()) {
|
|
|
|
values_list[i] = CopyString(values[i]);
|
|
|
|
values_list_sizes[i] = values[i].size();
|
|
|
|
errs[i] = nullptr;
|
|
|
|
} else {
|
|
|
|
values_list[i] = nullptr;
|
|
|
|
values_list_sizes[i] = 0;
|
|
|
|
if (!statuses[i].IsNotFound()) {
|
|
|
|
errs[i] = strdup(statuses[i].ToString().c_str());
|
|
|
|
} else {
|
|
|
|
errs[i] = nullptr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_multi_get_cf(
|
|
|
|
rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
|
|
|
|
const rocksdb_column_family_handle_t* const* column_families,
|
|
|
|
size_t num_keys, const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes, char** values_list,
|
|
|
|
size_t* values_list_sizes, char** errs) {
|
|
|
|
std::vector<Slice> keys(num_keys);
|
|
|
|
std::vector<ColumnFamilyHandle*> cfs(num_keys);
|
|
|
|
for (size_t i = 0; i < num_keys; i++) {
|
|
|
|
keys[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
cfs[i] = column_families[i]->rep;
|
|
|
|
}
|
|
|
|
std::vector<std::string> values(num_keys);
|
|
|
|
std::vector<Status> statuses =
|
|
|
|
txn->rep->MultiGet(options->rep, cfs, keys, &values);
|
|
|
|
for (size_t i = 0; i < num_keys; i++) {
|
|
|
|
if (statuses[i].ok()) {
|
|
|
|
values_list[i] = CopyString(values[i]);
|
|
|
|
values_list_sizes[i] = values[i].size();
|
|
|
|
errs[i] = nullptr;
|
|
|
|
} else {
|
|
|
|
values_list[i] = nullptr;
|
|
|
|
values_list_sizes[i] = 0;
|
|
|
|
if (!statuses[i].IsNotFound()) {
|
|
|
|
errs[i] = strdup(statuses[i].ToString().c_str());
|
|
|
|
} else {
|
|
|
|
errs[i] = nullptr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-17 05:57:05 +00:00
|
|
|
// Read a key outside a transaction
|
|
|
|
char* rocksdb_transactiondb_get(
|
|
|
|
rocksdb_transactiondb_t* txn_db,
|
|
|
|
const rocksdb_readoptions_t* options,
|
|
|
|
const char* key, size_t klen,
|
|
|
|
size_t* vlen,
|
|
|
|
char** errptr){
|
|
|
|
char* result = nullptr;
|
|
|
|
std::string tmp;
|
|
|
|
Status s = txn_db->rep->Get(options->rep, Slice(key, klen), &tmp);
|
|
|
|
if (s.ok()) {
|
|
|
|
*vlen = tmp.size();
|
|
|
|
result = CopyString(tmp);
|
|
|
|
} else {
|
|
|
|
*vlen = 0;
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2022-05-25 16:38:10 +00:00
|
|
|
rocksdb_pinnableslice_t* rocksdb_transactiondb_get_pinned(
|
|
|
|
rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options,
|
|
|
|
const char* key, size_t klen, char** errptr) {
|
|
|
|
rocksdb_pinnableslice_t* v = new (rocksdb_pinnableslice_t);
|
|
|
|
Status s = txn_db->rep->Get(options->rep, txn_db->rep->DefaultColumnFamily(),
|
|
|
|
Slice(key, klen), &v->rep);
|
|
|
|
if (!s.ok()) {
|
|
|
|
delete (v);
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
2017-08-10 20:40:57 +00:00
|
|
|
char* rocksdb_transactiondb_get_cf(
|
|
|
|
rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family, const char* key,
|
|
|
|
size_t keylen, size_t* vallen, char** errptr) {
|
|
|
|
char* result = nullptr;
|
|
|
|
std::string tmp;
|
|
|
|
Status s = txn_db->rep->Get(options->rep, column_family->rep,
|
|
|
|
Slice(key, keylen), &tmp);
|
|
|
|
if (s.ok()) {
|
|
|
|
*vallen = tmp.size();
|
|
|
|
result = CopyString(tmp);
|
|
|
|
} else {
|
|
|
|
*vallen = 0;
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2022-05-25 16:38:10 +00:00
|
|
|
rocksdb_pinnableslice_t* rocksdb_transactiondb_get_pinned_cf(
|
|
|
|
rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family, const char* key,
|
|
|
|
size_t keylen, char** errptr) {
|
|
|
|
rocksdb_pinnableslice_t* v = new (rocksdb_pinnableslice_t);
|
|
|
|
Status s = txn_db->rep->Get(options->rep, column_family->rep,
|
|
|
|
Slice(key, keylen), &v->rep);
|
|
|
|
if (!s.ok()) {
|
|
|
|
delete (v);
|
|
|
|
if (!s.IsNotFound()) {
|
|
|
|
SaveError(errptr, s);
|
|
|
|
}
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transactiondb_multi_get(rocksdb_transactiondb_t* txn_db,
|
|
|
|
const rocksdb_readoptions_t* options,
|
|
|
|
size_t num_keys,
|
|
|
|
const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes,
|
|
|
|
char** values_list,
|
|
|
|
size_t* values_list_sizes, char** errs) {
|
|
|
|
std::vector<Slice> keys(num_keys);
|
|
|
|
for (size_t i = 0; i < num_keys; i++) {
|
|
|
|
keys[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
}
|
|
|
|
std::vector<std::string> values(num_keys);
|
|
|
|
std::vector<Status> statuses =
|
|
|
|
txn_db->rep->MultiGet(options->rep, keys, &values);
|
|
|
|
for (size_t i = 0; i < num_keys; i++) {
|
|
|
|
if (statuses[i].ok()) {
|
|
|
|
values_list[i] = CopyString(values[i]);
|
|
|
|
values_list_sizes[i] = values[i].size();
|
|
|
|
errs[i] = nullptr;
|
|
|
|
} else {
|
|
|
|
values_list[i] = nullptr;
|
|
|
|
values_list_sizes[i] = 0;
|
|
|
|
if (!statuses[i].IsNotFound()) {
|
|
|
|
errs[i] = strdup(statuses[i].ToString().c_str());
|
|
|
|
} else {
|
|
|
|
errs[i] = nullptr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transactiondb_multi_get_cf(
|
|
|
|
rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options,
|
|
|
|
const rocksdb_column_family_handle_t* const* column_families,
|
|
|
|
size_t num_keys, const char* const* keys_list,
|
|
|
|
const size_t* keys_list_sizes, char** values_list,
|
|
|
|
size_t* values_list_sizes, char** errs) {
|
|
|
|
std::vector<Slice> keys(num_keys);
|
|
|
|
std::vector<ColumnFamilyHandle*> cfs(num_keys);
|
|
|
|
for (size_t i = 0; i < num_keys; i++) {
|
|
|
|
keys[i] = Slice(keys_list[i], keys_list_sizes[i]);
|
|
|
|
cfs[i] = column_families[i]->rep;
|
|
|
|
}
|
|
|
|
std::vector<std::string> values(num_keys);
|
|
|
|
std::vector<Status> statuses =
|
|
|
|
txn_db->rep->MultiGet(options->rep, cfs, keys, &values);
|
|
|
|
for (size_t i = 0; i < num_keys; i++) {
|
|
|
|
if (statuses[i].ok()) {
|
|
|
|
values_list[i] = CopyString(values[i]);
|
|
|
|
values_list_sizes[i] = values[i].size();
|
|
|
|
errs[i] = nullptr;
|
|
|
|
} else {
|
|
|
|
values_list[i] = nullptr;
|
|
|
|
values_list_sizes[i] = 0;
|
|
|
|
if (!statuses[i].IsNotFound()) {
|
|
|
|
errs[i] = strdup(statuses[i].ToString().c_str());
|
|
|
|
} else {
|
|
|
|
errs[i] = nullptr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-17 05:57:05 +00:00
|
|
|
// Put a key inside a transaction
|
|
|
|
void rocksdb_transaction_put(rocksdb_transaction_t* txn, const char* key,
|
|
|
|
size_t klen, const char* val, size_t vlen,
|
|
|
|
char** errptr) {
|
|
|
|
SaveError(errptr, txn->rep->Put(Slice(key, klen), Slice(val, vlen)));
|
|
|
|
}
|
|
|
|
|
2017-08-10 20:40:57 +00:00
|
|
|
void rocksdb_transaction_put_cf(rocksdb_transaction_t* txn,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen, const char* val,
|
|
|
|
size_t vlen, char** errptr) {
|
|
|
|
SaveError(errptr, txn->rep->Put(column_family->rep, Slice(key, klen),
|
|
|
|
Slice(val, vlen)));
|
|
|
|
}
|
|
|
|
|
2022-05-26 16:40:10 +00:00
|
|
|
void rocksdb_transaction_set_commit_timestamp(rocksdb_transaction_t* txn,
|
|
|
|
uint64_t commit_timestamp) {
|
|
|
|
txn->rep->SetCommitTimestamp(commit_timestamp);
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transaction_set_read_timestamp_for_validation(
|
|
|
|
rocksdb_transaction_t* txn, uint64_t read_timestamp) {
|
|
|
|
txn->rep->SetReadTimestampForValidation(read_timestamp);
|
|
|
|
}
|
|
|
|
|
2017-08-23 19:32:42 +00:00
|
|
|
// Put a key outside a transaction
|
2017-05-17 05:57:05 +00:00
|
|
|
void rocksdb_transactiondb_put(rocksdb_transactiondb_t* txn_db,
|
|
|
|
const rocksdb_writeoptions_t* options,
|
|
|
|
const char* key, size_t klen, const char* val,
|
|
|
|
size_t vlen, char** errptr) {
|
2017-09-13 18:56:19 +00:00
|
|
|
SaveError(errptr,
|
|
|
|
txn_db->rep->Put(options->rep, Slice(key, klen), Slice(val, vlen)));
|
2017-05-17 05:57:05 +00:00
|
|
|
}
|
|
|
|
|
2017-08-10 20:40:57 +00:00
|
|
|
void rocksdb_transactiondb_put_cf(rocksdb_transactiondb_t* txn_db,
|
|
|
|
const rocksdb_writeoptions_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t keylen,
|
|
|
|
const char* val, size_t vallen,
|
|
|
|
char** errptr) {
|
|
|
|
SaveError(errptr, txn_db->rep->Put(options->rep, column_family->rep,
|
|
|
|
Slice(key, keylen), Slice(val, vallen)));
|
|
|
|
}
|
|
|
|
|
2017-08-23 19:32:42 +00:00
|
|
|
// Write batch into transaction db
|
2017-08-10 18:39:32 +00:00
|
|
|
void rocksdb_transactiondb_write(
|
|
|
|
rocksdb_transactiondb_t* db,
|
|
|
|
const rocksdb_writeoptions_t* options,
|
|
|
|
rocksdb_writebatch_t* batch,
|
|
|
|
char** errptr) {
|
|
|
|
SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
|
|
|
|
}
|
|
|
|
|
2017-08-23 19:32:42 +00:00
|
|
|
// Merge a key inside a transaction
|
|
|
|
void rocksdb_transaction_merge(rocksdb_transaction_t* txn, const char* key,
|
|
|
|
size_t klen, const char* val, size_t vlen,
|
|
|
|
char** errptr) {
|
|
|
|
SaveError(errptr, txn->rep->Merge(Slice(key, klen), Slice(val, vlen)));
|
|
|
|
}
|
|
|
|
|
2019-03-19 16:37:58 +00:00
|
|
|
void rocksdb_transaction_merge_cf(rocksdb_transaction_t* txn,
|
|
|
|
rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen, const char* val,
|
|
|
|
size_t vlen, char** errptr) {
|
|
|
|
SaveError(errptr, txn->rep->Merge(column_family->rep, Slice(key, klen),
|
|
|
|
Slice(val, vlen)));
|
|
|
|
}
|
|
|
|
|
2017-08-23 19:32:42 +00:00
|
|
|
// Merge a key outside a transaction
|
|
|
|
void rocksdb_transactiondb_merge(rocksdb_transactiondb_t* txn_db,
|
|
|
|
const rocksdb_writeoptions_t* options,
|
|
|
|
const char* key, size_t klen, const char* val,
|
|
|
|
size_t vlen, char** errptr) {
|
2017-09-13 18:56:19 +00:00
|
|
|
SaveError(errptr, txn_db->rep->Merge(options->rep, Slice(key, klen),
|
|
|
|
Slice(val, vlen)));
|
2017-08-23 19:32:42 +00:00
|
|
|
}
|
|
|
|
|
2019-03-19 16:37:58 +00:00
|
|
|
void rocksdb_transactiondb_merge_cf(
|
|
|
|
rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family, const char* key, size_t klen,
|
|
|
|
const char* val, size_t vlen, char** errptr) {
|
|
|
|
SaveError(errptr, txn_db->rep->Merge(options->rep, column_family->rep,
|
|
|
|
Slice(key, klen), Slice(val, vlen)));
|
|
|
|
}
|
|
|
|
|
2017-05-17 05:57:05 +00:00
|
|
|
// Delete a key inside a transaction
|
|
|
|
void rocksdb_transaction_delete(rocksdb_transaction_t* txn, const char* key,
|
|
|
|
size_t klen, char** errptr) {
|
|
|
|
SaveError(errptr, txn->rep->Delete(Slice(key, klen)));
|
|
|
|
}
|
|
|
|
|
2017-08-10 20:40:57 +00:00
|
|
|
void rocksdb_transaction_delete_cf(
|
|
|
|
rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family,
|
|
|
|
const char* key, size_t klen, char** errptr) {
|
|
|
|
SaveError(errptr, txn->rep->Delete(column_family->rep, Slice(key, klen)));
|
|
|
|
}
|
|
|
|
|
2017-05-17 05:57:05 +00:00
|
|
|
// Delete a key outside a transaction
|
|
|
|
void rocksdb_transactiondb_delete(rocksdb_transactiondb_t* txn_db,
|
|
|
|
const rocksdb_writeoptions_t* options,
|
|
|
|
const char* key, size_t klen, char** errptr) {
|
|
|
|
SaveError(errptr, txn_db->rep->Delete(options->rep, Slice(key, klen)));
|
|
|
|
}
|
|
|
|
|
2017-08-10 20:40:57 +00:00
|
|
|
void rocksdb_transactiondb_delete_cf(
|
|
|
|
rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family, const char* key,
|
|
|
|
size_t keylen, char** errptr) {
|
|
|
|
SaveError(errptr, txn_db->rep->Delete(options->rep, column_family->rep,
|
|
|
|
Slice(key, keylen)));
|
|
|
|
}
|
|
|
|
|
2017-05-17 05:57:05 +00:00
|
|
|
// Create an iterator inside a transaction
|
|
|
|
rocksdb_iterator_t* rocksdb_transaction_create_iterator(
|
|
|
|
rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options) {
|
|
|
|
rocksdb_iterator_t* result = new rocksdb_iterator_t;
|
|
|
|
result->rep = txn->rep->GetIterator(options->rep);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2017-09-13 18:56:19 +00:00
|
|
|
// Create an iterator inside a transaction with column family
|
|
|
|
rocksdb_iterator_t* rocksdb_transaction_create_iterator_cf(
|
|
|
|
rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family) {
|
|
|
|
rocksdb_iterator_t* result = new rocksdb_iterator_t;
|
|
|
|
result->rep = txn->rep->GetIterator(options->rep, column_family->rep);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2017-08-23 19:32:42 +00:00
|
|
|
// Create an iterator outside a transaction
|
|
|
|
rocksdb_iterator_t* rocksdb_transactiondb_create_iterator(
|
|
|
|
rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options) {
|
|
|
|
rocksdb_iterator_t* result = new rocksdb_iterator_t;
|
|
|
|
result->rep = txn_db->rep->NewIterator(options->rep);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2019-03-19 16:37:58 +00:00
|
|
|
rocksdb_iterator_t* rocksdb_transactiondb_create_iterator_cf(
|
|
|
|
rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family) {
|
|
|
|
rocksdb_iterator_t* result = new rocksdb_iterator_t;
|
|
|
|
result->rep = txn_db->rep->NewIterator(options->rep, column_family->rep);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2017-05-17 05:57:05 +00:00
|
|
|
void rocksdb_transactiondb_close(rocksdb_transactiondb_t* txn_db) {
|
|
|
|
delete txn_db->rep;
|
|
|
|
delete txn_db;
|
|
|
|
}
|
|
|
|
|
2022-05-25 16:38:10 +00:00
|
|
|
void rocksdb_transactiondb_flush_wal(rocksdb_transactiondb_t* txn_db,
|
|
|
|
unsigned char sync, char** errptr) {
|
|
|
|
SaveError(errptr, txn_db->rep->FlushWAL(sync));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transactiondb_flush(rocksdb_transactiondb_t* txn_db,
|
|
|
|
const rocksdb_flushoptions_t* options,
|
|
|
|
char** errptr) {
|
|
|
|
SaveError(errptr, txn_db->rep->Flush(options->rep));
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_transactiondb_flush_cf(
|
|
|
|
rocksdb_transactiondb_t* txn_db, const rocksdb_flushoptions_t* options,
|
|
|
|
rocksdb_column_family_handle_t* column_family, char** errptr) {
|
|
|
|
SaveError(errptr, txn_db->rep->Flush(options->rep, column_family->rep));
|
|
|
|
}
|
|
|
|
|
2017-05-17 05:57:05 +00:00
|
|
|
rocksdb_checkpoint_t* rocksdb_transactiondb_checkpoint_object_create(
|
|
|
|
rocksdb_transactiondb_t* txn_db, char** errptr) {
|
|
|
|
Checkpoint* checkpoint;
|
|
|
|
if (SaveError(errptr, Checkpoint::Create(txn_db->rep, &checkpoint))) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
rocksdb_checkpoint_t* result = new rocksdb_checkpoint_t;
|
|
|
|
result->rep = checkpoint;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2017-08-23 19:32:42 +00:00
|
|
|
rocksdb_optimistictransactiondb_t* rocksdb_optimistictransactiondb_open(
|
2017-09-13 18:56:19 +00:00
|
|
|
const rocksdb_options_t* options, const char* name, char** errptr) {
|
2017-08-23 19:32:42 +00:00
|
|
|
OptimisticTransactionDB* otxn_db;
|
|
|
|
if (SaveError(errptr, OptimisticTransactionDB::Open(
|
|
|
|
options->rep, std::string(name), &otxn_db))) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
rocksdb_optimistictransactiondb_t* result =
|
|
|
|
new rocksdb_optimistictransactiondb_t;
|
|
|
|
result->rep = otxn_db;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2017-09-13 18:56:19 +00:00
|
|
|
rocksdb_optimistictransactiondb_t*
|
|
|
|
rocksdb_optimistictransactiondb_open_column_families(
|
|
|
|
const rocksdb_options_t* db_options, const char* name,
|
2020-01-11 03:25:51 +00:00
|
|
|
int num_column_families, const char* const* column_family_names,
|
|
|
|
const rocksdb_options_t* const* column_family_options,
|
2017-09-13 18:56:19 +00:00
|
|
|
rocksdb_column_family_handle_t** column_family_handles, char** errptr) {
|
|
|
|
std::vector<ColumnFamilyDescriptor> column_families;
|
|
|
|
for (int i = 0; i < num_column_families; i++) {
|
|
|
|
column_families.push_back(ColumnFamilyDescriptor(
|
|
|
|
std::string(column_family_names[i]),
|
|
|
|
ColumnFamilyOptions(column_family_options[i]->rep)));
|
|
|
|
}
|
|
|
|
|
|
|
|
OptimisticTransactionDB* otxn_db;
|
|
|
|
std::vector<ColumnFamilyHandle*> handles;
|
|
|
|
if (SaveError(errptr, OptimisticTransactionDB::Open(
|
|
|
|
DBOptions(db_options->rep), std::string(name),
|
|
|
|
column_families, &handles, &otxn_db))) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (size_t i = 0; i < handles.size(); i++) {
|
|
|
|
rocksdb_column_family_handle_t* c_handle =
|
|
|
|
new rocksdb_column_family_handle_t;
|
|
|
|
c_handle->rep = handles[i];
|
|
|
|
column_family_handles[i] = c_handle;
|
|
|
|
}
|
|
|
|
rocksdb_optimistictransactiondb_t* result =
|
|
|
|
new rocksdb_optimistictransactiondb_t;
|
|
|
|
result->rep = otxn_db;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb_t* rocksdb_optimistictransactiondb_get_base_db(
|
|
|
|
rocksdb_optimistictransactiondb_t* otxn_db) {
|
|
|
|
DB* base_db = otxn_db->rep->GetBaseDB();
|
|
|
|
|
|
|
|
if (base_db != nullptr) {
|
|
|
|
rocksdb_t* result = new rocksdb_t;
|
|
|
|
result->rep = base_db;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rocksdb_optimistictransactiondb_close_base_db(rocksdb_t* base_db) {
|
|
|
|
delete base_db;
|
|
|
|
}
|
|
|
|
|
2017-08-23 19:32:42 +00:00
|
|
|
rocksdb_transaction_t* rocksdb_optimistictransaction_begin(
|
|
|
|
rocksdb_optimistictransactiondb_t* otxn_db,
|
|
|
|
const rocksdb_writeoptions_t* write_options,
|
|
|
|
const rocksdb_optimistictransaction_options_t* otxn_options,
|
|
|
|
rocksdb_transaction_t* old_txn) {
|
|
|
|
if (old_txn == nullptr) {
|
|
|
|
rocksdb_transaction_t* result = new rocksdb_transaction_t;
|
|
|
|
result->rep = otxn_db->rep->BeginTransaction(write_options->rep,
|
|
|
|
otxn_options->rep, nullptr);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
old_txn->rep = otxn_db->rep->BeginTransaction(
|
|
|
|
write_options->rep, otxn_options->rep, old_txn->rep);
|
|
|
|
return old_txn;
|
|
|
|
}
|
|
|
|
|
2021-08-07 02:05:32 +00:00
|
|
|
// Write batch into OptimisticTransactionDB
|
|
|
|
void rocksdb_optimistictransactiondb_write(
|
|
|
|
rocksdb_optimistictransactiondb_t* otxn_db,
|
|
|
|
const rocksdb_writeoptions_t* options, rocksdb_writebatch_t* batch,
|
|
|
|
char** errptr) {
|
|
|
|
SaveError(errptr, otxn_db->rep->Write(options->rep, &batch->rep));
|
|
|
|
}
|
|
|
|
|
2017-08-23 19:32:42 +00:00
|
|
|
void rocksdb_optimistictransactiondb_close(
|
|
|
|
rocksdb_optimistictransactiondb_t* otxn_db) {
|
|
|
|
delete otxn_db->rep;
|
|
|
|
delete otxn_db;
|
|
|
|
}

rocksdb_checkpoint_t* rocksdb_optimistictransactiondb_checkpoint_object_create(
    rocksdb_optimistictransactiondb_t* otxn_db, char** errptr) {
  Checkpoint* checkpoint;
  if (SaveError(errptr, Checkpoint::Create(otxn_db->rep, &checkpoint))) {
    return nullptr;
  }
  rocksdb_checkpoint_t* result = new rocksdb_checkpoint_t;
  result->rep = checkpoint;
  return result;
}
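
// Illustrative usage sketch, kept under #if 0 so it is not compiled: the
// checkpoint object created above can materialize an on-disk snapshot of the
// transaction DB. The otxn_db handle and the target directory are assumptions.
#if 0
  char* err = nullptr;
  rocksdb_checkpoint_t* cp =
      rocksdb_optimistictransactiondb_checkpoint_object_create(otxn_db, &err);
  if (cp != nullptr) {
    rocksdb_checkpoint_create(cp, "/tmp/otxn_checkpoint",
                              0 /* log_size_for_flush */, &err);
    rocksdb_checkpoint_object_destroy(cp);
  }
#endif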

void rocksdb_free(void* ptr) { free(ptr); }

rocksdb_pinnableslice_t* rocksdb_get_pinned(
    rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key,
    size_t keylen, char** errptr) {
  rocksdb_pinnableslice_t* v = new (rocksdb_pinnableslice_t);
  Status s = db->rep->Get(options->rep, db->rep->DefaultColumnFamily(),
                          Slice(key, keylen), &v->rep);
  if (!s.ok()) {
    delete (v);
    if (!s.IsNotFound()) {
      SaveError(errptr, s);
    }
    return nullptr;
  }
  return v;
}

rocksdb_pinnableslice_t* rocksdb_get_pinned_cf(
    rocksdb_t* db, const rocksdb_readoptions_t* options,
    rocksdb_column_family_handle_t* column_family, const char* key,
    size_t keylen, char** errptr) {
  rocksdb_pinnableslice_t* v = new (rocksdb_pinnableslice_t);
  Status s = db->rep->Get(options->rep, column_family->rep, Slice(key, keylen),
                          &v->rep);
  if (!s.ok()) {
    delete v;
    if (!s.IsNotFound()) {
      SaveError(errptr, s);
    }
    return nullptr;
  }
  return v;
}

void rocksdb_pinnableslice_destroy(rocksdb_pinnableslice_t* v) { delete v; }

const char* rocksdb_pinnableslice_value(const rocksdb_pinnableslice_t* v,
                                        size_t* vlen) {
  if (!v) {
    *vlen = 0;
    return nullptr;
  }

  *vlen = v->rep.size();
  return v->rep.data();
}
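
// Illustrative usage sketch, kept under #if 0 so it is not compiled: a pinned
// read returns the value without copying it out of the block cache or
// memtable; the data stays valid until the pinnable slice is destroyed. The db
// handle and key literal are assumptions.
#if 0
  char* err = nullptr;
  rocksdb_readoptions_t* ropts = rocksdb_readoptions_create();
  rocksdb_pinnableslice_t* ps = rocksdb_get_pinned(db, ropts, "key", 3, &err);
  if (ps != nullptr) {
    size_t vlen = 0;
    const char* vdata = rocksdb_pinnableslice_value(ps, &vlen);
    // ... use vdata[0..vlen) only while ps is alive ...
    rocksdb_pinnableslice_destroy(ps);
  }
  rocksdb_readoptions_destroy(ropts);
#endif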

// container to keep databases and caches in order to use
// ROCKSDB_NAMESPACE::MemoryUtil
struct rocksdb_memory_consumers_t {
  std::vector<rocksdb_t*> dbs;
  std::unordered_set<rocksdb_cache_t*> caches;
};

// initializes new container of memory consumers
rocksdb_memory_consumers_t* rocksdb_memory_consumers_create() {
  return new rocksdb_memory_consumers_t;
}

// adds a database to the container of memory consumers
void rocksdb_memory_consumers_add_db(rocksdb_memory_consumers_t* consumers,
                                     rocksdb_t* db) {
  consumers->dbs.push_back(db);
}

// adds a cache to the container of memory consumers
void rocksdb_memory_consumers_add_cache(rocksdb_memory_consumers_t* consumers,
                                        rocksdb_cache_t* cache) {
  consumers->caches.insert(cache);
}

// deletes the container of memory consumers (not the consumers themselves)
void rocksdb_memory_consumers_destroy(rocksdb_memory_consumers_t* consumers) {
  delete consumers;
}

// contains memory usage statistics provided by ROCKSDB_NAMESPACE::MemoryUtil
struct rocksdb_memory_usage_t {
  uint64_t mem_table_total;
  uint64_t mem_table_unflushed;
  uint64_t mem_table_readers_total;
  uint64_t cache_total;
};

// estimates amount of memory occupied by consumers (dbs and caches)
rocksdb_memory_usage_t* rocksdb_approximate_memory_usage_create(
    rocksdb_memory_consumers_t* consumers, char** errptr) {
  vector<DB*> dbs;
  for (auto db : consumers->dbs) {
    dbs.push_back(db->rep);
  }

  unordered_set<const Cache*> cache_set;
  for (auto cache : consumers->caches) {
    cache_set.insert(const_cast<const Cache*>(cache->rep.get()));
  }

  std::map<ROCKSDB_NAMESPACE::MemoryUtil::UsageType, uint64_t> usage_by_type;

  auto status = MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set,
                                                            &usage_by_type);
  if (SaveError(errptr, status)) {
    return nullptr;
  }

  auto result = new rocksdb_memory_usage_t;
  result->mem_table_total = usage_by_type[MemoryUtil::kMemTableTotal];
  result->mem_table_unflushed = usage_by_type[MemoryUtil::kMemTableUnFlushed];
  result->mem_table_readers_total =
      usage_by_type[MemoryUtil::kTableReadersTotal];
  result->cache_total = usage_by_type[MemoryUtil::kCacheTotal];
  return result;
}

uint64_t rocksdb_approximate_memory_usage_get_mem_table_total(
    rocksdb_memory_usage_t* memory_usage) {
  return memory_usage->mem_table_total;
}

uint64_t rocksdb_approximate_memory_usage_get_mem_table_unflushed(
    rocksdb_memory_usage_t* memory_usage) {
  return memory_usage->mem_table_unflushed;
}

uint64_t rocksdb_approximate_memory_usage_get_mem_table_readers_total(
    rocksdb_memory_usage_t* memory_usage) {
  return memory_usage->mem_table_readers_total;
}

uint64_t rocksdb_approximate_memory_usage_get_cache_total(
    rocksdb_memory_usage_t* memory_usage) {
  return memory_usage->cache_total;
}

void rocksdb_options_set_dump_malloc_stats(rocksdb_options_t* opt,
                                           unsigned char val) {
  opt->rep.dump_malloc_stats = val;
}

void rocksdb_options_set_memtable_whole_key_filtering(rocksdb_options_t* opt,
                                                      unsigned char val) {
  opt->rep.memtable_whole_key_filtering = val;
}

void rocksdb_options_set_avoid_unnecessary_blocking_io(rocksdb_options_t* opt,
                                                       unsigned char val) {
  opt->rep.avoid_unnecessary_blocking_io = val;
}

unsigned char rocksdb_options_get_avoid_unnecessary_blocking_io(
    rocksdb_options_t* opt) {
  return opt->rep.avoid_unnecessary_blocking_io;
}
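
// Illustrative usage sketch, kept under #if 0 so it is not compiled: these
// setters are applied to an options object before the database is opened; the
// chosen values are assumptions.
#if 0
  rocksdb_options_t* opts = rocksdb_options_create();
  rocksdb_options_set_dump_malloc_stats(opts, 1);
  rocksdb_options_set_memtable_whole_key_filtering(opts, 1);
  rocksdb_options_set_avoid_unnecessary_blocking_io(opts, 1);
  // rocksdb_options_get_avoid_unnecessary_blocking_io(opts) now returns 1.
#endif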

// deletes container with memory usage estimates
void rocksdb_approximate_memory_usage_destroy(rocksdb_memory_usage_t* usage) {
  delete usage;
}
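
// Illustrative usage sketch, kept under #if 0 so it is not compiled: register
// open databases and caches, ask MemoryUtil for an estimate, read the
// per-category figures, then free both containers. The db handle and the
// block_cache shared with its table options are assumptions.
#if 0
  char* err = nullptr;
  rocksdb_memory_consumers_t* consumers = rocksdb_memory_consumers_create();
  rocksdb_memory_consumers_add_db(consumers, db);
  rocksdb_memory_consumers_add_cache(consumers, block_cache);

  rocksdb_memory_usage_t* usage =
      rocksdb_approximate_memory_usage_create(consumers, &err);
  if (usage != nullptr) {
    uint64_t memtables =
        rocksdb_approximate_memory_usage_get_mem_table_total(usage);
    uint64_t readers =
        rocksdb_approximate_memory_usage_get_mem_table_readers_total(usage);
    uint64_t cache = rocksdb_approximate_memory_usage_get_cache_total(usage);
    rocksdb_approximate_memory_usage_destroy(usage);
  }
  rocksdb_memory_consumers_destroy(consumers);
#endif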

void rocksdb_cancel_all_background_work(rocksdb_t* db, unsigned char wait) {
  CancelAllBackgroundWork(db->rep, wait);
}

void rocksdb_disable_manual_compaction(rocksdb_t* db) {
  db->rep->DisableManualCompaction();
}

void rocksdb_enable_manual_compaction(rocksdb_t* db) {
  db->rep->EnableManualCompaction();
}
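
// Illustrative usage sketch, kept under #if 0 so it is not compiled: a common
// shutdown sequence that stops manual compactions, cancels remaining
// background work (waiting for in-flight jobs), and closes the handle. The db
// handle is an assumption.
#if 0
  rocksdb_disable_manual_compaction(db);
  rocksdb_cancel_all_background_work(db, 1 /* wait */);
  rocksdb_close(db);
#endif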

}  // end extern "C"

#endif  // !ROCKSDB_LITE