Remove RocksDB LITE (#11147)

Summary:
We haven't been actively maintaining RocksDB LITE recently, and the size of the LITE binary must have grown significantly. We are removing support for it.

Most of the changes were made with the following command:

unifdef -m -UROCKSDB_LITE `git grep -l ROCKSDB_LITE | egrep '[.](cc|h)'`

by Peter Dillinger. Other changes were applied manually to build scripts, CircleCI manifests, places where ROCKSDB_LITE is used inside an expression, and the file db_stress_test_base.cc.
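
For illustration, here is roughly what that unifdef invocation does to a typical guard (a minimal sketch with made-up function names; the real hunks below all follow this shape):

    // Before this commit, sources contained guards like this:
    bool FeatureCompiledInBefore() {
    #ifndef ROCKSDB_LITE
      return true;   // full (non-LITE) build: the feature is compiled in
    #else
      return false;  // LITE build: the feature is compiled out
    #endif  // ROCKSDB_LITE
    }

    // What `unifdef -m -UROCKSDB_LITE` leaves behind (-U treats ROCKSDB_LITE as
    // undefined, -m rewrites the file in place): only the non-LITE branch remains.
    bool FeatureCompiledInAfter() {
      return true;
    }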

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11147

Test Plan: See CI

Reviewed By: pdillinger

Differential Revision: D42796341

fbshipit-source-id: 4920e15fc2060c2cd2221330a6d0e5e65d4b7fe2
sdong authored on 2023-01-27 13:14:19 -08:00; committed by Facebook GitHub Bot
parent 6943ff6e50
commit 4720ba4391
478 changed files with 36 additions and 3608 deletions


@ -302,27 +302,6 @@ jobs:
- run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
build-linux-lite:
executor: linux-docker
resource_class: large
steps:
- pre-steps
- run: LITE=1 make V=1 J=8 -j8 check
- post-steps
build-linux-lite-release:
executor: linux-docker
resource_class: large
steps:
- checkout # check out the code in the project directory
- run: LITE=1 make V=1 -j8 release
- run: ./db_stress --version # ensure with gflags
- run: make clean
- run: apt-get remove -y libgflags-dev
- run: LITE=1 make V=1 -j8 release
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
- post-steps
build-linux-clang-no_test_run:
executor: linux-docker
resource_class: xlarge
@ -821,7 +800,6 @@ workflows:
- build-linux-cmake-with-folly-coroutines
- build-linux-cmake-with-benchmark
- build-linux-encrypted_env-no_compression
- build-linux-lite
jobs-linux-run-tests-san:
jobs:
- build-linux-clang10-asan
@ -832,7 +810,6 @@ workflows:
jobs:
- build-linux-release
- build-linux-release-rtti
- build-linux-lite-release
- build-examples
- build-fuzzers
- build-linux-clang-no_test_run


@ -496,12 +496,6 @@ if(CMAKE_COMPILER_IS_GNUCXX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-builtin-memcmp")
endif()
option(ROCKSDB_LITE "Build RocksDBLite version" OFF)
if(ROCKSDB_LITE)
add_definitions(-DROCKSDB_LITE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions -Os")
endif()
if(CMAKE_SYSTEM_NAME MATCHES "Cygwin")
add_definitions(-fno-builtin-memcmp -DCYGWIN)
elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin")


@ -8,6 +8,7 @@
* Fixed a data race on `ColumnFamilyData::flush_reason` caused by concurrent flushes.
### Feature Removal
* Remove RocksDB Lite.
* The feature block_cache_compressed is removed. Statistics related to it are removed too.
* Remove deprecated Env::LoadEnv(). Use Env::CreateFromString() instead.
* Remove deprecated FileSystem::Load(). Use FileSystem::CreateFromString() instead.


@ -178,7 +178,7 @@ to build a portable binary, add `PORTABLE=1` before your make commands, like thi
gmake rocksdbjava
* **iOS**:
* Run: `TARGET_OS=IOS make static_lib`. When building the project which uses rocksdb iOS library, make sure to define two important pre-processing macros: `ROCKSDB_LITE` and `IOS_CROSS_COMPILE`.
* Run: `TARGET_OS=IOS make static_lib`. When building a project that uses the rocksdb iOS library, make sure to define the important pre-processing macro `IOS_CROSS_COMPILE`.
* **Windows** (Visual Studio 2017 to up):
* Read and follow the instructions at CMakeLists.txt


@ -83,27 +83,9 @@ endif
$(info $$DEBUG_LEVEL is ${DEBUG_LEVEL})
# Lite build flag.
LITE ?= 0
ifeq ($(LITE), 0)
ifneq ($(filter -DROCKSDB_LITE,$(OPT)),)
# Be backward compatible and support older format where OPT=-DROCKSDB_LITE is
# specified instead of LITE=1 on the command line.
LITE=1
endif
else ifeq ($(LITE), 1)
ifeq ($(filter -DROCKSDB_LITE,$(OPT)),)
OPT += -DROCKSDB_LITE
endif
endif
# Figure out optimize level.
ifneq ($(DEBUG_LEVEL), 2)
ifeq ($(LITE), 0)
OPTIMIZE_LEVEL ?= -O2
else
OPTIMIZE_LEVEL ?= -Os
endif
endif
# `OPTIMIZE_LEVEL` is empty when the user does not set it and `DEBUG_LEVEL=2`.
# In that case, the compiler default (`-O0` for gcc and clang) will be used.
@ -337,13 +319,6 @@ endif
ifeq ($(PLATFORM), OS_SOLARIS)
PLATFORM_CXXFLAGS += -D _GLIBCXX_USE_C99
endif
ifneq ($(filter -DROCKSDB_LITE,$(OPT)),)
# found
CFLAGS += -fno-exceptions
CXXFLAGS += -fno-exceptions
# LUA is not supported under ROCKSDB_LITE
LUA_PATH =
endif
ifeq ($(LIB_MODE),shared)
# So that binaries are executable from build location, in addition to install location
@ -1082,13 +1057,11 @@ check: all
rm -rf $(TEST_TMPDIR)
ifneq ($(PLATFORM), OS_AIX)
$(PYTHON) tools/check_all_python.py
ifeq ($(filter -DROCKSDB_LITE,$(OPT)),)
ifndef ASSERT_STATUS_CHECKED # not yet working with these tests
$(PYTHON) tools/ldb_test.py
sh tools/rocksdb_dump_test.sh
endif
endif
endif
ifndef SKIP_FORMAT_BUCK_CHECKS
$(MAKE) check-format
$(MAKE) check-buck-targets
@ -2489,18 +2462,6 @@ build_size:
$(REPORT_BUILD_STATISTIC) rocksdb.build_size.shared_lib $$(stat --printf="%s" `readlink -f librocksdb.so`)
strip `readlink -f librocksdb.so`
$(REPORT_BUILD_STATISTIC) rocksdb.build_size.shared_lib_stripped $$(stat --printf="%s" `readlink -f librocksdb.so`)
# === lite build, static ===
$(MAKE) clean
$(MAKE) LITE=1 static_lib
$(REPORT_BUILD_STATISTIC) rocksdb.build_size.static_lib_lite $$(stat --printf="%s" librocksdb.a)
strip librocksdb.a
$(REPORT_BUILD_STATISTIC) rocksdb.build_size.static_lib_lite_stripped $$(stat --printf="%s" librocksdb.a)
# === lite build, shared ===
$(MAKE) clean
$(MAKE) LITE=1 shared_lib
$(REPORT_BUILD_STATISTIC) rocksdb.build_size.shared_lib_lite $$(stat --printf="%s" `readlink -f librocksdb.so`)
strip `readlink -f librocksdb.so`
$(REPORT_BUILD_STATISTIC) rocksdb.build_size.shared_lib_lite_stripped $$(stat --printf="%s" `readlink -f librocksdb.so`)
# ---------------------------------------------------------------------------
# Platform-specific compilation


@ -1,21 +0,0 @@
# RocksDBLite
RocksDBLite is a project focused on mobile use cases, which don't need many of the fancy features we've built for server workloads and are very sensitive to binary size. For that reason, we added a compile flag, ROCKSDB_LITE, that compiles out a lot of the nonessential code and keeps the binary lean.
Some examples of the features disabled by ROCKSDB_LITE:
* No compiled-in support for the LDB tool
* No backup engine
* No support for replication (which we provide in the form of TransactionalIterator)
* No advanced monitoring tools
* No special-purpose memtables that are highly optimized for specific use cases
* No Transactions
When adding a big new feature to RocksDB, please add a ROCKSDB_LITE compile guard if:
* Nobody from mobile really needs your feature, or
* Your feature adds a lot of weight to the binary.
Don't add a ROCKSDB_LITE compile guard if:
* It would introduce a lot of code complexity. Compile guards make code harder to read. It's a trade-off.
* Your feature is not adding a lot of weight.
If unsure, ask. :)
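
For context, a minimal sketch (made-up function name, not a real RocksDB API) of the compile-guard pattern described above: in the LITE build the feature body was compiled out and the call stubbed to report "not supported", the same shape as the ColumnFamilyHandleImpl::GetDescriptor hunk further below. This commit removes such guards and keeps only the non-LITE branch.

    #include <string>

    // Hedged illustration of a ROCKSDB_LITE compile guard around a heavyweight feature.
    std::string RunBackupFeature() {
    #ifndef ROCKSDB_LITE
      return "OK";                          // full build: the real implementation runs
    #else
      return "Not supported in LITE mode";  // LITE build: the feature is stubbed out
    #endif  // ROCKSDB_LITE
    }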


@ -154,7 +154,7 @@ case "$TARGET_OS" in
;;
IOS)
PLATFORM=IOS
COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX -DIOS_CROSS_COMPILE -DROCKSDB_LITE"
COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX -DIOS_CROSS_COMPILE "
PLATFORM_SHARED_EXT=dylib
PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name "
CROSS_COMPILE=true

cache/cache.cc

@ -16,7 +16,6 @@
#include "util/string_util.h"
namespace ROCKSDB_NAMESPACE {
#ifndef ROCKSDB_LITE
static std::unordered_map<std::string, OptionTypeInfo>
lru_cache_options_type_info = {
{"capacity",
@ -64,7 +63,6 @@ static std::unordered_map<std::string, OptionTypeInfo>
OptionType::kBoolean, OptionVerificationType::kNormal,
OptionTypeFlags::kMutable}},
};
#endif // ROCKSDB_LITE
Status SecondaryCache::CreateFromString(
const ConfigOptions& config_options, const std::string& value,
@ -75,7 +73,6 @@ Status SecondaryCache::CreateFromString(
Status status;
std::shared_ptr<SecondaryCache> sec_cache;
#ifndef ROCKSDB_LITE
CompressedSecondaryCacheOptions sec_cache_opts;
status = OptionTypeInfo::ParseStruct(config_options, "",
&comp_sec_cache_options_type_info, "",
@ -84,11 +81,6 @@ Status SecondaryCache::CreateFromString(
sec_cache = NewCompressedSecondaryCache(sec_cache_opts);
}
#else
(void)config_options;
status = Status::NotSupported(
"Cannot load compressed secondary cache in LITE mode ", args);
#endif //! ROCKSDB_LITE
if (status.ok()) {
result->swap(sec_cache);
@ -108,7 +100,6 @@ Status Cache::CreateFromString(const ConfigOptions& config_options,
if (value.find('=') == std::string::npos) {
cache = NewLRUCache(ParseSizeT(value));
} else {
#ifndef ROCKSDB_LITE
LRUCacheOptions cache_opts;
status = OptionTypeInfo::ParseStruct(config_options, "",
&lru_cache_options_type_info, "",
@ -116,10 +107,6 @@ Status Cache::CreateFromString(const ConfigOptions& config_options,
if (status.ok()) {
cache = NewLRUCache(cache_opts);
}
#else
(void)config_options;
status = Status::NotSupported("Cannot load cache in LITE mode ", value);
#endif //! ROCKSDB_LITE
}
if (status.ok()) {
result->swap(cache);


@ -77,11 +77,9 @@ DEFINE_bool(lean, false,
"If true, no additional computation is performed besides cache "
"operations.");
#ifndef ROCKSDB_LITE
DEFINE_string(secondary_cache_uri, "",
"Full URI for creating a custom secondary cache object");
static class std::shared_ptr<ROCKSDB_NAMESPACE::SecondaryCache> secondary_cache;
#endif // ROCKSDB_LITE
DEFINE_string(cache_type, "lru_cache", "Type of block cache.");
@ -305,7 +303,6 @@ class CacheBench {
LRUCacheOptions opts(FLAGS_cache_size, FLAGS_num_shard_bits,
false /* strict_capacity_limit */,
0.5 /* high_pri_pool_ratio */);
#ifndef ROCKSDB_LITE
if (!FLAGS_secondary_cache_uri.empty()) {
Status s = SecondaryCache::CreateFromString(
ConfigOptions(), FLAGS_secondary_cache_uri, &secondary_cache);
@ -318,7 +315,6 @@ class CacheBench {
}
opts.secondary_cache = secondary_cache;
}
#endif // ROCKSDB_LITE
cache_ = NewLRUCache(opts);
} else {


@ -817,7 +817,6 @@ class CompressedSecondaryCacheTestWithCompressionParam
bool sec_cache_is_compressed_;
};
#ifndef ROCKSDB_LITE
TEST_P(CompressedSecondaryCacheTestWithCompressionParam, BasicTestFromString) {
std::shared_ptr<SecondaryCache> sec_cache{nullptr};
@ -882,7 +881,6 @@ TEST_P(CompressedSecondaryCacheTestWithCompressionParam,
BasicTestHelper(sec_cache, sec_cache_is_compressed_);
}
#endif // ROCKSDB_LITE
TEST_P(CompressedSecondaryCacheTestWithCompressionParam, FailsTest) {
FailsTest(sec_cache_is_compressed_);


@ -1975,7 +1975,6 @@ class LRUCacheWithStat : public LRUCache {
uint32_t lookup_count_;
};
#ifndef ROCKSDB_LITE
TEST_F(DBSecondaryCacheTest, LRUCacheDumpLoadBasic) {
LRUCacheOptions cache_opts(1024 * 1024 /* capacity */, 0 /* num_shard_bits */,
@ -2590,7 +2589,6 @@ TEST_F(DBSecondaryCacheTest, TestSecondaryCacheOptionTwoDB) {
ASSERT_OK(DestroyDB(dbname2, options));
}
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE


@ -23,32 +23,19 @@ class BlobFileCompletionCallback {
const std::vector<std::shared_ptr<EventListener>>& listeners,
const std::string& dbname)
: event_logger_(event_logger), listeners_(listeners), dbname_(dbname) {
#ifndef ROCKSDB_LITE
sst_file_manager_ = sst_file_manager;
mutex_ = mutex;
error_handler_ = error_handler;
#else
(void)sst_file_manager;
(void)mutex;
(void)error_handler;
#endif // ROCKSDB_LITE
}
void OnBlobFileCreationStarted(const std::string& file_name,
const std::string& column_family_name,
int job_id,
BlobFileCreationReason creation_reason) {
#ifndef ROCKSDB_LITE
// Notify the listeners.
EventHelpers::NotifyBlobFileCreationStarted(listeners_, dbname_,
column_family_name, file_name,
job_id, creation_reason);
#else
(void)file_name;
(void)column_family_name;
(void)job_id;
(void)creation_reason;
#endif
}
Status OnBlobFileCompleted(const std::string& file_name,
@ -61,7 +48,6 @@ class BlobFileCompletionCallback {
uint64_t blob_count, uint64_t blob_bytes) {
Status s;
#ifndef ROCKSDB_LITE
auto sfm = static_cast<SstFileManagerImpl*>(sst_file_manager_);
if (sfm) {
// Report new blob files to SstFileManagerImpl
@ -74,7 +60,6 @@ class BlobFileCompletionCallback {
error_handler_->SetBGError(s, BackgroundErrorReason::kFlush);
}
}
#endif // !ROCKSDB_LITE
// Notify the listeners.
EventHelpers::LogAndNotifyBlobFileCreationFinished(
@ -89,11 +74,9 @@ class BlobFileCompletionCallback {
}
private:
#ifndef ROCKSDB_LITE
SstFileManager* sst_file_manager_;
InstrumentedMutex* mutex_;
ErrorHandler* error_handler_;
#endif // ROCKSDB_LITE
EventLogger* event_logger_;
std::vector<std::shared_ptr<EventListener>> listeners_;
std::string dbname_;


@ -30,7 +30,6 @@ BlobSource::BlobSource(const ImmutableOptions* immutable_options,
blob_file_cache_(blob_file_cache),
blob_cache_(immutable_options->blob_cache),
lowest_used_cache_tier_(immutable_options->lowest_used_cache_tier) {
#ifndef ROCKSDB_LITE
auto bbto =
immutable_options->table_factory->GetOptions<BlockBasedTableOptions>();
if (bbto &&
@ -39,7 +38,6 @@ BlobSource::BlobSource(const ImmutableOptions* immutable_options,
blob_cache_ = SharedCacheInterface{std::make_shared<ChargedCache>(
immutable_options->blob_cache, bbto->block_cache)};
}
#endif // ROCKSDB_LITE
}
BlobSource::~BlobSource() = default;


@ -1391,7 +1391,6 @@ class BlobSourceCacheReservationTest : public DBTestBase {
std::string db_session_id_;
};
#ifndef ROCKSDB_LITE
TEST_F(BlobSourceCacheReservationTest, SimpleCacheReservation) {
options_.cf_paths.emplace_back(
test::PerThreadDBPath(
@ -1606,7 +1605,6 @@ TEST_F(BlobSourceCacheReservationTest, IncreaseCacheReservationOnFullCache) {
}
}
}
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE


@ -584,7 +584,6 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromCache) {
}
}
#ifndef ROCKSDB_LITE
TEST_F(DBBlobBasicTest, MultiGetWithDirectIO) {
Options options = GetDefaultOptions();
@ -773,7 +772,6 @@ TEST_F(DBBlobBasicTest, MultiGetWithDirectIO) {
ASSERT_EQ(values[2], second_blob);
}
}
#endif // !ROCKSDB_LITE
TEST_F(DBBlobBasicTest, MultiGetBlobsFromMultipleFiles) {
Options options = GetDefaultOptions();
@ -1062,7 +1060,6 @@ TEST_F(DBBlobBasicTest, GetBlob_IndexWithInvalidFileNumber) {
.IsCorruption());
}
#ifndef ROCKSDB_LITE
TEST_F(DBBlobBasicTest, GenerateIOTracing) {
Options options = GetDefaultOptions();
options.enable_blob_files = true;
@ -1117,7 +1114,6 @@ TEST_F(DBBlobBasicTest, GenerateIOTracing) {
ASSERT_GT(blob_files_op_count, 2);
}
}
#endif // !ROCKSDB_LITE
TEST_F(DBBlobBasicTest, BestEffortsRecovery_MissingNewestBlobFile) {
Options options = GetDefaultOptions();
@ -1219,7 +1215,6 @@ TEST_F(DBBlobBasicTest, MultiGetMergeBlobWithPut) {
ASSERT_EQ(values[2], "v2_0");
}
#ifndef ROCKSDB_LITE
TEST_F(DBBlobBasicTest, Properties) {
Options options = GetDefaultOptions();
options.enable_blob_files = true;
@ -1382,7 +1377,6 @@ TEST_F(DBBlobBasicTest, PropertiesMultiVersion) {
BlobLogRecord::CalculateAdjustmentForRecordHeader(key_size) +
blob_size + BlobLogFooter::kSize));
}
#endif // !ROCKSDB_LITE
class DBBlobBasicIOErrorTest : public DBBlobBasicTest,
public testing::WithParamInterface<std::string> {
@ -1632,7 +1626,6 @@ TEST_F(DBBlobBasicTest, WarmCacheWithBlobsDuringFlush) {
options.statistics->getTickerCount(BLOB_DB_CACHE_ADD));
}
#ifndef ROCKSDB_LITE
TEST_F(DBBlobBasicTest, DynamicallyWarmCacheDuringFlush) {
Options options = GetDefaultOptions();
@ -1700,7 +1693,6 @@ TEST_F(DBBlobBasicTest, DynamicallyWarmCacheDuringFlush) {
/*end=*/nullptr));
EXPECT_EQ(0, options.statistics->getTickerCount(BLOB_DB_CACHE_ADD));
}
#endif // !ROCKSDB_LITE
TEST_F(DBBlobBasicTest, WarmCacheWithBlobsSecondary) {
CompressedSecondaryCacheOptions secondary_cache_opts;


@ -16,7 +16,6 @@ class DBBlobCompactionTest : public DBTestBase {
explicit DBBlobCompactionTest()
: DBTestBase("db_blob_compaction_test", /*env_do_fsync=*/false) {}
#ifndef ROCKSDB_LITE
const std::vector<InternalStats::CompactionStats>& GetCompactionStats() {
VersionSet* const versions = dbfull()->GetVersionSet();
assert(versions);
@ -30,7 +29,6 @@ class DBBlobCompactionTest : public DBTestBase {
return internal_stats->TEST_GetCompactionStats();
}
#endif // ROCKSDB_LITE
};
namespace {
@ -250,7 +248,6 @@ TEST_F(DBBlobCompactionTest, FilterByKeyLength) {
ASSERT_OK(db_->Get(ReadOptions(), long_key, &value));
ASSERT_EQ("value", value);
#ifndef ROCKSDB_LITE
const auto& compaction_stats = GetCompactionStats();
ASSERT_GE(compaction_stats.size(), 2);
@ -258,7 +255,6 @@ TEST_F(DBBlobCompactionTest, FilterByKeyLength) {
// this involves neither reading nor writing blobs
ASSERT_EQ(compaction_stats[1].bytes_read_blob, 0);
ASSERT_EQ(compaction_stats[1].bytes_written_blob, 0);
#endif // ROCKSDB_LITE
Close();
}
@ -299,7 +295,6 @@ TEST_F(DBBlobCompactionTest, FilterByValueLength) {
ASSERT_EQ(long_value, value);
}
#ifndef ROCKSDB_LITE
const auto& compaction_stats = GetCompactionStats();
ASSERT_GE(compaction_stats.size(), 2);
@ -307,12 +302,10 @@ TEST_F(DBBlobCompactionTest, FilterByValueLength) {
// this involves reading but not writing blobs
ASSERT_GT(compaction_stats[1].bytes_read_blob, 0);
ASSERT_EQ(compaction_stats[1].bytes_written_blob, 0);
#endif // ROCKSDB_LITE
Close();
}
#ifndef ROCKSDB_LITE
TEST_F(DBBlobCompactionTest, BlobCompactWithStartingLevel) {
Options options = GetDefaultOptions();
@ -388,7 +381,6 @@ TEST_F(DBBlobCompactionTest, BlobCompactWithStartingLevel) {
Close();
}
#endif
TEST_F(DBBlobCompactionTest, BlindWriteFilter) {
Options options = GetDefaultOptions();
@ -413,7 +405,6 @@ TEST_F(DBBlobCompactionTest, BlindWriteFilter) {
ASSERT_EQ(new_blob_value, Get(key));
}
#ifndef ROCKSDB_LITE
const auto& compaction_stats = GetCompactionStats();
ASSERT_GE(compaction_stats.size(), 2);
@ -421,7 +412,6 @@ TEST_F(DBBlobCompactionTest, BlindWriteFilter) {
// this involves writing but not reading blobs
ASSERT_EQ(compaction_stats[1].bytes_read_blob, 0);
ASSERT_GT(compaction_stats[1].bytes_written_blob, 0);
#endif // ROCKSDB_LITE
Close();
}
@ -540,7 +530,6 @@ TEST_F(DBBlobCompactionTest, CompactionFilter) {
ASSERT_EQ(kv.second + std::string(padding), Get(kv.first));
}
#ifndef ROCKSDB_LITE
const auto& compaction_stats = GetCompactionStats();
ASSERT_GE(compaction_stats.size(), 2);
@ -548,7 +537,6 @@ TEST_F(DBBlobCompactionTest, CompactionFilter) {
// this involves reading and writing blobs
ASSERT_GT(compaction_stats[1].bytes_read_blob, 0);
ASSERT_GT(compaction_stats[1].bytes_written_blob, 0);
#endif // ROCKSDB_LITE
Close();
}
@ -606,7 +594,6 @@ TEST_F(DBBlobCompactionTest, CompactionFilterReadBlobAndKeep) {
/*end=*/nullptr));
ASSERT_EQ(blob_files, GetBlobFileNumbers());
#ifndef ROCKSDB_LITE
const auto& compaction_stats = GetCompactionStats();
ASSERT_GE(compaction_stats.size(), 2);
@ -614,7 +601,6 @@ TEST_F(DBBlobCompactionTest, CompactionFilterReadBlobAndKeep) {
// this involves reading but not writing blobs
ASSERT_GT(compaction_stats[1].bytes_read_blob, 0);
ASSERT_EQ(compaction_stats[1].bytes_written_blob, 0);
#endif // ROCKSDB_LITE
Close();
}


@ -34,7 +34,6 @@ class DBBlobCorruptionTest : public DBTestBase {
}
};
#ifndef ROCKSDB_LITE
TEST_F(DBBlobCorruptionTest, VerifyWholeBlobFileChecksum) {
Options options = GetDefaultOptions();
options.enable_blob_files = true;
@ -71,7 +70,6 @@ TEST_F(DBBlobCorruptionTest, VerifyWholeBlobFileChecksum) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
}
#endif // !ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE
int main(int argc, char** argv) {


@ -131,9 +131,7 @@ class DBBlobIndexTest : public DBTestBase {
ASSERT_OK(Flush());
ASSERT_OK(
dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,1", FilesPerLevel());
#endif // !ROCKSDB_LITE
break;
}
}
@ -459,7 +457,6 @@ TEST_F(DBBlobIndexTest, Iterate) {
verify(15, Status::kOk, get_value(16, 0), get_value(14, 0),
create_blob_iterator, check_is_blob(false));
#ifndef ROCKSDB_LITE
// Iterator with blob support and using seek.
ASSERT_OK(dbfull()->SetOptions(
cfh(), {{"max_sequential_skip_in_iterations", "0"}}));
@ -484,7 +481,6 @@ TEST_F(DBBlobIndexTest, Iterate) {
create_blob_iterator, check_is_blob(false));
verify(15, Status::kOk, get_value(16, 0), get_value(14, 0),
create_blob_iterator, check_is_blob(false));
#endif // !ROCKSDB_LITE
for (auto* snapshot : snapshots) {
dbfull()->ReleaseSnapshot(snapshot);
@ -584,12 +580,10 @@ TEST_F(DBBlobIndexTest, IntegratedBlobIterate) {
Status expected_status;
verify(1, expected_status, expected_value);
#ifndef ROCKSDB_LITE
// Test DBIter::FindValueForCurrentKeyUsingSeek flow.
ASSERT_OK(dbfull()->SetOptions(cfh(),
{{"max_sequential_skip_in_iterations", "0"}}));
verify(1, expected_status, expected_value);
#endif // !ROCKSDB_LITE
}
} // namespace ROCKSDB_NAMESPACE


@ -107,11 +107,9 @@ Status BuildTable(
std::vector<std::string> blob_file_paths;
std::string file_checksum = kUnknownFileChecksum;
std::string file_checksum_func_name = kUnknownFileChecksumFuncName;
#ifndef ROCKSDB_LITE
EventHelpers::NotifyTableFileCreationStarted(ioptions.listeners, dbname,
tboptions.column_family_name,
fname, job_id, tboptions.reason);
#endif // !ROCKSDB_LITE
Env* env = db_options.env;
assert(env);
FileSystem* fs = db_options.fs.get();


@ -7,7 +7,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef ROCKSDB_LITE
#include "rocksdb/c.h"
@ -6399,4 +6398,3 @@ void rocksdb_enable_manual_compaction(rocksdb_t* db) {
} // end extern "C"
#endif // !ROCKSDB_LITE


@ -5,8 +5,6 @@
#include <stdio.h>
#ifndef ROCKSDB_LITE // Lite does not support C API
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>
@ -3468,12 +3466,3 @@ int main(int argc, char** argv) {
fprintf(stderr, "PASS\n");
return 0;
}
#else
int main(void) {
fprintf(stderr, "SKIPPED\n");
return 0;
}
#endif // !ROCKSDB_LITE


@ -53,11 +53,9 @@ ColumnFamilyHandleImpl::ColumnFamilyHandleImpl(
ColumnFamilyHandleImpl::~ColumnFamilyHandleImpl() {
if (cfd_ != nullptr) {
#ifndef ROCKSDB_LITE
for (auto& listener : cfd_->ioptions()->listeners) {
listener->OnColumnFamilyHandleDeletionStarted(this);
}
#endif // ROCKSDB_LITE
// Job id == 0 means that this is not our background process, but rather
// user thread
// Need to hold some shared pointers owned by the initial_cf_options
@ -88,15 +86,10 @@ const std::string& ColumnFamilyHandleImpl::GetName() const {
}
Status ColumnFamilyHandleImpl::GetDescriptor(ColumnFamilyDescriptor* desc) {
#ifndef ROCKSDB_LITE
// accessing mutable cf-options requires db mutex.
InstrumentedMutexLock l(mutex_);
*desc = ColumnFamilyDescriptor(cfd()->GetName(), cfd()->GetLatestCFOptions());
return Status::OK();
#else
(void)desc;
return Status::NotSupported();
#endif // !ROCKSDB_LITE
}
const Comparator* ColumnFamilyHandleImpl::GetComparator() const {
@ -347,7 +340,6 @@ ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
result.hard_pending_compaction_bytes_limit;
}
#ifndef ROCKSDB_LITE
// When the DB is stopped, it's possible that there are some .trash files that
// were not deleted yet, when we open the DB we will find these .trash files
// and schedule them to be deleted (or delete immediately if SstFileManager
@ -359,7 +351,6 @@ ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
result.cf_paths[i].path)
.PermitUncheckedError();
}
#endif
if (result.cf_paths.empty()) {
result.cf_paths = db_options.db_paths;
@ -602,7 +593,6 @@ ColumnFamilyData::ColumnFamilyData(
if (ioptions_.compaction_style == kCompactionStyleLevel) {
compaction_picker_.reset(
new LevelCompactionPicker(ioptions_, &internal_comparator_));
#ifndef ROCKSDB_LITE
} else if (ioptions_.compaction_style == kCompactionStyleUniversal) {
compaction_picker_.reset(
new UniversalCompactionPicker(ioptions_, &internal_comparator_));
@ -616,7 +606,6 @@ ColumnFamilyData::ColumnFamilyData(
"Column family %s does not use any background compaction. "
"Compactions can only be done via CompactFiles\n",
GetName().c_str());
#endif // !ROCKSDB_LITE
} else {
ROCKS_LOG_ERROR(ioptions_.logger,
"Unable to recognize the specified compaction style %d. "
@ -1440,7 +1429,6 @@ Status ColumnFamilyData::ValidateOptions(
return s;
}
#ifndef ROCKSDB_LITE
Status ColumnFamilyData::SetOptions(
const DBOptions& db_opts,
const std::unordered_map<std::string, std::string>& options_map) {
@ -1459,7 +1447,6 @@ Status ColumnFamilyData::SetOptions(
}
return s;
}
#endif // ROCKSDB_LITE
// REQUIRES: DB mutex held
Env::WriteLifeTimeHint ColumnFamilyData::CalculateSSTWriteHint(int level) {


@ -335,12 +335,10 @@ class ColumnFamilyData {
// Validate CF options against DB options
static Status ValidateOptions(const DBOptions& db_options,
const ColumnFamilyOptions& cf_options);
#ifndef ROCKSDB_LITE
// REQUIRES: DB mutex held
Status SetOptions(
const DBOptions& db_options,
const std::unordered_map<std::string, std::string>& options_map);
#endif // ROCKSDB_LITE
InternalStats* internal_stats() { return internal_stats_.get(); }


@ -71,11 +71,7 @@ class ColumnFamilyTestBase : public testing::Test {
for (auto h : handles_) {
ColumnFamilyDescriptor cfdescriptor;
Status s = h->GetDescriptor(&cfdescriptor);
#ifdef ROCKSDB_LITE
EXPECT_TRUE(s.IsNotSupported());
#else
EXPECT_OK(s);
#endif // ROCKSDB_LITE
column_families.push_back(cfdescriptor);
}
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
@ -197,12 +193,10 @@ class ColumnFamilyTestBase : public testing::Test {
&db_);
}
#ifndef ROCKSDB_LITE // ReadOnlyDB is not supported
void AssertOpenReadOnly(std::vector<std::string> cf,
std::vector<ColumnFamilyOptions> options = {}) {
ASSERT_OK(OpenReadOnly(cf, options));
}
#endif // !ROCKSDB_LITE
void Open(std::vector<std::string> cf,
std::vector<ColumnFamilyOptions> options = {}) {
@ -224,27 +218,16 @@ class ColumnFamilyTestBase : public testing::Test {
}
bool IsDbWriteStopped() {
#ifndef ROCKSDB_LITE
uint64_t v;
EXPECT_TRUE(dbfull()->GetIntProperty("rocksdb.is-write-stopped", &v));
return (v == 1);
#else
return dbfull()->TEST_write_controler().IsStopped();
#endif // !ROCKSDB_LITE
}
uint64_t GetDbDelayedWriteRate() {
#ifndef ROCKSDB_LITE
uint64_t v;
EXPECT_TRUE(
dbfull()->GetIntProperty("rocksdb.actual-delayed-write-rate", &v));
return v;
#else
if (!dbfull()->TEST_write_controler().NeedsDelay()) {
return 0;
}
return dbfull()->TEST_write_controler().delayed_write_rate();
#endif // !ROCKSDB_LITE
}
void Destroy(const std::vector<ColumnFamilyDescriptor>& column_families =
@ -267,7 +250,6 @@ class ColumnFamilyTestBase : public testing::Test {
db_->CreateColumnFamily(current_cf_opt, cfs[i], &handles_[cfi]));
names_[cfi] = cfs[i];
#ifndef ROCKSDB_LITE // RocksDBLite does not support GetDescriptor
// Verify the CF options of the returned CF handle.
ColumnFamilyDescriptor desc;
ASSERT_OK(handles_[cfi]->GetDescriptor(&desc));
@ -276,7 +258,6 @@ class ColumnFamilyTestBase : public testing::Test {
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
ConfigOptions(), desc.options,
SanitizeOptions(dbfull()->immutable_db_options(), current_cf_opt)));
#endif // !ROCKSDB_LITE
cfi++;
}
}
@ -325,7 +306,6 @@ class ColumnFamilyTestBase : public testing::Test {
ASSERT_OK(db_->FlushWAL(/*sync=*/false));
}
#ifndef ROCKSDB_LITE // TEST functions in DB are not supported in lite
void WaitForFlush(int cf) {
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[cf]));
}
@ -339,7 +319,6 @@ class ColumnFamilyTestBase : public testing::Test {
void AssertMaxTotalInMemoryState(uint64_t value) {
ASSERT_EQ(value, MaxTotalInMemoryState());
}
#endif // !ROCKSDB_LITE
Status Put(int cf, const std::string& key, const std::string& value) {
return db_->Put(WriteOptions(), handles_[cf], Slice(key), Slice(value));
@ -377,7 +356,6 @@ class ColumnFamilyTestBase : public testing::Test {
"rocksdb.num-files-at-level" + std::to_string(level));
}
#ifndef ROCKSDB_LITE
// Return spread of files per level
std::string FilesPerLevel(int cf) {
std::string result;
@ -394,31 +372,19 @@ class ColumnFamilyTestBase : public testing::Test {
result.resize(last_non_zero_offset);
return result;
}
#endif
void AssertFilesPerLevel(const std::string& value, int cf) {
#ifndef ROCKSDB_LITE
ASSERT_EQ(value, FilesPerLevel(cf));
#else
(void)value;
(void)cf;
#endif
}
#ifndef ROCKSDB_LITE // GetLiveFilesMetaData is not supported
int CountLiveFiles() {
std::vector<LiveFileMetaData> metadata;
db_->GetLiveFilesMetaData(&metadata);
return static_cast<int>(metadata.size());
}
#endif // !ROCKSDB_LITE
void AssertCountLiveFiles(int expected_value) {
#ifndef ROCKSDB_LITE
ASSERT_EQ(expected_value, CountLiveFiles());
#else
(void)expected_value;
#endif
}
// Do n memtable flushes, each of which produces an sstable
@ -432,7 +398,6 @@ class ColumnFamilyTestBase : public testing::Test {
}
}
#ifndef ROCKSDB_LITE // GetSortedWalFiles is not supported
int CountLiveLogFiles() {
int micros_wait_for_log_deletion = 20000;
env_->SleepForMicroseconds(micros_wait_for_log_deletion);
@ -461,25 +426,18 @@ class ColumnFamilyTestBase : public testing::Test {
return ret;
return 0;
}
#endif // !ROCKSDB_LITE
void AssertCountLiveLogFiles(int value) {
#ifndef ROCKSDB_LITE // GetSortedWalFiles is not supported
ASSERT_EQ(value, CountLiveLogFiles());
#else
(void)value;
#endif // !ROCKSDB_LITE
}
void AssertNumberOfImmutableMemtables(std::vector<int> num_per_cf) {
assert(num_per_cf.size() == handles_.size());
#ifndef ROCKSDB_LITE // GetProperty is not supported in lite
for (size_t i = 0; i < num_per_cf.size(); ++i) {
ASSERT_EQ(num_per_cf[i], GetProperty(static_cast<int>(i),
"rocksdb.num-immutable-mem-table"));
}
#endif // !ROCKSDB_LITE
}
void CopyFile(const std::string& source, const std::string& destination,
@ -575,7 +533,6 @@ TEST_P(ColumnFamilyTest, DontReuseColumnFamilyID) {
}
}
#ifndef ROCKSDB_LITE
TEST_P(ColumnFamilyTest, CreateCFRaceWithGetAggProperty) {
Open();
@ -598,7 +555,6 @@ TEST_P(ColumnFamilyTest, CreateCFRaceWithGetAggProperty) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
#endif // !ROCKSDB_LITE
class FlushEmptyCFTestWithParam
: public ColumnFamilyTestBase,
@ -942,7 +898,6 @@ TEST_P(ColumnFamilyTest, IgnoreRecoveredLog) {
}
}
#ifndef ROCKSDB_LITE // TEST functions used are not supported
TEST_P(ColumnFamilyTest, FlushTest) {
Open();
CreateColumnFamiliesAndReopen({"one", "two"});
@ -1057,7 +1012,6 @@ TEST_P(ColumnFamilyTest, LogDeletionTest) {
AssertCountLiveLogFiles(4);
Close();
}
#endif // !ROCKSDB_LITE
TEST_P(ColumnFamilyTest, CrashAfterFlush) {
std::unique_ptr<FaultInjectionTestEnv> fault_env(
@ -1097,7 +1051,6 @@ TEST_P(ColumnFamilyTest, OpenNonexistentColumnFamily) {
ASSERT_TRUE(TryOpen({"default", "dne"}).IsInvalidArgument());
}
#ifndef ROCKSDB_LITE // WaitForFlush() is not supported
// Makes sure that obsolete log files get deleted
TEST_P(ColumnFamilyTest, DifferentWriteBufferSizes) {
// disable flushing stale column families
@ -1205,14 +1158,12 @@ TEST_P(ColumnFamilyTest, DifferentWriteBufferSizes) {
AssertCountLiveLogFiles(7);
Close();
}
#endif // !ROCKSDB_LITE
// The test is commented out because we want to test that snapshot is
// not created for memtables not supported it, but There isn't a memtable
// that doesn't support snapshot right now. If we have one later, we can
// re-enable the test.
//
// #ifndef ROCKSDB_LITE // Cuckoo is not supported in lite
// TEST_P(ColumnFamilyTest, MemtableNotSupportSnapshot) {
// db_options_.allow_concurrent_memtable_write = false;
// Open();
@ -1232,7 +1183,6 @@ TEST_P(ColumnFamilyTest, DifferentWriteBufferSizes) {
// {second}); auto* s3 = dbfull()->GetSnapshot(); ASSERT_TRUE(s3 == nullptr);
// Close();
// }
// #endif // !ROCKSDB_LITE
class TestComparator : public Comparator {
int Compare(const ROCKSDB_NAMESPACE::Slice& /*a*/,
@ -1299,7 +1249,6 @@ TEST_P(ColumnFamilyTest, DifferentMergeOperators) {
Close();
}
#ifndef ROCKSDB_LITE // WaitForFlush() is not supported
TEST_P(ColumnFamilyTest, DifferentCompactionStyles) {
Open();
CreateColumnFamilies({"one", "two"});
@ -1367,9 +1316,7 @@ TEST_P(ColumnFamilyTest, DifferentCompactionStyles) {
Close();
}
#endif // !ROCKSDB_LITE
#ifndef ROCKSDB_LITE
// Sync points not supported in RocksDB Lite
TEST_P(ColumnFamilyTest, MultipleManualCompactions) {
@ -2033,9 +1980,7 @@ TEST_P(ColumnFamilyTest, SameCFAutomaticManualCompactions) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
}
#endif // !ROCKSDB_LITE
#ifndef ROCKSDB_LITE // Tailing iterator not supported
namespace {
std::string IterStatus(Iterator* iter) {
std::string result;
@ -2093,9 +2038,7 @@ TEST_P(ColumnFamilyTest, NewIteratorsTest) {
Destroy();
}
}
#endif // !ROCKSDB_LITE
#ifndef ROCKSDB_LITE // ReadOnlyDB is not supported
TEST_P(ColumnFamilyTest, ReadOnlyDBTest) {
Open();
CreateColumnFamiliesAndReopen({"one", "two", "three", "four"});
@ -2144,9 +2087,7 @@ TEST_P(ColumnFamilyTest, ReadOnlyDBTest) {
s = OpenReadOnly({"one", "four"});
ASSERT_TRUE(!s.ok());
}
#endif // !ROCKSDB_LITE
#ifndef ROCKSDB_LITE // WaitForFlush() is not supported in lite
TEST_P(ColumnFamilyTest, DontRollEmptyLogs) {
Open();
CreateColumnFamiliesAndReopen({"one", "two", "three", "four"});
@ -2168,9 +2109,7 @@ TEST_P(ColumnFamilyTest, DontRollEmptyLogs) {
ASSERT_EQ(static_cast<size_t>(total_new_writable_files), handles_.size() + 1);
Close();
}
#endif // !ROCKSDB_LITE
#ifndef ROCKSDB_LITE // WaitForCompaction() is not supported in lite
TEST_P(ColumnFamilyTest, FlushStaleColumnFamilies) {
Open();
CreateColumnFamilies({"one", "two"});
@ -2217,7 +2156,6 @@ TEST_P(ColumnFamilyTest, FlushStaleColumnFamilies) {
ASSERT_EQ(0, dbfull()->TEST_total_log_size());
Close();
}
#endif // !ROCKSDB_LITE
TEST_P(ColumnFamilyTest, CreateMissingColumnFamilies) {
Status s = TryOpen({"one", "two"});
@ -2457,8 +2395,6 @@ TEST_P(ColumnFamilyTest, FlushAndDropRaceCondition) {
Destroy();
}
#ifndef ROCKSDB_LITE
// skipped as persisting options is not supported in ROCKSDB_LITE
namespace {
std::atomic<int> test_stage(0);
std::atomic<bool> ordered_by_writethread(false);
@ -2540,7 +2476,6 @@ TEST_P(ColumnFamilyTest, CreateAndDropRace) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
}
#endif // !ROCKSDB_LITE
TEST_P(ColumnFamilyTest, WriteStallSingleColumnFamily) {
const uint64_t kBaseRate = 800000u;
@ -2950,7 +2885,6 @@ TEST_P(ColumnFamilyTest, CreateDropAndDestroy) {
ASSERT_OK(db_->DestroyColumnFamilyHandle(cfh));
}
#ifndef ROCKSDB_LITE
TEST_P(ColumnFamilyTest, CreateDropAndDestroyWithoutFileDeletion) {
ColumnFamilyHandle* cfh;
Open();
@ -3005,9 +2939,7 @@ TEST_P(ColumnFamilyTest, FlushCloseWALFiles) {
db_options_.env = env_;
Close();
}
#endif // !ROCKSDB_LITE
#ifndef ROCKSDB_LITE // WaitForFlush() is not supported
TEST_P(ColumnFamilyTest, IteratorCloseWALFile1) {
SpecialEnv env(Env::Default());
db_options_.env = &env;
@ -3114,9 +3046,7 @@ TEST_P(ColumnFamilyTest, IteratorCloseWALFile2) {
db_options_.env = env_;
Close();
}
#endif // !ROCKSDB_LITE
#ifndef ROCKSDB_LITE // TEST functions are not supported in lite
TEST_P(ColumnFamilyTest, ForwardIteratorCloseWALFile) {
SpecialEnv env(Env::Default());
// Allow both of flush and purge job to schedule.
@ -3192,7 +3122,6 @@ TEST_P(ColumnFamilyTest, ForwardIteratorCloseWALFile) {
db_options_.env = env_;
Close();
}
#endif // !ROCKSDB_LITE
// Disable on windows because SyncWAL requires env->IsSyncThreadSafe()
// to return true which is not so in unbuffered mode.


@ -3,7 +3,6 @@
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#ifndef ROCKSDB_LITE
#include <mutex>
#include <string>
@ -490,13 +489,3 @@ int main(int argc, char** argv) {
return RUN_ALL_TESTS();
}
#else
#include <stdio.h>
int main(int /*argc*/, char** /*argv*/) {
fprintf(stderr,
"SKIPPED as DBImpl::CompactFiles is not supported in ROCKSDB_LITE\n");
return 0;
}
#endif // !ROCKSDB_LITE


@ -978,7 +978,6 @@ Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) {
void CompactionJob::NotifyOnSubcompactionBegin(
SubcompactionState* sub_compact) {
#ifndef ROCKSDB_LITE
Compaction* c = compact_->compaction;
if (db_options_.listeners.empty()) {
@ -1004,14 +1003,10 @@ void CompactionJob::NotifyOnSubcompactionBegin(
}
info.status.PermitUncheckedError();
#else
(void)sub_compact;
#endif // ROCKSDB_LITE
}
void CompactionJob::NotifyOnSubcompactionCompleted(
SubcompactionState* sub_compact) {
#ifndef ROCKSDB_LITE
if (db_options_.listeners.empty()) {
return;
@ -1032,16 +1027,12 @@ void CompactionJob::NotifyOnSubcompactionCompleted(
for (const auto& listener : db_options_.listeners) {
listener->OnSubcompactionCompleted(info);
}
#else
(void)sub_compact;
#endif // ROCKSDB_LITE
}
void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
assert(sub_compact);
assert(sub_compact->compaction);
#ifndef ROCKSDB_LITE
if (db_options_.compaction_service) {
CompactionServiceJobStatus comp_status =
ProcessKeyValueCompactionWithCompactionService(sub_compact);
@ -1052,7 +1043,6 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
// fallback to local compaction
assert(comp_status == CompactionServiceJobStatus::kUseLocal);
}
#endif // !ROCKSDB_LITE
uint64_t prev_cpu_micros = db_options_.clock->CPUMicros();
@ -1615,7 +1605,6 @@ Status CompactionJob::FinishCompactionOutputFile(
TableFileCreationReason::kCompaction, status_for_listener, file_checksum,
file_checksum_func_name);
#ifndef ROCKSDB_LITE
// Report new file to SstFileManagerImpl
auto sfm =
static_cast<SstFileManagerImpl*>(db_options_.sst_file_manager.get());
@ -1634,7 +1623,6 @@ Status CompactionJob::FinishCompactionOutputFile(
db_error_handler_->SetBGError(s, BackgroundErrorReason::kCompaction);
}
}
#endif
outputs.ResetBuilder();
return s;
@ -1759,11 +1747,9 @@ Status CompactionJob::OpenCompactionOutputFile(SubcompactionState* sub_compact,
std::string fname = GetTableFileName(file_number);
// Fire events.
ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
#ifndef ROCKSDB_LITE
EventHelpers::NotifyTableFileCreationStarted(
cfd->ioptions()->listeners, dbname_, cfd->GetName(), fname, job_id_,
TableFileCreationReason::kCompaction);
#endif // !ROCKSDB_LITE
// Make the output file
std::unique_ptr<FSWritableFile> writable_file;
#ifndef NDEBUG
@ -1900,7 +1886,6 @@ void CompactionJob::CleanupCompaction() {
compact_ = nullptr;
}
#ifndef ROCKSDB_LITE
namespace {
void CopyPrefix(const Slice& src, size_t prefix_length, std::string* dst) {
assert(prefix_length > 0);
@ -1909,7 +1894,6 @@ void CopyPrefix(const Slice& src, size_t prefix_length, std::string* dst) {
}
} // namespace
#endif // !ROCKSDB_LITE
void CompactionJob::UpdateCompactionStats() {
assert(compact_);
@ -1956,7 +1940,6 @@ void CompactionJob::UpdateCompactionInputStatsHelper(int* num_files,
void CompactionJob::UpdateCompactionJobStats(
const InternalStats::CompactionStats& stats) const {
#ifndef ROCKSDB_LITE
compaction_job_stats_->elapsed_micros = stats.micros;
// input information
@ -1983,9 +1966,6 @@ void CompactionJob::UpdateCompactionJobStats(
CopyPrefix(compact_->LargestUserKey(), CompactionJobStats::kMaxPrefixLength,
&compaction_job_stats_->largest_output_key_prefix);
}
#else
(void)stats;
#endif // !ROCKSDB_LITE
}
void CompactionJob::LogCompaction() {


@ -59,7 +59,6 @@
#include "utilities/merge_operators.h"
#if !defined(IOS_CROSS_COMPILE)
#ifndef ROCKSDB_LITE
namespace ROCKSDB_NAMESPACE {
static std::string RandomString(Random* rnd, int len, double ratio) {
@ -959,15 +958,6 @@ int main(int argc, char** argv) {
return RUN_ALL_TESTS();
}
#else
#include <stdio.h>
int main(int /*argc*/, char** /*argv*/) {
fprintf(stderr, "SKIPPED, not supported in ROCKSDB_LITE\n");
return 0;
}
#endif // !ROCKSDB_LITE
#else


@ -3,7 +3,6 @@
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#ifndef ROCKSDB_LITE
#include "db/compaction/compaction_job.h"
@ -2442,13 +2441,3 @@ int main(int argc, char** argv) {
return RUN_ALL_TESTS();
}
#else
#include <stdio.h>
int main(int /*argc*/, char** /*argv*/) {
fprintf(stderr,
"SKIPPED as CompactionJobStats is not supported in ROCKSDB_LITE\n");
return 0;
}
#endif // ROCKSDB_LITE


@ -877,7 +877,6 @@ Compaction* CompactionPicker::CompactRange(
return compaction;
}
#ifndef ROCKSDB_LITE
namespace {
// Test whether two files have overlapping key-ranges.
bool HaveOverlappingKeyRanges(const Comparator* c, const SstFileMetaData& a,
@ -1116,7 +1115,6 @@ Status CompactionPicker::SanitizeCompactionInputFiles(
return Status::OK();
}
#endif // !ROCKSDB_LITE
void CompactionPicker::RegisterCompaction(Compaction* c) {
if (c == nullptr) {


@ -93,11 +93,9 @@ class CompactionPicker {
// into a valid one by adding more files, the function will return a
// non-ok status with specific reason.
//
#ifndef ROCKSDB_LITE
Status SanitizeCompactionInputFiles(std::unordered_set<uint64_t>* input_files,
const ColumnFamilyMetaData& cf_meta,
const int output_level) const;
#endif // ROCKSDB_LITE
// Free up the files that participated in a compaction
//
@ -229,11 +227,9 @@ class CompactionPicker {
// A helper function to SanitizeCompactionInputFiles() that
// sanitizes "input_files" by adding necessary files.
#ifndef ROCKSDB_LITE
virtual Status SanitizeCompactionInputFilesForAllLevels(
std::unordered_set<uint64_t>* input_files,
const ColumnFamilyMetaData& cf_meta, const int output_level) const;
#endif // ROCKSDB_LITE
// Keeps track of all compactions that are running on Level0.
// Protected by DB mutex
@ -246,7 +242,6 @@ class CompactionPicker {
const InternalKeyComparator* const icmp_;
};
#ifndef ROCKSDB_LITE
// A dummy compaction that never triggers any automatic
// compaction.
class NullCompactionPicker : public CompactionPicker {
@ -287,7 +282,6 @@ class NullCompactionPicker : public CompactionPicker {
return false;
}
};
#endif // !ROCKSDB_LITE
// Attempts to find an intra L0 compaction conforming to the given parameters.
//


@ -8,7 +8,6 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/compaction/compaction_picker_fifo.h"
#ifndef ROCKSDB_LITE
#include <cinttypes>
#include <string>
@ -443,4 +442,3 @@ Compaction* FIFOCompactionPicker::CompactRange(
}
} // namespace ROCKSDB_NAMESPACE
#endif // !ROCKSDB_LITE


@ -8,7 +8,6 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#ifndef ROCKSDB_LITE
#include "db/compaction/compaction_picker.h"
@ -60,4 +59,3 @@ class FIFOCompactionPicker : public CompactionPicker {
LogBuffer* log_buffer);
};
} // namespace ROCKSDB_NAMESPACE
#endif // !ROCKSDB_LITE


@ -482,8 +482,6 @@ TEST_F(CompactionPickerTest, LevelTriggerDynamic4) {
ASSERT_EQ(num_levels - 1, compaction->output_level());
}
// Universal and FIFO Compactions are not supported in ROCKSDB_LITE
#ifndef ROCKSDB_LITE
TEST_F(CompactionPickerTest, NeedsCompactionUniversal) {
NewVersionStorage(1, kCompactionStyleUniversal);
UniversalCompactionPicker universal_compaction_picker(ioptions_, &icmp_);
@ -1245,7 +1243,6 @@ TEST_F(CompactionPickerTest, FIFOToWarmWithHotBetweenWarms) {
ASSERT_EQ(2U, compaction->input(0, 0)->fd.GetNumber());
}
#endif // ROCKSDB_LITE
TEST_F(CompactionPickerTest, CompactionPriMinOverlapping1) {
NewVersionStorage(6, kCompactionStyleLevel);
@ -2873,7 +2870,6 @@ TEST_F(CompactionPickerTest, IntraL0MaxCompactionBytesHit) {
ASSERT_EQ(0, compaction->output_level());
}
#ifndef ROCKSDB_LITE
TEST_F(CompactionPickerTest, UniversalMarkedCompactionFullOverlap) {
const uint64_t kFileSize = 100000;
@ -3982,7 +3978,6 @@ TEST_P(PerKeyPlacementCompactionPickerTest,
INSTANTIATE_TEST_CASE_P(PerKeyPlacementCompactionPickerTest,
PerKeyPlacementCompactionPickerTest, ::testing::Bool());
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE


@ -8,7 +8,6 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/compaction/compaction_picker_universal.h"
#ifndef ROCKSDB_LITE
#include <cinttypes>
#include <limits>
@ -1451,4 +1450,3 @@ uint64_t UniversalCompactionBuilder::GetMaxOverlappingBytes() const {
}
} // namespace ROCKSDB_NAMESPACE
#endif // !ROCKSDB_LITE


@ -8,7 +8,6 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#ifndef ROCKSDB_LITE
#include "db/compaction/compaction_picker.h"
@ -29,4 +28,3 @@ class UniversalCompactionPicker : public CompactionPicker {
const VersionStorageInfo* vstorage) const override;
};
} // namespace ROCKSDB_NAMESPACE
#endif // !ROCKSDB_LITE


@ -16,7 +16,6 @@
#include "options/options_helper.h"
#include "rocksdb/utilities/options_type.h"
#ifndef ROCKSDB_LITE
namespace ROCKSDB_NAMESPACE {
class SubcompactionState;
@ -832,4 +831,3 @@ bool CompactionServiceInput::TEST_Equals(CompactionServiceInput* other,
#endif // NDEBUG
} // namespace ROCKSDB_NAMESPACE
#endif // !ROCKSDB_LITE


@ -3,7 +3,6 @@
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#ifndef ROCKSDB_LITE
#include "db/db_test_util.h"
#include "port/stack_trace.h"
@ -954,13 +953,3 @@ int main(int argc, char** argv) {
return RUN_ALL_TESTS();
}
#else
#include <stdio.h>
int main(int /*argc*/, char** /*argv*/) {
fprintf(stderr,
"SKIPPED as CompactionService is not supported in ROCKSDB_LITE\n");
return 0;
}
#endif // ROCKSDB_LITE


@ -15,11 +15,9 @@
namespace ROCKSDB_NAMESPACE {
static std::unordered_map<std::string, OptionTypeInfo>
sst_fixed_prefix_type_info = {
#ifndef ROCKSDB_LITE
{"length",
{0, OptionType::kSizeT, OptionVerificationType::kNormal,
OptionTypeFlags::kNone}},
#endif // ROCKSDB_LITE
};
SstPartitionerFixedPrefixFactory::SstPartitionerFixedPrefixFactory(size_t len)
@ -58,7 +56,6 @@ std::shared_ptr<SstPartitionerFactory> NewSstPartitionerFixedPrefixFactory(
return std::make_shared<SstPartitionerFixedPrefixFactory>(prefix_len);
}
#ifndef ROCKSDB_LITE
namespace {
static int RegisterSstPartitionerFactories(ObjectLibrary& library,
const std::string& /*arg*/) {
@ -73,17 +70,14 @@ static int RegisterSstPartitionerFactories(ObjectLibrary& library,
return 1;
}
} // namespace
#endif // ROCKSDB_LITE
Status SstPartitionerFactory::CreateFromString(
const ConfigOptions& options, const std::string& value,
std::shared_ptr<SstPartitionerFactory>* result) {
#ifndef ROCKSDB_LITE
static std::once_flag once;
std::call_once(once, [&]() {
RegisterSstPartitionerFactories(*(ObjectLibrary::Default().get()), "");
});
#endif // ROCKSDB_LITE
return LoadSharedObject<SstPartitionerFactory>(options, value, nullptr,
result);
}


@ -99,7 +99,6 @@ class SubcompactionState {
penultimate_level_outputs_.RemoveLastEmptyOutput();
}
#ifndef ROCKSDB_LITE
void BuildSubcompactionJobInfo(
SubcompactionJobInfo& subcompaction_job_info) const {
const Compaction* c = compaction;
@ -113,7 +112,6 @@ class SubcompactionState {
subcompaction_job_info.output_level = c->output_level();
subcompaction_job_info.stats = compaction_job_stats;
}
#endif // !ROCKSDB_LITE
SubcompactionState() = delete;
SubcompactionState(const SubcompactionState&) = delete;


@ -17,7 +17,6 @@
namespace ROCKSDB_NAMESPACE {
#if !defined(ROCKSDB_LITE)
class TieredCompactionTest : public DBTestBase,
public testing::WithParamInterface<bool> {
@ -2139,18 +2138,11 @@ TEST_F(PrecludeLastLevelTest, RangeDelsCauseFileEndpointsToOverlap) {
Close();
}
#endif // !defined(ROCKSDB_LITE)
} // namespace ROCKSDB_NAMESPACE
int main(int argc, char** argv) {
#if !defined(ROCKSDB_LITE)
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
#else
(void)argc;
(void)argv;
return 0;
#endif
}


@ -4,7 +4,6 @@
// (found in the LICENSE.Apache file in the root directory).
//
#ifndef ROCKSDB_LITE
#include "rocksdb/convenience.h"
@ -78,4 +77,3 @@ Status VerifySstFileChecksum(const Options& options,
} // namespace ROCKSDB_NAMESPACE
#endif // ROCKSDB_LITE


@ -8,7 +8,6 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "rocksdb/options.h"
#ifndef ROCKSDB_LITE
#include <fcntl.h>
#include <sys/stat.h>
@ -1668,12 +1667,3 @@ int main(int argc, char** argv) {
return RUN_ALL_TESTS();
}
#else
#include <stdio.h>
int main(int /*argc*/, char** /*argv*/) {
fprintf(stderr, "SKIPPED as RepairDB() is not supported in ROCKSDB_LITE\n");
return 0;
}
#endif // !ROCKSDB_LITE


@ -3,7 +3,6 @@
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#ifndef ROCKSDB_LITE
#include "db/db_impl/db_impl.h"
#include "db/db_test_util.h"
@ -350,12 +349,3 @@ int main(int argc, char** argv) {
}
}
#else
#include <stdio.h>
int main(int /*argc*/, char** /*argv*/) {
fprintf(stderr, "SKIPPED as Cuckoo table is not supported in ROCKSDB_LITE\n");
return 0;
}
#endif // ROCKSDB_LITE


@ -20,9 +20,7 @@
#include "rocksdb/utilities/debug.h"
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/block_builder.h"
#if !defined(ROCKSDB_LITE)
#include "test_util/sync_point.h"
#endif
#include "util/file_checksum_helper.h"
#include "util/random.h"
#include "utilities/counted_fs.h"
@ -95,7 +93,6 @@ TEST_F(DBBasicTest, UniqueSession) {
EXPECT_MATCHES_REGEX(sid2, expected);
EXPECT_MATCHES_REGEX(sid3, expected);
#ifndef ROCKSDB_LITE
Close();
ASSERT_OK(ReadOnlyReopen(options));
ASSERT_OK(db_->GetDbSessionId(sid1));
@ -110,7 +107,6 @@ TEST_F(DBBasicTest, UniqueSession) {
ASSERT_NE(sid1, sid2);
ASSERT_EQ(sid2, sid3);
#endif // ROCKSDB_LITE
CreateAndReopenWithCF({"goku"}, options);
ASSERT_OK(db_->GetDbSessionId(sid1));
@ -127,7 +123,6 @@ TEST_F(DBBasicTest, UniqueSession) {
ASSERT_NE(sid1, sid4);
}
#ifndef ROCKSDB_LITE
TEST_F(DBBasicTest, ReadOnlyDB) {
ASSERT_OK(Put("foo", "v1"));
ASSERT_OK(Put("bar", "v2"));
@ -364,7 +359,6 @@ TEST_F(DBBasicTest, LevelLimitReopen) {
options.max_bytes_for_level_multiplier_additional.resize(10, 1);
ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
}
#endif // ROCKSDB_LITE
TEST_F(DBBasicTest, PutDeleteGet) {
do {
@ -426,7 +420,6 @@ TEST_F(DBBasicTest, GetFromVersions) {
} while (ChangeOptions());
}
#ifndef ROCKSDB_LITE
TEST_F(DBBasicTest, GetSnapshot) {
anon::OptionsOverride options_override;
options_override.skip_policy = kSkipNoSnapshot;
@ -447,7 +440,6 @@ TEST_F(DBBasicTest, GetSnapshot) {
}
} while (ChangeOptions());
}
#endif // ROCKSDB_LITE
TEST_F(DBBasicTest, CheckLock) {
do {
@ -700,7 +692,6 @@ TEST_F(DBBasicTest, LockFileRecovery) {
}
}
#ifndef ROCKSDB_LITE
TEST_F(DBBasicTest, Snapshot) {
env_->SetMockSleep();
anon::OptionsOverride options_override;
@ -772,7 +763,6 @@ TEST_F(DBBasicTest, Snapshot) {
} while (ChangeOptions());
}
#endif // ROCKSDB_LITE
class DBBasicMultiConfigs : public DBBasicTest,
public ::testing::WithParamInterface<int> {
@ -2729,7 +2719,6 @@ TEST_P(MultiGetPrefixExtractorTest, Batched) {
INSTANTIATE_TEST_CASE_P(MultiGetPrefix, MultiGetPrefixExtractorTest,
::testing::Bool());
#ifndef ROCKSDB_LITE
class DBMultiGetRowCacheTest : public DBBasicTest,
public ::testing::WithParamInterface<bool> {};
@ -2884,7 +2873,6 @@ TEST_F(DBBasicTest, ValueTypeString) {
ASSERT_TRUE(key_version.GetTypeName() != "Invalid");
}
}
#endif // !ROCKSDB_LITE
TEST_F(DBBasicTest, MultiGetIOBufferOverrun) {
Options options = CurrentOptions();
@ -2985,7 +2973,6 @@ TEST_F(DBBasicTest, BestEffortsRecoveryWithVersionBuildingFailure) {
SyncPoint::GetInstance()->ClearAllCallBacks();
}
#ifndef ROCKSDB_LITE
namespace {
class TableFileListener : public EventListener {
public:
@ -3260,7 +3247,6 @@ TEST_F(DBBasicTest, DisableTrackWal) {
ASSERT_TRUE(dbfull()->GetVersionSet()->GetWalSet().GetWals().empty());
Close();
}
#endif // !ROCKSDB_LITE
TEST_F(DBBasicTest, ManifestChecksumMismatch) {
Options options = CurrentOptions();
@ -3304,7 +3290,6 @@ TEST_F(DBBasicTest, ConcurrentlyCloseDB) {
}
}
#ifndef ROCKSDB_LITE
class DBBasicTestTrackWal : public DBTestBase,
public testing::WithParamInterface<bool> {
public:
@ -3360,7 +3345,6 @@ TEST_P(DBBasicTestTrackWal, DoNotTrackObsoleteWal) {
INSTANTIATE_TEST_CASE_P(DBBasicTestTrackWal, DBBasicTestTrackWal,
testing::Bool());
#endif // ROCKSDB_LITE
class DBBasicTestMultiGet : public DBTestBase {
public:
@ -3382,7 +3366,6 @@ class DBBasicTestMultiGet : public DBTestBase {
Random rnd(301);
BlockBasedTableOptions table_options;
#ifndef ROCKSDB_LITE
if (compression_enabled_) {
std::vector<CompressionType> compression_types;
compression_types = GetSupportedCompressions();
@ -3401,12 +3384,6 @@ class DBBasicTestMultiGet : public DBTestBase {
compression_enabled_ = false;
}
}
#else
// GetSupportedCompressions() is not available in LITE build
if (!Snappy_Supported()) {
compression_enabled_ = false;
}
#endif // ROCKSDB_LITE
table_options.block_cache = uncompressed_cache_;
if (table_options.block_cache == nullptr) {
@ -3729,7 +3706,6 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
}
}
#ifndef ROCKSDB_LITE
TEST_P(DBBasicTestWithParallelIO, MultiGetDirectIO) {
class FakeDirectIOEnv : public EnvWrapper {
class FakeDirectIOSequentialFile;
@ -3846,7 +3822,6 @@ TEST_P(DBBasicTestWithParallelIO, MultiGetDirectIO) {
}
Close();
}
#endif // ROCKSDB_LITE
TEST_P(DBBasicTestWithParallelIO, MultiGetWithChecksumMismatch) {
std::vector<std::string> key_data(10);
@ -4387,7 +4362,6 @@ TEST_F(DBBasicTest, FailOpenIfLoggerCreationFail) {
SyncPoint::GetInstance()->ClearAllCallBacks();
}
#ifndef ROCKSDB_LITE
TEST_F(DBBasicTest, VerifyFileChecksums) {
Options options = GetDefaultOptions();
options.create_if_missing = true;
@ -4453,7 +4427,6 @@ TEST_F(DBBasicTest, DISABLED_ManualWalSync) {
ASSERT_TRUE(TryReopen(options).IsCorruption());
}
#endif // !ROCKSDB_LITE
// A test class for intercepting random reads and injecting artificial
// delays. Used for testing the deadline/timeout feature


@ -132,7 +132,6 @@ class DBBlockCacheTest : public DBTestBase {
compression_dict_insert_count_ = new_compression_dict_insert_count;
}
#ifndef ROCKSDB_LITE
const std::array<size_t, kNumCacheEntryRoles> GetCacheEntryRoleCountsBg() {
// Verify in cache entry role stats
std::array<size_t, kNumCacheEntryRoles> cache_entry_role_counts;
@ -146,7 +145,6 @@ class DBBlockCacheTest : public DBTestBase {
}
return cache_entry_role_counts;
}
#endif // ROCKSDB_LITE
};
TEST_F(DBBlockCacheTest, IteratorBlockCacheUsage) {
@ -302,7 +300,6 @@ class ReadOnlyCacheWrapper : public CacheWrapper {
} // anonymous namespace
#endif // SNAPPY
#ifndef ROCKSDB_LITE
// Make sure that when options.block_cache is set, after a new table is
// created its index/filter blocks are added to block cache.
@ -1335,7 +1332,6 @@ TEST_F(DBBlockCacheTest, HyperClockCacheReportProblems) {
EXPECT_EQ(logger->PopCounts(), (std::array<int, 3>{{0, 1, 0}}));
}
#endif // ROCKSDB_LITE
class DBBlockCacheKeyTest
: public DBTestBase,
@ -1442,7 +1438,6 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) {
++key_count;
}
#ifndef ROCKSDB_LITE
// Save an export of those ordinary SST files for later
std::string export_files_dir = dbname_ + "/exported";
ExportImportFilesMetaData* metadata_ptr_ = nullptr;
@ -1470,7 +1465,6 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) {
IngestExternalFileOptions ingest_opts;
ASSERT_OK(db_->IngestExternalFile(handles_[1], {f}, ingest_opts));
}
#endif
perform_gets();
verify_stats();
@ -1484,7 +1478,6 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) {
// Make sure we can cache hit even on a full copy of the DB. Using
// StableCacheKeyTestFS, Checkpoint will resort to full copy not hard link.
// (Checkpoint not available in LITE mode to test this.)
#ifndef ROCKSDB_LITE
auto db_copy_name = dbname_ + "-copy";
ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
ASSERT_OK(checkpoint->CreateCheckpoint(db_copy_name));
@ -1523,7 +1516,6 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) {
perform_gets();
verify_stats();
#endif // !ROCKSDB_LITE
Close();
Destroy(options);


@ -245,14 +245,12 @@ TEST_F(DBBloomFilterTest, GetFilterByPrefixBloomCustomPrefixExtractor) {
(*(get_perf_context()->level_to_perf_context))[0].bloom_filter_useful);
// No bloom on extractor changed
#ifndef ROCKSDB_LITE
ASSERT_OK(db_->SetOptions({{"prefix_extractor", "capped:10"}}));
ASSERT_EQ("NOT_FOUND", Get("foobarbar"));
ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 3);
ASSERT_EQ(
3,
(*(get_perf_context()->level_to_perf_context))[0].bloom_filter_useful);
#endif // ROCKSDB_LITE
// No bloom on extractor changed, after re-open
options.prefix_extractor.reset(NewCappedPrefixTransform(10));
@ -317,14 +315,12 @@ TEST_F(DBBloomFilterTest, GetFilterByPrefixBloom) {
(*(get_perf_context()->level_to_perf_context))[0].bloom_filter_useful);
// No bloom on extractor changed
#ifndef ROCKSDB_LITE
ASSERT_OK(db_->SetOptions({{"prefix_extractor", "capped:10"}}));
ASSERT_EQ("NOT_FOUND", Get("foobarbar"));
ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 3);
ASSERT_EQ(
3,
(*(get_perf_context()->level_to_perf_context))[0].bloom_filter_useful);
#endif // ROCKSDB_LITE
get_perf_context()->Reset();
}
@ -564,7 +560,6 @@ TEST_P(DBBloomFilterTestWithParam, BloomFilter) {
ASSERT_LE(reads, 3 * N / 100);
}
#ifndef ROCKSDB_LITE
// Sanity check some table properties
std::map<std::string, std::string> props;
ASSERT_TRUE(db_->GetMapProperty(
@ -583,7 +578,6 @@ TEST_P(DBBloomFilterTestWithParam, BloomFilter) {
uint64_t num_filter_entries = ParseUint64(props["num_filter_entries"]);
EXPECT_EQ(num_filter_entries, nkeys);
#endif // ROCKSDB_LITE
env_->delay_sstable_sync_.store(false, std::memory_order_release);
Close();
@ -649,10 +643,8 @@ TEST_P(DBBloomFilterTestWithParam, SkipFilterOnEssentiallyZeroBpk) {
PutFn();
GetFn();
};
#ifndef ROCKSDB_LITE
std::map<std::string, std::string> props;
const auto& kAggTableProps = DB::Properties::kAggregatedTableProperties;
#endif // ROCKSDB_LITE
Options options = CurrentOptions();
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
@ -675,11 +667,9 @@ TEST_P(DBBloomFilterTestWithParam, SkipFilterOnEssentiallyZeroBpk) {
EXPECT_EQ(TestGetTickerCount(options, BLOOM_FILTER_FULL_POSITIVE), 0);
EXPECT_EQ(TestGetTickerCount(options, BLOOM_FILTER_FULL_TRUE_POSITIVE), 0);
#ifndef ROCKSDB_LITE
props.clear();
ASSERT_TRUE(db_->GetMapProperty(kAggTableProps, &props));
EXPECT_EQ(props["filter_size"], "0");
#endif // ROCKSDB_LITE
// Test 2: use custom API to skip filters -> no filter constructed
// or read.
@ -693,11 +683,9 @@ TEST_P(DBBloomFilterTestWithParam, SkipFilterOnEssentiallyZeroBpk) {
EXPECT_EQ(TestGetTickerCount(options, BLOOM_FILTER_FULL_POSITIVE), 0);
EXPECT_EQ(TestGetTickerCount(options, BLOOM_FILTER_FULL_TRUE_POSITIVE), 0);
#ifndef ROCKSDB_LITE
props.clear();
ASSERT_TRUE(db_->GetMapProperty(kAggTableProps, &props));
EXPECT_EQ(props["filter_size"], "0");
#endif // ROCKSDB_LITE
// Control test: using an actual filter with 100% FP rate -> the filter
// is constructed and checked on read.
@ -713,11 +701,9 @@ TEST_P(DBBloomFilterTestWithParam, SkipFilterOnEssentiallyZeroBpk) {
EXPECT_EQ(
TestGetAndResetTickerCount(options, BLOOM_FILTER_FULL_TRUE_POSITIVE),
maxKey);
#ifndef ROCKSDB_LITE
props.clear();
ASSERT_TRUE(db_->GetMapProperty(kAggTableProps, &props));
EXPECT_NE(props["filter_size"], "0");
#endif // ROCKSDB_LITE
// Test 3 (options test): Able to read existing filters with longstanding
// generated options file entry `filter_policy=rocksdb.BuiltinBloomFilter`
@ -743,11 +729,9 @@ TEST_P(DBBloomFilterTestWithParam, SkipFilterOnEssentiallyZeroBpk) {
EXPECT_EQ(TestGetTickerCount(options, BLOOM_FILTER_FULL_POSITIVE), 0);
EXPECT_EQ(TestGetTickerCount(options, BLOOM_FILTER_FULL_TRUE_POSITIVE), 0);
#ifndef ROCKSDB_LITE
props.clear();
ASSERT_TRUE(db_->GetMapProperty(kAggTableProps, &props));
EXPECT_EQ(props["filter_size"], "0");
#endif // ROCKSDB_LITE
}
#if !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
@ -1503,7 +1487,6 @@ TEST_P(DBFilterConstructionCorruptionTestWithParam, DetectCorruption) {
}
// RocksDB lite does not support dynamic options
#ifndef ROCKSDB_LITE
TEST_P(DBFilterConstructionCorruptionTestWithParam,
DynamicallyTurnOnAndOffDetectConstructCorruption) {
Options options = CurrentOptions();
@ -1587,7 +1570,6 @@ TEST_P(DBFilterConstructionCorruptionTestWithParam,
db_->GetOptions().table_factory->GetOptions<BlockBasedTableOptions>();
EXPECT_FALSE(updated_table_options->detect_filter_construct_corruption);
}
#endif // ROCKSDB_LITE
namespace {
// NOTE: This class is referenced by HISTORY.md as a model for a wrapper
@ -1756,7 +1738,6 @@ TEST_F(DBBloomFilterTest, ContextCustomFilterPolicy) {
EXPECT_LE(useful_count, maxKey * 0.91);
}
} else {
#ifndef ROCKSDB_LITE
// Also try external SST file
{
std::string file_path = dbname_ + "/external.sst";
@ -1768,7 +1749,6 @@ TEST_F(DBBloomFilterTest, ContextCustomFilterPolicy) {
// Note: kCompactionStyleLevel is default, ignored if num_levels == -1
EXPECT_EQ(policy->DumpTestReport(),
"cf=abe,s=kCompactionStyleLevel,n=-1,l=-1,b=0,r=kMisc\n");
#endif
}
// Destroy
@ -2036,9 +2016,7 @@ TEST_P(DBBloomFilterTestVaryPrefixAndFormatVer, PartitionedMultiGet) {
ASSERT_OK(Put(UKey(i), UKey(i)));
}
ASSERT_OK(Flush());
#ifndef ROCKSDB_LITE
ASSERT_EQ(TotalTableFiles(), 1);
#endif
constexpr uint32_t Q = 29;
// MultiGet In
@ -2190,7 +2168,6 @@ INSTANTIATE_TEST_CASE_P(DBBloomFilterTestVaryPrefixAndFormatVer,
std::make_tuple(true, 3), std::make_tuple(true, 4),
std::make_tuple(true, 5)));
#ifndef ROCKSDB_LITE
namespace {
static const std::string kPlainTable = "test_PlainTableBloom";
} // anonymous namespace
@ -3486,7 +3463,6 @@ TEST_F(DBBloomFilterTest, WeirdPrefixExtractorWithFilter3) {
}
}
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

View File

@ -285,7 +285,6 @@ class ChangeFilterFactory : public CompactionFilterFactory {
const char* Name() const override { return "ChangeFilterFactory"; }
};
#ifndef ROCKSDB_LITE
TEST_F(DBTestCompactionFilter, CompactionFilter) {
Options options = CurrentOptions();
options.max_open_files = -1;
@ -469,7 +468,6 @@ TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) {
delete itr;
}
#endif // ROCKSDB_LITE
TEST_F(DBTestCompactionFilter, CompactionFilterFlush) {
// Tests a `CompactionFilterFactory` that filters when table file is created
@ -655,7 +653,6 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) {
ASSERT_EQ(newvalue, four);
}
#ifndef ROCKSDB_LITE
TEST_F(DBTestCompactionFilter, CompactionFilterContextManual) {
KeepFilterFactory* filter = new KeepFilterFactory(true, true);
@ -715,7 +712,6 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextManual) {
ASSERT_EQ(count, 0);
}
}
#endif // ROCKSDB_LITE
TEST_F(DBTestCompactionFilter, CompactionFilterContextCfId) {
KeepFilterFactory* filter = new KeepFilterFactory(false, true);
@ -746,7 +742,6 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextCfId) {
ASSERT_TRUE(filter->compaction_filter_created());
}
#ifndef ROCKSDB_LITE
// Compaction filters apply to all records, regardless of snapshots.
TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
std::string five = std::to_string(5);
@ -807,7 +802,6 @@ TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
// removed.
db_->ReleaseSnapshot(snapshot);
}
#endif // ROCKSDB_LITE
TEST_F(DBTestCompactionFilter, SkipUntil) {
Options options = CurrentOptions();

View File

@ -30,7 +30,6 @@
namespace ROCKSDB_NAMESPACE {
// SYNC_POINT is not supported in released Windows mode.
#if !defined(ROCKSDB_LITE)
class CompactionStatsCollector : public EventListener {
public:
@ -9056,18 +9055,11 @@ TEST_F(DBCompactionTest, BottommostFileCompactionAllowIngestBehind) {
// ASSERT_OK(dbfull()->TEST_WaitForCompact(true /* wait_unscheduled */));
}
#endif // !defined(ROCKSDB_LITE)
} // namespace ROCKSDB_NAMESPACE
int main(int argc, char** argv) {
#if !defined(ROCKSDB_LITE)
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
#else
(void)argc;
(void)argv;
return 0;
#endif
}

View File

@ -10,7 +10,6 @@
// Introduction of SyncPoint effectively disabled building and running this test
// in Release build.
// which is a pity, as it is a good test
#if !defined(ROCKSDB_LITE)
#include "db/db_test_util.h"
#include "port/port.h"
@ -492,16 +491,9 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) {
}
} // namespace ROCKSDB_NAMESPACE
#endif // !defined(ROCKSDB_LITE)
int main(int argc, char** argv) {
#if !defined(ROCKSDB_LITE)
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
#else
(void)argc;
(void)argv;
return 0;
#endif
}

View File

@ -6,9 +6,7 @@
#include "db/db_test_util.h"
#include "port/stack_trace.h"
#include "rocksdb/perf_context.h"
#if !defined(ROCKSDB_LITE)
#include "test_util/sync_point.h"
#endif
#include <iostream>
#include <string>
@ -27,7 +25,6 @@ class DBEncryptionTest : public DBTestBase {
}
};
#ifndef ROCKSDB_LITE
TEST_F(DBEncryptionTest, CheckEncrypted) {
ASSERT_OK(Put("foo567", "v1.fetdq"));
@ -119,7 +116,6 @@ TEST_F(DBEncryptionTest, ReadEmptyFile) {
ASSERT_TRUE(data.empty());
}
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

View File

@ -4,7 +4,6 @@
// (found in the LICENSE.Apache file in the root directory).
//
#ifndef ROCKSDB_LITE
#include <algorithm>
#include <cstdint>
@ -439,4 +438,3 @@ Status DBImpl::GetLiveFilesStorageInfo(
} // namespace ROCKSDB_NAMESPACE
#endif // ROCKSDB_LITE

View File

@ -73,9 +73,7 @@ TEST_F(DBFlushTest, FlushWhileWritingManifest) {
ASSERT_OK(dbfull()->Flush(no_wait));
// If the issue is hit we will wait here forever.
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
#ifndef ROCKSDB_LITE
ASSERT_EQ(2, TotalTableFiles());
#endif // ROCKSDB_LITE
}
// Disable this test temporarily on Travis as it fails intermittently.
@ -105,9 +103,7 @@ TEST_F(DBFlushTest, SyncFail) {
// Now the background job will do the flush; wait for it.
// Returns the IO error that happened during flush.
ASSERT_NOK(dbfull()->TEST_WaitForFlushMemTable());
#ifndef ROCKSDB_LITE
ASSERT_EQ("", FilesPerLevel()); // flush failed.
#endif // ROCKSDB_LITE
Destroy(options);
}
@ -664,7 +660,6 @@ TEST_F(DBFlushTest, StatisticsGarbageRangeDeletes) {
Close();
}
#ifndef ROCKSDB_LITE
// This simple Listener can only handle one flush at a time.
class TestFlushListener : public EventListener {
public:
@ -744,10 +739,8 @@ class TestFlushListener : public EventListener {
Env* env_;
DBFlushTest* test_;
};
#endif // !ROCKSDB_LITE
// RocksDB lite does not support GetLiveFiles()
#ifndef ROCKSDB_LITE
TEST_F(DBFlushTest, FixFlushReasonRaceFromConcurrentFlushes) {
Options options = CurrentOptions();
options.atomic_flush = true;
@ -802,7 +795,6 @@ TEST_F(DBFlushTest, FixFlushReasonRaceFromConcurrentFlushes) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
#endif // !ROCKSDB_LITE
TEST_F(DBFlushTest, MemPurgeBasic) {
Options options = CurrentOptions();
@ -835,24 +827,16 @@ TEST_F(DBFlushTest, MemPurgeBasic) {
// Enforce size of a single MemTable to 64MB (64MB = 67108864 bytes).
options.write_buffer_size = 1 << 20;
#ifndef ROCKSDB_LITE
// Initially deactivate the MemPurge prototype.
options.experimental_mempurge_threshold = 0.0;
TestFlushListener* listener = new TestFlushListener(options.env, this);
options.listeners.emplace_back(listener);
#else
// Activate directly the MemPurge prototype.
// (RocksDB lite does not support dynamic options)
options.experimental_mempurge_threshold = 1.0;
#endif // !ROCKSDB_LITE
ASSERT_OK(TryReopen(options));
// RocksDB lite does not support dynamic options
#ifndef ROCKSDB_LITE
// Dynamically activate the MemPurge prototype without restarting the DB.
ColumnFamilyHandle* cfh = db_->DefaultColumnFamily();
ASSERT_OK(db_->SetOptions(cfh, {{"experimental_mempurge_threshold", "1.0"}}));
#endif
std::atomic<uint32_t> mempurge_count{0};
std::atomic<uint32_t> sst_count{0};
@ -985,7 +969,6 @@ TEST_F(DBFlushTest, MemPurgeBasic) {
}
// RocksDB lite does not support dynamic options
#ifndef ROCKSDB_LITE
TEST_F(DBFlushTest, MemPurgeBasicToggle) {
Options options = CurrentOptions();
@ -1098,12 +1081,10 @@ TEST_F(DBFlushTest, MemPurgeBasicToggle) {
Close();
}
// Closes the "#ifndef ROCKSDB_LITE"
// End of MemPurgeBasicToggle, which is not
// supported with RocksDB LITE because it
// relies on dynamically changing the option
// flag experimental_mempurge_threshold.
#endif
// At the moment, MemPurge feature is deactivated
// when atomic_flush is enabled. This is because the level
@ -1221,10 +1202,8 @@ TEST_F(DBFlushTest, MemPurgeDeleteAndDeleteRange) {
options.compression = kNoCompression;
options.inplace_update_support = false;
options.allow_concurrent_memtable_write = true;
#ifndef ROCKSDB_LITE
TestFlushListener* listener = new TestFlushListener(options.env, this);
options.listeners.emplace_back(listener);
#endif // !ROCKSDB_LITE
// Enforce size of a single MemTable to 64MB (64MB = 67108864 bytes).
options.write_buffer_size = 1 << 20;
// Activate the MemPurge prototype.
@ -1422,10 +1401,8 @@ TEST_F(DBFlushTest, MemPurgeAndCompactionFilter) {
options.compression = kNoCompression;
options.inplace_update_support = false;
options.allow_concurrent_memtable_write = true;
#ifndef ROCKSDB_LITE
TestFlushListener* listener = new TestFlushListener(options.env, this);
options.listeners.emplace_back(listener);
#endif // !ROCKSDB_LITE
// Create a ConditionalUpdate compaction filter
// that will update all the values of the KV pairs
// where the keys are "lower" than KEY4.
@ -1878,12 +1855,10 @@ TEST_F(DBFlushTest, ManualFlushFailsInReadOnlyMode) {
ASSERT_OK(db_->ContinueBackgroundWork());
// We ingested the error to env, so the returned status is not OK.
ASSERT_NOK(dbfull()->TEST_WaitForFlushMemTable());
#ifndef ROCKSDB_LITE
uint64_t num_bg_errors;
ASSERT_TRUE(
db_->GetIntProperty(DB::Properties::kBackgroundErrors, &num_bg_errors));
ASSERT_GT(num_bg_errors, 0);
#endif // ROCKSDB_LITE
// In the bug scenario, triggering another flush would cause the second flush
// to hang forever. After the fix we expect it to return an error.
@ -1925,7 +1900,6 @@ TEST_F(DBFlushTest, CFDropRaceWithWaitForFlushMemTables) {
SyncPoint::GetInstance()->DisableProcessing();
}
#ifndef ROCKSDB_LITE
TEST_F(DBFlushTest, FireOnFlushCompletedAfterCommittedResult) {
class TestListener : public EventListener {
public:
@ -2016,7 +1990,6 @@ TEST_F(DBFlushTest, FireOnFlushCompletedAfterCommittedResult) {
SyncPoint::GetInstance()->DisableProcessing();
SyncPoint::GetInstance()->ClearAllCallBacks();
}
#endif // !ROCKSDB_LITE
TEST_F(DBFlushTest, FlushWithBlob) {
constexpr uint64_t min_blob_size = 10;
@ -2078,7 +2051,6 @@ TEST_F(DBFlushTest, FlushWithBlob) {
ASSERT_EQ(blob_file->GetTotalBlobCount(), 1);
#ifndef ROCKSDB_LITE
const InternalStats* const internal_stats = cfd->internal_stats();
assert(internal_stats);
@ -2094,7 +2066,6 @@ TEST_F(DBFlushTest, FlushWithBlob) {
ASSERT_EQ(cf_stats_value[InternalStats::BYTES_FLUSHED],
compaction_stats[0].bytes_written +
compaction_stats[0].bytes_written_blob);
#endif // ROCKSDB_LITE
}
TEST_F(DBFlushTest, FlushWithChecksumHandoff1) {
@ -2408,7 +2379,6 @@ TEST_P(DBFlushTestBlobError, FlushError) {
ASSERT_NE(type, kBlobFile);
}
#ifndef ROCKSDB_LITE
const InternalStats* const internal_stats = cfd->internal_stats();
assert(internal_stats);
@ -2432,10 +2402,8 @@ TEST_P(DBFlushTestBlobError, FlushError) {
ASSERT_EQ(cf_stats_value[InternalStats::BYTES_FLUSHED],
compaction_stats[0].bytes_written +
compaction_stats[0].bytes_written_blob);
#endif // ROCKSDB_LITE
}
#ifndef ROCKSDB_LITE
TEST_F(DBFlushTest, TombstoneVisibleInSnapshot) {
class SimpleTestFlushListener : public EventListener {
public:
@ -2616,7 +2584,6 @@ TEST_P(DBAtomicFlushTest, ManualAtomicFlush) {
ASSERT_TRUE(cfh->cfd()->mem()->IsEmpty());
}
}
#endif // ROCKSDB_LITE
TEST_P(DBAtomicFlushTest, PrecomputeMinLogNumberToKeepNon2PC) {
Options options = CurrentOptions();

View File

@ -3,7 +3,6 @@
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#ifndef ROCKSDB_LITE
#include "db/db_impl/compacted_db_impl.h"
#include "db/db_impl/db_impl.h"
@ -254,4 +253,3 @@ Status CompactedDBImpl::Open(const Options& options, const std::string& dbname,
}
} // namespace ROCKSDB_NAMESPACE
#endif // ROCKSDB_LITE

View File

@ -4,7 +4,6 @@
// (found in the LICENSE.Apache file in the root directory).
#pragma once
#ifndef ROCKSDB_LITE
#include <string>
#include <vector>
@ -133,12 +132,10 @@ class CompactedDBImpl : public DBImpl {
// Share with DBImplReadOnly?
protected:
#ifndef ROCKSDB_LITE
Status FlushForGetLiveFiles() override {
// No-op for read-only DB
return Status::OK();
}
#endif // !ROCKSDB_LITE
private:
friend class DB;
@ -151,4 +148,3 @@ class CompactedDBImpl : public DBImpl {
LevelFilesBrief files_;
};
} // namespace ROCKSDB_NAMESPACE
#endif // ROCKSDB_LITE

View File

@ -213,17 +213,13 @@ DBImpl::DBImpl(const DBOptions& options, const std::string& dbname,
has_unpersisted_data_(false),
unable_to_release_oldest_log_(false),
num_running_ingest_file_(0),
#ifndef ROCKSDB_LITE
wal_manager_(immutable_db_options_, file_options_, io_tracer_,
seq_per_batch),
#endif // ROCKSDB_LITE
bg_work_paused_(0),
bg_compaction_paused_(0),
refitting_level_(false),
opened_successfully_(false),
#ifndef ROCKSDB_LITE
periodic_task_scheduler_(),
#endif // ROCKSDB_LITE
two_write_queues_(options.two_write_queues),
manual_wal_flush_(options.manual_wal_flush),
// last_sequencee_ is always maintained by the main queue that also writes
@ -265,7 +261,6 @@ DBImpl::DBImpl(const DBOptions& options, const std::string& dbname,
SetDbSessionId();
assert(!db_session_id_.empty());
#ifndef ROCKSDB_LITE
periodic_task_functions_.emplace(PeriodicTaskType::kDumpStats,
[this]() { this->DumpStats(); });
periodic_task_functions_.emplace(PeriodicTaskType::kPersistStats,
@ -275,7 +270,6 @@ DBImpl::DBImpl(const DBOptions& options, const std::string& dbname,
periodic_task_functions_.emplace(
PeriodicTaskType::kRecordSeqnoTime,
[this]() { this->RecordSeqnoToTimeMapping(); });
#endif // ROCKSDB_LITE
versions_.reset(new VersionSet(dbname_, &immutable_db_options_, file_options_,
table_cache_.get(), write_buffer_manager_,
@ -496,7 +490,6 @@ void DBImpl::CancelAllBackgroundWork(bool wait) {
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"Shutdown: canceling all background work");
#ifndef ROCKSDB_LITE
for (uint8_t task_type = 0;
task_type < static_cast<uint8_t>(PeriodicTaskType::kMax); task_type++) {
Status s = periodic_task_scheduler_.Unregister(
@ -507,7 +500,6 @@ void DBImpl::CancelAllBackgroundWork(bool wait) {
task_type, s.ToString().c_str());
}
}
#endif // !ROCKSDB_LITE
InstrumentedMutexLock l(&mutex_);
if (!shutting_down_.load(std::memory_order_acquire) &&
@ -704,7 +696,6 @@ Status DBImpl::CloseHelper() {
ROCKS_LOG_INFO(immutable_db_options_.info_log, "Shutdown complete");
LogFlush(immutable_db_options_.info_log);
#ifndef ROCKSDB_LITE
// If the sst_file_manager was allocated by us during DB::Open(), call
// Close() on it before closing the info_log. Otherwise, the background thread
// in SstFileManagerImpl might try to log something
@ -713,7 +704,6 @@ Status DBImpl::CloseHelper() {
immutable_db_options_.sst_file_manager.get());
sfm->Close();
}
#endif // ROCKSDB_LITE
if (immutable_db_options_.info_log && own_info_log_) {
Status s = immutable_db_options_.info_log->Close();
@ -791,7 +781,6 @@ void DBImpl::PrintStatistics() {
}
Status DBImpl::StartPeriodicTaskScheduler() {
#ifndef ROCKSDB_LITE
#ifndef NDEBUG
// It is only used by tests to disable the scheduler
@ -834,13 +823,9 @@ Status DBImpl::StartPeriodicTaskScheduler() {
periodic_task_functions_.at(PeriodicTaskType::kFlushInfoLog));
return s;
#else
return Status::OK();
#endif // !ROCKSDB_LITE
}
Status DBImpl::RegisterRecordSeqnoTimeWorker() {
#ifndef ROCKSDB_LITE
uint64_t min_time_duration = std::numeric_limits<uint64_t>::max();
uint64_t max_time_duration = std::numeric_limits<uint64_t>::min();
{
@ -883,9 +868,6 @@ Status DBImpl::RegisterRecordSeqnoTimeWorker() {
}
return s;
#else
return Status::OK();
#endif // !ROCKSDB_LITE
}
// estimate the total size of stats_history_
@ -906,7 +888,6 @@ size_t DBImpl::EstimateInMemoryStatsHistorySize() const {
void DBImpl::PersistStats() {
TEST_SYNC_POINT("DBImpl::PersistStats:Entry");
#ifndef ROCKSDB_LITE
if (shutdown_initiated_) {
return;
}
@ -1011,7 +992,6 @@ void DBImpl::PersistStats() {
stats_history_size, stats_history_.size());
}
TEST_SYNC_POINT("DBImpl::PersistStats:End");
#endif // !ROCKSDB_LITE
}
bool DBImpl::FindStatsByTime(uint64_t start_time, uint64_t end_time,
@ -1053,7 +1033,6 @@ Status DBImpl::GetStatsHistory(
void DBImpl::DumpStats() {
TEST_SYNC_POINT("DBImpl::DumpStats:1");
#ifndef ROCKSDB_LITE
std::string stats;
if (shutdown_initiated_) {
return;
@ -1118,7 +1097,6 @@ void DBImpl::DumpStats() {
ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s", stats.c_str());
}
}
#endif // !ROCKSDB_LITE
PrintStatistics();
}
@ -1173,11 +1151,6 @@ FSDirectory* DBImpl::GetDataDir(ColumnFamilyData* cfd, size_t path_id) const {
Status DBImpl::SetOptions(
ColumnFamilyHandle* column_family,
const std::unordered_map<std::string, std::string>& options_map) {
#ifdef ROCKSDB_LITE
(void)column_family;
(void)options_map;
return Status::NotSupported("Not supported in ROCKSDB LITE");
#else
auto* cfd =
static_cast_with_check<ColumnFamilyHandleImpl>(column_family)->cfd();
if (options_map.empty()) {
@ -1235,15 +1208,10 @@ Status DBImpl::SetOptions(
}
LogFlush(immutable_db_options_.info_log);
return s;
#endif // ROCKSDB_LITE
}
Status DBImpl::SetDBOptions(
const std::unordered_map<std::string, std::string>& options_map) {
#ifdef ROCKSDB_LITE
(void)options_map;
return Status::NotSupported("Not supported in ROCKSDB LITE");
#else
if (options_map.empty()) {
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"SetDBOptions(), empty input.");
@ -1408,7 +1376,6 @@ Status DBImpl::SetDBOptions(
}
LogFlush(immutable_db_options_.info_log);
return s;
#endif // ROCKSDB_LITE
}
// return the same level if it cannot be moved
@ -3295,11 +3262,6 @@ Iterator* DBImpl::NewIterator(const ReadOptions& read_options,
assert(cfd != nullptr);
ReadCallback* read_callback = nullptr; // No read callback provided.
if (read_options.tailing) {
#ifdef ROCKSDB_LITE
// not supported in lite version
result = nullptr;
#else
SuperVersion* sv = cfd->GetReferencedSuperVersion(this);
auto iter = new ForwardIterator(this, read_options, cfd, sv,
/* allow_unprepared_value */ true);
@ -3308,7 +3270,6 @@ Iterator* DBImpl::NewIterator(const ReadOptions& read_options,
cfd->user_comparator(), iter, sv->current, kMaxSequenceNumber,
sv->mutable_cf_options.max_sequential_skip_in_iterations, read_callback,
this, cfd);
#endif
} else {
// Note: no need to consider the special case of
// last_seq_same_as_publish_seq_==false since NewIterator is overridden in
@ -3439,10 +3400,6 @@ Status DBImpl::NewIterators(
iterators->clear();
iterators->reserve(column_families.size());
if (read_options.tailing) {
#ifdef ROCKSDB_LITE
return Status::InvalidArgument(
"Tailing iterator not supported in RocksDB lite");
#else
for (auto cfh : column_families) {
auto cfd = static_cast_with_check<ColumnFamilyHandleImpl>(cfh)->cfd();
SuperVersion* sv = cfd->GetReferencedSuperVersion(this);
@ -3454,7 +3411,6 @@ Status DBImpl::NewIterators(
sv->mutable_cf_options.max_sequential_skip_in_iterations,
read_callback, this, cfd));
}
#endif
} else {
// Note: no need to consider the special case of
// last_seq_same_as_publish_seq_==false since NewIterators is overridden in
@ -3476,11 +3432,9 @@ Status DBImpl::NewIterators(
const Snapshot* DBImpl::GetSnapshot() { return GetSnapshotImpl(false); }
#ifndef ROCKSDB_LITE
const Snapshot* DBImpl::GetSnapshotForWriteConflictBoundary() {
return GetSnapshotImpl(true);
}
#endif // ROCKSDB_LITE
std::pair<Status, std::shared_ptr<const Snapshot>>
DBImpl::CreateTimestampedSnapshot(SequenceNumber snapshot_seq, uint64_t ts) {
@ -3728,7 +3682,6 @@ void DBImpl::ReleaseSnapshot(const Snapshot* s) {
delete casted_s;
}
#ifndef ROCKSDB_LITE
Status DBImpl::GetPropertiesOfAllTables(ColumnFamilyHandle* column_family,
TablePropertiesCollection* props) {
auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
@ -3772,7 +3725,6 @@ Status DBImpl::GetPropertiesOfTablesInRange(ColumnFamilyHandle* column_family,
return s;
}
#endif // ROCKSDB_LITE
const std::string& DBImpl::GetName() const { return dbname_; }
@ -3791,7 +3743,6 @@ SystemClock* DBImpl::GetSystemClock() const {
return immutable_db_options_.clock;
}
#ifndef ROCKSDB_LITE
Status DBImpl::StartIOTrace(const TraceOptions& trace_options,
std::unique_ptr<TraceWriter>&& trace_writer) {
@ -3805,7 +3756,6 @@ Status DBImpl::EndIOTrace() {
return Status::OK();
}
#endif // ROCKSDB_LITE
Options DBImpl::GetOptions(ColumnFamilyHandle* column_family) const {
InstrumentedMutexLock l(&mutex_);
@ -3934,7 +3884,6 @@ bool DBImpl::GetPropertyHandleOptionsStatistics(std::string* value) {
return true;
}
#ifndef ROCKSDB_LITE
Status DBImpl::ResetStats() {
InstrumentedMutexLock l(&mutex_);
for (auto* cfd : *versions_->GetColumnFamilySet()) {
@ -3944,7 +3893,6 @@ Status DBImpl::ResetStats() {
}
return Status::OK();
}
#endif // ROCKSDB_LITE
bool DBImpl::GetAggregatedIntProperty(const Slice& property,
uint64_t* aggregated_value) {
@ -4153,7 +4101,6 @@ void DBImpl::ReleaseFileNumberFromPendingOutputs(
}
}
#ifndef ROCKSDB_LITE
Status DBImpl::GetUpdatesSince(
SequenceNumber seq, std::unique_ptr<TransactionLogIterator>* iter,
const TransactionLogIterator::ReadOptions& read_options) {
@ -4402,7 +4349,6 @@ void DBImpl::GetAllColumnFamilyMetaData(
}
}
#endif // ROCKSDB_LITE
Status DBImpl::CheckConsistency() {
mutex_.AssertHeld();
@ -4753,7 +4699,6 @@ Status DestroyDB(const std::string& dbname, const Options& options,
Status DBImpl::WriteOptionsFile(bool need_mutex_lock,
bool need_enter_write_thread) {
#ifndef ROCKSDB_LITE
WriteThread::Writer w;
if (need_mutex_lock) {
mutex_.Lock();
@ -4810,14 +4755,9 @@ Status DBImpl::WriteOptionsFile(bool need_mutex_lock,
s.ToString().c_str());
}
}
#else
(void)need_mutex_lock;
(void)need_enter_write_thread;
#endif // !ROCKSDB_LITE
return Status::OK();
}
#ifndef ROCKSDB_LITE
namespace {
void DeleteOptionsFilesHelper(const std::map<uint64_t, std::string>& filenames,
const size_t num_files_to_keep,
@ -4835,10 +4775,8 @@ void DeleteOptionsFilesHelper(const std::map<uint64_t, std::string>& filenames,
}
}
} // namespace
#endif // !ROCKSDB_LITE
Status DBImpl::DeleteObsoleteOptionsFiles() {
#ifndef ROCKSDB_LITE
std::vector<std::string> filenames;
// use an ordered map to keep the filenames sorted from the newest
// to the oldest.
@ -4866,13 +4804,9 @@ Status DBImpl::DeleteObsoleteOptionsFiles() {
DeleteOptionsFilesHelper(options_filenames, kNumOptionsFilesKept,
immutable_db_options_.info_log, GetEnv());
return Status::OK();
#else
return Status::OK();
#endif // !ROCKSDB_LITE
}
Status DBImpl::RenameTempFileToOptionsFile(const std::string& file_name) {
#ifndef ROCKSDB_LITE
Status s;
uint64_t options_file_number = versions_->NewFileNumber();
@ -4916,10 +4850,6 @@ Status DBImpl::RenameTempFileToOptionsFile(const std::string& file_name) {
DeleteObsoleteOptionsFiles().PermitUncheckedError();
}
return s;
#else
(void)file_name;
return Status::OK();
#endif // !ROCKSDB_LITE
}
#ifdef ROCKSDB_USING_THREAD_STATUS
@ -4967,7 +4897,6 @@ void DumpRocksDBBuildVersion(Logger* log) {
}
}
#ifndef ROCKSDB_LITE
SequenceNumber DBImpl::GetEarliestMemTableSequenceNumber(SuperVersion* sv,
bool include_history) {
// Find the earliest sequence number that we know we can rely on reading
@ -5924,6 +5853,5 @@ void DBImpl::RecordSeqnoToTimeMapping() {
seqno, unix_time);
}
}
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE
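Aside: the hunks in this file show the two shapes this removal takes everywhere. Plain #ifndef ROCKSDB_LITE / #endif pairs lose their preprocessor lines while the guarded code stays, and #ifdef ROCKSDB_LITE branches that only returned Status::NotSupported (as in the SetOptions and SetDBOptions hunks above) are deleted together with the guard. Below is a minimal, self-contained sketch of the second shape; Status here is a toy stand-in and SetSomething is a hypothetical function, not a real RocksDB API.

#include <iostream>
#include <string>

// Toy stand-in for rocksdb::Status, just enough to illustrate the guard pattern.
struct Status {
  std::string state;
  static Status OK() { return {"OK"}; }
  static Status NotSupported(const std::string& m) { return {"NotSupported: " + m}; }
};

// Before this commit, building with -DROCKSDB_LITE compiled only the stub branch.
// The commit removes the #ifdef/#else/#endif lines together with the stub,
// leaving the full-featured body unconditionally.
Status SetSomething(const std::string& value) {
#ifdef ROCKSDB_LITE
  (void)value;
  return Status::NotSupported("Not supported in ROCKSDB LITE");
#else
  return Status::OK();  // placeholder for the real implementation
#endif  // ROCKSDB_LITE
}

int main() {
  std::cout << SetSomething("example").state << std::endl;
  return 0;
}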

View File

@ -54,13 +54,9 @@
#include "rocksdb/env.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/status.h"
#ifndef ROCKSDB_LITE
#include "rocksdb/trace_reader_writer.h"
#endif // ROCKSDB_LITE
#include "rocksdb/transaction_log.h"
#ifndef ROCKSDB_LITE
#include "rocksdb/utilities/replayer.h"
#endif // ROCKSDB_LITE
#include "rocksdb/write_buffer_manager.h"
#include "table/merging_iterator.h"
#include "table/scoped_arena_iterator.h"
@ -469,7 +465,6 @@ class DBImpl : public DB {
uint64_t start_time, uint64_t end_time,
std::unique_ptr<StatsHistoryIterator>* stats_iterator) override;
#ifndef ROCKSDB_LITE
using DB::ResetStats;
virtual Status ResetStats() override;
// All the returned filenames start with "/"
@ -597,7 +592,6 @@ class DBImpl : public DB {
ColumnFamilyHandle* column_family, const Range* range, std::size_t n,
TablePropertiesCollection* props) override;
#endif // ROCKSDB_LITE
// ---- End of implementations of the DB interface ----
SystemClock* GetSystemClock() const;
@ -655,7 +649,6 @@ class DBImpl : public DB {
// depends also on data written to the WAL but not to the memtable.
SequenceNumber TEST_GetLastVisibleSequence() const;
#ifndef ROCKSDB_LITE
// Similar to Write() but will call the callback once on the single write
// thread to determine whether it is safe to perform the write.
virtual Status WriteWithCallback(const WriteOptions& write_options,
@ -720,7 +713,6 @@ class DBImpl : public DB {
Status TraceIteratorSeekForPrev(const uint32_t& cf_id, const Slice& key,
const Slice& lower_bound,
const Slice upper_bound);
#endif // ROCKSDB_LITE
// Similar to GetSnapshot(), but also lets the db know that this snapshot
// will be used for transaction write-conflict checking. The DB can then
@ -1176,9 +1168,7 @@ class DBImpl : public DB {
return files_grabbed_for_purge_;
}
#ifndef ROCKSDB_LITE
const PeriodicTaskScheduler& TEST_GetPeriodicTaskScheduler() const;
#endif // !ROCKSDB_LITE
#endif // NDEBUG
@ -1401,12 +1391,10 @@ class DBImpl : public DB {
void NotifyOnMemTableSealed(ColumnFamilyData* cfd,
const MemTableInfo& mem_table_info);
#ifndef ROCKSDB_LITE
void NotifyOnExternalFileIngested(
ColumnFamilyData* cfd, const ExternalSstFileIngestionJob& ingestion_job);
virtual Status FlushForGetLiveFiles();
#endif // !ROCKSDB_LITE
void NewThreadStatusCfInfo(ColumnFamilyData* cfd) const;
@ -1554,9 +1542,7 @@ class DBImpl : public DB {
friend class WriteUnpreparedTxnDB;
friend class WriteUnpreparedTxn;
#ifndef ROCKSDB_LITE
friend class ForwardIterator;
#endif
friend struct SuperVersion;
friend class CompactedDBImpl;
friend class DBTest_ConcurrentFlushWAL_Test;
@ -2023,7 +2009,6 @@ class DBImpl : public DB {
// Used by WriteImpl to update bg_error_ in case of memtable insert error.
void MemTableInsertStatusCheck(const Status& memtable_insert_status);
#ifndef ROCKSDB_LITE
Status CompactFilesImpl(const CompactionOptions& compact_options,
ColumnFamilyData* cfd, Version* version,
const std::vector<std::string>& input_file_names,
@ -2031,7 +2016,6 @@ class DBImpl : public DB {
const int output_level, int output_path_id,
JobContext* job_context, LogBuffer* log_buffer,
CompactionJobInfo* compaction_job_info);
#endif // ROCKSDB_LITE
ColumnFamilyData* GetColumnFamilyDataByName(const std::string& cf_name);
@ -2170,7 +2154,6 @@ class DBImpl : public DB {
bool ShouldntRunManualCompaction(ManualCompactionState* m);
bool HaveManualCompaction(ColumnFamilyData* cfd);
bool MCOverlap(ManualCompactionState* m, ManualCompactionState* m1);
#ifndef ROCKSDB_LITE
void BuildCompactionJobInfo(const ColumnFamilyData* cfd, Compaction* c,
const Status& st,
const CompactionJobStats& compaction_job_stats,
@ -2183,7 +2166,6 @@ class DBImpl : public DB {
ColumnFamilyData* cfd, uint64_t num,
std::unique_ptr<std::list<uint64_t>::iterator>& pending_output_elem,
uint64_t* next_file_number);
#endif //! ROCKSDB_LITE
bool ShouldPurge(uint64_t file_number) const;
void MarkAsGrabbedForPurge(uint64_t file_number);
@ -2593,9 +2575,7 @@ class DBImpl : public DB {
// REQUIRES: mutex held
int num_running_ingest_file_;
#ifndef ROCKSDB_LITE
WalManager wal_manager_;
#endif // ROCKSDB_LITE
// A value of > 0 temporarily disables scheduling of background work
int bg_work_paused_;
@ -2623,14 +2603,12 @@ class DBImpl : public DB {
// Only to be set during initialization
std::unique_ptr<PreReleaseCallback> recoverable_state_pre_release_callback_;
#ifndef ROCKSDB_LITE
// Scheduler to run DumpStats(), PersistStats(), and FlushInfoLog().
// Currently, internally it has a global timer instance for running the tasks.
PeriodicTaskScheduler periodic_task_scheduler_;
// It contains the implementations for each periodic task.
std::map<PeriodicTaskType, const PeriodicTaskFunc> periodic_task_functions_;
#endif
// When set, we use a separate queue for writes that don't write to memtable.
// In 2PC these are the writes at Prepare phase.

View File

@ -30,7 +30,6 @@ bool DBImpl::EnoughRoomForCompaction(
bool* sfm_reserved_compact_space, LogBuffer* log_buffer) {
// Check if we have enough room to do the compaction
bool enough_room = true;
#ifndef ROCKSDB_LITE
auto sfm = static_cast<SstFileManagerImpl*>(
immutable_db_options_.sst_file_manager.get());
if (sfm) {
@ -45,11 +44,6 @@ bool DBImpl::EnoughRoomForCompaction(
*sfm_reserved_compact_space = true;
}
}
#else
(void)cfd;
(void)inputs;
(void)sfm_reserved_compact_space;
#endif // ROCKSDB_LITE
if (!enough_room) {
// Just in case tests want to change the value of enough_room
TEST_SYNC_POINT_CALLBACK(
@ -259,11 +253,9 @@ Status DBImpl::FlushMemTableToOutputFile(
TEST_SYNC_POINT_CALLBACK(
"DBImpl::FlushMemTableToOutputFile:AfterPickMemtables", &flush_job);
#ifndef ROCKSDB_LITE
// may temporarily unlock and lock the mutex.
NotifyOnFlushBegin(cfd, &file_meta, mutable_cf_options, job_context->job_id,
flush_reason);
#endif // ROCKSDB_LITE
bool switched_to_mempurge = false;
// Within flush_job.Run, rocksdb may call event listener to notify
@ -344,7 +336,6 @@ Status DBImpl::FlushMemTableToOutputFile(
// If flush ran smoothly and no mempurge happened
// install new SST file path.
if (s.ok() && (!switched_to_mempurge)) {
#ifndef ROCKSDB_LITE
// may temporarily unlock and lock the mutex.
NotifyOnFlushCompleted(cfd, mutable_cf_options,
flush_job.GetCommittedFlushJobsInfo());
@ -367,7 +358,6 @@ Status DBImpl::FlushMemTableToOutputFile(
error_handler_.SetBGError(new_bg_error, BackgroundErrorReason::kFlush);
}
}
#endif // ROCKSDB_LITE
}
TEST_SYNC_POINT("DBImpl::FlushMemTableToOutputFile:Finish");
return s;
@ -486,7 +476,6 @@ Status DBImpl::AtomicFlushMemTablesToOutputFiles(
IOStatus log_io_s = IOStatus::OK();
assert(num_cfs == static_cast<int>(jobs.size()));
#ifndef ROCKSDB_LITE
for (int i = 0; i != num_cfs; ++i) {
const MutableCFOptions& mutable_cf_options = all_mutable_cf_options.at(i);
// may temporarily unlock and lock the mutex.
@ -494,7 +483,6 @@ Status DBImpl::AtomicFlushMemTablesToOutputFiles(
NotifyOnFlushBegin(cfds[i], &file_meta[i], mutable_cf_options,
job_context->job_id, flush_reason);
}
#endif /* !ROCKSDB_LITE */
if (logfile_number_ > 0) {
// TODO (yanqin) investigate whether we should sync the closed logs for
@ -703,10 +691,8 @@ Status DBImpl::AtomicFlushMemTablesToOutputFiles(
mems_list.emplace_back(&mems);
mutable_cf_options_list.emplace_back(&all_mutable_cf_options[i]);
tmp_file_meta.emplace_back(&file_meta[i]);
#ifndef ROCKSDB_LITE
committed_flush_jobs_info.emplace_back(
jobs[i]->GetCommittedFlushJobsInfo());
#endif //! ROCKSDB_LITE
}
}
@ -758,7 +744,6 @@ Status DBImpl::AtomicFlushMemTablesToOutputFiles(
if (made_progress) {
*made_progress = true;
}
#ifndef ROCKSDB_LITE
auto sfm = static_cast<SstFileManagerImpl*>(
immutable_db_options_.sst_file_manager.get());
assert(all_mutable_cf_options.size() == static_cast<size_t>(num_cfs));
@ -789,7 +774,6 @@ Status DBImpl::AtomicFlushMemTablesToOutputFiles(
}
}
}
#endif // ROCKSDB_LITE
}
// Need to undo atomic flush if something went wrong, i.e. s is not OK and
@ -827,7 +811,6 @@ Status DBImpl::AtomicFlushMemTablesToOutputFiles(
void DBImpl::NotifyOnFlushBegin(ColumnFamilyData* cfd, FileMetaData* file_meta,
const MutableCFOptions& mutable_cf_options,
int job_id, FlushReason flush_reason) {
#ifndef ROCKSDB_LITE
if (immutable_db_options_.listeners.size() == 0U) {
return;
}
@ -867,19 +850,11 @@ void DBImpl::NotifyOnFlushBegin(ColumnFamilyData* cfd, FileMetaData* file_meta,
mutex_.Lock();
// no need to signal bg_cv_ as it will be signaled at the end of the
// flush process.
#else
(void)cfd;
(void)file_meta;
(void)mutable_cf_options;
(void)job_id;
(void)flush_reason;
#endif // ROCKSDB_LITE
}
void DBImpl::NotifyOnFlushCompleted(
ColumnFamilyData* cfd, const MutableCFOptions& mutable_cf_options,
std::list<std::unique_ptr<FlushJobInfo>>* flush_jobs_info) {
#ifndef ROCKSDB_LITE
assert(flush_jobs_info != nullptr);
if (immutable_db_options_.listeners.size() == 0U) {
return;
@ -911,11 +886,6 @@ void DBImpl::NotifyOnFlushCompleted(
mutex_.Lock();
// no need to signal bg_cv_ as it will be signaled at the end of the
// flush process.
#else
(void)cfd;
(void)mutable_cf_options;
(void)flush_jobs_info;
#endif // ROCKSDB_LITE
}
Status DBImpl::CompactRange(const CompactRangeOptions& options,
@ -1298,17 +1268,6 @@ Status DBImpl::CompactFiles(const CompactionOptions& compact_options,
const int output_level, const int output_path_id,
std::vector<std::string>* const output_file_names,
CompactionJobInfo* compaction_job_info) {
#ifdef ROCKSDB_LITE
(void)compact_options;
(void)column_family;
(void)input_file_names;
(void)output_level;
(void)output_path_id;
(void)output_file_names;
(void)compaction_job_info;
// not supported in lite version
return Status::NotSupported("Not supported in ROCKSDB LITE");
#else
if (column_family == nullptr) {
return Status::InvalidArgument("ColumnFamilyHandle must be non-null.");
}
@ -1367,10 +1326,8 @@ Status DBImpl::CompactFiles(const CompactionOptions& compact_options,
}
return s;
#endif // ROCKSDB_LITE
}
#ifndef ROCKSDB_LITE
Status DBImpl::CompactFilesImpl(
const CompactionOptions& compact_options, ColumnFamilyData* cfd,
Version* version, const std::vector<std::string>& input_file_names,
@ -1512,14 +1469,12 @@ Status DBImpl::CompactFilesImpl(
// SetBGError
compaction_job.io_status().PermitUncheckedError();
c->ReleaseCompactionFiles(s);
#ifndef ROCKSDB_LITE
// Need to make sure SstFileManager does its bookkeeping
auto sfm = static_cast<SstFileManagerImpl*>(
immutable_db_options_.sst_file_manager.get());
if (sfm && sfm_reserved_compact_space) {
sfm->OnCompactionCompletion(c.get());
}
#endif // ROCKSDB_LITE
ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem);
@ -1576,7 +1531,6 @@ Status DBImpl::CompactFilesImpl(
return status;
}
#endif // ROCKSDB_LITE
Status DBImpl::PauseBackgroundWork() {
InstrumentedMutexLock guard_lock(&mutex_);
@ -1610,7 +1564,6 @@ void DBImpl::NotifyOnCompactionBegin(ColumnFamilyData* cfd, Compaction* c,
const Status& st,
const CompactionJobStats& job_stats,
int job_id) {
#ifndef ROCKSDB_LITE
if (immutable_db_options_.listeners.empty()) {
return;
}
@ -1639,19 +1592,11 @@ void DBImpl::NotifyOnCompactionBegin(ColumnFamilyData* cfd, Compaction* c,
}
mutex_.Lock();
current->Unref();
#else
(void)cfd;
(void)c;
(void)st;
(void)job_stats;
(void)job_id;
#endif // ROCKSDB_LITE
}
void DBImpl::NotifyOnCompactionCompleted(
ColumnFamilyData* cfd, Compaction* c, const Status& st,
const CompactionJobStats& compaction_job_stats, const int job_id) {
#ifndef ROCKSDB_LITE
if (immutable_db_options_.listeners.size() == 0U) {
return;
}
@ -1681,13 +1626,6 @@ void DBImpl::NotifyOnCompactionCompleted(
current->Unref();
// no need to signal bg_cv_ as it will be signaled at the end of the
// flush process.
#else
(void)cfd;
(void)c;
(void)st;
(void)compaction_job_stats;
(void)job_id;
#endif // ROCKSDB_LITE
}
// REQUIREMENT: block all background work by calling PauseBackgroundWork()
@ -3590,14 +3528,12 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
c->ReleaseCompactionFiles(status);
*made_progress = true;
#ifndef ROCKSDB_LITE
// Need to make sure SstFileManager does its bookkeeping
auto sfm = static_cast<SstFileManagerImpl*>(
immutable_db_options_.sst_file_manager.get());
if (sfm && sfm_reserved_compact_space) {
sfm->OnCompactionCompletion(c.get());
}
#endif // ROCKSDB_LITE
NotifyOnCompactionCompleted(c->column_family_data(), c.get(), status,
compaction_job_stats, job_context->job_id);
@ -3774,7 +3710,6 @@ bool DBImpl::MCOverlap(ManualCompactionState* m, ManualCompactionState* m1) {
return false;
}
#ifndef ROCKSDB_LITE
void DBImpl::BuildCompactionJobInfo(
const ColumnFamilyData* cfd, Compaction* c, const Status& st,
const CompactionJobStats& compaction_job_stats, const int job_id,
@ -3843,7 +3778,6 @@ void DBImpl::BuildCompactionJobInfo(
std::move(blob_file_garbage_info));
}
}
#endif
// SuperVersionContext gets created and destructed outside of the lock --
// we use this conveniently to:

View File

@ -289,7 +289,6 @@ size_t DBImpl::TEST_GetWalPreallocateBlockSize(
return GetWalPreallocateBlockSize(write_buffer_size);
}
#ifndef ROCKSDB_LITE
void DBImpl::TEST_WaitForPeriodicTaskRun(std::function<void()> callback) const {
periodic_task_scheduler_.TEST_WaitForRun(callback);
}
@ -303,7 +302,6 @@ SeqnoToTimeMapping DBImpl::TEST_GetSeqnoToTimeMapping() const {
return seqno_time_mapping_;
}
#endif // !ROCKSDB_LITE
size_t DBImpl::TEST_EstimateInMemoryStatsHistorySize() const {
return EstimateInMemoryStatsHistorySize();

View File

@ -20,7 +20,6 @@
namespace ROCKSDB_NAMESPACE {
#ifndef ROCKSDB_LITE
Status DBImpl::SuggestCompactRange(ColumnFamilyHandle* column_family,
const Slice* begin, const Slice* end) {
auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
@ -154,6 +153,5 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
return status;
}
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

View File

@ -599,13 +599,11 @@ void DBImpl::PurgeObsoleteFiles(JobContext& state, bool schedule_only) {
to_delete;
}
#ifndef ROCKSDB_LITE
if (type == kWalFile && (immutable_db_options_.WAL_ttl_seconds > 0 ||
immutable_db_options_.WAL_size_limit_MB > 0)) {
wal_manager_.ArchiveWALFile(fname, number);
continue;
}
#endif // !ROCKSDB_LITE
// If I do not own these files, e.g. secondary instance with max_open_files
// = -1, then no need to delete or schedule delete these files since they
@ -669,9 +667,7 @@ void DBImpl::PurgeObsoleteFiles(JobContext& state, bool schedule_only) {
}
}
}
#ifndef ROCKSDB_LITE
wal_manager_.PurgeObsoleteWALFiles();
#endif // ROCKSDB_LITE
LogFlush(immutable_db_options_.info_log);
InstrumentedMutexLock l(&mutex_);
--pending_purge_obsolete_files_;

View File

@ -153,7 +153,6 @@ DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src,
result.avoid_flush_during_recovery = false;
}
#ifndef ROCKSDB_LITE
ImmutableDBOptions immutable_db_options(result);
if (!immutable_db_options.IsWalDirSameAsDBPath()) {
// Either the WAL dir and db_paths[0]/db_name are not the same, or we
@ -195,7 +194,6 @@ DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src,
NewSstFileManager(result.env, result.info_log));
result.sst_file_manager = sst_file_manager;
}
#endif // !ROCKSDB_LITE
// Supported wal compression types
if (!StreamingCompressionTypeSupported(result.wal_compression)) {
@ -845,7 +843,6 @@ Status DBImpl::LogAndApplyForRecovery(const RecoveryContext& recovery_ctx) {
}
void DBImpl::InvokeWalFilterIfNeededOnColumnFamilyToWalNumberMap() {
#ifndef ROCKSDB_LITE
if (immutable_db_options_.wal_filter == nullptr) {
return;
}
@ -863,7 +860,6 @@ void DBImpl::InvokeWalFilterIfNeededOnColumnFamilyToWalNumberMap() {
}
wal_filter.ColumnFamilyLogNumberMap(cf_lognumber_map, cf_name_id_map);
#endif // !ROCKSDB_LITE
}
bool DBImpl::InvokeWalFilterIfNeededOnWalRecord(uint64_t wal_number,
@ -872,7 +868,6 @@ bool DBImpl::InvokeWalFilterIfNeededOnWalRecord(uint64_t wal_number,
Status& status,
bool& stop_replay,
WriteBatch& batch) {
#ifndef ROCKSDB_LITE
if (immutable_db_options_.wal_filter == nullptr) {
return true;
}
@ -958,15 +953,6 @@ bool DBImpl::InvokeWalFilterIfNeededOnWalRecord(uint64_t wal_number,
batch = new_batch;
}
return true;
#else // !ROCKSDB_LITE
(void)wal_number;
(void)wal_fname;
(void)reporter;
(void)status;
(void)stop_replay;
(void)batch;
return true;
#endif // ROCKSDB_LITE
}
// REQUIRES: wal_numbers are sorted in ascending order
@ -1981,7 +1967,6 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
}
impl->mutex_.Unlock();
#ifndef ROCKSDB_LITE
auto sfm = static_cast<SstFileManagerImpl*>(
impl->immutable_db_options_.sst_file_manager.get());
if (s.ok() && sfm) {
@ -2065,7 +2050,6 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
impl->immutable_db_options_.db_paths[0].path);
}
#endif // !ROCKSDB_LITE
if (s.ok()) {
ROCKS_LOG_HEADER(impl->immutable_db_options_.info_log, "DB pointer %p",

View File

@ -16,7 +16,6 @@
namespace ROCKSDB_NAMESPACE {
#ifndef ROCKSDB_LITE
DBImplReadOnly::DBImplReadOnly(const DBOptions& db_options,
const std::string& dbname)
@ -321,21 +320,5 @@ Status DBImplReadOnly::OpenForReadOnlyWithoutCheck(
return s;
}
#else // !ROCKSDB_LITE
Status DB::OpenForReadOnly(const Options& /*options*/,
const std::string& /*dbname*/, DB** /*dbptr*/,
bool /*error_if_wal_file_exists*/) {
return Status::NotSupported("Not supported in ROCKSDB_LITE.");
}
Status DB::OpenForReadOnly(
const DBOptions& /*db_options*/, const std::string& /*dbname*/,
const std::vector<ColumnFamilyDescriptor>& /*column_families*/,
std::vector<ColumnFamilyHandle*>* /*handles*/, DB** /*dbptr*/,
bool /*error_if_wal_file_exists*/) {
return Status::NotSupported("Not supported in ROCKSDB_LITE.");
}
#endif // !ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE
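Aside: the deleted stubs above were the only LITE definitions of DB::OpenForReadOnly, so both overloads now always bind to the real read-only implementation. A minimal caller-side sketch using the first overload visible in the hunk follows; the database path is a placeholder and the DB is assumed to already exist.

#include <string>

#include "rocksdb/db.h"

int main() {
  rocksdb::Options options;

  rocksdb::DB* db = nullptr;
  // Same overload as the removed stub:
  // (options, dbname, dbptr, error_if_wal_file_exists).
  rocksdb::Status s = rocksdb::DB::OpenForReadOnly(
      options, "/tmp/readonly_example_db", &db,
      /*error_if_wal_file_exists=*/false);
  if (!s.ok()) {
    return 1;
  }

  std::string value;
  s = db->Get(rocksdb::ReadOptions(), "some_key", &value);
  // s.IsNotFound() is expected when the key is absent; writes would be rejected
  // on a read-only handle.
  delete db;
  return s.ok() || s.IsNotFound() ? 0 : 1;
}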

View File

@ -5,7 +5,6 @@
#pragma once
#ifndef ROCKSDB_LITE
#include <string>
#include <vector>
@ -146,12 +145,10 @@ class DBImplReadOnly : public DBImpl {
// FIXME: some missing overrides for more "write" functions
protected:
#ifndef ROCKSDB_LITE
Status FlushForGetLiveFiles() override {
// No-op for read-only DB
return Status::OK();
}
#endif // !ROCKSDB_LITE
private:
// A "helper" function for DB::OpenForReadOnly without column families
@ -167,4 +164,3 @@ class DBImplReadOnly : public DBImpl {
};
} // namespace ROCKSDB_NAMESPACE
#endif // !ROCKSDB_LITE

View File

@ -17,7 +17,6 @@
namespace ROCKSDB_NAMESPACE {
#ifndef ROCKSDB_LITE
DBImplSecondary::DBImplSecondary(const DBOptions& db_options,
const std::string& dbname,
std::string secondary_path)
@ -946,22 +945,5 @@ Status DB::OpenAndCompact(
output, override_options);
}
#else // !ROCKSDB_LITE
Status DB::OpenAsSecondary(const Options& /*options*/,
const std::string& /*name*/,
const std::string& /*secondary_path*/,
DB** /*dbptr*/) {
return Status::NotSupported("Not supported in ROCKSDB_LITE.");
}
Status DB::OpenAsSecondary(
const DBOptions& /*db_options*/, const std::string& /*dbname*/,
const std::string& /*secondary_path*/,
const std::vector<ColumnFamilyDescriptor>& /*column_families*/,
std::vector<ColumnFamilyHandle*>* /*handles*/, DB** /*dbptr*/) {
return Status::NotSupported("Not supported in ROCKSDB_LITE.");
}
#endif // !ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

View File

@ -5,7 +5,6 @@
#pragma once
#ifndef ROCKSDB_LITE
#include <string>
#include <vector>
@ -269,12 +268,10 @@ class DBImplSecondary : public DBImpl {
#endif // NDEBUG
protected:
#ifndef ROCKSDB_LITE
Status FlushForGetLiveFiles() override {
// No-op for read-only DB
return Status::OK();
}
#endif // !ROCKSDB_LITE
// ColumnFamilyCollector is a write batch handler which does nothing
// except recording unique column family IDs
@ -407,4 +404,3 @@ class DBImplSecondary : public DBImpl {
} // namespace ROCKSDB_NAMESPACE
#endif // !ROCKSDB_LITE

View File

@ -149,7 +149,6 @@ Status DBImpl::Write(const WriteOptions& write_options, WriteBatch* my_batch) {
return s;
}
#ifndef ROCKSDB_LITE
Status DBImpl::WriteWithCallback(const WriteOptions& write_options,
WriteBatch* my_batch,
WriteCallback* callback) {
@ -163,7 +162,6 @@ Status DBImpl::WriteWithCallback(const WriteOptions& write_options,
}
return s;
}
#endif // ROCKSDB_LITE
// The main write queue. This is the only write queue that updates LastSequence.
// When using one write queue, the same sequence also indicates the last
@ -2032,7 +2030,6 @@ Status DBImpl::ScheduleFlushes(WriteContext* context) {
return status;
}
#ifndef ROCKSDB_LITE
void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* /*cfd*/,
const MemTableInfo& mem_table_info) {
if (immutable_db_options_.listeners.size() == 0U) {
@ -2048,7 +2045,6 @@ void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* /*cfd*/,
}
mutex_.Lock();
}
#endif // ROCKSDB_LITE
// REQUIRES: mutex_ is held
// REQUIRES: this thread is currently at the front of the writer queue
@ -2087,14 +2083,12 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
const MutableCFOptions mutable_cf_options = *cfd->GetLatestMutableCFOptions();
// Set memtable_info for memtable sealed callback
#ifndef ROCKSDB_LITE
MemTableInfo memtable_info;
memtable_info.cf_name = cfd->GetName();
memtable_info.first_seqno = cfd->mem()->GetFirstSequenceNumber();
memtable_info.earliest_seqno = cfd->mem()->GetEarliestSequenceNumber();
memtable_info.num_entries = cfd->mem()->num_entries();
memtable_info.num_deletes = cfd->mem()->num_deletes();
#endif // ROCKSDB_LITE
// Log this later after lock release. It may be outdated, e.g., if background
// flush happens before logging, but that should be ok.
int num_imm_unflushed = cfd->imm()->NumNotFlushed();
@ -2253,11 +2247,9 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
InstallSuperVersionAndScheduleWork(cfd, &context->superversion_context,
mutable_cf_options);
#ifndef ROCKSDB_LITE
// Notify client that memtable is sealed, now that we have successfully
// installed a new memtable
NotifyOnMemTableSealed(cfd, memtable_info);
#endif // ROCKSDB_LITE
// It is possible that we got here without checking the value of i_os, but
// that is okay. If we did, it most likely means that s was already an error.
// In any case, ignore any unchecked error for i_os here.

View File

@ -19,7 +19,6 @@ class DBIOFailureTest : public DBTestBase {
DBIOFailureTest() : DBTestBase("db_io_failure_test", /*env_do_fsync=*/true) {}
};
#ifndef ROCKSDB_LITE
// Check that number of files does not grow when writes are dropped
TEST_F(DBIOFailureTest, DropWrites) {
do {
@ -123,7 +122,6 @@ TEST_F(DBIOFailureTest, NoSpaceCompactRange) {
env_->no_space_.store(false, std::memory_order_release);
} while (ChangeCompactOptions());
}
#endif // ROCKSDB_LITE
TEST_F(DBIOFailureTest, NonWritableFileSystem) {
do {
@ -147,7 +145,6 @@ TEST_F(DBIOFailureTest, NonWritableFileSystem) {
} while (ChangeCompactOptions());
}
#ifndef ROCKSDB_LITE
TEST_F(DBIOFailureTest, ManifestWriteError) {
// Test for the following problem:
// (a) Compaction produces file F
@ -582,7 +579,6 @@ TEST_F(DBIOFailureTest, CompactionSstSyncError) {
ASSERT_EQ("bar3", Get(1, "foo"));
}
#endif // !(defined NDEBUG) || !defined(OS_WIN)
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE
int main(int argc, char** argv) {

View File

@ -1439,7 +1439,6 @@ void DBIter::Seek(const Slice& target) {
PERF_CPU_TIMER_GUARD(iter_seek_cpu_nanos, clock_);
StopWatch sw(clock_, statistics_, DB_SEEK);
#ifndef ROCKSDB_LITE
if (db_impl_ != nullptr && cfd_ != nullptr) {
// TODO: What do we do if this returns an error?
Slice lower_bound, upper_bound;
@ -1456,7 +1455,6 @@ void DBIter::Seek(const Slice& target) {
db_impl_->TraceIteratorSeek(cfd_->GetID(), target, lower_bound, upper_bound)
.PermitUncheckedError();
}
#endif // ROCKSDB_LITE
status_ = Status::OK();
ReleaseTempPinnedData();
@ -1514,7 +1512,6 @@ void DBIter::SeekForPrev(const Slice& target) {
PERF_CPU_TIMER_GUARD(iter_seek_cpu_nanos, clock_);
StopWatch sw(clock_, statistics_, DB_SEEK);
#ifndef ROCKSDB_LITE
if (db_impl_ != nullptr && cfd_ != nullptr) {
// TODO: What do we do if this returns an error?
Slice lower_bound, upper_bound;
@ -1533,7 +1530,6 @@ void DBIter::SeekForPrev(const Slice& target) {
upper_bound)
.PermitUncheckedError();
}
#endif // ROCKSDB_LITE
status_ = Status::OK();
ReleaseTempPinnedData();

View File

@ -388,13 +388,7 @@ class DBIter final : public Iterator {
MergeContext merge_context_;
LocalStatistics local_stats_;
PinnedIteratorsManager pinned_iters_mgr_;
#ifdef ROCKSDB_LITE
ROCKSDB_FIELD_UNUSED
#endif
DBImpl* db_impl_;
#ifdef ROCKSDB_LITE
ROCKSDB_FIELD_UNUSED
#endif
ColumnFamilyData* cfd_;
const Slice* const timestamp_ub_;
const Slice* const timestamp_lb_;

View File

@ -891,7 +891,6 @@ TEST_P(DBIteratorTest, IteratorDeleteAfterCfDrop) {
}
// SetOptions not defined in ROCKSDB LITE
#ifndef ROCKSDB_LITE
TEST_P(DBIteratorTest, DBIteratorBoundTest) {
Options options = CurrentOptions();
options.env = env_;
@ -1119,7 +1118,6 @@ TEST_P(DBIteratorTest, DBIteratorBoundMultiSeek) {
TestGetTickerCount(options, BLOCK_CACHE_MISS));
}
}
#endif
TEST_P(DBIteratorTest, DBIteratorBoundOptimizationTest) {
for (auto format_version : {2, 3, 4}) {
@ -1564,7 +1562,6 @@ INSTANTIATE_TEST_CASE_P(DBIteratorTestForPinnedDataInstance,
DBIteratorTestForPinnedData,
testing::Values(true, false));
#ifndef ROCKSDB_LITE
TEST_P(DBIteratorTest, PinnedDataIteratorMultipleFiles) {
Options options = CurrentOptions();
BlockBasedTableOptions table_options;
@ -1634,7 +1631,6 @@ TEST_P(DBIteratorTest, PinnedDataIteratorMultipleFiles) {
delete iter;
}
#endif
TEST_P(DBIteratorTest, PinnedDataIteratorMergeOperator) {
Options options = CurrentOptions();
@ -2216,9 +2212,7 @@ TEST_P(DBIteratorTest, ReadAhead) {
ASSERT_OK(Put(Key(i), value));
}
ASSERT_OK(Flush());
#ifndef ROCKSDB_LITE
ASSERT_EQ("1,1,1", FilesPerLevel());
#endif // !ROCKSDB_LITE
env_->random_read_bytes_counter_ = 0;
options.statistics->setTickerCount(NO_FILE_OPENS, 0);
@ -2281,12 +2275,10 @@ TEST_P(DBIteratorTest, DBIteratorSkipRecentDuplicatesTest) {
ASSERT_OK(Put("b", std::to_string(i + 1).c_str()));
}
#ifndef ROCKSDB_LITE
// Check that memtable wasn't flushed.
std::string val;
ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level0", &val));
EXPECT_EQ("0", val);
#endif
// Seek iterator to a smaller key.
get_perf_context()->Reset();

View File

@ -10,7 +10,6 @@
// Introduction of SyncPoint effectively disabled building and running this test
// in Release build.
// which is a pity, as it is a good test
#if !defined(ROCKSDB_LITE)
#include "db/db_test_util.h"
#include "env/mock_env.h"
@ -290,16 +289,9 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorBlobs) {
}
} // namespace ROCKSDB_NAMESPACE
#endif // !defined(ROCKSDB_LITE)
int main(int argc, char** argv) {
#if !defined(ROCKSDB_LITE)
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
#else
(void)argc;
(void)argv;
return 0;
#endif
}

View File

@ -72,12 +72,8 @@ TEST_F(DBLogicalBlockSizeCacheTest, OpenClose) {
printf("Open\n");
ASSERT_OK(DB::Open(options, dbname_, &db));
} else {
#ifdef ROCKSDB_LITE
break;
#else
printf("OpenForReadOnly\n");
ASSERT_OK(DB::OpenForReadOnly(options, dbname_, &db));
#endif
}
ASSERT_EQ(2, cache_->Size());
ASSERT_TRUE(cache_->Contains(data_path_0_));
@ -104,12 +100,8 @@ TEST_F(DBLogicalBlockSizeCacheTest, OpenDelete) {
printf("Open\n");
ASSERT_OK(DB::Open(options, dbname_, &db));
} else {
#ifdef ROCKSDB_LITE
break;
#else
printf("OpenForReadOnly\n");
ASSERT_OK(DB::OpenForReadOnly(options, dbname_, &db));
#endif
}
ASSERT_EQ(1, cache_->Size());
ASSERT_TRUE(cache_->Contains(dbname_));
@ -261,16 +253,12 @@ TEST_F(DBLogicalBlockSizeCacheTest, OpenWithColumnFamilies) {
{"default", ColumnFamilyOptions()}},
&cfs, &db));
} else {
#ifdef ROCKSDB_LITE
break;
#else
printf("OpenForReadOnly\n");
ASSERT_OK(DB::OpenForReadOnly(options, dbname_,
{{"cf1", cf_options},
{"cf2", cf_options},
{"default", ColumnFamilyOptions()}},
&cfs, &db));
#endif
}
// Logical block sizes of dbname_ and cf_path_0_ are cached during Open.
@ -360,14 +348,10 @@ TEST_F(DBLogicalBlockSizeCacheTest, DestroyColumnFamilyHandle) {
options, dbname_,
{{"cf", cf_options}, {"default", ColumnFamilyOptions()}}, &cfs, &db));
} else {
#ifdef ROCKSDB_LITE
break;
#else
printf("OpenForReadOnly\n");
ASSERT_OK(DB::OpenForReadOnly(
options, dbname_,
{{"cf", cf_options}, {"default", ColumnFamilyOptions()}}, &cfs, &db));
#endif
}
// cf_path_0_ and dbname_ are cached.
ASSERT_EQ(2, cache_->Size());

View File

@ -8,9 +8,7 @@
#include "rocksdb/perf_context.h"
#include "rocksdb/utilities/debug.h"
#include "table/block_based/block_builder.h"
#if !defined(ROCKSDB_LITE)
#include "test_util/sync_point.h"
#endif
#include "rocksdb/merge_operator.h"
#include "utilities/fault_injection_env.h"
#include "utilities/merge_operators.h"

View File

@ -202,7 +202,6 @@ TEST_F(DBMergeOperatorTest, MergeErrorOnIteration) {
VerifyDBInternal({{"k1", "v1"}, {"k2", "corrupted"}, {"k2", "v2"}});
}
#ifndef ROCKSDB_LITE
TEST_F(DBMergeOperatorTest, MergeOperatorFailsWithMustMerge) {
// This is like a mini-stress test dedicated to `OpFailureScope::kMustMerge`.
@ -355,7 +354,6 @@ TEST_F(DBMergeOperatorTest, MergeOperatorFailsWithMustMerge) {
}
}
#endif // ROCKSDB_LITE
class MergeOperatorPinningTest : public DBMergeOperatorTest,
public testing::WithParamInterface<bool> {
@ -368,7 +366,6 @@ class MergeOperatorPinningTest : public DBMergeOperatorTest,
INSTANTIATE_TEST_CASE_P(MergeOperatorPinningTest, MergeOperatorPinningTest,
::testing::Bool());
#ifndef ROCKSDB_LITE
TEST_P(MergeOperatorPinningTest, OperandsMultiBlocks) {
Options options = CurrentOptions();
BlockBasedTableOptions table_options;
@ -639,7 +636,6 @@ TEST_F(DBMergeOperatorTest, TailingIteratorMemtableUnrefedBySomeoneElse) {
EXPECT_TRUE(pushed_first_operand);
EXPECT_TRUE(stepped_to_next_operand);
}
#endif // ROCKSDB_LITE
TEST_F(DBMergeOperatorTest, SnapshotCheckerAndReadCallback) {
Options options = CurrentOptions();

View File

@ -29,7 +29,6 @@ class DBOptionsTest : public DBTestBase {
public:
DBOptionsTest() : DBTestBase("db_options_test", /*env_do_fsync=*/true) {}
#ifndef ROCKSDB_LITE
std::unordered_map<std::string, std::string> GetMutableDBOptionsMap(
const DBOptions& options) {
std::string options_str;
@ -76,7 +75,6 @@ class DBOptionsTest : public DBTestBase {
auto sanitized_options = SanitizeOptions(dbname_, db_options);
return GetMutableDBOptionsMap(sanitized_options);
}
#endif // ROCKSDB_LITE
};
TEST_F(DBOptionsTest, ImmutableTrackAndVerifyWalsInManifest) {
@ -112,7 +110,6 @@ TEST_F(DBOptionsTest, ImmutableVerifySstUniqueIdInManifest) {
}
// RocksDB lite don't support dynamic options.
#ifndef ROCKSDB_LITE
TEST_F(DBOptionsTest, AvoidUpdatingOptions) {
Options options;
@ -1148,7 +1145,6 @@ TEST_F(DBOptionsTest, ChangeCompression) {
SyncPoint::GetInstance()->DisableProcessing();
}
#endif // ROCKSDB_LITE
TEST_F(DBOptionsTest, BottommostCompressionOptsWithFallbackType) {
// Verify the bottommost compression options still take effect even when the

View File

@ -55,7 +55,6 @@ class DBPropertiesTest : public DBTestBase {
}
};
#ifndef ROCKSDB_LITE
TEST_F(DBPropertiesTest, Empty) {
do {
Options options;
@ -1112,7 +1111,6 @@ TEST_F(DBPropertiesTest, EstimateCompressionRatio) {
ASSERT_GT(CompressionRatioAtLevel(1), 10.0);
}
#endif // ROCKSDB_LITE
class CountingUserTblPropCollector : public TablePropertiesCollector {
public:
@ -1263,7 +1261,6 @@ class BlockCountingTablePropertiesCollectorFactory
}
};
#ifndef ROCKSDB_LITE
TEST_F(DBPropertiesTest, GetUserDefinedTableProperties) {
Options options = CurrentOptions();
options.level0_file_num_compaction_trigger = (1 << 30);
@ -1303,7 +1300,6 @@ TEST_F(DBPropertiesTest, GetUserDefinedTableProperties) {
ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
ASSERT_GT(collector_factory->num_created_, 0U);
}
#endif // ROCKSDB_LITE
TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) {
Options options = CurrentOptions();
@ -1365,7 +1361,6 @@ TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) {
ASSERT_GT(collector_factory->num_created_, 0U);
}
#ifndef ROCKSDB_LITE
TEST_F(DBPropertiesTest, TablePropertiesNeedCompactTest) {
Random rnd(301);
@ -2195,7 +2190,6 @@ TEST_F(DBPropertiesTest, TableMetaIndexKeys) {
} while (ChangeOptions());
}
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

View File

@ -27,9 +27,6 @@ class DBRangeDelTest : public DBTestBase {
}
};
// PlainTableFactory, WriteBatchWithIndex, and NumTableFilesAtLevel() are not
// supported in ROCKSDB_LITE
#ifndef ROCKSDB_LITE
TEST_F(DBRangeDelTest, NonBlockBasedTableNotSupported) {
// TODO: figure out why MmapReads trips the iterator pinning assertion in
// RangeDelAggregator. Ideally it would be supported; otherwise it should at
@ -298,7 +295,6 @@ TEST_F(DBRangeDelTest, CompactRangeDelsSameStartKey) {
ASSERT_TRUE(db_->Get(ReadOptions(), "b1", &value).IsNotFound());
}
}
#endif // ROCKSDB_LITE
TEST_F(DBRangeDelTest, FlushRemovesCoveredKeys) {
const int kNum = 300, kRangeBegin = 50, kRangeEnd = 250;
@ -335,8 +331,6 @@ TEST_F(DBRangeDelTest, FlushRemovesCoveredKeys) {
db_->ReleaseSnapshot(snapshot);
}
// NumTableFilesAtLevel() is not supported in ROCKSDB_LITE
#ifndef ROCKSDB_LITE
TEST_F(DBRangeDelTest, CompactionRemovesCoveredKeys) {
const int kNumPerFile = 100, kNumFiles = 4;
Options opts = CurrentOptions();
@ -517,7 +511,6 @@ TEST_F(DBRangeDelTest, ValidUniversalSubcompactionBoundaries) {
std::numeric_limits<uint64_t>::max() /* max_file_num_to_ignore */,
"" /*trim_ts*/));
}
#endif // ROCKSDB_LITE
TEST_F(DBRangeDelTest, CompactionRemovesCoveredMergeOperands) {
const int kNumPerFile = 3, kNumFiles = 3;
@ -589,8 +582,6 @@ TEST_F(DBRangeDelTest, PutDeleteRangeMergeFlush) {
ASSERT_EQ(expected, actual);
}
// NumTableFilesAtLevel() is not supported in ROCKSDB_LITE
#ifndef ROCKSDB_LITE
TEST_F(DBRangeDelTest, ObsoleteTombstoneCleanup) {
// During compaction to bottommost level, verify range tombstones older than
// the oldest snapshot are removed, while others are preserved.
@ -3024,7 +3015,6 @@ TEST_F(DBRangeDelTest, DoubleCountRangeTombstoneCompensatedSize) {
db_->ReleaseSnapshot(snapshot);
}
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

View File

@ -96,19 +96,10 @@ std::string GetTestNameSuffix(
return oss.str();
}
#ifndef ROCKSDB_LITE
INSTANTIATE_TEST_CASE_P(DBRateLimiterOnReadTest, DBRateLimiterOnReadTest,
::testing::Combine(::testing::Bool(), ::testing::Bool(),
::testing::Bool()),
GetTestNameSuffix);
#else // ROCKSDB_LITE
// Cannot use direct I/O in lite mode.
INSTANTIATE_TEST_CASE_P(DBRateLimiterOnReadTest, DBRateLimiterOnReadTest,
::testing::Combine(::testing::Values(false),
::testing::Bool(),
::testing::Bool()),
GetTestNameSuffix);
#endif // ROCKSDB_LITE
TEST_P(DBRateLimiterOnReadTest, Get) {
if (use_direct_io_ && !IsDirectIOSupported()) {
@ -234,7 +225,6 @@ TEST_P(DBRateLimiterOnReadTest, Iterator) {
ASSERT_EQ(expected, options_.rate_limiter->GetTotalRequests(Env::IO_USER));
}
#if !defined(ROCKSDB_LITE)
TEST_P(DBRateLimiterOnReadTest, VerifyChecksum) {
if (use_direct_io_ && !IsDirectIOSupported()) {
@ -264,7 +254,6 @@ TEST_P(DBRateLimiterOnReadTest, VerifyFileChecksums) {
ASSERT_EQ(expected, options_.rate_limiter->GetTotalRequests(Env::IO_USER));
}
#endif // !defined(ROCKSDB_LITE)
class DBRateLimiterOnWriteTest : public DBTestBase {
public:
@ -319,10 +308,8 @@ TEST_F(DBRateLimiterOnWriteTest, Compact) {
// Pre-compaction:
// level-0 : `kNumFiles` SST files overlapping on [kStartKey, kEndKey]
#ifndef ROCKSDB_LITE
std::string files_per_level_pre_compaction = std::to_string(kNumFiles);
ASSERT_EQ(files_per_level_pre_compaction, FilesPerLevel(0 /* cf */));
#endif // !ROCKSDB_LITE
std::int64_t prev_total_request =
options_.rate_limiter->GetTotalRequests(Env::IO_TOTAL);
@ -337,10 +324,8 @@ TEST_F(DBRateLimiterOnWriteTest, Compact) {
// Post-compaction:
// level-0 : 0 SST file
// level-1 : 1 SST file
#ifndef ROCKSDB_LITE
std::string files_per_level_post_compaction = "0,1";
ASSERT_EQ(files_per_level_post_compaction, FilesPerLevel(0 /* cf */));
#endif // !ROCKSDB_LITE
std::int64_t exepcted_compaction_request = 1;
EXPECT_EQ(actual_compaction_request, exepcted_compaction_request);

View File

@ -17,7 +17,6 @@ class DBReadOnlyTestWithTimestamp : public DBBasicTestWithTimestampBase {
: DBBasicTestWithTimestampBase("db_readonly_test_with_timestamp") {}
protected:
#ifndef ROCKSDB_LITE
void CheckDBOpenedAsCompactedDBWithOneLevel0File() {
VersionSet* const versions = dbfull()->GetVersionSet();
ASSERT_NE(versions, nullptr);
@ -63,10 +62,8 @@ class DBReadOnlyTestWithTimestamp : public DBBasicTestWithTimestampBase {
ASSERT_TRUE(
storage_info->LevelFilesBrief(highest_non_empty_level).num_files > 0);
}
#endif // !ROCKSDB_LITE
};
#ifndef ROCKSDB_LITE
TEST_F(DBReadOnlyTestWithTimestamp, IteratorAndGetReadTimestampSizeMismatch) {
const int kNumKeysPerFile = 128;
const uint64_t kMaxKey = 1024;
@ -949,7 +946,6 @@ TEST_F(DBReadOnlyTestWithTimestamp,
Close();
}
#endif // !ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE
int main(int argc, char** argv) {

View File

@ -18,7 +18,6 @@
namespace ROCKSDB_NAMESPACE {
#ifndef ROCKSDB_LITE
class DBSecondaryTestBase : public DBBasicTestWithTimestampBase {
public:
explicit DBSecondaryTestBase(const std::string& dbname)
@ -1682,7 +1681,6 @@ TEST_F(DBSecondaryTestWithTimestamp, Iterators) {
Close();
}
#endif //! ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

View File

@ -24,7 +24,6 @@ class DBSSTTest : public DBTestBase {
DBSSTTest() : DBTestBase("db_sst_test", /*env_do_fsync=*/true) {}
};
#ifndef ROCKSDB_LITE
// A class which remembers the name of each flushed file.
class FlushedFileCollector : public EventListener {
public:
@ -53,7 +52,6 @@ class FlushedFileCollector : public EventListener {
std::vector<std::string> flushed_files_;
std::mutex mutex_;
};
#endif // ROCKSDB_LITE
TEST_F(DBSSTTest, DontDeletePendingOutputs) {
Options options;
@ -151,7 +149,6 @@ TEST_F(DBSSTTest, SkipCheckingSSTFileSizesOnDBOpen) {
ASSERT_EQ("choo", Get("pika"));
}
#ifndef ROCKSDB_LITE
TEST_F(DBSSTTest, DontDeleteMovedFile) {
// This test triggers move compaction and verifies that the file is not
// deleted when it's part of move compaction
@ -1856,7 +1853,6 @@ TEST_F(DBSSTTest, DBWithSFMForBlobFilesAtomicFlush) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
}
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

View File

@ -157,7 +157,6 @@ TEST_F(DBStatisticsTest, ExcludeTickers) {
ASSERT_GT(options.statistics->getTickerCount(BYTES_READ), 0);
}
#ifndef ROCKSDB_LITE
TEST_F(DBStatisticsTest, VerifyChecksumReadStat) {
Options options = CurrentOptions();
@ -204,7 +203,6 @@ TEST_F(DBStatisticsTest, VerifyChecksumReadStat) {
}
}
#endif // !ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

View File

@ -24,7 +24,6 @@
#include "test_util/testutil.h"
#include "util/random.h"
#ifndef ROCKSDB_LITE
namespace ROCKSDB_NAMESPACE {
@ -616,7 +615,6 @@ INSTANTIATE_TEST_CASE_P(DBTablePropertiesTest, DBTablePropertiesTest,
} // namespace ROCKSDB_NAMESPACE
#endif // ROCKSDB_LITE
int main(int argc, char** argv) {
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();

View File

@ -10,7 +10,6 @@
// Introduction of SyncPoint effectively disabled building and running this test
// in Release build.
// which is a pity, it is a good test
#if !defined(ROCKSDB_LITE)
#include "db/db_test_util.h"
#include "db/forward_iterator.h"
@ -588,16 +587,9 @@ TEST_P(DBTestTailingIterator, SeekToFirstWithUpperBoundBug) {
} // namespace ROCKSDB_NAMESPACE
#endif // !defined(ROCKSDB_LITE)
int main(int argc, char** argv) {
#if !defined(ROCKSDB_LITE)
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
#else
(void)argc;
(void)argv;
return 0;
#endif
}

View File

@ -129,8 +129,6 @@ TEST_F(DBTest, MockEnvTest) {
ASSERT_TRUE(!iterator->Valid());
delete iterator;
// TEST_FlushMemTable() is not supported in ROCKSDB_LITE
#ifndef ROCKSDB_LITE
DBImpl* dbi = static_cast_with_check<DBImpl>(db);
ASSERT_OK(dbi->TEST_FlushMemTable());
@ -139,14 +137,10 @@ TEST_F(DBTest, MockEnvTest) {
ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
ASSERT_TRUE(res == vals[i]);
}
#endif // ROCKSDB_LITE
delete db;
}
// NewMemEnv returns nullptr in ROCKSDB_LITE since class InMemoryEnv isn't
// defined.
#ifndef ROCKSDB_LITE
TEST_F(DBTest, MemEnvTest) {
std::unique_ptr<Env> env{NewMemEnv(Env::Default())};
Options options;
@ -199,7 +193,6 @@ TEST_F(DBTest, MemEnvTest) {
}
delete db;
}
#endif // ROCKSDB_LITE
TEST_F(DBTest, WriteEmptyBatch) {
Options options = CurrentOptions();
@ -458,7 +451,6 @@ TEST_F(DBTest, MixedSlowdownOptionsStop) {
wo.no_slowdown = true;
ASSERT_OK(dbfull()->Put(wo, "foo3", "bar"));
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest, LevelLimitReopen) {
Options options = CurrentOptions();
@ -481,9 +473,7 @@ TEST_F(DBTest, LevelLimitReopen) {
options.max_bytes_for_level_multiplier_additional.resize(10, 1);
ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
}
#endif // ROCKSDB_LITE
#ifndef ROCKSDB_LITE
TEST_F(DBTest, LevelReopenWithFIFO) {
const int kLevelCount = 4;
const int kKeyCount = 5;
@ -610,7 +600,6 @@ TEST_F(DBTest, LevelReopenWithFIFO) {
ASSERT_EQ(expected_files_per_level_after_fifo[i], FilesPerLevel(kCF));
}
}
#endif // !ROCKSDB_LITE
TEST_F(DBTest, PutSingleDeleteGet) {
do {
@ -857,9 +846,7 @@ TEST_F(DBTest, DISABLED_VeryLargeValue) {
ASSERT_OK(Put(key2, raw));
dbfull()->TEST_WaitForFlushMemTable();
#ifndef ROCKSDB_LITE
ASSERT_EQ(1, NumTableFilesAtLevel(0));
#endif // !ROCKSDB_LITE
std::string value;
Status s = db_->Get(ReadOptions(), key1, &value);
@ -936,7 +923,6 @@ TEST_F(DBTest, WrongLevel0Config) {
ASSERT_OK(DB::Open(options, dbname_, &db_));
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest, GetOrderedByLevels) {
do {
CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
@ -1007,7 +993,6 @@ TEST_F(DBTest, GetEncountersEmptyLevel) {
ASSERT_EQ(NumTableFilesAtLevel(0, 1), 1); // XXX
} while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction));
}
#endif // ROCKSDB_LITE
TEST_F(DBTest, FlushMultipleMemtable) {
do {
@ -1027,7 +1012,6 @@ TEST_F(DBTest, FlushMultipleMemtable) {
ASSERT_OK(Flush(1));
} while (ChangeCompactOptions());
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest, FlushSchedule) {
Options options = CurrentOptions();
options.disable_auto_compactions = true;
@ -1072,7 +1056,6 @@ TEST_F(DBTest, FlushSchedule) {
ASSERT_LE(pikachu_tables, static_cast<uint64_t>(10));
ASSERT_GT(pikachu_tables, static_cast<uint64_t>(0));
}
#endif // ROCKSDB_LITE
namespace {
class KeepFilter : public CompactionFilter {
@ -1137,14 +1120,12 @@ class DelayFilterFactory : public CompactionFilterFactory {
};
} // anonymous namespace
#ifndef ROCKSDB_LITE
static std::string CompressibleString(Random* rnd, int len) {
std::string r;
test::CompressibleString(rnd, 0.8, len, &r);
return r;
}
#endif // ROCKSDB_LITE
TEST_F(DBTest, FailMoreDbPaths) {
Options options = CurrentOptions();
@ -1266,7 +1247,6 @@ void CheckLiveFilesMeta(
}
}
#ifndef ROCKSDB_LITE
void AddBlobFile(const ColumnFamilyHandle* cfh, uint64_t blob_file_number,
uint64_t total_blob_count, uint64_t total_blob_bytes,
const std::string& checksum_method,
@ -1558,9 +1538,7 @@ TEST_F(DBTest, DISABLED_RepeatedWritesToSameKey) {
}
} while (ChangeCompactOptions());
}
#endif // ROCKSDB_LITE
#ifndef ROCKSDB_LITE
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
bool result = (val >= low) && (val <= high);
if (!result) {
@ -1957,9 +1935,7 @@ TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
// ApproximateOffsetOf() is not yet implemented in plain table format.
} while (ChangeOptions(kSkipPlainTable));
}
#endif // ROCKSDB_LITE
#ifndef ROCKSDB_LITE
TEST_F(DBTest, Snapshot) {
env_->SetMockSleep();
anon::OptionsOverride options_override;
@ -2081,7 +2057,6 @@ TEST_F(DBTest, HiddenValuesAreRemoved) {
} while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction |
kSkipPlainTable));
}
#endif // ROCKSDB_LITE
TEST_F(DBTest, UnremovableSingleDelete) {
// If we compact:
@ -2132,7 +2107,6 @@ TEST_F(DBTest, UnremovableSingleDelete) {
kSkipMergePut));
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest, DeletionMarkers1) {
Options options = CurrentOptions();
CreateAndReopenWithCF({"pikachu"}, options);
@ -2252,7 +2226,6 @@ TEST_F(DBTest, OverlapInLevel0) {
ASSERT_EQ("NOT_FOUND", Get(1, "600"));
} while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction));
}
#endif // ROCKSDB_LITE
TEST_F(DBTest, ComparatorCheck) {
class NewComparator : public Comparator {
@ -2440,7 +2413,6 @@ TEST_F(DBTest, DestroyDBMetaDatabase) {
ASSERT_TRUE(!(DB::Open(options, metametadbname, &db)).ok());
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest, SnapshotFiles) {
do {
Options options = CurrentOptions();
@ -2667,7 +2639,6 @@ TEST_F(DBTest, GetLiveBlobFiles) {
ASSERT_EQ(cfmd.blob_file_count, 1U);
ASSERT_EQ(cfmd.blob_file_size, bmd.blob_file_size);
}
#endif
TEST_F(DBTest, PurgeInfoLogs) {
Options options = CurrentOptions();
@ -2715,7 +2686,6 @@ TEST_F(DBTest, PurgeInfoLogs) {
}
}
#ifndef ROCKSDB_LITE
// Multi-threaded test:
namespace {
@ -2902,7 +2872,6 @@ INSTANTIATE_TEST_CASE_P(
::testing::Combine(
::testing::ValuesIn(MultiThreadedDBTest::GenerateOptionConfigs()),
::testing::Bool()));
#endif // ROCKSDB_LITE
// Group commit test:
#if !defined(OS_WIN)
@ -3109,7 +3078,6 @@ class ModelDB : public DB {
return s;
}
#ifndef ROCKSDB_LITE
using DB::IngestExternalFile;
Status IngestExternalFile(
ColumnFamilyHandle* /*column_family*/,
@ -3151,7 +3119,6 @@ class ModelDB : public DB {
std::size_t /*n*/, TablePropertiesCollection* /*props*/) override {
return Status();
}
#endif // ROCKSDB_LITE
using DB::KeyMayExist;
bool KeyMayExist(const ReadOptions& /*options*/,
@ -3332,7 +3299,6 @@ class ModelDB : public DB {
Status DisableFileDeletions() override { return Status::OK(); }
Status EnableFileDeletions(bool /*force*/) override { return Status::OK(); }
#ifndef ROCKSDB_LITE
Status GetLiveFiles(std::vector<std::string>&, uint64_t* /*size*/,
bool /*flush_memtable*/ = true) override {
@ -3376,7 +3342,6 @@ class ModelDB : public DB {
void GetColumnFamilyMetaData(ColumnFamilyHandle* /*column_family*/,
ColumnFamilyMetaData* /*metadata*/) override {}
#endif // ROCKSDB_LITE
Status GetDbIdentity(std::string& /*identity*/) const override {
return Status::OK();
@ -3625,12 +3590,10 @@ TEST_F(DBTest, BlockBasedTablePrefixIndexTest) {
ASSERT_EQ("v1", Get("k1"));
ASSERT_EQ("v2", Get("k2"));
#ifndef ROCKSDB_LITE
// Back to original
ASSERT_OK(dbfull()->SetOptions({{"prefix_extractor", "fixed:1"}}));
ASSERT_EQ("v1", Get("k1"));
ASSERT_EQ("v2", Get("k2"));
#endif // !ROCKSDB_LITE
// Same if there's a problem initially loading prefix transform
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
@ -3642,12 +3605,10 @@ TEST_F(DBTest, BlockBasedTablePrefixIndexTest) {
ASSERT_EQ("v1", Get("k1"));
ASSERT_EQ("v2", Get("k2"));
#ifndef ROCKSDB_LITE
// Change again
ASSERT_OK(dbfull()->SetOptions({{"prefix_extractor", "fixed:2"}}));
ASSERT_EQ("v1", Get("k1"));
ASSERT_EQ("v2", Get("k2"));
#endif // !ROCKSDB_LITE
SyncPoint::GetInstance()->DisableProcessing();
// Reopen with no prefix extractor, make sure everything still works.
@ -3769,7 +3730,6 @@ TEST_F(DBTest, ChecksumTest) {
ASSERT_EQ("h", Get("g"));
}
#ifndef ROCKSDB_LITE
TEST_P(DBTestWithParam, FIFOCompactionTest) {
for (int iter = 0; iter < 2; ++iter) {
// first iteration -- auto compaction
@ -4141,9 +4101,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
options.compaction_options_fifo.max_table_files_size);
}
}
#endif // ROCKSDB_LITE
#ifndef ROCKSDB_LITE
/*
* This test is not reliable enough as it heavily depends on disk behavior.
* Disable as it is flaky.
@ -4324,7 +4282,6 @@ TEST_F(DBTest, ConcurrentMemtableNotSupported) {
ASSERT_NOK(db_->CreateColumnFamily(cf_options, "name", &handle));
}
#endif // ROCKSDB_LITE
TEST_F(DBTest, SanitizeNumThreads) {
for (int attempt = 0; attempt < 2; attempt++) {
@ -4478,7 +4435,6 @@ TEST_F(DBTest, ManualFlushWalAndWriteRace) {
ASSERT_EQ("value2", Get("foo2"));
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest, DynamicMemtableOptions) {
const uint64_t k64KB = 1 << 16;
const uint64_t k128KB = 1 << 17;
@ -4632,7 +4588,6 @@ TEST_F(DBTest, DynamicMemtableOptions) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
#endif // ROCKSDB_LITE
#ifdef ROCKSDB_USING_THREAD_STATUS
namespace {
@ -5082,7 +5037,6 @@ TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) {
#endif // ROCKSDB_USING_THREAD_STATUS
#ifndef ROCKSDB_LITE
TEST_F(DBTest, FlushOnDestroy) {
WriteOptions wo;
wo.disableWAL = true;
@ -5635,7 +5589,6 @@ TEST_F(DBTest, DynamicUniversalCompactionOptions) {
dbfull()->GetOptions().compaction_options_universal.allow_trivial_move,
false);
}
#endif // ROCKSDB_LITE
TEST_F(DBTest, FileCreationRandomFailure) {
Options options;
@ -5698,7 +5651,6 @@ TEST_F(DBTest, FileCreationRandomFailure) {
}
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest, DynamicMiscOptions) {
// Test max_sequential_skip_in_iterations
@ -5793,7 +5745,6 @@ TEST_F(DBTest, DynamicMiscOptions) {
&mutable_cf_options));
ASSERT_FALSE(mutable_cf_options.check_flush_compaction_key_order);
}
#endif // ROCKSDB_LITE
TEST_F(DBTest, L0L1L2AndUpHitCounter) {
const int kNumLevels = 3;
@ -6012,7 +5963,6 @@ TEST_F(DBTest, MergeTestTime) {
#endif // ROCKSDB_USING_THREAD_STATUS
}
#ifndef ROCKSDB_LITE
TEST_P(DBTestWithParam, MergeCompactionTimeTest) {
SetPerfLevel(kEnableTime);
Options options = CurrentOptions();
@ -6075,7 +6025,6 @@ TEST_P(DBTestWithParam, FilterCompactionTimeTest) {
TestGetTickerCount(options, FILTER_OPERATION_TOTAL_TIME));
delete itr;
}
#endif // ROCKSDB_LITE
TEST_F(DBTest, TestLogCleanup) {
Options options = CurrentOptions();
@ -6092,7 +6041,6 @@ TEST_F(DBTest, TestLogCleanup) {
}
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest, EmptyCompactedDB) {
Options options = CurrentOptions();
options.max_open_files = -1;
@ -6102,9 +6050,7 @@ TEST_F(DBTest, EmptyCompactedDB) {
ASSERT_TRUE(s.IsNotSupported());
Close();
}
#endif // ROCKSDB_LITE
#ifndef ROCKSDB_LITE
TEST_F(DBTest, SuggestCompactRangeTest) {
class CompactionFilterFactoryGetContext : public CompactionFilterFactory {
public:
@ -6333,7 +6279,6 @@ TEST_F(DBTest, CompactRangeWithEmptyBottomLevel) {
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
ASSERT_EQ(NumTableFilesAtLevel(1), kNumL0Files);
}
#endif // ROCKSDB_LITE
TEST_F(DBTest, AutomaticConflictsWithManualCompaction) {
const int kNumL0Files = 50;
@ -6391,7 +6336,6 @@ TEST_F(DBTest, AutomaticConflictsWithManualCompaction) {
ASSERT_OK(dbfull()->TEST_WaitForCompact());
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest, CompactFilesShouldTriggerAutoCompaction) {
Options options = CurrentOptions();
options.max_background_compactions = 1;
@ -6451,7 +6395,6 @@ TEST_F(DBTest, CompactFilesShouldTriggerAutoCompaction) {
ASSERT_LE(cf_meta_data.levels[0].files.size(),
options.level0_file_num_compaction_trigger);
}
#endif // ROCKSDB_LITE
// Github issue #595
// Large write batch with column families
@ -6662,7 +6605,7 @@ TEST_F(DBTest, HardLimit) {
sleeping_task_low.WaitUntilDone();
}
#if !defined(ROCKSDB_LITE) && !defined(ROCKSDB_DISABLE_STALL_NOTIFICATION)
#if !defined(ROCKSDB_DISABLE_STALL_NOTIFICATION)
class WriteStallListener : public EventListener {
public:
WriteStallListener() : condition_(WriteStallCondition::kNormal) {}
@ -6904,8 +6847,7 @@ TEST_F(DBTest, LastWriteBufferDelay) {
sleeping_task.WakeUp();
sleeping_task.WaitUntilDone();
}
#endif // !defined(ROCKSDB_LITE) &&
// !defined(ROCKSDB_DISABLE_STALL_NOTIFICATION)
#endif // !defined(ROCKSDB_DISABLE_STALL_NOTIFICATION)
TEST_F(DBTest, FailWhenCompressionNotSupportedTest) {
CompressionType compressions[] = {kZlibCompression, kBZip2Compression,
@ -6941,7 +6883,6 @@ TEST_F(DBTest, CreateColumnFamilyShouldFailOnIncompatibleOptions) {
delete handle;
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest, RowCache) {
Options options = CurrentOptions();
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
@ -7073,7 +7014,6 @@ TEST_F(DBTest, ReusePinnableSlice) {
1);
}
#endif // ROCKSDB_LITE
TEST_F(DBTest, DeletingOldWalAfterDrop) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
@ -7199,7 +7139,6 @@ TEST_F(DBTest, LargeBlockSizeTest) {
ASSERT_NOK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest, CreationTimeOfOldestFile) {
const int kNumKeysPerFile = 32;
@ -7388,7 +7327,6 @@ TEST_F(DBTest, ShuttingDownNotBlockStalledWrites) {
thd.join();
}
#endif
} // namespace ROCKSDB_NAMESPACE

View File

@ -48,7 +48,6 @@ class DBTest2 : public DBTestBase {
}
};
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, OpenForReadOnly) {
DB* db_ptr = nullptr;
std::string dbname = test::PerThreadDBPath("db_readonly");
@ -146,7 +145,6 @@ TEST_F(DBTest2, PartitionedIndexUserToInternalKey) {
}
}
#endif // ROCKSDB_LITE
class PrefixFullBloomWithReverseComparator
: public DBTestBase,
@ -290,7 +288,6 @@ TEST_F(DBTest2, MaxSuccessiveMergesChangeWithDBRecovery) {
Reopen(options);
}
#ifndef ROCKSDB_LITE
class DBTestSharedWriteBufferAcrossCFs
: public DBTestBase,
public testing::WithParamInterface<std::tuple<bool, bool>> {
@ -1544,9 +1541,7 @@ TEST_P(PresetCompressionDictTest, CompactNonBottommost) {
}
ASSERT_OK(Flush());
}
#ifndef ROCKSDB_LITE
ASSERT_EQ("2,0,1", FilesPerLevel(0));
#endif // ROCKSDB_LITE
uint64_t prev_compression_dict_bytes_inserted =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT);
@ -1554,9 +1549,7 @@ TEST_P(PresetCompressionDictTest, CompactNonBottommost) {
// file is not bottommost due to the existing L2 file covering the same key-
// range.
ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,1,1", FilesPerLevel(0));
#endif // ROCKSDB_LITE
// We can use `BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT` to detect whether a
// compression dictionary exists since dictionaries would be preloaded when
// the compaction finishes.
@ -1620,17 +1613,13 @@ TEST_P(PresetCompressionDictTest, CompactBottommost) {
}
ASSERT_OK(Flush());
}
#ifndef ROCKSDB_LITE
ASSERT_EQ("2", FilesPerLevel(0));
#endif // ROCKSDB_LITE
uint64_t prev_compression_dict_bytes_inserted =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT);
CompactRangeOptions cro;
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,1", FilesPerLevel(0));
#endif // ROCKSDB_LITE
ASSERT_GT(
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT),
prev_compression_dict_bytes_inserted);
@ -1997,7 +1986,6 @@ TEST_F(DBTest2, CompactionStall) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
#endif // ROCKSDB_LITE
TEST_F(DBTest2, FirstSnapshotTest) {
Options options;
@ -2014,7 +2002,6 @@ TEST_F(DBTest2, FirstSnapshotTest) {
db_->ReleaseSnapshot(s1);
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, DuplicateSnapshot) {
Options options;
options = CurrentOptions(options);
@ -2046,7 +2033,6 @@ TEST_F(DBTest2, DuplicateSnapshot) {
db_->ReleaseSnapshot(s);
}
}
#endif // ROCKSDB_LITE
class PinL0IndexAndFilterBlocksTest
: public DBTestBase,
@ -2291,7 +2277,6 @@ INSTANTIATE_TEST_CASE_P(PinL0IndexAndFilterBlocksTest,
std::make_tuple(false, false),
std::make_tuple(false, true)));
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, MaxCompactionBytesTest) {
Options options = CurrentOptions();
options.memtable_factory.reset(test::NewSpecialSkipListFactory(
@ -2657,7 +2642,6 @@ TEST_F(DBTest2, SyncPointMarker) {
ASSERT_EQ(sync_point_called.load(), 1);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
#endif
size_t GetEncodedEntrySize(size_t key_size, size_t value_size) {
std::string buffer;
@ -2864,7 +2848,6 @@ TEST_F(DBTest2, ReadAmpBitmapLiveInCacheAfterDBClose) {
}
#endif // !OS_SOLARIS
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, AutomaticCompactionOverlapManualCompaction) {
Options options = CurrentOptions();
options.num_levels = 3;
@ -3145,9 +3128,7 @@ TEST_F(DBTest2, PausingManualCompaction3) {
DestroyAndReopen(options);
generate_files();
#ifndef ROCKSDB_LITE
ASSERT_EQ("2,3,4,5,6,7,8", FilesPerLevel());
#endif // !ROCKSDB_LITE
int run_manual_compactions = 0;
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"CompactionJob::Run():PausingManualCompaction:1",
@ -3161,18 +3142,14 @@ TEST_F(DBTest2, PausingManualCompaction3) {
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
// As manual compaction is disabled, we do not even reach the sync point
ASSERT_EQ(run_manual_compactions, 0);
#ifndef ROCKSDB_LITE
ASSERT_EQ("2,3,4,5,6,7,8", FilesPerLevel());
#endif // !ROCKSDB_LITE
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack(
"CompactionJob::Run():PausingManualCompaction:1");
dbfull()->EnableManualCompaction();
ASSERT_OK(dbfull()->CompactRange(compact_options, nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel());
#endif // !ROCKSDB_LITE
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
@ -3201,9 +3178,7 @@ TEST_F(DBTest2, PausingManualCompaction4) {
DestroyAndReopen(options);
generate_files();
#ifndef ROCKSDB_LITE
ASSERT_EQ("2,3,4,5,6,7,8", FilesPerLevel());
#endif // !ROCKSDB_LITE
int run_manual_compactions = 0;
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"CompactionJob::Run():PausingManualCompaction:2", [&](void* arg) {
@ -3230,17 +3205,13 @@ TEST_F(DBTest2, PausingManualCompaction4) {
.IsManualCompactionPaused());
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
ASSERT_EQ(run_manual_compactions, 1);
#ifndef ROCKSDB_LITE
ASSERT_EQ("2,3,4,5,6,7,8", FilesPerLevel());
#endif // !ROCKSDB_LITE
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack(
"CompactionJob::Run():PausingManualCompaction:2");
ASSERT_OK(dbfull()->CompactRange(compact_options, nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel());
#endif // !ROCKSDB_LITE
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
@ -3273,9 +3244,7 @@ TEST_F(DBTest2, CancelManualCompaction1) {
DestroyAndReopen(options);
generate_files();
#ifndef ROCKSDB_LITE
ASSERT_EQ("2,3,4,5,6,7,8", FilesPerLevel());
#endif // !ROCKSDB_LITE
int run_manual_compactions = 0;
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
@ -3299,9 +3268,7 @@ TEST_F(DBTest2, CancelManualCompaction1) {
// E.g. we should call the compaction function exactly one time.
ASSERT_EQ(compactions_run, 0);
ASSERT_EQ(run_manual_compactions, 0);
#ifndef ROCKSDB_LITE
ASSERT_EQ("2,3,4,5,6,7,8", FilesPerLevel());
#endif // !ROCKSDB_LITE
compactions_run = 0;
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack(
@ -3332,9 +3299,7 @@ TEST_F(DBTest2, CancelManualCompaction1) {
compact_options.canceled->store(false, std::memory_order_relaxed);
ASSERT_OK(dbfull()->CompactRange(compact_options, nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel());
#endif // !ROCKSDB_LITE
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
@ -3368,9 +3333,7 @@ TEST_F(DBTest2, CancelManualCompaction2) {
DestroyAndReopen(options);
generate_files();
#ifndef ROCKSDB_LITE
ASSERT_EQ("2,3,4,5,6,7,8", FilesPerLevel());
#endif // !ROCKSDB_LITE
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
@ -3418,9 +3381,7 @@ TEST_F(DBTest2, CancelManualCompaction2) {
compact_options.canceled->store(false, std::memory_order_relaxed);
ASSERT_OK(dbfull()->CompactRange(compact_options, nullptr, nullptr));
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel());
#endif // !ROCKSDB_LITE
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
@ -3648,7 +3609,6 @@ TEST_F(DBTest2, OptimizeForSmallDB) {
value.Reset();
}
#endif // ROCKSDB_LITE
TEST_F(DBTest2, IterRaceFlush1) {
ASSERT_OK(Put("foo", "v1"));
@ -3931,7 +3891,6 @@ TEST_F(DBTest2, LowPriWrite) {
ASSERT_EQ(1, rate_limit_count.load());
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, RateLimitedCompactionReads) {
// compaction input has 512KB data
const int kNumKeysPerFile = 128;
@ -4012,7 +3971,6 @@ TEST_F(DBTest2, RateLimitedCompactionReads) {
}
}
}
#endif // ROCKSDB_LITE
// Make sure DB can be reopen with reduced number of levels, given no file
// is on levels higher than the new num_levels.
@ -4025,21 +3983,15 @@ TEST_F(DBTest2, ReduceLevel) {
ASSERT_OK(Put("foo", "bar"));
ASSERT_OK(Flush());
MoveFilesToLevel(6);
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
#endif // !ROCKSDB_LITE
CompactRangeOptions compact_options;
compact_options.change_level = true;
compact_options.target_level = 1;
ASSERT_OK(dbfull()->CompactRange(compact_options, nullptr, nullptr));
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,1", FilesPerLevel());
#endif // !ROCKSDB_LITE
options.num_levels = 3;
Reopen(options);
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,1", FilesPerLevel());
#endif // !ROCKSDB_LITE
}
// Test that ReadCallback is actually used in both memtbale and sst tables
@ -4074,18 +4026,14 @@ TEST_F(DBTest2, ReadCallbackTest) {
}
ASSERT_OK(Flush());
MoveFilesToLevel(6);
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel());
#endif // !ROCKSDB_LITE
for (; i < 30; i++) {
ASSERT_OK(Put(key, value + std::to_string(i)));
auto snapshot = dbfull()->GetSnapshot();
snapshots.push_back(snapshot);
}
ASSERT_OK(Flush());
#ifndef ROCKSDB_LITE
ASSERT_EQ("1,0,0,0,0,0,2", FilesPerLevel());
#endif // !ROCKSDB_LITE
// And also add some values to the memtable
for (; i < 40; i++) {
ASSERT_OK(Put(key, value + std::to_string(i)));
@ -4127,7 +4075,6 @@ TEST_F(DBTest2, ReadCallbackTest) {
}
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, LiveFilesOmitObsoleteFiles) {
// Regression test for race condition where an obsolete file is returned to
@ -5196,7 +5143,6 @@ TEST_F(DBTest2, TraceWithFilter) {
ASSERT_EQ(count, 6);
}
#endif // ROCKSDB_LITE
TEST_F(DBTest2, PinnableSliceAndMmapReads) {
Options options = CurrentOptions();
@ -5227,7 +5173,6 @@ TEST_F(DBTest2, PinnableSliceAndMmapReads) {
// compaction. It crashes if it does.
ASSERT_EQ(pinned_value.ToString(), "bar");
#ifndef ROCKSDB_LITE
pinned_value.Reset();
// Unsafe to pin mmap files when they could be kicked out of table cache
Close();
@ -5245,7 +5190,6 @@ TEST_F(DBTest2, PinnableSliceAndMmapReads) {
ASSERT_EQ(Get("foo", &pinned_value), Status::OK());
ASSERT_TRUE(pinned_value.IsPinned());
ASSERT_EQ(pinned_value.ToString(), "bar");
#endif
}
TEST_F(DBTest2, DISABLED_IteratorPinnedMemory) {
@ -5478,7 +5422,6 @@ TEST_F(DBTest2, TestGetColumnFamilyHandleUnlocked) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, TestCompactFiles) {
// Setup sync point dependency to reproduce the race condition of
// DBImpl::GetColumnFamilyHandleUnlocked
@ -5546,7 +5489,6 @@ TEST_F(DBTest2, TestCompactFiles) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
}
#endif // ROCKSDB_LITE
TEST_F(DBTest2, MultiDBParallelOpenTest) {
const int kNumDbs = 2;
@ -5732,7 +5674,6 @@ TEST_F(DBTest2, PrefixBloomFilteredOut) {
delete iter;
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, RowCacheSnapshot) {
Options options = CurrentOptions();
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
@ -5776,7 +5717,6 @@ TEST_F(DBTest2, RowCacheSnapshot) {
db_->ReleaseSnapshot(s2);
db_->ReleaseSnapshot(s3);
}
#endif // ROCKSDB_LITE
// When DB is reopened with multiple column families, the manifest file
// is written after the first CF is flushed, and it is written again
@ -5966,9 +5906,7 @@ TEST_F(DBTest2, SameSmallestInSameLevel) {
ASSERT_OK(db_->Merge(WriteOptions(), "key", "8"));
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,4,1", FilesPerLevel());
#endif // ROCKSDB_LITE
ASSERT_EQ("2,3,4,5,6,7,8", Get("key"));
}
@ -6101,11 +6039,7 @@ TEST_F(DBTest2, ChangePrefixExtractor) {
// Sometimes filter is checked based on upper bound. Assert counters
// for that case. Otherwise, only check data correctness.
#ifndef ROCKSDB_LITE
bool expect_filter_check = !use_partitioned_filter;
#else
bool expect_filter_check = false;
#endif
table_options.partition_filters = use_partitioned_filter;
if (use_partitioned_filter) {
table_options.index_type =
@ -6303,7 +6237,6 @@ TEST_F(DBTest2, BlockBasedTablePrefixGetIndexNotFound) {
ASSERT_EQ("ok", Get("b1"));
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, AutoPrefixMode1) {
do {
// create a DB with block prefix index
@ -7181,7 +7114,6 @@ TEST_F(DBTest2, FileTemperatureManifestFixup) {
std::vector<ColumnFamilyDescriptor> column_families;
for (size_t i = 0; i < handles_.size(); ++i) {
ColumnFamilyDescriptor cfdescriptor;
// GetDescriptor is not implemented for ROCKSDB_LITE
handles_[i]->GetDescriptor(&cfdescriptor).PermitUncheckedError();
column_families.push_back(cfdescriptor);
}
@ -7221,7 +7153,6 @@ TEST_F(DBTest2, FileTemperatureManifestFixup) {
Close();
}
#endif // ROCKSDB_LITE
// WAL recovery mode is WALRecoveryMode::kPointInTimeRecovery.
TEST_F(DBTest2, PointInTimeRecoveryWithIOErrorWhileReadingWal) {
@ -7277,7 +7208,6 @@ TEST_F(DBTest2, PointInTimeRecoveryWithSyncFailureInCFCreation) {
ReopenWithColumnFamilies({"default", "test1", "test2"}, options);
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, SortL0FilesByEpochNumber) {
Options options = CurrentOptions();
options.num_levels = 1;
@ -7487,7 +7417,6 @@ TEST_F(DBTest2, RecoverEpochNumber) {
}
}
#endif // ROCKSDB_LITE
TEST_F(DBTest2, RenameDirectory) {
Options options = CurrentOptions();
@ -7565,9 +7494,7 @@ TEST_F(DBTest2, SstUniqueIdVerifyBackwardCompatible) {
}
ASSERT_OK(dbfull()->TEST_WaitForCompact());
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,1", FilesPerLevel(0));
#endif // ROCKSDB_LITE
// Reopen (with verification)
ASSERT_TRUE(options.verify_sst_unique_id_in_manifest);
@ -7622,9 +7549,7 @@ TEST_F(DBTest2, SstUniqueIdVerify) {
}
ASSERT_OK(dbfull()->TEST_WaitForCompact());
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,1", FilesPerLevel(0));
#endif // ROCKSDB_LITE
// Reopen with verification should fail
options.verify_sst_unique_id_in_manifest = true;
@ -7747,7 +7672,6 @@ TEST_F(DBTest2, BestEffortsRecoveryWithSstUniqueIdVerification) {
}
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, GetLatestSeqAndTsForKey) {
Destroy(last_options_);
@ -7804,7 +7728,6 @@ TEST_F(DBTest2, GetLatestSeqAndTsForKey) {
// Verify that no read to SST files.
ASSERT_EQ(0, options.statistics->getTickerCount(GET_HIT_L0));
}
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

View File

@ -70,7 +70,6 @@ DBTestBase::DBTestBase(const std::string path, bool env_do_fsync)
if (getenv("MEM_ENV")) {
mem_env_ = MockEnv::Create(base_env, base_env->GetSystemClock());
}
#ifndef ROCKSDB_LITE
if (getenv("ENCRYPTED_ENV")) {
std::shared_ptr<EncryptionProvider> provider;
std::string provider_id = getenv("ENCRYPTED_ENV");
@ -82,7 +81,6 @@ DBTestBase::DBTestBase(const std::string path, bool env_do_fsync)
&provider));
encrypted_env_ = NewEncryptedEnv(mem_env_ ? mem_env_ : base_env, provider);
}
#endif // !ROCKSDB_LITE
env_ = new SpecialEnv(encrypted_env_ ? encrypted_env_
: (mem_env_ ? mem_env_ : base_env));
env_->SetBackgroundThreads(1, Env::LOW);
@ -124,22 +122,6 @@ DBTestBase::~DBTestBase() {
}
bool DBTestBase::ShouldSkipOptions(int option_config, int skip_mask) {
#ifdef ROCKSDB_LITE
// These options are not supported in ROCKSDB_LITE
if (option_config == kHashSkipList ||
option_config == kPlainTableFirstBytePrefix ||
option_config == kPlainTableCappedPrefix ||
option_config == kPlainTableCappedPrefixNonMmap ||
option_config == kPlainTableAllBytesPrefix ||
option_config == kVectorRep || option_config == kHashLinkList ||
option_config == kUniversalCompaction ||
option_config == kUniversalCompactionMultiLevel ||
option_config == kUniversalSubcompactions ||
option_config == kFIFOCompaction ||
option_config == kConcurrentSkipList) {
return true;
}
#endif
if ((skip_mask & kSkipUniversalCompaction) &&
(option_config == kUniversalCompaction ||
@ -377,7 +359,6 @@ Options DBTestBase::GetOptions(
bool can_allow_mmap = IsMemoryMappedAccessSupported();
switch (option_config) {
#ifndef ROCKSDB_LITE
case kHashSkipList:
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
options.memtable_factory.reset(NewHashSkipListRepFactory(16));
@ -431,7 +412,6 @@ Options DBTestBase::GetOptions(
SetupSyncPointsToMockDirectIO();
break;
}
#endif // ROCKSDB_LITE
case kMergePut:
options.merge_operator = MergeOperators::CreatePutOperator();
break;
@ -702,7 +682,6 @@ void DBTestBase::Destroy(const Options& options, bool delete_cf_paths) {
if (delete_cf_paths) {
for (size_t i = 0; i < handles_.size(); ++i) {
ColumnFamilyDescriptor cfdescriptor;
// GetDescriptor is not implemented for ROCKSDB_LITE
handles_[i]->GetDescriptor(&cfdescriptor).PermitUncheckedError();
column_families.push_back(cfdescriptor);
}
@ -1043,7 +1022,6 @@ std::string DBTestBase::AllEntriesFor(const Slice& user_key, int cf) {
return result;
}
#ifndef ROCKSDB_LITE
int DBTestBase::NumSortedRuns(int cf) {
ColumnFamilyMetaData cf_meta;
if (cf == 0) {
@ -1162,7 +1140,6 @@ std::string DBTestBase::FilesPerLevel(int cf) {
return result;
}
#endif // !ROCKSDB_LITE
std::vector<uint64_t> DBTestBase::GetBlobFileNumbers() {
VersionSet* const versions = dbfull()->GetVersionSet();
@ -1280,7 +1257,6 @@ void DBTestBase::MoveFilesToLevel(int level, int cf) {
}
}
#ifndef ROCKSDB_LITE
void DBTestBase::DumpFileCounts(const char* label) {
fprintf(stderr, "---\n%s:\n", label);
fprintf(stderr, "maxoverlap: %" PRIu64 "\n",
@ -1292,7 +1268,6 @@ void DBTestBase::DumpFileCounts(const char* label) {
}
}
}
#endif // !ROCKSDB_LITE
std::string DBTestBase::DumpSSTableList() {
std::string property;
@ -1618,7 +1593,6 @@ void DBTestBase::VerifyDBFromMap(std::map<std::string, std::string> true_data,
}
if (tailing_iter) {
#ifndef ROCKSDB_LITE
// Tailing iterator
int iter_cnt = 0;
ReadOptions ro;
@ -1647,7 +1621,6 @@ void DBTestBase::VerifyDBFromMap(std::map<std::string, std::string> true_data,
}
delete iter;
#endif // ROCKSDB_LITE
}
if (total_reads_res) {
@ -1675,7 +1648,6 @@ void DBTestBase::VerifyDBInternal(
iter->~InternalIterator();
}
#ifndef ROCKSDB_LITE
uint64_t DBTestBase::GetNumberOfSstFilesForColumnFamily(
DB* db, std::string column_family_name) {
@ -1696,7 +1668,6 @@ uint64_t DBTestBase::GetSstSizeHelper(Temperature temperature) {
&prop));
return static_cast<uint64_t>(std::atoi(prop.c_str()));
}
#endif // ROCKSDB_LITE
void VerifySstUniqueIds(const TablePropertiesCollection& props) {
ASSERT_FALSE(props.empty()); // suspicious test if empty

View File

@ -695,7 +695,6 @@ class SpecialEnv : public EnvWrapper {
bool no_slowdown_;
};
#ifndef ROCKSDB_LITE
class FileTemperatureTestFS : public FileSystemWrapper {
public:
explicit FileTemperatureTestFS(const std::shared_ptr<FileSystem>& fs)
@ -870,7 +869,6 @@ class FlushCounterListener : public EventListener {
ASSERT_EQ(expected_flush_reason.load(), flush_job_info.flush_reason);
}
};
#endif
// A test merge operator mimics put but also fails if one of merge operands is
// "corrupted", "corrupted_try_merge", or "corrupted_must_merge".
@ -1260,7 +1258,6 @@ class DBTestBase : public testing::Test {
const std::vector<std::string>& cfs,
const Options& options);
#ifndef ROCKSDB_LITE
int NumSortedRuns(int cf = 0);
uint64_t TotalSize(int cf = 0);
@ -1276,7 +1273,6 @@ class DBTestBase : public testing::Test {
double CompressionRatioAtLevel(int level, int cf = 0);
int TotalTableFiles(int cf = 0, int levels = -1);
#endif // ROCKSDB_LITE
std::vector<uint64_t> GetBlobFileNumbers();
@ -1312,9 +1308,7 @@ class DBTestBase : public testing::Test {
void MoveFilesToLevel(int level, int cf = 0);
#ifndef ROCKSDB_LITE
void DumpFileCounts(const char* label);
#endif // ROCKSDB_LITE
std::string DumpSSTableList();
@ -1383,12 +1377,10 @@ class DBTestBase : public testing::Test {
void VerifyDBInternal(
std::vector<std::pair<std::string, std::string>> true_data);
#ifndef ROCKSDB_LITE
uint64_t GetNumberOfSstFilesForColumnFamily(DB* db,
std::string column_family_name);
uint64_t GetSstSizeHelper(Temperature temperature);
#endif // ROCKSDB_LITE
uint64_t TestGetTickerCount(const Options& options, Tickers ticker_type) {
return options.statistics->getTickerCount(ticker_type);

View File

@ -9,7 +9,6 @@
#include "db/db_test_util.h"
#include "port/stack_trace.h"
#if !defined(ROCKSDB_LITE)
#include "rocksdb/utilities/table_properties_collectors.h"
#include "test_util/sync_point.h"
#include "test_util/testutil.h"
@ -2220,16 +2219,9 @@ TEST_F(DBTestUniversalCompaction2, PeriodicCompaction) {
} // namespace ROCKSDB_NAMESPACE
#endif // !defined(ROCKSDB_LITE)
int main(int argc, char** argv) {
#if !defined(ROCKSDB_LITE)
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
#else
(void)argc;
(void)argv;
return 0;
#endif
}

View File

@ -444,7 +444,6 @@ TEST_F(DBWALTest, RecoverWithBlob) {
ASSERT_EQ(blob_file->GetTotalBlobCount(), 1);
#ifndef ROCKSDB_LITE
const InternalStats* const internal_stats = cfd->internal_stats();
ASSERT_NE(internal_stats, nullptr);
@ -460,7 +459,6 @@ TEST_F(DBWALTest, RecoverWithBlob) {
ASSERT_EQ(cf_stats_value[InternalStats::BYTES_FLUSHED],
compaction_stats[0].bytes_written +
compaction_stats[0].bytes_written_blob);
#endif // ROCKSDB_LITE
}
TEST_F(DBWALTest, RecoverWithBlobMultiSST) {
@ -610,7 +608,6 @@ TEST_F(DBWALTest, WALWithChecksumHandoff) {
#endif // ROCKSDB_ASSERT_STATUS_CHECKED
}
#ifndef ROCKSDB_LITE
TEST_F(DBWALTest, LockWal) {
do {
Options options = CurrentOptions();
@ -654,7 +651,6 @@ TEST_F(DBWALTest, LockWal) {
SyncPoint::GetInstance()->DisableProcessing();
} while (ChangeWalOptions());
}
#endif //! ROCKSDB_LITE
class DBRecoveryTestBlobError
: public DBWALTest,
@ -892,7 +888,6 @@ TEST_F(DBWALTest, PreallocateBlock) {
}
#endif // !(defined NDEBUG) || !defined(OS_WIN)
#ifndef ROCKSDB_LITE
TEST_F(DBWALTest, DISABLED_FullPurgePreservesRecycledLog) {
// TODO(ajkr): Disabled until WAL recycling is fixed for
// `kPointInTimeRecovery`.
@ -2372,7 +2367,6 @@ TEST_F(DBWALTest, WalInManifestButNotInSortedWals) {
Close();
}
#endif // ROCKSDB_LITE
TEST_F(DBWALTest, WalTermTest) {
Options options = CurrentOptions();
@ -2398,7 +2392,6 @@ TEST_F(DBWALTest, WalTermTest) {
ASSERT_EQ("NOT_FOUND", Get(1, "foo2"));
}
#ifndef ROCKSDB_LITE
TEST_F(DBWALTest, GetCompressedWalsAfterSync) {
if (db_->GetOptions().wal_compression == kNoCompression) {
ROCKSDB_GTEST_BYPASS("stream compression not present");
@ -2433,7 +2426,6 @@ TEST_F(DBWALTest, GetCompressedWalsAfterSync) {
Status s = dbfull()->GetSortedWalFiles(wals);
ASSERT_OK(s);
}
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE
int main(int argc, char** argv) {

View File

@ -13,9 +13,7 @@
#include "rocksdb/utilities/debug.h"
#include "table/block_based/block_based_table_reader.h"
#include "table/block_based/block_builder.h"
#if !defined(ROCKSDB_LITE)
#include "test_util/sync_point.h"
#endif
#include "test_util/testutil.h"
#include "utilities/fault_injection_env.h"
#include "utilities/merge_operators/string_append/stringappend2.h"
@ -645,7 +643,6 @@ TEST_F(DBBasicTestWithTimestamp, OpenAndTrimHistoryInvalidOptionTest) {
.IsInvalidArgument());
}
#ifndef ROCKSDB_LITE
TEST_F(DBBasicTestWithTimestamp, GetTimestampTableProperties) {
Options options = CurrentOptions();
const size_t kTimestampSize = Timestamp(0, 0).size();
@ -675,7 +672,6 @@ TEST_F(DBBasicTestWithTimestamp, GetTimestampTableProperties) {
}
Close();
}
#endif // !ROCKSDB_LITE
class DBBasicTestWithTimestampTableOptions
: public DBBasicTestWithTimestampBase,
@ -2677,7 +2673,6 @@ TEST_P(DBBasicTestWithTimestampCompressionSettings, PutDeleteGet) {
}
}
#ifndef ROCKSDB_LITE
// A class which remembers the name of each flushed file.
class FlushedFileCollector : public EventListener {
public:
@ -2970,7 +2965,6 @@ TEST_F(DBBasicTestWithTimestamp, MultiGetNoReturnTs) {
Close();
}
#endif // !ROCKSDB_LITE
INSTANTIATE_TEST_CASE_P(
Timestamp, DBBasicTestWithTimestampCompressionSettings,

View File

@ -198,7 +198,6 @@ class TestFilePartitionerFactory : public SstPartitionerFactory {
const char* Name() const override { return "TestFilePartitionerFactory"; }
};
#ifndef ROCKSDB_LITE
TEST_F(TimestampCompatibleCompactionTest, CompactFilesRangeCheckL0) {
Options options = CurrentOptions();
options.env = env_;
@ -344,7 +343,6 @@ TEST_F(TimestampCompatibleCompactionTest, EmptyCompactionOutput) {
cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
}
#endif // !ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

View File

@ -780,7 +780,6 @@ TEST_P(DBWriteBufferManagerTest, MixedSlowDownOptionsMultipleDB) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
#ifndef ROCKSDB_LITE
// Tests a `WriteBufferManager` constructed with `allow_stall == false` does not
// thrash memtable switching when full and a CF receives multiple writes.
@ -847,7 +846,6 @@ TEST_P(DBWriteBufferManagerTest, StopSwitchingMemTablesOnceFlushing) {
delete shared_wbm_db;
}
#endif // ROCKSDB_LITE
INSTANTIATE_TEST_CASE_P(DBWriteBufferManagerTest, DBWriteBufferManagerTest,
testing::Bool());

View File

@ -7,7 +7,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef ROCKSDB_LITE
#include <stdlib.h>
@ -602,13 +601,3 @@ int main(int argc, char** argv) {
return RUN_ALL_TESTS();
}
#else
#include <stdio.h>
int main(int /*argc*/, char** /*argv*/) {
fprintf(stderr,
"SKIPPED as DBImpl::DeleteFile is not supported in ROCKSDB_LITE\n");
return 0;
}
#endif // !ROCKSDB_LITE

View File

@ -228,7 +228,6 @@ std::map<std::tuple<BackgroundErrorReason, bool>, Status::Severity>
};
void ErrorHandler::CancelErrorRecovery() {
#ifndef ROCKSDB_LITE
db_mutex_->AssertHeld();
// We'll release the lock before calling sfm, so make sure no new
@ -249,7 +248,6 @@ void ErrorHandler::CancelErrorRecovery() {
// If auto recovery is also running to resume from the retryable error,
// we should wait and end the auto recovery.
EndAutoRecovery();
#endif
}
STATIC_AVOID_DESTRUCTION(const Status, kOkStatus){Status::OK()};
@ -499,7 +497,6 @@ const Status& ErrorHandler::SetBGError(const Status& bg_status,
Status ErrorHandler::OverrideNoSpaceError(const Status& bg_error,
bool* auto_recovery) {
#ifndef ROCKSDB_LITE
if (bg_error.severity() >= Status::Severity::kFatalError) {
return bg_error;
}
@ -528,14 +525,9 @@ Status ErrorHandler::OverrideNoSpaceError(const Status& bg_error,
}
return bg_error;
#else
(void)auto_recovery;
return Status(bg_error, Status::Severity::kFatalError);
#endif
}
void ErrorHandler::RecoverFromNoSpace() {
#ifndef ROCKSDB_LITE
SstFileManagerImpl* sfm =
reinterpret_cast<SstFileManagerImpl*>(db_options_.sst_file_manager.get());
@ -543,11 +535,9 @@ void ErrorHandler::RecoverFromNoSpace() {
if (sfm) {
sfm->StartErrorRecovery(this, bg_error_);
}
#endif
}
Status ErrorHandler::ClearBGError() {
#ifndef ROCKSDB_LITE
db_mutex_->AssertHeld();
// Signal that recovery succeeded
@ -566,13 +556,9 @@ Status ErrorHandler::ClearBGError() {
bg_error_, db_mutex_);
}
return recovery_error_;
#else
return bg_error_;
#endif
}
Status ErrorHandler::RecoverFromBGError(bool is_manual) {
#ifndef ROCKSDB_LITE
InstrumentedMutexLock l(db_mutex_);
bool no_bg_work_original_flag = soft_error_no_bg_work_;
if (is_manual) {
@ -625,15 +611,10 @@ Status ErrorHandler::RecoverFromBGError(bool is_manual) {
recovery_in_prog_ = false;
}
return s;
#else
(void)is_manual;
return bg_error_;
#endif
}
const Status& ErrorHandler::StartRecoverFromRetryableBGIOError(
const IOStatus& io_error) {
#ifndef ROCKSDB_LITE
db_mutex_->AssertHeld();
if (bg_error_.ok()) {
return bg_error_;
@ -667,16 +648,11 @@ const Status& ErrorHandler::StartRecoverFromRetryableBGIOError(
} else {
return bg_error_;
}
#else
(void)io_error;
return bg_error_;
#endif
}
// Automatically recover from Retryable BG IO error. Must be called after db
// mutex is released.
void ErrorHandler::RecoverFromRetryableBGIOError() {
#ifndef ROCKSDB_LITE
TEST_SYNC_POINT("RecoverFromRetryableBGIOError:BeforeStart");
InstrumentedMutexLock l(db_mutex_);
if (end_recovery_) {
@ -784,9 +760,6 @@ void ErrorHandler::RecoverFromRetryableBGIOError() {
ERROR_HANDLER_AUTORESUME_RETRY_COUNT, retry_count);
}
return;
#else
return;
#endif
}
void ErrorHandler::CheckAndSetRecoveryAndBGError(const Status& bg_err) {

View File

@ -6,16 +6,13 @@
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef ROCKSDB_LITE
#include "db/db_test_util.h"
#include "file/sst_file_manager_impl.h"
#include "port/stack_trace.h"
#include "rocksdb/io_status.h"
#include "rocksdb/sst_file_manager.h"
#if !defined(ROCKSDB_LITE)
#include "test_util/sync_point.h"
#endif
#include "util/random.h"
#include "utilities/fault_injection_env.h"
#include "utilities/fault_injection_fs.h"
@ -2864,12 +2861,3 @@ int main(int argc, char** argv) {
return RUN_ALL_TESTS();
}
#else
#include <stdio.h>
int main(int /*argc*/, char** /*argv*/) {
fprintf(stderr, "SKIPPED as Cuckoo table is not supported in ROCKSDB_LITE\n");
return 0;
}
#endif // ROCKSDB_LITE

View File

@ -10,13 +10,11 @@
#include "rocksdb/utilities/customizable_util.h"
namespace ROCKSDB_NAMESPACE {
#ifndef ROCKSDB_LITE
Status EventListener::CreateFromString(const ConfigOptions& config_options,
const std::string& id,
std::shared_ptr<EventListener>* result) {
return LoadSharedObject<EventListener>(config_options, id, nullptr, result);
}
#endif // ROCKSDB_LITE
namespace {
template <class T>
@ -32,7 +30,6 @@ void EventHelpers::AppendCurrentTime(JSONWriter* jwriter) {
.count();
}
#ifndef ROCKSDB_LITE
void EventHelpers::NotifyTableFileCreationStarted(
const std::vector<std::shared_ptr<EventListener>>& listeners,
const std::string& db_name, const std::string& cf_name,
@ -50,13 +47,11 @@ void EventHelpers::NotifyTableFileCreationStarted(
listener->OnTableFileCreationStarted(info);
}
}
#endif // !ROCKSDB_LITE
void EventHelpers::NotifyOnBackgroundError(
const std::vector<std::shared_ptr<EventListener>>& listeners,
BackgroundErrorReason reason, Status* bg_error, InstrumentedMutex* db_mutex,
bool* auto_recovery) {
#ifndef ROCKSDB_LITE
if (listeners.empty()) {
return;
}
@ -71,13 +66,6 @@ void EventHelpers::NotifyOnBackgroundError(
}
}
db_mutex->Lock();
#else
(void)listeners;
(void)reason;
(void)bg_error;
(void)db_mutex;
(void)auto_recovery;
#endif // ROCKSDB_LITE
}
void EventHelpers::LogAndNotifyTableFileCreationFinished(
@ -179,7 +167,6 @@ void EventHelpers::LogAndNotifyTableFileCreationFinished(
event_logger->Log(jwriter);
}
#ifndef ROCKSDB_LITE
if (listeners.empty()) {
return;
}
@ -198,13 +185,6 @@ void EventHelpers::LogAndNotifyTableFileCreationFinished(
listener->OnTableFileCreated(info);
}
info.status.PermitUncheckedError();
#else
(void)listeners;
(void)db_name;
(void)cf_name;
(void)file_path;
(void)reason;
#endif // !ROCKSDB_LITE
}
void EventHelpers::LogAndNotifyTableFileDeletion(
@ -226,7 +206,6 @@ void EventHelpers::LogAndNotifyTableFileDeletion(
event_logger->Log(jwriter);
#ifndef ROCKSDB_LITE
if (listeners.empty()) {
return;
}
@ -239,18 +218,12 @@ void EventHelpers::LogAndNotifyTableFileDeletion(
listener->OnTableFileDeleted(info);
}
info.status.PermitUncheckedError();
#else
(void)file_path;
(void)dbname;
(void)listeners;
#endif // !ROCKSDB_LITE
}
void EventHelpers::NotifyOnErrorRecoveryEnd(
const std::vector<std::shared_ptr<EventListener>>& listeners,
const Status& old_bg_error, const Status& new_bg_error,
InstrumentedMutex* db_mutex) {
#ifndef ROCKSDB_LITE
if (!listeners.empty()) {
db_mutex->AssertHeld();
// release lock while notifying events
@ -266,15 +239,8 @@ void EventHelpers::NotifyOnErrorRecoveryEnd(
}
db_mutex->Lock();
}
#else
(void)listeners;
(void)old_bg_error;
(void)new_bg_error;
(void)db_mutex;
#endif // ROCKSDB_LITE
}
#ifndef ROCKSDB_LITE
void EventHelpers::NotifyBlobFileCreationStarted(
const std::vector<std::shared_ptr<EventListener>>& listeners,
const std::string& db_name, const std::string& cf_name,
@ -289,7 +255,6 @@ void EventHelpers::NotifyBlobFileCreationStarted(
listener->OnBlobFileCreationStarted(info);
}
}
#endif // !ROCKSDB_LITE
void EventHelpers::LogAndNotifyBlobFileCreationFinished(
EventLogger* event_logger,
@ -314,7 +279,6 @@ void EventHelpers::LogAndNotifyBlobFileCreationFinished(
event_logger->Log(jwriter);
}
#ifndef ROCKSDB_LITE
if (listeners.empty()) {
return;
}
@ -325,12 +289,6 @@ void EventHelpers::LogAndNotifyBlobFileCreationFinished(
listener->OnBlobFileCreated(info);
}
info.status.PermitUncheckedError();
#else
(void)listeners;
(void)db_name;
(void)file_path;
(void)creation_reason;
#endif
}
void EventHelpers::LogAndNotifyBlobFileDeletion(
@ -352,7 +310,6 @@ void EventHelpers::LogAndNotifyBlobFileDeletion(
jwriter.EndObject();
event_logger->Log(jwriter);
}
#ifndef ROCKSDB_LITE
if (listeners.empty()) {
return;
}
@ -361,11 +318,6 @@ void EventHelpers::LogAndNotifyBlobFileDeletion(
listener->OnBlobFileDeleted(info);
}
info.status.PermitUncheckedError();
#else
(void)listeners;
(void)dbname;
(void)file_path;
#endif // !ROCKSDB_LITE
}
} // namespace ROCKSDB_NAMESPACE

View File

@ -19,12 +19,10 @@ namespace ROCKSDB_NAMESPACE {
class EventHelpers {
public:
static void AppendCurrentTime(JSONWriter* json_writer);
#ifndef ROCKSDB_LITE
static void NotifyTableFileCreationStarted(
const std::vector<std::shared_ptr<EventListener>>& listeners,
const std::string& db_name, const std::string& cf_name,
const std::string& file_path, int job_id, TableFileCreationReason reason);
#endif // !ROCKSDB_LITE
static void NotifyOnBackgroundError(
const std::vector<std::shared_ptr<EventListener>>& listeners,
BackgroundErrorReason reason, Status* bg_error,
@ -48,13 +46,11 @@ class EventHelpers {
const Status& old_bg_error, const Status& new_bg_error,
InstrumentedMutex* db_mutex);
#ifndef ROCKSDB_LITE
static void NotifyBlobFileCreationStarted(
const std::vector<std::shared_ptr<EventListener>>& listeners,
const std::string& db_name, const std::string& cf_name,
const std::string& file_path, int job_id,
BlobFileCreationReason creation_reason);
#endif // !ROCKSDB_LITE
static void LogAndNotifyBlobFileCreationFinished(
EventLogger* event_logger,

View File

@ -12,7 +12,6 @@
namespace ROCKSDB_NAMESPACE {
namespace experimental {
#ifndef ROCKSDB_LITE
Status SuggestCompactRange(DB* db, ColumnFamilyHandle* column_family,
const Slice* begin, const Slice* end) {
@ -30,19 +29,6 @@ Status PromoteL0(DB* db, ColumnFamilyHandle* column_family, int target_level) {
return db->PromoteL0(column_family, target_level);
}
#else // ROCKSDB_LITE
Status SuggestCompactRange(DB* /*db*/, ColumnFamilyHandle* /*column_family*/,
const Slice* /*begin*/, const Slice* /*end*/) {
return Status::NotSupported("Not supported in RocksDB LITE");
}
Status PromoteL0(DB* /*db*/, ColumnFamilyHandle* /*column_family*/,
int /*target_level*/) {
return Status::NotSupported("Not supported in RocksDB LITE");
}
#endif // ROCKSDB_LITE
Status SuggestCompactRange(DB* db, const Slice* begin, const Slice* end) {
return SuggestCompactRange(db, db->DefaultColumnFamily(), begin, end);

View File

@ -17,7 +17,6 @@
namespace ROCKSDB_NAMESPACE {
#ifndef ROCKSDB_LITE
class ExternalSSTFileBasicTest
: public DBTestBase,
public ::testing::WithParamInterface<std::tuple<bool, bool>> {
@ -1988,7 +1987,6 @@ INSTANTIATE_TEST_CASE_P(ExternalSSTFileBasicTest, ExternalSSTFileBasicTest,
std::make_tuple(false, true),
std::make_tuple(false, false)));
#endif // ROCKSDB_LITE
} // namespace ROCKSDB_NAMESPACE

Some files were not shown because too many files have changed in this diff