mirror of https://github.com/facebook/rocksdb.git
Run internal cpp modernizer on RocksDB repo (#12398)
Summary: When the internal cpp modernizer attempts to format RocksDB code, it replaces the macro `ROCKSDB_NAMESPACE` with its default definition `rocksdb` when collapsing nested namespaces. We filed feedback for the tool (T180254030) and the team filed a bug for it: https://github.com/llvm/llvm-project/issues/83452. In the meantime, they suggested that we run the modernizer ourselves so that future automatic codemod attempts will be smaller. This diff contains:

Running `xplat/scripts/codemod_service/cpp_modernizer.sh` in fbcode/internal_repo_rocksdb/repo (excluding some directories in utilities/transactions/lock/range/range_tree/lib that have a non-Meta copyright comment), without swapping out the namespace macro `ROCKSDB_NAMESPACE`
Followed by RocksDB's own `make format`

Pull Request resolved: https://github.com/facebook/rocksdb/pull/12398

Test Plan: Auto tests

Reviewed By: hx235

Differential Revision: D54382532

Pulled By: jowlyzhang

fbshipit-source-id: e7d5b40f9b113b60e5a503558c181f080b9d02fa
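As context (not part of the diff below): the namespace problem arises because `ROCKSDB_NAMESPACE` is a macro that defaults to `rocksdb`, so a macro-unaware nested-namespace collapse bakes in the default expansion. A minimal illustration; the inner namespace and function are hypothetical:

    // rocksdb_namespace.h (simplified): the namespace is configurable via a macro.
    #ifndef ROCKSDB_NAMESPACE
    #define ROCKSDB_NAMESPACE rocksdb
    #endif

    // Original style, which keeps the macro intact:
    namespace ROCKSDB_NAMESPACE {
    namespace log {
    inline int Dummy() { return 0; }  // hypothetical function for illustration
    }  // namespace log
    }  // namespace ROCKSDB_NAMESPACE

    // What a naive C++17 nested-namespace collapse would produce, silently
    // substituting the macro's default expansion and breaking custom namespaces:
    //   namespace rocksdb::log { ... }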
This commit is contained in:
parent d7b8756976
commit 1cfdece85d
@@ -194,7 +194,7 @@ class SharedState {
       : cv_(&mu_),
         cache_bench_(cache_bench) {}
 
-  ~SharedState() {}
+  ~SharedState() = default;
 
   port::Mutex* GetMutex() { return &mu_; }
 

@@ -425,7 +425,7 @@ class CacheBench {
     }
   }
 
-  ~CacheBench() {}
+  ~CacheBench() = default;
 
   void PopulateCache() {
     Random64 rnd(FLAGS_seed);
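The most common rewrite in this diff replaces empty, hand-written constructors and destructors with `= default` (in the spirit of clang-tidy's modernize-use-equals-default). A minimal standalone sketch with a made-up class name:

    #include <string>

    class Widget {
     public:
      Widget() = default;   // was: Widget() {}
      ~Widget() = default;  // was: ~Widget() {}

     private:
      std::string name_;
    };

    int main() {
      Widget w;
      return 0;
    }

Defaulted special members state intent explicitly and let the compiler treat them as trivial where the member types allow it.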
@@ -106,7 +106,7 @@ class CacheTest : public testing::Test,
     type_ = GetParam();
   }
 
-  ~CacheTest() override {}
+  ~CacheTest() override = default;
 
   // These functions encode/decode keys in tests cases that use
   // int keys.

@@ -766,7 +766,9 @@ TEST_P(CacheTest, OverCapacity) {
     std::string key = EncodeKey(i + 1);
     auto h = cache.Lookup(key);
     ASSERT_TRUE(h != nullptr);
-    if (h) cache.Release(h);
+    if (h) {
+      cache.Release(h);
+    }
   }
 
   // the cache is over capacity since nothing could be evicted

@@ -777,7 +779,7 @@ TEST_P(CacheTest, OverCapacity) {
 
   if (IsHyperClock()) {
     // Make sure eviction is triggered.
-    ASSERT_OK(cache.Insert(EncodeKey(-1), nullptr, 1, &handles[0]));
+    ASSERT_OK(cache.Insert(EncodeKey(-1), nullptr, 1, handles.data()));
 
     // cache is under capacity now since elements were released
     ASSERT_GE(n, cache.get()->GetUsage());
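Several hunks, like the `if (h) cache.Release(h);` change above, wrap single-statement bodies in braces (similar to clang-tidy's readability-braces-around-statements). A small standalone sketch of the same style rule:

    #include <vector>

    int SumPositive(const std::vector<int>& xs) {
      int sum = 0;
      for (int x : xs) {
        if (x <= 0) {
          continue;  // was: if (x <= 0) continue;
        }
        sum += x;
      }
      return sum;
    }

    int main() { return SumPositive({1, -2, 3}) == 4 ? 0 : 1; }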
@@ -26,7 +26,7 @@ CompressedSecondaryCache::CompressedSecondaryCache(
           cache_))),
       disable_cache_(opts.capacity == 0) {}
 
-CompressedSecondaryCache::~CompressedSecondaryCache() {}
+CompressedSecondaryCache::~CompressedSecondaryCache() = default;
 
 std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
     const Slice& key, const Cache::CacheItemHelper* helper,
@@ -33,7 +33,7 @@ const std::string key3 = "____ ____key3";
 class CompressedSecondaryCacheTestBase : public testing::Test,
                                          public WithCacheType {
  public:
-  CompressedSecondaryCacheTestBase() {}
+  CompressedSecondaryCacheTestBase() = default;
   ~CompressedSecondaryCacheTestBase() override = default;
 
  protected:
@@ -32,7 +32,7 @@ namespace ROCKSDB_NAMESPACE {
 
 class LRUCacheTest : public testing::Test {
  public:
-  LRUCacheTest() {}
+  LRUCacheTest() = default;
   ~LRUCacheTest() override { DeleteCache(); }
 
   void DeleteCache() {

@@ -378,7 +378,7 @@ class ClockCacheTest : public testing::Test {
   using Table = typename Shard::Table;
   using TableOpts = typename Table::Opts;
 
-  ClockCacheTest() {}
+  ClockCacheTest() = default;
   ~ClockCacheTest() override { DeleteShard(); }
 
   void DeleteShard() {

@@ -1976,7 +1976,7 @@ TEST_P(BasicSecondaryCacheTest, BasicWaitAllTest) {
     ah.priority = Cache::Priority::LOW;
     cache->StartAsyncLookup(ah);
   }
-  cache->WaitAll(&async_handles[0], async_handles.size());
+  cache->WaitAll(async_handles.data(), async_handles.size());
   for (size_t i = 0; i < async_handles.size(); ++i) {
     SCOPED_TRACE("i = " + std::to_string(i));
     Cache::Handle* result = async_handles[i].Result();
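The `&handles[0]` and `&async_handles[0]` rewrites above use `std::vector::data()`, which, unlike `&v[0]`, is well defined even when the container is empty. A sketch with a hypothetical C-style function that takes a pointer and a length:

    #include <cstddef>
    #include <vector>

    // Hypothetical C-style API: fills n ints starting at dst.
    void FillBuffer(int* dst, std::size_t n) {
      for (std::size_t i = 0; i < n; ++i) {
        dst[i] = static_cast<int>(i);
      }
    }

    int main() {
      std::vector<int> buf(16);
      FillBuffer(buf.data(), buf.size());  // was: FillBuffer(&buf[0], buf.size());

      std::vector<int> empty;
      FillBuffer(empty.data(), empty.size());  // fine: data() may be null when size() == 0
      return buf[15] == 15 ? 0 : 1;
    }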
@@ -386,7 +386,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
   keys.push_back(Key(8));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 3u);

@@ -400,7 +400,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
   keys.push_back(Key(20));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);

@@ -414,7 +414,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
   keys.push_back(Key(8));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);

@@ -428,7 +428,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
   keys.push_back(Key(8));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);

@@ -442,7 +442,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
   keys.push_back(Key(8));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);

@@ -456,7 +456,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
   keys.push_back(Key(20));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
@@ -470,7 +470,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
   keys.push_back(Key(20));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);

@@ -484,7 +484,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
   keys.push_back(Key(20));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);

@@ -528,7 +528,7 @@ TEST_F(DBTieredSecondaryCacheTest, WaitAllTest) {
   keys.push_back(Key(8));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 3u);

@@ -542,7 +542,7 @@ TEST_F(DBTieredSecondaryCacheTest, WaitAllTest) {
   keys.push_back(Key(20));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);

@@ -561,7 +561,7 @@ TEST_F(DBTieredSecondaryCacheTest, WaitAllTest) {
   keys.push_back(Key(36));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 10u);

@@ -582,7 +582,7 @@ TEST_F(DBTieredSecondaryCacheTest, WaitAllTest) {
   keys.push_back(Key(8));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 10u);
@@ -629,7 +629,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) {
   keys.push_back(Key(8));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 3u);

@@ -644,7 +644,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) {
   keys.push_back(Key(20));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);

@@ -659,7 +659,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) {
   keys.push_back(Key(8));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);

@@ -676,7 +676,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) {
   keys.push_back(Key(36));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 8u);

@@ -691,7 +691,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) {
   keys.push_back(Key(36));
   values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
   ASSERT_EQ(values.size(), keys.size());
-  for (auto value : values) {
+  for (const auto& value : values) {
     ASSERT_EQ(1007, value.size());
   }
   ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 8u);
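The repeated `for (auto value : values)` → `for (const auto& value : values)` change above takes each element by const reference instead of copying it on every iteration, which is wasteful for string-like values. A generic sketch of the same pattern:

    #include <string>
    #include <vector>

    std::size_t TotalSize(const std::vector<std::string>& values) {
      std::size_t total = 0;
      for (const auto& value : values) {  // by reference: no per-iteration copy
        total += value.size();
      }
      return total;
    }

    int main() { return TotalSize({"ab", "cd"}) == 4 ? 0 : 1; }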
@@ -405,7 +405,7 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) {
 
     requests_buf[0] =
         BlobReadRequest(key_refs[0], blob_offsets[0], blob_sizes[0],
-                        kNoCompression, nullptr, &statuses_buf[0]);
+                        kNoCompression, nullptr, statuses_buf.data());
     requests_buf[1] =
         BlobReadRequest(key_refs[1], blob_offsets[1], blob_sizes[1] + 1,
                         kNoCompression, nullptr, &statuses_buf[1]);
@@ -168,8 +168,8 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) {
 
   uint64_t file_size = BlobLogHeader::kSize;
   for (size_t i = 0; i < num_blobs; ++i) {
-    keys.push_back({key_strs[i]});
-    blobs.push_back({blob_strs[i]});
+    keys.emplace_back(key_strs[i]);
+    blobs.emplace_back(blob_strs[i]);
     file_size += BlobLogRecord::kHeaderSize + keys[i].size() + blobs[i].size();
   }
   file_size += BlobLogFooter::kSize;

@@ -482,8 +482,8 @@ TEST_F(BlobSourceTest, GetCompressedBlobs) {
   std::vector<Slice> blobs;
 
   for (size_t i = 0; i < num_blobs; ++i) {
-    keys.push_back({key_strs[i]});
-    blobs.push_back({blob_strs[i]});
+    keys.emplace_back(key_strs[i]);
+    blobs.emplace_back(blob_strs[i]);
   }
 
   std::vector<uint64_t> blob_offsets(keys.size());

@@ -610,8 +610,8 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromMultiFiles) {
   uint64_t file_size = BlobLogHeader::kSize;
   uint64_t blob_value_bytes = 0;
   for (size_t i = 0; i < num_blobs; ++i) {
-    keys.push_back({key_strs[i]});
-    blobs.push_back({blob_strs[i]});
+    keys.emplace_back(key_strs[i]);
+    blobs.emplace_back(blob_strs[i]);
     blob_value_bytes += blobs[i].size();
     file_size += BlobLogRecord::kHeaderSize + keys[i].size() + blobs[i].size();
   }

@@ -802,8 +802,8 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) {
 
   uint64_t file_size = BlobLogHeader::kSize;
   for (size_t i = 0; i < num_blobs; ++i) {
-    keys.push_back({key_strs[i]});
-    blobs.push_back({blob_strs[i]});
+    keys.emplace_back(key_strs[i]);
+    blobs.emplace_back(blob_strs[i]);
     file_size += BlobLogRecord::kHeaderSize + keys[i].size() + blobs[i].size();
   }
   file_size += BlobLogFooter::kSize;
@@ -1164,7 +1164,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
     ASSERT_OK(blob_source.GetBlob(read_options, keys[0], file_number,
                                   blob_offsets[0], file_size, blob_sizes[0],
                                   kNoCompression, nullptr /* prefetch_buffer */,
-                                  &values[0], nullptr /* bytes_read */));
+                                  values.data(), nullptr /* bytes_read */));
     // Release cache handle
     values[0].Reset();
 

@@ -1183,7 +1183,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
     ASSERT_OK(blob_source.GetBlob(read_options, keys[0], file_number,
                                   blob_offsets[0], file_size, blob_sizes[0],
                                   kNoCompression, nullptr /* prefetch_buffer */,
-                                  &values[0], nullptr /* bytes_read */));
+                                  values.data(), nullptr /* bytes_read */));
     ASSERT_EQ(values[0], blobs[0]);
     ASSERT_TRUE(
         blob_source.TEST_BlobInCache(file_number, file_size, blob_offsets[0]));

@@ -1263,7 +1263,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
     ASSERT_OK(blob_source.GetBlob(
         read_options, keys[0], file_number, blob_offsets[0], file_size,
         blob_sizes[0], kNoCompression, nullptr /* prefetch_buffer */,
-        &values[0], nullptr /* bytes_read */));
+        values.data(), nullptr /* bytes_read */));
     ASSERT_EQ(values[0], blobs[0]);
 
     // Release cache handle

@@ -1365,8 +1365,8 @@ class BlobSourceCacheReservationTest : public DBTestBase {
 
     blob_file_size_ = BlobLogHeader::kSize;
    for (size_t i = 0; i < kNumBlobs; ++i) {
-      keys_.push_back({key_strs_[i]});
-      blobs_.push_back({blob_strs_[i]});
+      keys_.emplace_back(key_strs_[i]);
+      blobs_.emplace_back(blob_strs_[i]);
       blob_file_size_ +=
           BlobLogRecord::kHeaderSize + keys_[i].size() + blobs_[i].size();
     }
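The `keys.push_back({key_strs[i]})` → `keys.emplace_back(key_strs[i])` hunks construct the element directly inside the vector instead of building a temporary first (clang-tidy's modernize-use-emplace). A sketch using a small value type with a converting constructor, loosely standing in for rocksdb::Slice:

    #include <cstddef>
    #include <string>
    #include <vector>

    // Hypothetical lightweight view type for illustration.
    struct View {
      View(const std::string& s) : data(s.data()), size(s.size()) {}
      const char* data;
      std::size_t size;
    };

    int main() {
      std::string str = "hello";
      std::vector<View> views;
      views.emplace_back(str);       // constructs View(str) in place
      // views.push_back({str});     // builds a temporary View, then moves/copies it
      return views[0].size == 5 ? 0 : 1;
    }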
@@ -418,8 +418,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobs) {
   std::array<PinnableSlice, num_keys> values;
   std::array<Status, num_keys> statuses;
 
-  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
-                &values[0], &statuses[0]);
+  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
+                keys.data(), values.data(), statuses.data());
 
   ASSERT_OK(statuses[0]);
   ASSERT_EQ(values[0], first_value);

@@ -441,8 +441,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobs) {
   std::array<PinnableSlice, num_keys> values;
   std::array<Status, num_keys> statuses;
 
-  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
-                &values[0], &statuses[0]);
+  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
+                keys.data(), values.data(), statuses.data());
 
   ASSERT_OK(statuses[0]);
   ASSERT_EQ(values[0], first_value);

@@ -512,8 +512,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromCache) {
   std::array<PinnableSlice, num_keys> values;
   std::array<Status, num_keys> statuses;
 
-  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
-                &values[0], &statuses[0]);
+  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
+                keys.data(), values.data(), statuses.data());
 
   ASSERT_OK(statuses[0]);
   ASSERT_EQ(values[0], first_value);

@@ -534,8 +534,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromCache) {
   std::array<PinnableSlice, num_keys> values;
   std::array<Status, num_keys> statuses;
 
-  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
-                &values[0], &statuses[0]);
+  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
+                keys.data(), values.data(), statuses.data());
 
   ASSERT_OK(statuses[0]);
   ASSERT_EQ(values[0], first_value);

@@ -553,8 +553,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromCache) {
   std::array<PinnableSlice, num_keys> values;
   std::array<Status, num_keys> statuses;
 
-  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
-                &values[0], &statuses[0]);
+  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
+                keys.data(), values.data(), statuses.data());
 
   ASSERT_OK(statuses[0]);
   ASSERT_EQ(values[0], first_value);

@@ -574,8 +574,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromCache) {
   std::array<PinnableSlice, num_keys> values;
   std::array<Status, num_keys> statuses;
 
-  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
-                &values[0], &statuses[0]);
+  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
+                keys.data(), values.data(), statuses.data());
 
   ASSERT_OK(statuses[0]);
   ASSERT_EQ(values[0], first_value);
@@ -758,8 +758,8 @@ TEST_F(DBBlobBasicTest, MultiGetWithDirectIO) {
   //
   // [offset=0, len=12288]
 
-  db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys, &keys[0],
-                &values[0], &statuses[0]);
+  db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys,
+                keys.data(), values.data(), statuses.data());
 
   SyncPoint::GetInstance()->DisableProcessing();
   SyncPoint::GetInstance()->ClearAllCallBacks();

@@ -829,8 +829,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromMultipleFiles) {
   {
     std::array<PinnableSlice, kNumKeys> values;
     std::array<Status, kNumKeys> statuses;
-    db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, &keys[0],
-                  &values[0], &statuses[0]);
+    db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys,
+                  keys.data(), values.data(), statuses.data());
 
     for (size_t i = 0; i < kNumKeys; ++i) {
       ASSERT_OK(statuses[i]);

@@ -843,8 +843,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromMultipleFiles) {
   {
     std::array<PinnableSlice, kNumKeys> values;
     std::array<Status, kNumKeys> statuses;
-    db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, &keys[0],
-                  &values[0], &statuses[0]);
+    db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys,
+                  keys.data(), values.data(), statuses.data());
 
     for (size_t i = 0; i < kNumKeys; ++i) {
      ASSERT_TRUE(statuses[i].IsIncomplete());

@@ -858,8 +858,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromMultipleFiles) {
   {
     std::array<PinnableSlice, kNumKeys> values;
     std::array<Status, kNumKeys> statuses;
-    db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, &keys[0],
-                  &values[0], &statuses[0]);
+    db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys,
+                  keys.data(), values.data(), statuses.data());
 
     for (size_t i = 0; i < kNumKeys; ++i) {
       ASSERT_OK(statuses[i]);

@@ -872,8 +872,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromMultipleFiles) {
   {
     std::array<PinnableSlice, kNumKeys> values;
     std::array<Status, kNumKeys> statuses;
-    db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, &keys[0],
-                  &values[0], &statuses[0]);
+    db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys,
+                  keys.data(), values.data(), statuses.data());
 
     for (size_t i = 0; i < kNumKeys; ++i) {
       ASSERT_OK(statuses[i]);

@@ -1206,8 +1206,8 @@ TEST_F(DBBlobBasicTest, MultiGetMergeBlobWithPut) {
   std::array<PinnableSlice, num_keys> values;
   std::array<Status, num_keys> statuses;
 
-  db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys, &keys[0],
-                &values[0], &statuses[0]);
+  db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys,
                 keys.data(), values.data(), statuses.data());
 
   ASSERT_OK(statuses[0]);
   ASSERT_EQ(values[0], "v0_0,v0_1,v0_2");
@@ -1470,8 +1470,8 @@ TEST_P(DBBlobBasicIOErrorMultiGetTest, MultiGetBlobs_IOError) {
   });
   SyncPoint::GetInstance()->EnableProcessing();
 
-  db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys, &keys[0],
-                &values[0], &statuses[0]);
+  db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys,
                 keys.data(), values.data(), statuses.data());
 
   SyncPoint::GetInstance()->DisableProcessing();
   SyncPoint::GetInstance()->ClearAllCallBacks();

@@ -1820,7 +1820,7 @@ TEST_F(DBBlobBasicTest, GetEntityBlob) {
   std::array<Status, num_keys> statuses;
 
   db_->MultiGetEntity(ReadOptions(), db_->DefaultColumnFamily(), num_keys,
-                      &keys[0], &results[0], &statuses[0]);
+                      keys.data(), results.data(), statuses.data());
 
   ASSERT_OK(statuses[0]);
   ASSERT_EQ(results[0].columns(), expected_columns);

@@ -1917,8 +1917,8 @@ TEST_F(DBBlobWithTimestampTest, MultiGetBlobs) {
   std::array<PinnableSlice, num_keys> values;
   std::array<Status, num_keys> statuses;
 
-  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
-                &values[0], &statuses[0]);
+  db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
                 keys.data(), values.data(), statuses.data());
 
   ASSERT_OK(statuses[0]);
   ASSERT_EQ(values[0], first_value);

@@ -2001,8 +2001,8 @@ TEST_F(DBBlobWithTimestampTest, MultiGetMergeBlobWithPut) {
   std::array<PinnableSlice, num_keys> values;
   std::array<Status, num_keys> statuses;
 
-  db_->MultiGet(read_opts, db_->DefaultColumnFamily(), num_keys, &keys[0],
-                &values[0], &statuses[0]);
+  db_->MultiGet(read_opts, db_->DefaultColumnFamily(), num_keys, keys.data(),
+                values.data(), statuses.data());
 
   ASSERT_OK(statuses[0]);
   ASSERT_EQ(values[0], "v0_0,v0_1,v0_2");
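The MultiGet hunks above switch from `&keys[0]` to `keys.data()` on std::array arguments. A minimal sketch of that call pattern, using the pointer-based MultiGet overload these tests already call (error handling elided):

    #include <array>
    #include <cstddef>
    #include "rocksdb/db.h"

    void GetTwo(rocksdb::DB* db) {
      constexpr std::size_t num_keys = 2;
      std::array<rocksdb::Slice, num_keys> keys{rocksdb::Slice("k1"),
                                                rocksdb::Slice("k2")};
      std::array<rocksdb::PinnableSlice, num_keys> values;
      std::array<rocksdb::Status, num_keys> statuses;
      // data() yields the contiguous storage of each std::array.
      db->MultiGet(rocksdb::ReadOptions(), db->DefaultColumnFamily(), num_keys,
                   keys.data(), values.data(), statuses.data());
    }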
db/c.cc (47 changed lines)

@@ -446,7 +446,7 @@ struct rocksdb_mergeoperator_t : public MergeOperator {
     size_t new_value_len;
     char* tmp_new_value = (*full_merge_)(
         state_, merge_in.key.data(), merge_in.key.size(), existing_value_data,
-        existing_value_len, &operand_pointers[0], &operand_sizes[0],
+        existing_value_len, operand_pointers.data(), operand_sizes.data(),
         static_cast<int>(n), &success, &new_value_len);
     merge_out->new_value.assign(tmp_new_value, new_value_len);
 

@@ -475,8 +475,9 @@ struct rocksdb_mergeoperator_t : public MergeOperator {
     unsigned char success;
     size_t new_value_len;
     char* tmp_new_value = (*partial_merge_)(
-        state_, key.data(), key.size(), &operand_pointers[0], &operand_sizes[0],
-        static_cast<int>(operand_count), &success, &new_value_len);
+        state_, key.data(), key.size(), operand_pointers.data(),
+        operand_sizes.data(), static_cast<int>(operand_count), &success,
+        &new_value_len);
     new_value->assign(tmp_new_value, new_value_len);
 
     if (delete_value_ != nullptr) {
@@ -886,9 +887,9 @@ rocksdb_t* rocksdb_open_and_trim_history(
     size_t trim_tslen, char** errptr) {
   std::vector<ColumnFamilyDescriptor> column_families;
   for (int i = 0; i < num_column_families; i++) {
-    column_families.push_back(ColumnFamilyDescriptor(
+    column_families.emplace_back(
         std::string(column_family_names[i]),
-        ColumnFamilyOptions(column_family_options[i]->rep)));
+        ColumnFamilyOptions(column_family_options[i]->rep));
   }
 
   std::string trim_ts_(trim_ts, trim_tslen);

@@ -919,9 +920,9 @@ rocksdb_t* rocksdb_open_column_families(
     rocksdb_column_family_handle_t** column_family_handles, char** errptr) {
   std::vector<ColumnFamilyDescriptor> column_families;
   for (int i = 0; i < num_column_families; i++) {
-    column_families.push_back(ColumnFamilyDescriptor(
+    column_families.emplace_back(
         std::string(column_family_names[i]),
-        ColumnFamilyOptions(column_family_options[i]->rep)));
+        ColumnFamilyOptions(column_family_options[i]->rep));
   }
 
   DB* db;

@@ -953,9 +954,9 @@ rocksdb_t* rocksdb_open_column_families_with_ttl(
   for (int i = 0; i < num_column_families; i++) {
     ttls_vec.push_back(ttls[i]);
 
-    column_families.push_back(ColumnFamilyDescriptor(
+    column_families.emplace_back(
         std::string(column_family_names[i]),
-        ColumnFamilyOptions(column_family_options[i]->rep)));
+        ColumnFamilyOptions(column_family_options[i]->rep));
   }
 
   ROCKSDB_NAMESPACE::DBWithTTL* db;

@@ -985,9 +986,9 @@ rocksdb_t* rocksdb_open_for_read_only_column_families(
     unsigned char error_if_wal_file_exists, char** errptr) {
   std::vector<ColumnFamilyDescriptor> column_families;
   for (int i = 0; i < num_column_families; i++) {
-    column_families.push_back(ColumnFamilyDescriptor(
+    column_families.emplace_back(
         std::string(column_family_names[i]),
-        ColumnFamilyOptions(column_family_options[i]->rep)));
+        ColumnFamilyOptions(column_family_options[i]->rep));
   }
 
   DB* db;
@@ -1081,7 +1082,7 @@ rocksdb_column_family_handle_t** rocksdb_create_column_families(
   std::vector<ColumnFamilyHandle*> handles;
   std::vector<std::string> names;
   for (int i = 0; i != num_column_families; ++i) {
-    names.push_back(std::string(column_family_names[i]));
+    names.emplace_back(column_family_names[i]);
   }
   SaveError(errptr, db->rep->CreateColumnFamilies(
                         ColumnFamilyOptions(column_family_options->rep), names,

@@ -2788,7 +2789,9 @@ void rocksdb_options_set_cuckoo_table_factory(
 void rocksdb_set_options(rocksdb_t* db, int count, const char* const keys[],
                          const char* const values[], char** errptr) {
   std::unordered_map<std::string, std::string> options_map;
-  for (int i = 0; i < count; i++) options_map[keys[i]] = values[i];
+  for (int i = 0; i < count; i++) {
+    options_map[keys[i]] = values[i];
+  }
   SaveError(errptr, db->rep->SetOptions(options_map));
 }
 

@@ -2797,7 +2800,9 @@ void rocksdb_set_options_cf(rocksdb_t* db,
                             const char* const keys[],
                             const char* const values[], char** errptr) {
   std::unordered_map<std::string, std::string> options_map;
-  for (int i = 0; i < count; i++) options_map[keys[i]] = values[i];
+  for (int i = 0; i < count; i++) {
+    options_map[keys[i]] = values[i];
+  }
   SaveError(errptr, db->rep->SetOptions(handle->rep, options_map));
 }
 

@@ -5060,7 +5065,9 @@ void rocksdb_env_lower_high_priority_thread_pool_cpu_priority(
 }
 
 void rocksdb_env_destroy(rocksdb_env_t* env) {
-  if (!env->is_default) delete env->rep;
+  if (!env->is_default) {
+    delete env->rep;
+  }
   delete env;
 }
 
@@ -5524,7 +5531,7 @@ size_t rocksdb_column_family_metadata_get_level_count(
 rocksdb_level_metadata_t* rocksdb_column_family_metadata_get_level_metadata(
     rocksdb_column_family_metadata_t* cf_meta, size_t i) {
   if (i >= cf_meta->rep.levels.size()) {
-    return NULL;
+    return nullptr;
   }
   rocksdb_level_metadata_t* level_meta =
       (rocksdb_level_metadata_t*)malloc(sizeof(rocksdb_level_metadata_t));

@@ -5739,9 +5746,9 @@ rocksdb_transactiondb_t* rocksdb_transactiondb_open_column_families(
     rocksdb_column_family_handle_t** column_family_handles, char** errptr) {
   std::vector<ColumnFamilyDescriptor> column_families;
   for (int i = 0; i < num_column_families; i++) {
-    column_families.push_back(ColumnFamilyDescriptor(
+    column_families.emplace_back(
         std::string(column_family_names[i]),
-        ColumnFamilyOptions(column_family_options[i]->rep)));
+        ColumnFamilyOptions(column_family_options[i]->rep));
   }
 
   TransactionDB* txn_db;

@@ -6533,9 +6540,9 @@ rocksdb_optimistictransactiondb_open_column_families(
     rocksdb_column_family_handle_t** column_family_handles, char** errptr) {
   std::vector<ColumnFamilyDescriptor> column_families;
   for (int i = 0; i < num_column_families; i++) {
-    column_families.push_back(ColumnFamilyDescriptor(
+    column_families.emplace_back(
         std::string(column_family_names[i]),
-        ColumnFamilyOptions(column_family_options[i]->rep)));
+        ColumnFamilyOptions(column_family_options[i]->rep));
   }
 
   OptimisticTransactionDB* otxn_db;
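db/c.cc is C++ implementing the C API, so the modernizer also swaps NULL for nullptr there (modernize-use-nullptr) while leaving the C-style allocation pattern alone. A self-contained sketch of the same idea, with made-up type and function names:

    #include <cstdlib>

    struct level_metadata_t {
      int level;
    };

    level_metadata_t* make_level_metadata(bool valid) {
      if (!valid) {
        return nullptr;  // was: return NULL;
      }
      auto* meta =
          static_cast<level_metadata_t*>(std::malloc(sizeof(level_metadata_t)));
      if (meta != nullptr) {
        meta->level = 0;
      }
      return meta;
    }

    int main() {
      level_metadata_t* m = make_level_metadata(true);
      std::free(m);
      return make_level_metadata(false) == nullptr ? 0 : 1;
    }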
db/c_test.c (10 changed lines)

@@ -50,14 +50,15 @@ static void StartPhase(const char* name) {
 #endif
 static const char* GetTempDir(void) {
   const char* ret = getenv("TEST_TMPDIR");
-  if (ret == NULL || ret[0] == '\0')
+  if (ret == NULL || ret[0] == '\0') {
 #ifdef OS_WIN
     ret = getenv("TEMP");
 #else
     ret = "/tmp";
+  }
 #endif
   return ret;
 }
 #ifdef _MSC_VER
 #pragma warning(pop)
 #endif

@@ -206,10 +207,11 @@ static int CmpCompare(void* arg, const char* a, size_t alen, const char* b,
   size_t n = (alen < blen) ? alen : blen;
   int r = memcmp(a, b, n);
   if (r == 0) {
-    if (alen < blen)
+    if (alen < blen) {
       r = -1;
-    else if (alen > blen)
+    } else if (alen > blen) {
       r = +1;
+    }
   }
   return r;
 }
@@ -270,7 +270,7 @@ class ColumnFamilyTestBase : public testing::Test {
 
   void Reopen(const std::vector<ColumnFamilyOptions> options = {}) {
     std::vector<std::string> names;
-    for (auto name : names_) {
+    for (const auto& name : names_) {
       if (name != "") {
         names.push_back(name);
       }

@@ -607,7 +607,7 @@ TEST_P(FlushEmptyCFTestWithParam, FlushEmptyCFTest) {
   // Preserve file system state up to here to simulate a crash condition.
   fault_env->SetFilesystemActive(false);
   std::vector<std::string> names;
-  for (auto name : names_) {
+  for (const auto& name : names_) {
     if (name != "") {
       names.push_back(name);
     }

@@ -669,7 +669,7 @@ TEST_P(FlushEmptyCFTestWithParam, FlushEmptyCFTest2) {
   // Preserve file system state up to here to simulate a crash condition.
   fault_env->SetFilesystemActive(false);
   std::vector<std::string> names;
-  for (auto name : names_) {
+  for (const auto& name : names_) {
     if (name != "") {
       names.push_back(name);
     }

@@ -1034,7 +1034,7 @@ TEST_P(ColumnFamilyTest, CrashAfterFlush) {
   fault_env->SetFilesystemActive(false);
 
   std::vector<std::string> names;
-  for (auto name : names_) {
+  for (const auto& name : names_) {
     if (name != "") {
       names.push_back(name);
     }

@@ -3407,9 +3407,13 @@ TEST_P(ColumnFamilyTest, DISABLED_LogTruncationTest) {
   for (size_t i = 0; i < filenames.size(); i++) {
     uint64_t number;
     FileType type;
-    if (!(ParseFileName(filenames[i], &number, &type))) continue;
+    if (!(ParseFileName(filenames[i], &number, &type))) {
+      continue;
+    }
 
-    if (type != kWalFile) continue;
+    if (type != kWalFile) {
+      continue;
+    }
 
     logfs.push_back(filenames[i]);
   }
@@ -34,8 +34,8 @@ class CompactFilesTest : public testing::Test {
 // A class which remembers the name of each flushed file.
 class FlushedFileCollector : public EventListener {
  public:
-  FlushedFileCollector() {}
-  ~FlushedFileCollector() override {}
+  FlushedFileCollector() = default;
+  ~FlushedFileCollector() override = default;
 
   void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
     std::lock_guard<std::mutex> lock(mutex_);

@@ -45,7 +45,7 @@ class FlushedFileCollector : public EventListener {
   std::vector<std::string> GetFlushedFiles() {
     std::lock_guard<std::mutex> lock(mutex_);
     std::vector<std::string> result;
-    for (auto fname : flushed_files_) {
+    for (const auto& fname : flushed_files_) {
       result.push_back(fname);
     }
     return result;

@@ -159,7 +159,9 @@ TEST_F(CompactFilesTest, MultipleLevel) {
   // Compact files except the file in L3
   std::vector<std::string> files;
   for (int i = 0; i < 6; ++i) {
-    if (i == 3) continue;
+    if (i == 3) {
+      continue;
+    }
     for (auto& file : meta.levels[i].files) {
       files.push_back(file.db_path + "/" + file.name);
     }

@@ -228,7 +230,7 @@ TEST_F(CompactFilesTest, ObsoleteFiles) {
   ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForCompact());
 
   // verify all compaction input files are deleted
-  for (auto fname : l0_files) {
+  for (const auto& fname : l0_files) {
     ASSERT_EQ(Status::NotFound(), env_->FileExists(fname));
   }
   delete db;

@@ -492,4 +494,3 @@ int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
 }
-
@@ -160,7 +160,9 @@ std::vector<CompactionInputFiles> Compaction::PopulateWithAtomicBoundaries(
   AtomicCompactionUnitBoundary cur_boundary;
   size_t first_atomic_idx = 0;
   auto add_unit_boundary = [&](size_t to) {
-    if (first_atomic_idx == to) return;
+    if (first_atomic_idx == to) {
+      return;
+    }
     for (size_t k = first_atomic_idx; k < to; k++) {
       inputs[i].atomic_compaction_unit_boundaries.push_back(cur_boundary);
     }

@@ -753,7 +755,9 @@ int InputSummary(const std::vector<FileMetaData*>& files, char* output,
     AppendHumanBytes(files.at(i)->fd.GetFileSize(), sztxt, 16);
     ret = snprintf(output + write, sz, "%" PRIu64 "(%s) ",
                    files.at(i)->fd.GetNumber(), sztxt);
-    if (ret < 0 || ret >= sz) break;
+    if (ret < 0 || ret >= sz) {
+      break;
+    }
     write += ret;
   }
   // if files.size() is non-zero, overwrite the last space
@@ -404,7 +404,9 @@ void CompactionJob::AcquireSubcompactionResources(
 
 void CompactionJob::ShrinkSubcompactionResources(uint64_t num_extra_resources) {
   // Do nothing when we have zero resources to shrink
-  if (num_extra_resources == 0) return;
+  if (num_extra_resources == 0) {
+    return;
+  }
   db_mutex_->Lock();
   // We cannot release threads more than what we reserved before
   int extra_num_subcompaction_threads_released = env_->ReleaseThreads(

@@ -584,7 +586,9 @@ void CompactionJob::GenSubcompactionBoundaries() {
 
   TEST_SYNC_POINT_CALLBACK("CompactionJob::GenSubcompactionBoundaries:0",
                            &num_planned_subcompactions);
-  if (num_planned_subcompactions == 1) return;
+  if (num_planned_subcompactions == 1) {
+    return;
+  }
 
   // Group the ranges into subcompactions
   uint64_t target_range_size = std::max(

@@ -641,7 +645,7 @@ Status CompactionJob::Run() {
 
   // Always schedule the first subcompaction (whether or not there are also
   // others) in the current thread to be efficient with resources
-  ProcessKeyValueCompaction(&compact_->sub_compact_states[0]);
+  ProcessKeyValueCompaction(compact_->sub_compact_states.data());
 
   // Wait for all other threads (if there are any) to finish execution
   for (auto& thread : thread_pool) {
@@ -131,7 +131,7 @@ class CompactionJobStatsTest : public testing::Test,
     ColumnFamilyOptions cf_opts(options);
     size_t cfi = handles_.size();
     handles_.resize(cfi + cfs.size());
-    for (auto cf : cfs) {
+    for (const auto& cf : cfs) {
       ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
     }
   }

@@ -160,7 +160,7 @@ class CompactionJobStatsTest : public testing::Test,
     EXPECT_EQ(cfs.size(), options.size());
     std::vector<ColumnFamilyDescriptor> column_families;
     for (size_t i = 0; i < cfs.size(); ++i) {
-      column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i]));
+      column_families.emplace_back(cfs[i], options[i]);
     }
     DBOptions db_opts = DBOptions(options[0]);
     return DB::Open(db_opts, dbname_, column_families, &handles_, &db_);
@@ -308,7 +308,7 @@ class CompactionJobTestBase : public testing::Test {
                            kDefaultColumnFamilyName, -1 /* level */),
         file_writer.get()));
     // Build table.
-    for (auto kv : contents) {
+    for (const auto& kv : contents) {
       std::string key;
       std::string value;
       std::tie(key, value) = kv;

@@ -327,7 +327,7 @@ class CompactionJobTestBase : public testing::Test {
     SequenceNumber smallest_seqno = kMaxSequenceNumber;
     SequenceNumber largest_seqno = 0;
     uint64_t oldest_blob_file_number = kInvalidBlobFileNumber;
-    for (auto kv : contents) {
+    for (const auto& kv : contents) {
       ParsedInternalKey key;
       std::string skey;
       std::string value;
@ -130,7 +130,7 @@ CompactionPicker::CompactionPicker(const ImmutableOptions& ioptions,
|
||||||
const InternalKeyComparator* icmp)
|
const InternalKeyComparator* icmp)
|
||||||
: ioptions_(ioptions), icmp_(icmp) {}
|
: ioptions_(ioptions), icmp_(icmp) {}
|
||||||
|
|
||||||
CompactionPicker::~CompactionPicker() {}
|
CompactionPicker::~CompactionPicker() = default;
|
||||||
|
|
||||||
// Delete this compaction from the list of running compactions.
|
// Delete this compaction from the list of running compactions.
|
||||||
void CompactionPicker::ReleaseCompactionFiles(Compaction* c, Status status) {
|
void CompactionPicker::ReleaseCompactionFiles(Compaction* c, Status status) {
|
||||||
|
|
|
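
The largest group of edits in this change replaces empty user-written special member bodies with `= default`, which states explicitly that the compiler-generated version is wanted and keeps the type's triviality properties. A minimal sketch of the spelling (the `Widget` class is illustrative only):

    class Widget {
     public:
      Widget() = default;
      // Explicitly defaulted: behaves like ~Widget() {}, but documents that the
      // compiler-generated destructor is intended.
      ~Widget() = default;
      // For polymorphic base classes the usual spelling would be:
      //   virtual ~Widget() = default;
    };

    int main() {
      Widget w;
      (void)w;
      return 0;
    }
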
@@ -355,7 +355,9 @@ void LevelCompactionBuilder::SetupOtherFilesWithRoundRobinExpansion() {
   TEST_SYNC_POINT("LevelCompactionPicker::RoundRobin");
 
   // Only expand the inputs when we have selected a file in start_level_inputs_
-  if (start_level_inputs_.size() == 0) return;
+  if (start_level_inputs_.size() == 0) {
+    return;
+  }
 
   uint64_t start_lvl_bytes_no_compacting = 0;
   uint64_t curr_bytes_to_compact = 0;
@@ -77,7 +77,7 @@ class CompactionPickerTestBase : public testing::Test {
     ioptions_.level_compaction_dynamic_level_bytes = false;
   }
 
-  ~CompactionPickerTestBase() override {}
+  ~CompactionPickerTestBase() override = default;
 
   void NewVersionStorage(int num_levels, CompactionStyle style) {
     DeleteVersionStorage();
@@ -214,7 +214,7 @@ class CompactionPickerTest : public CompactionPickerTestBase {
   explicit CompactionPickerTest()
       : CompactionPickerTestBase(BytewiseComparator()) {}
 
-  ~CompactionPickerTest() override {}
+  ~CompactionPickerTest() override = default;
 };
 
 class CompactionPickerU64TsTest : public CompactionPickerTestBase {
@@ -222,7 +222,7 @@ class CompactionPickerU64TsTest : public CompactionPickerTestBase {
   explicit CompactionPickerU64TsTest()
      : CompactionPickerTestBase(test::BytewiseComparatorWithU64TsWrapper()) {}
 
-  ~CompactionPickerU64TsTest() override {}
+  ~CompactionPickerU64TsTest() override = default;
 };
 
 TEST_F(CompactionPickerTest, Empty) {
@@ -563,10 +563,10 @@ TEST_F(CompactionServiceTest, ConcurrentCompaction) {
 
   std::vector<std::thread> threads;
   for (const auto& file : meta.levels[1].files) {
-    threads.emplace_back(std::thread([&]() {
+    threads.emplace_back([&]() {
       std::string fname = file.db_path + "/" + file.name;
       ASSERT_OK(db_->CompactFiles(CompactionOptions(), {fname}, 2));
-    }));
+    });
   }
 
   for (auto& thread : threads) {
@@ -170,7 +170,7 @@ void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,
 
 class DoubleComparator : public Comparator {
  public:
-  DoubleComparator() {}
+  DoubleComparator() = default;
 
   const char* Name() const override { return "DoubleComparator"; }
 
@@ -198,7 +198,7 @@ class DoubleComparator : public Comparator {
 
 class HashComparator : public Comparator {
  public:
-  HashComparator() {}
+  HashComparator() = default;
 
   const char* Name() const override { return "HashComparator"; }
 
@@ -221,7 +221,7 @@ class HashComparator : public Comparator {
 
 class TwoStrComparator : public Comparator {
  public:
-  TwoStrComparator() {}
+  TwoStrComparator() = default;
 
   const char* Name() const override { return "TwoStrComparator"; }
 
@@ -372,7 +372,7 @@ TEST_P(ComparatorDBTest, Uint64Comparator) {
     uint64_t r = rnd64.Next();
     std::string str;
     str.resize(8);
-    memcpy(&str[0], static_cast<void*>(&r), 8);
+    memcpy(str.data(), static_cast<void*>(&r), 8);
     source_strings.push_back(str);
   }
 
@@ -209,7 +209,7 @@ static std::string Key(int i) {
 static std::string Uint64Key(uint64_t i) {
   std::string str;
   str.resize(8);
-  memcpy(&str[0], static_cast<void*>(&i), 8);
+  memcpy(str.data(), static_cast<void*>(&i), 8);
   return str;
 }
 }  // namespace.
@@ -1368,9 +1368,9 @@ TEST_P(DBMultiGetTestWithParam, MultiGetMultiCF) {
   for (int i = 0; i < num_keys; ++i) {
     int cf = i / 3;
     int cf_key = 1 % 3;
-    cf_kv_vec.emplace_back(std::make_tuple(
+    cf_kv_vec.emplace_back(
         cf, "cf" + std::to_string(cf) + "_key_" + std::to_string(cf_key),
-        "cf" + std::to_string(cf) + "_val_" + std::to_string(cf_key)));
+        "cf" + std::to_string(cf) + "_val_" + std::to_string(cf_key));
     ASSERT_OK(Put(std::get<0>(cf_kv_vec[i]), std::get<1>(cf_kv_vec[i]),
                   std::get<2>(cf_kv_vec[i])));
   }
@@ -2607,9 +2607,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL1) {
   key_strs.push_back(Key(33));
   key_strs.push_back(Key(54));
   key_strs.push_back(Key(102));
-  keys.push_back(key_strs[0]);
-  keys.push_back(key_strs[1]);
-  keys.push_back(key_strs[2]);
+  keys.emplace_back(key_strs[0]);
+  keys.emplace_back(key_strs[1]);
+  keys.emplace_back(key_strs[2]);
   values.resize(keys.size());
   statuses.resize(keys.size());
 
@@ -2652,9 +2652,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL1Error) {
   key_strs.push_back(Key(33));
   key_strs.push_back(Key(54));
   key_strs.push_back(Key(102));
-  keys.push_back(key_strs[0]);
-  keys.push_back(key_strs[1]);
-  keys.push_back(key_strs[2]);
+  keys.emplace_back(key_strs[0]);
+  keys.emplace_back(key_strs[1]);
+  keys.emplace_back(key_strs[2]);
   values.resize(keys.size());
   statuses.resize(keys.size());
 
@@ -2717,9 +2717,9 @@ TEST_P(DBMultiGetAsyncIOTest, LastKeyInFile) {
   key_strs.push_back(Key(21));
   key_strs.push_back(Key(54));
   key_strs.push_back(Key(102));
-  keys.push_back(key_strs[0]);
-  keys.push_back(key_strs[1]);
-  keys.push_back(key_strs[2]);
+  keys.emplace_back(key_strs[0]);
+  keys.emplace_back(key_strs[1]);
+  keys.emplace_back(key_strs[2]);
   values.resize(keys.size());
   statuses.resize(keys.size());
 
@@ -2762,9 +2762,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL1AndL2) {
   key_strs.push_back(Key(33));
   key_strs.push_back(Key(56));
   key_strs.push_back(Key(102));
-  keys.push_back(key_strs[0]);
-  keys.push_back(key_strs[1]);
-  keys.push_back(key_strs[2]);
+  keys.emplace_back(key_strs[0]);
+  keys.emplace_back(key_strs[1]);
+  keys.emplace_back(key_strs[2]);
   values.resize(keys.size());
   statuses.resize(keys.size());
 
@@ -2805,8 +2805,8 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL2WithRangeOverlapL0L1) {
   // 19 and 26 are in L2, but overlap with L0 and L1 file ranges
   key_strs.push_back(Key(19));
   key_strs.push_back(Key(26));
-  keys.push_back(key_strs[0]);
-  keys.push_back(key_strs[1]);
+  keys.emplace_back(key_strs[0]);
+  keys.emplace_back(key_strs[1]);
   values.resize(keys.size());
   statuses.resize(keys.size());
 
@@ -2841,8 +2841,8 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL2WithRangeDelInL1) {
   // 139 and 163 are in L2, but overlap with a range deletes in L1
   key_strs.push_back(Key(139));
   key_strs.push_back(Key(163));
-  keys.push_back(key_strs[0]);
-  keys.push_back(key_strs[1]);
+  keys.emplace_back(key_strs[0]);
+  keys.emplace_back(key_strs[1]);
   values.resize(keys.size());
   statuses.resize(keys.size());
 
@@ -2871,9 +2871,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL1AndL2WithRangeDelInL1) {
   key_strs.push_back(Key(139));
   key_strs.push_back(Key(144));
   key_strs.push_back(Key(163));
-  keys.push_back(key_strs[0]);
-  keys.push_back(key_strs[1]);
-  keys.push_back(key_strs[2]);
+  keys.emplace_back(key_strs[0]);
+  keys.emplace_back(key_strs[1]);
+  keys.emplace_back(key_strs[2]);
   values.resize(keys.size());
   statuses.resize(keys.size());
 
@@ -2904,9 +2904,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetNoIOUring) {
   key_strs.push_back(Key(33));
   key_strs.push_back(Key(54));
   key_strs.push_back(Key(102));
-  keys.push_back(key_strs[0]);
-  keys.push_back(key_strs[1]);
-  keys.push_back(key_strs[2]);
+  keys.emplace_back(key_strs[0]);
+  keys.emplace_back(key_strs[1]);
+  keys.emplace_back(key_strs[2]);
   values.resize(keys.size());
   statuses.resize(keys.size());
@@ -3285,9 +3285,9 @@ TEST_F(DBBasicTest, MultiGetIOBufferOverrun) {
 
   // Warm up the cache first
   key_data.emplace_back(Key(0));
-  keys.emplace_back(Slice(key_data.back()));
+  keys.emplace_back(key_data.back());
   key_data.emplace_back(Key(50));
-  keys.emplace_back(Slice(key_data.back()));
+  keys.emplace_back(key_data.back());
   statuses.resize(keys.size());
 
   dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
@@ -3661,10 +3661,10 @@ TEST_F(DBBasicTest, ConcurrentlyCloseDB) {
   DestroyAndReopen(options);
   std::vector<std::thread> workers;
   for (int i = 0; i < 10; i++) {
-    workers.push_back(std::thread([&]() {
+    workers.emplace_back([&]() {
       auto s = db_->Close();
       ASSERT_OK(s);
-    }));
+    });
   }
   for (auto& w : workers) {
     w.join();
@@ -3938,9 +3938,9 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
 
   // Warm up the cache first
   key_data.emplace_back(Key(0));
-  keys.emplace_back(Slice(key_data.back()));
+  keys.emplace_back(key_data.back());
   key_data.emplace_back(Key(50));
-  keys.emplace_back(Slice(key_data.back()));
+  keys.emplace_back(key_data.back());
   statuses.resize(keys.size());
 
   dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
@@ -4119,9 +4119,9 @@ TEST_P(DBBasicTestWithParallelIO, MultiGetDirectIO) {
 
   // Warm up the cache first
   key_data.emplace_back(Key(0));
-  keys.emplace_back(Slice(key_data.back()));
+  keys.emplace_back(key_data.back());
   key_data.emplace_back(Key(50));
-  keys.emplace_back(Slice(key_data.back()));
+  keys.emplace_back(key_data.back());
   statuses.resize(keys.size());
 
   dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
@@ -4189,9 +4189,9 @@ TEST_P(DBBasicTestWithParallelIO, MultiGetWithChecksumMismatch) {
 
   // Warm up the cache first
   key_data.emplace_back(Key(0));
-  keys.emplace_back(Slice(key_data.back()));
+  keys.emplace_back(key_data.back());
   key_data.emplace_back(Key(50));
-  keys.emplace_back(Slice(key_data.back()));
+  keys.emplace_back(key_data.back());
   statuses.resize(keys.size());
 
   dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
@@ -4237,9 +4237,9 @@ TEST_P(DBBasicTestWithParallelIO, MultiGetWithMissingFile) {
 
   // Warm up the cache first
   key_data.emplace_back(Key(0));
-  keys.emplace_back(Slice(key_data.back()));
+  keys.emplace_back(key_data.back());
   key_data.emplace_back(Key(50));
-  keys.emplace_back(Slice(key_data.back()));
+  keys.emplace_back(key_data.back());
   statuses.resize(keys.size());
 
   dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
@@ -4743,7 +4743,7 @@ TEST_F(DBBasicTest, VerifyFileChecksumsReadahead) {
   uint64_t number;
   FileType type;
   ASSERT_OK(env_->GetChildren(dbname_, &filenames));
-  for (auto name : filenames) {
+  for (const auto& name : filenames) {
     if (ParseFileName(name, &number, &type)) {
       if (type == kTableFile) {
         sst_cnt++;
@@ -744,7 +744,7 @@ TEST_F(DBBlockCacheTest, AddRedundantStats) {
   const size_t capacity = size_t{1} << 25;
   const int num_shard_bits = 0;  // 1 shard
   int iterations_tested = 0;
-  for (std::shared_ptr<Cache> base_cache :
+  for (const std::shared_ptr<Cache>& base_cache :
        {NewLRUCache(capacity, num_shard_bits),
         // FixedHyperClockCache
         HyperClockCacheOptions(
@@ -990,7 +990,7 @@ TEST_F(DBBlockCacheTest, CacheEntryRoleStats) {
   int iterations_tested = 0;
   for (bool partition : {false, true}) {
     SCOPED_TRACE("Partition? " + std::to_string(partition));
-    for (std::shared_ptr<Cache> cache :
+    for (const std::shared_ptr<Cache>& cache :
         {NewLRUCache(capacity),
          HyperClockCacheOptions(
              capacity,
@@ -1251,7 +1251,7 @@ void DummyFillCache(Cache& cache, size_t entry_size,
 
 class CountingLogger : public Logger {
  public:
-  ~CountingLogger() override {}
+  ~CountingLogger() override = default;
   using Logger::Logv;
   void Logv(const InfoLogLevel log_level, const char* format,
             va_list /*ap*/) override {
@@ -1373,7 +1373,7 @@ class StableCacheKeyTestFS : public FaultInjectionTestFS {
     SetFailGetUniqueId(true);
   }
 
-  ~StableCacheKeyTestFS() override {}
+  ~StableCacheKeyTestFS() override = default;
 
   IOStatus LinkFile(const std::string&, const std::string&, const IOOptions&,
                     IODebugContext*) override {
@@ -1566,7 +1566,7 @@ class CacheKeyTest : public testing::Test {
     tp_.db_id = std::to_string(db_id_);
     tp_.orig_file_number = file_number;
     bool is_stable;
-    std::string cur_session_id = "";  // ignored
+    std::string cur_session_id;  // ignored
     uint64_t cur_file_number = 42;  // ignored
     OffsetableCacheKey rv;
     BlockBasedTable::SetupBaseCacheKey(&tp_, cur_session_id, cur_file_number,
@@ -78,7 +78,7 @@ class DBBloomFilterTestWithParam
   DBBloomFilterTestWithParam()
       : DBTestBase("db_bloom_filter_tests", /*env_do_fsync=*/true) {}
 
-  ~DBBloomFilterTestWithParam() override {}
+  ~DBBloomFilterTestWithParam() override = default;
 
   void SetUp() override {
     bfp_impl_ = std::get<0>(GetParam());
@@ -2051,7 +2051,7 @@ class DBBloomFilterTestVaryPrefixAndFormatVer
   DBBloomFilterTestVaryPrefixAndFormatVer()
      : DBTestBase("db_bloom_filter_tests", /*env_do_fsync=*/true) {}
 
-  ~DBBloomFilterTestVaryPrefixAndFormatVer() override {}
+  ~DBBloomFilterTestVaryPrefixAndFormatVer() override = default;
 
   void SetUp() override {
     use_prefix_ = std::get<0>(GetParam());
@@ -2126,8 +2126,9 @@ TEST_P(DBBloomFilterTestVaryPrefixAndFormatVer, PartitionedMultiGet) {
     values[i] = PinnableSlice();
   }
 
-  db_->MultiGet(ropts, Q, &column_families[0], &key_slices[0], &values[0],
-                /*timestamps=*/nullptr, &statuses[0], true);
+  db_->MultiGet(ropts, Q, column_families.data(), key_slices.data(),
+                values.data(),
+                /*timestamps=*/nullptr, statuses.data(), true);
 
   // Confirm correct status results
   uint32_t number_not_found = 0;
@@ -2177,8 +2178,9 @@ TEST_P(DBBloomFilterTestVaryPrefixAndFormatVer, PartitionedMultiGet) {
     values[i] = PinnableSlice();
   }
 
-  db_->MultiGet(ropts, Q, &column_families[0], &key_slices[0], &values[0],
-                /*timestamps=*/nullptr, &statuses[0], true);
+  db_->MultiGet(ropts, Q, column_families.data(), key_slices.data(),
+                values.data(),
+                /*timestamps=*/nullptr, statuses.data(), true);
 
   // Confirm correct status results
   uint32_t number_not_found = 0;
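
Several hunks, including the two above, replace `&v[0]` with `v.data()`. The two expressions are equivalent for a non-empty container, but `data()` is also well defined on an empty container (the pointer simply must not be dereferenced), whereas `&v[0]` indexes an element that does not exist. A minimal sketch of the idiom; the `fill_buffer` helper is invented for illustration and is not part of RocksDB:

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    // Illustrative helper: copies raw bytes into a pre-sized string.
    static std::string fill_buffer(const void* src, size_t n) {
      std::string out;
      out.resize(n);
      std::memcpy(out.data(), src, n);  // writable data() requires C++17
      return out;
    }

    int main() {
      std::vector<int> empty_vec;
      int* p = empty_vec.data();  // well defined even when empty; do not dereference
      (void)p;

      uint64_t value = 42;
      std::string bytes = fill_buffer(&value, sizeof(value));
      return bytes.size() == sizeof(value) ? 0 : 1;
    }
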
@@ -150,7 +150,7 @@ class ConditionalFilter : public CompactionFilter {
 
 class ChangeFilter : public CompactionFilter {
  public:
-  explicit ChangeFilter() {}
+  explicit ChangeFilter() = default;
 
   bool Filter(int /*level*/, const Slice& /*key*/, const Slice& /*value*/,
               std::string* new_value, bool* value_changed) const override {
@@ -289,7 +289,7 @@ class ConditionalFilterFactory : public CompactionFilterFactory {
 
 class ChangeFilterFactory : public CompactionFilterFactory {
  public:
-  explicit ChangeFilterFactory() {}
+  explicit ChangeFilterFactory() = default;
 
   std::unique_ptr<CompactionFilter> CreateCompactionFilter(
       const CompactionFilter::Context& /*context*/) override {
@@ -41,7 +41,7 @@ class CompactionStatsCollector : public EventListener {
     }
   }
 
-  ~CompactionStatsCollector() override {}
+  ~CompactionStatsCollector() override = default;
 
   void OnCompactionCompleted(DB* /* db */,
                              const CompactionJobInfo& info) override {
@@ -241,8 +241,8 @@ class RoundRobinSubcompactionsAgainstResources
 namespace {
 class FlushedFileCollector : public EventListener {
  public:
-  FlushedFileCollector() {}
-  ~FlushedFileCollector() override {}
+  FlushedFileCollector() = default;
+  ~FlushedFileCollector() override = default;
 
   void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
     std::lock_guard<std::mutex> lock(mutex_);
@@ -252,7 +252,7 @@ class FlushedFileCollector : public EventListener {
   std::vector<std::string> GetFlushedFiles() {
     std::lock_guard<std::mutex> lock(mutex_);
     std::vector<std::string> result;
-    for (auto fname : flushed_files_) {
+    for (const auto& fname : flushed_files_) {
       result.push_back(fname);
     }
     return result;
@@ -2090,9 +2090,9 @@ TEST_P(DBDeleteFileRangeTest, DeleteFilesInRanges) {
     Slice begin2(begin_str2), end2(end_str2);
     Slice begin3(begin_str3), end3(end_str3);
     std::vector<RangePtr> ranges;
-    ranges.push_back(RangePtr(&begin1, &end1));
-    ranges.push_back(RangePtr(&begin2, &end2));
-    ranges.push_back(RangePtr(&begin3, &end3));
+    ranges.emplace_back(&begin1, &end1);
+    ranges.emplace_back(&begin2, &end2);
+    ranges.emplace_back(&begin3, &end3);
     ASSERT_OK(DeleteFilesInRanges(db_, db_->DefaultColumnFamily(),
                                   ranges.data(), ranges.size()));
     ASSERT_EQ("0,3,7", FilesPerLevel(0));
@@ -2117,9 +2117,9 @@ TEST_P(DBDeleteFileRangeTest, DeleteFilesInRanges) {
     Slice begin2(begin_str2), end2(end_str2);
     Slice begin3(begin_str3), end3(end_str3);
     std::vector<RangePtr> ranges;
-    ranges.push_back(RangePtr(&begin1, &end1));
-    ranges.push_back(RangePtr(&begin2, &end2));
-    ranges.push_back(RangePtr(&begin3, &end3));
+    ranges.emplace_back(&begin1, &end1);
+    ranges.emplace_back(&begin2, &end2);
+    ranges.emplace_back(&begin3, &end3);
     ASSERT_OK(DeleteFilesInRanges(db_, db_->DefaultColumnFamily(),
                                   ranges.data(), ranges.size(), false));
     ASSERT_EQ("0,1,4", FilesPerLevel(0));
@@ -6641,7 +6641,7 @@ TEST_F(DBCompactionTest, RoundRobinCutOutputAtCompactCursor) {
 
 class NoopMergeOperator : public MergeOperator {
  public:
-  NoopMergeOperator() {}
+  NoopMergeOperator() = default;
 
   bool FullMergeV2(const MergeOperationInput& /*merge_in*/,
                    MergeOperationOutput* merge_out) const override {
@@ -9878,7 +9878,7 @@ TEST_F(DBCompactionTest, TurnOnLevelCompactionDynamicLevelBytesUCToLC) {
   options.compaction_style = CompactionStyle::kCompactionStyleLevel;
   options.level_compaction_dynamic_level_bytes = true;
   ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  std::string expected_lsm = "";
+  std::string expected_lsm;
   for (int i = 0; i < 49; ++i) {
     expected_lsm += "0,";
   }
@@ -10394,20 +10394,20 @@ TEST_F(DBCompactionTest, ReleaseCompactionDuringManifestWrite) {
   SyncPoint::GetInstance()->EnableProcessing();
 
   std::vector<std::thread> threads;
-  threads.emplace_back(std::thread([&]() {
+  threads.emplace_back([&]() {
     std::string k1_str = Key(1);
     std::string k2_str = Key(2);
     Slice k1 = k1_str;
     Slice k2 = k2_str;
     ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &k1, &k2));
-  }));
-  threads.emplace_back(std::thread([&]() {
+  });
+  threads.emplace_back([&]() {
    std::string k10_str = Key(10);
    std::string k11_str = Key(11);
    Slice k10 = k10_str;
    Slice k11 = k11_str;
    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &k10, &k11));
-  }));
+  });
   std::string k100_str = Key(100);
   std::string k101_str = Key(101);
   Slice k100 = k100_str;
@@ -1367,14 +1367,15 @@ TEST_F(DBFlushTest, MemPurgeDeleteAndDeleteRange) {
     ASSERT_OK(iter->status());
     key = (iter->key()).ToString(false);
     value = (iter->value()).ToString(false);
-    if (key.compare(KEY3) == 0)
+    if (key.compare(KEY3) == 0) {
       ASSERT_EQ(value, p_v3b);
-    else if (key.compare(KEY4) == 0)
+    } else if (key.compare(KEY4) == 0) {
       ASSERT_EQ(value, p_v4);
-    else if (key.compare(KEY5) == 0)
+    } else if (key.compare(KEY5) == 0) {
       ASSERT_EQ(value, p_v5);
-    else
+    } else {
       ASSERT_EQ(value, NOT_FOUND);
+    }
     count++;
   }
   ASSERT_OK(iter->status());
@@ -1404,22 +1405,25 @@ TEST_F(DBFlushTest, MemPurgeDeleteAndDeleteRange) {
     ASSERT_OK(iter->status());
     key = (iter->key()).ToString(false);
     value = (iter->value()).ToString(false);
-    if (key.compare(KEY2) == 0)
+    if (key.compare(KEY2) == 0) {
       ASSERT_EQ(value, p_v2);
-    else if (key.compare(KEY3) == 0)
+    } else if (key.compare(KEY3) == 0) {
       ASSERT_EQ(value, p_v3b);
-    else if (key.compare(KEY4) == 0)
+    } else if (key.compare(KEY4) == 0) {
       ASSERT_EQ(value, p_v4);
-    else if (key.compare(KEY5) == 0)
+    } else if (key.compare(KEY5) == 0) {
       ASSERT_EQ(value, p_v5);
-    else
+    } else {
       ASSERT_EQ(value, NOT_FOUND);
+    }
     count++;
   }
 
   // Expected count here is 4: KEY2, KEY3, KEY4, KEY5.
   ASSERT_EQ(count, EXPECTED_COUNT_END);
-  if (iter) delete iter;
+  if (iter) {
+    delete iter;
+  }
 
   Close();
 }
@@ -2499,7 +2503,7 @@ TEST_F(DBFlushTest, TombstoneVisibleInSnapshot) {
 class SimpleTestFlushListener : public EventListener {
  public:
   explicit SimpleTestFlushListener(DBFlushTest* _test) : test_(_test) {}
-  ~SimpleTestFlushListener() override {}
+  ~SimpleTestFlushListener() override = default;
 
   void OnFlushBegin(DB* db, const FlushJobInfo& info) override {
     ASSERT_EQ(static_cast<uint32_t>(0), info.cf_id);
@@ -21,7 +21,7 @@ CompactedDBImpl::CompactedDBImpl(const DBOptions& options,
       version_(nullptr),
       user_comparator_(nullptr) {}
 
-CompactedDBImpl::~CompactedDBImpl() {}
+CompactedDBImpl::~CompactedDBImpl() = default;
 
 size_t CompactedDBImpl::FindFile(const Slice& key) {
   size_t right = files_.num_files - 1;
@@ -8,7 +8,7 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 #include "db/db_impl/db_impl.h"
 
-#include <stdint.h>
+#include <cstdint>
 #ifdef OS_SOLARIS
 #include <alloca.h>
 #endif
@@ -959,7 +959,9 @@ size_t DBImpl::EstimateInMemoryStatsHistorySize() const {
   stats_history_mutex_.AssertHeld();
   size_t size_total =
       sizeof(std::map<uint64_t, std::map<std::string, uint64_t>>);
-  if (stats_history_.size() == 0) return size_total;
+  if (stats_history_.size() == 0) {
+    return size_total;
+  }
   size_t size_per_slice =
       sizeof(uint64_t) + sizeof(std::map<std::string, uint64_t>);
   // non-empty map, stats_history_.begin() guaranteed to exist
@@ -1085,7 +1087,9 @@ bool DBImpl::FindStatsByTime(uint64_t start_time, uint64_t end_time,
                              std::map<std::string, uint64_t>* stats_map) {
   assert(new_time);
   assert(stats_map);
-  if (!new_time || !stats_map) return false;
+  if (!new_time || !stats_map) {
+    return false;
+  }
   // lock when search for start_time
   {
     InstrumentedMutexLock l(&stats_history_mutex_);
@@ -1492,7 +1496,9 @@ int DBImpl::FindMinimumEmptyLevelFitting(
   int minimum_level = level;
   for (int i = level - 1; i > 0; --i) {
     // stop if level i is not empty
-    if (vstorage->NumLevelFiles(i) > 0) break;
+    if (vstorage->NumLevelFiles(i) > 0) {
+      break;
+    }
     // stop if level i is too small (cannot fit the level files)
     if (vstorage->MaxBytesForLevel(i) < vstorage->NumLevelBytes(level)) {
       break;
@@ -4615,9 +4621,9 @@ Status DBImpl::DeleteFile(std::string name) {
                                     read_options, write_options, &edit, &mutex_,
                                     directories_.GetDbDir());
     if (status.ok()) {
-      InstallSuperVersionAndScheduleWork(cfd,
-                                         &job_context.superversion_contexts[0],
+      InstallSuperVersionAndScheduleWork(
+          cfd, job_context.superversion_contexts.data(),
           *cfd->GetLatestMutableCFOptions());
     }
     FindObsoleteFiles(&job_context, false);
   }  // lock released here
@@ -4728,9 +4734,9 @@ Status DBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family,
                                     read_options, write_options, &edit, &mutex_,
                                     directories_.GetDbDir());
     if (status.ok()) {
-      InstallSuperVersionAndScheduleWork(cfd,
-                                         &job_context.superversion_contexts[0],
+      InstallSuperVersionAndScheduleWork(
+          cfd, job_context.superversion_contexts.data(),
          *cfd->GetLatestMutableCFOptions());
     }
     for (auto* deleted_file : deleted_files) {
       deleted_file->being_compacted = false;
@@ -4965,7 +4971,7 @@ Status DB::DestroyColumnFamilyHandle(ColumnFamilyHandle* column_family) {
   return Status::OK();
 }
 
-DB::~DB() {}
+DB::~DB() = default;
 
 Status DBImpl::Close() {
   InstrumentedMutexLock closing_lock_guard(&closing_mutex_);
@@ -4992,7 +4998,7 @@ Status DB::ListColumnFamilies(const DBOptions& db_options,
   return VersionSet::ListColumnFamilies(column_families, name, fs.get());
 }
 
-Snapshot::~Snapshot() {}
+Snapshot::~Snapshot() = default;
 
 Status DestroyDB(const std::string& dbname, const Options& options,
                  const std::vector<ColumnFamilyDescriptor>& column_families) {
@@ -6024,8 +6030,8 @@ Status DBImpl::ClipColumnFamily(ColumnFamilyHandle* column_family,
   if (status.ok()) {
     // DeleteFilesInRanges non-overlap files except L0
     std::vector<RangePtr> ranges;
-    ranges.push_back(RangePtr(nullptr, &begin_key));
-    ranges.push_back(RangePtr(&end_key, nullptr));
+    ranges.emplace_back(nullptr, &begin_key);
+    ranges.emplace_back(&end_key, nullptr);
     status = DeleteFilesInRanges(column_family, ranges.data(), ranges.size());
   }
@@ -6273,7 +6279,7 @@ void DBImpl::NotifyOnExternalFileIngested(
     info.internal_file_path = f.internal_file_path;
     info.global_seqno = f.assigned_seqno;
     info.table_properties = f.table_properties;
-    for (auto listener : immutable_db_options_.listeners) {
+    for (const auto& listener : immutable_db_options_.listeners) {
       listener->OnExternalFileIngested(this, info);
     }
   }
@@ -970,7 +970,7 @@ void DBImpl::NotifyOnFlushBegin(ColumnFamilyData* cfd, FileMetaData* file_meta,
     info.smallest_seqno = file_meta->fd.smallest_seqno;
     info.largest_seqno = file_meta->fd.largest_seqno;
     info.flush_reason = flush_reason;
-    for (auto listener : immutable_db_options_.listeners) {
+    for (const auto& listener : immutable_db_options_.listeners) {
      listener->OnFlushBegin(this, info);
    }
  }
@@ -1002,7 +1002,7 @@ void DBImpl::NotifyOnFlushCompleted(
     for (auto& info : *flush_jobs_info) {
       info->triggered_writes_slowdown = triggered_writes_slowdown;
       info->triggered_writes_stop = triggered_writes_stop;
-      for (auto listener : immutable_db_options_.listeners) {
+      for (const auto& listener : immutable_db_options_.listeners) {
        listener->OnFlushCompleted(this, *info);
      }
      TEST_SYNC_POINT(
@@ -1609,9 +1609,9 @@ Status DBImpl::CompactFilesImpl(
   }
   if (status.ok()) {
     assert(compaction_job.io_status().ok());
-    InstallSuperVersionAndScheduleWork(c->column_family_data(),
-                                       &job_context->superversion_contexts[0],
+    InstallSuperVersionAndScheduleWork(
+        c->column_family_data(), job_context->superversion_contexts.data(),
        *c->mutable_cf_options());
   }
   // status above captures any error during compaction_job.Install, so its ok
   // not check compaction_job.io_status() explicitly if we're not calling
@@ -1731,7 +1731,7 @@ void DBImpl::NotifyOnCompactionBegin(ColumnFamilyData* cfd, Compaction* c,
   {
     CompactionJobInfo info{};
     BuildCompactionJobInfo(cfd, c, st, job_stats, job_id, &info);
-    for (auto listener : immutable_db_options_.listeners) {
+    for (const auto& listener : immutable_db_options_.listeners) {
       listener->OnCompactionBegin(this, info);
     }
     info.status.PermitUncheckedError();
@@ -1760,7 +1760,7 @@ void DBImpl::NotifyOnCompactionCompleted(
   {
     CompactionJobInfo info{};
     BuildCompactionJobInfo(cfd, c, st, compaction_job_stats, job_id, &info);
-    for (auto listener : immutable_db_options_.listeners) {
+    for (const auto& listener : immutable_db_options_.listeners) {
       listener->OnCompactionCompleted(this, info);
     }
   }
@@ -3221,7 +3221,7 @@ Status DBImpl::BackgroundFlush(bool* made_progress, JobContext* job_context,
       column_families_not_to_flush.push_back(cfd);
       continue;
     }
-    superversion_contexts.emplace_back(SuperVersionContext(true));
+    superversion_contexts.emplace_back(true);
     bg_flush_args.emplace_back(cfd, max_memtable_id,
                                &(superversion_contexts.back()), flush_reason);
   }
@@ -3726,9 +3726,9 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
           compaction_released = true;
         });
     io_s = versions_->io_status();
-    InstallSuperVersionAndScheduleWork(c->column_family_data(),
-                                       &job_context->superversion_contexts[0],
+    InstallSuperVersionAndScheduleWork(
+        c->column_family_data(), job_context->superversion_contexts.data(),
        *c->mutable_cf_options());
     ROCKS_LOG_BUFFER(log_buffer, "[%s] Deleted %d files\n",
                      c->column_family_data()->GetName().c_str(),
                      c->num_input_files(0));
@@ -3801,9 +3801,9 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
         });
     io_s = versions_->io_status();
     // Use latest MutableCFOptions
-    InstallSuperVersionAndScheduleWork(c->column_family_data(),
-                                       &job_context->superversion_contexts[0],
+    InstallSuperVersionAndScheduleWork(
+        c->column_family_data(), job_context->superversion_contexts.data(),
        *c->mutable_cf_options());
 
     VersionStorageInfo::LevelSummaryStorage tmp;
     c->column_family_data()->internal_stats()->IncBytesMoved(c->output_level(),
@@ -3896,9 +3896,9 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
         compaction_job.Install(*c->mutable_cf_options(), &compaction_released);
     io_s = compaction_job.io_status();
     if (status.ok()) {
-      InstallSuperVersionAndScheduleWork(c->column_family_data(),
-                                         &job_context->superversion_contexts[0],
+      InstallSuperVersionAndScheduleWork(
+          c->column_family_data(), job_context->superversion_contexts.data(),
          *c->mutable_cf_options());
     }
     *made_progress = true;
     TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:AfterCompaction",
@@ -4045,7 +4045,6 @@ void DBImpl::RemoveManualCompaction(DBImpl::ManualCompactionState* m) {
     ++it;
   }
   assert(false);
-  return;
 }
 
 bool DBImpl::ShouldntRunManualCompaction(ManualCompactionState* m) {
@@ -104,7 +104,9 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
      return status;
    }
 
-    if (i == 0) continue;
+    if (i == 0) {
+      continue;
+    }
     auto prev_f = l0_files[i - 1];
     if (icmp->Compare(prev_f->largest, f->smallest) >= 0) {
       ROCKS_LOG_INFO(immutable_db_options_.info_log,
@@ -148,9 +150,9 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
                                     read_options, write_options, &edit, &mutex_,
                                     directories_.GetDbDir());
     if (status.ok()) {
-      InstallSuperVersionAndScheduleWork(cfd,
-                                         &job_context.superversion_contexts[0],
+      InstallSuperVersionAndScheduleWork(
+          cfd, job_context.superversion_contexts.data(),
          *cfd->GetLatestMutableCFOptions());
     }
   }  // lock released here
   LogFlush(immutable_db_options_.info_log);
@@ -1799,11 +1799,9 @@ Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
   DBOptions db_options(options);
   ColumnFamilyOptions cf_options(options);
   std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(
-      ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
+  column_families.emplace_back(kDefaultColumnFamilyName, cf_options);
   if (db_options.persist_stats_to_disk) {
-    column_families.push_back(
-        ColumnFamilyDescriptor(kPersistentStatsColumnFamilyName, cf_options));
+    column_families.emplace_back(kPersistentStatsColumnFamilyName, cf_options);
   }
   std::vector<ColumnFamilyHandle*> handles;
   Status s = DB::Open(db_options, dbname, column_families, &handles, dbptr);
@@ -1972,7 +1970,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
   handles->clear();
 
   size_t max_write_buffer_size = 0;
-  for (auto cf : column_families) {
+  for (const auto& cf : column_families) {
     max_write_buffer_size =
         std::max(max_write_buffer_size, cf.options.write_buffer_size);
   }
@@ -2044,8 +2042,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
   }
 
   if (s.ok()) {
-    impl->alive_log_files_.push_back(
-        DBImpl::LogFileNumberSize(impl->logfile_number_));
+    impl->alive_log_files_.emplace_back(impl->logfile_number_);
     // In WritePrepared there could be gap in sequence numbers. This breaks
     // the trick we use in kPointInTimeRecovery which assumes the first seq in
     // the log right after the corrupted log is one larger than the last seq
@@ -2093,7 +2090,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
 
   if (s.ok()) {
     // set column family handles
-    for (auto cf : column_families) {
+    for (const auto& cf : column_families) {
       auto cfd =
           impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name);
       if (cfd != nullptr) {
@@ -26,7 +26,7 @@ DBImplReadOnly::DBImplReadOnly(const DBOptions& db_options,
   LogFlush(immutable_db_options_.info_log);
 }
 
-DBImplReadOnly::~DBImplReadOnly() {}
+DBImplReadOnly::~DBImplReadOnly() = default;
 
 // Implementations of the DB interface
 Status DBImplReadOnly::GetImpl(const ReadOptions& read_options,
@@ -293,8 +293,7 @@ Status DB::OpenForReadOnly(const Options& options, const std::string& dbname,
   DBOptions db_options(options);
   ColumnFamilyOptions cf_options(options);
   std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(
-      ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
+  column_families.emplace_back(kDefaultColumnFamilyName, cf_options);
   std::vector<ColumnFamilyHandle*> handles;
 
   s = DBImplReadOnly::OpenForReadOnlyWithoutCheck(
@@ -339,7 +338,7 @@ Status DBImplReadOnly::OpenForReadOnlyWithoutCheck(
                 error_if_wal_file_exists);
   if (s.ok()) {
     // set column family handles
-    for (auto cf : column_families) {
+    for (const auto& cf : column_families) {
       auto cfd =
           impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name);
       if (cfd == nullptr) {
@@ -28,7 +28,7 @@ DBImplSecondary::DBImplSecondary(const DBOptions& db_options,
   LogFlush(immutable_db_options_.info_log);
 }
 
-DBImplSecondary::~DBImplSecondary() {}
+DBImplSecondary::~DBImplSecondary() = default;
 
 Status DBImplSecondary::Recover(
     const std::vector<ColumnFamilyDescriptor>& column_families,
@@ -804,7 +804,7 @@ Status DB::OpenAsSecondary(
   impl->mutex_.Lock();
   s = impl->Recover(column_families, true, false, false);
   if (s.ok()) {
-    for (auto cf : column_families) {
+    for (const auto& cf : column_families) {
      auto cfd =
          impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name);
      if (nullptr == cfd) {
@@ -2135,7 +2135,7 @@ void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* /*cfd*/,
   }
 
   mutex_.Unlock();
-  for (auto listener : immutable_db_options_.listeners) {
+  for (const auto& listener : immutable_db_options_.listeners) {
     listener->OnMemTableSealed(mem_table_info);
   }
   mutex_.Lock();
@@ -2252,7 +2252,7 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
      log_empty_ = true;
      log_dir_synced_ = false;
      logs_.emplace_back(logfile_number_, new_log);
-      alive_log_files_.push_back(LogFileNumberSize(logfile_number_));
+      alive_log_files_.emplace_back(logfile_number_);
    }
  }
 
@@ -280,7 +280,7 @@ class DBTablePropertiesInRangeTest : public DBTestBase,
    // run the query
    TablePropertiesCollection props;
    ColumnFamilyHandle* default_cf = db_->DefaultColumnFamily();
-    EXPECT_OK(db_->GetPropertiesOfTablesInRange(default_cf, &ranges[0],
+    EXPECT_OK(db_->GetPropertiesOfTablesInRange(default_cf, ranges.data(),
                                                 ranges.size(), &props));
 
    const Comparator* ucmp = default_cf->GetComparator();
@@ -17,9 +17,7 @@
 #include "logging/logging.h"
 #include "util/atomic.h"
 
-namespace ROCKSDB_NAMESPACE {
-namespace experimental {
+namespace ROCKSDB_NAMESPACE::experimental {
 
 Status SuggestCompactRange(DB* db, ColumnFamilyHandle* column_family,
                            const Slice* begin, const Slice* end) {
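
The remaining hunks collapse pairs of nested namespace blocks into the C++17 nested namespace definition, so the matching closing braces and end-of-namespace comments collapse as well. A tiny before/after sketch using placeholder namespace names rather than the real macro-expanded ones:

    // Pre-C++17 spelling: two nested blocks, two closing braces.
    namespace outer {
    namespace inner {
    int answer() { return 42; }
    }  // namespace inner
    }  // namespace outer

    // C++17 spelling: one block, one closing brace.
    namespace outer2::inner2 {
    int answer() { return 42; }
    }  // namespace outer2::inner2

    int main() {
      return outer::inner::answer() == outer2::inner2::answer() ? 0 : 1;
    }
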
@ -378,7 +376,7 @@ enum BuiltinSstQueryFilters : char {
|
||||||
|
|
||||||
class SstQueryFilterBuilder {
|
class SstQueryFilterBuilder {
|
||||||
public:
|
public:
|
||||||
virtual ~SstQueryFilterBuilder() {}
|
virtual ~SstQueryFilterBuilder() = default;
|
||||||
virtual void Add(const Slice& key,
|
virtual void Add(const Slice& key,
|
||||||
const KeySegmentsExtractor::Result& extracted,
|
const KeySegmentsExtractor::Result& extracted,
|
||||||
const Slice* prev_key,
|
const Slice* prev_key,
|
||||||
|
@ -395,7 +393,7 @@ class SstQueryFilterConfigImpl : public SstQueryFilterConfig {
|
||||||
const KeySegmentsExtractor::KeyCategorySet& categories)
|
const KeySegmentsExtractor::KeyCategorySet& categories)
|
||||||
: input_(input), categories_(categories) {}
|
: input_(input), categories_(categories) {}
|
||||||
|
|
||||||
virtual ~SstQueryFilterConfigImpl() {}
|
virtual ~SstQueryFilterConfigImpl() = default;
|
||||||
|
|
||||||
virtual std::unique_ptr<SstQueryFilterBuilder> NewBuilder(
|
virtual std::unique_ptr<SstQueryFilterBuilder> NewBuilder(
|
||||||
bool sanity_checks) const = 0;
|
bool sanity_checks) const = 0;
|
||||||
|
@ -1210,5 +1208,4 @@ Status SstQueryFilterConfigsManager::MakeShared(
|
||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace experimental
|
} // namespace ROCKSDB_NAMESPACE::experimental
|
||||||
} // namespace ROCKSDB_NAMESPACE
|
|
||||||
|
|
|
@@ -18,8 +18,7 @@
 #include "util/coding.h"
 #include "util/crc32c.h"

-namespace ROCKSDB_NAMESPACE {
-namespace log {
+namespace ROCKSDB_NAMESPACE::log {

 Reader::Reporter::~Reporter() = default;

@@ -937,5 +936,4 @@ bool FragmentBufferedReader::TryReadFragment(
 }
 }

-} // namespace log
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::log

@@ -19,8 +19,7 @@
 #include "util/random.h"
 #include "utilities/memory_allocators.h"

-namespace ROCKSDB_NAMESPACE {
-namespace log {
+namespace ROCKSDB_NAMESPACE::log {

 // Construct a string of the specified length made out of the supplied
 // partial string.

@@ -1206,8 +1205,7 @@ INSTANTIATE_TEST_CASE_P(
 kBlockSize * 2),
 ::testing::Values(CompressionType::kZSTD)));

-} // namespace log
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::log

 int main(int argc, char** argv) {
 ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();

@@ -18,8 +18,7 @@
 #include "util/crc32c.h"
 #include "util/udt_util.h"

-namespace ROCKSDB_NAMESPACE {
-namespace log {
+namespace ROCKSDB_NAMESPACE::log {

 Writer::Writer(std::unique_ptr<WritableFileWriter>&& dest, uint64_t log_number,
 bool recycle_log_files, bool manual_flush,

@@ -297,5 +296,4 @@ IOStatus Writer::EmitPhysicalRecord(const WriteOptions& write_options,
 return s;
 }

-} // namespace log
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::log
@@ -422,7 +422,7 @@ bool SeqnoToTimeMapping::Append(SequenceNumber seqno, uint64_t time) {
 // TODO: consider changing?
 } else if (pairs_.empty()) {
 enforced_ = true;
-pairs_.push_back({seqno, time});
+pairs_.emplace_back(seqno, time);
 // skip normal enforced check below
 return true;
 } else {

@@ -437,13 +437,13 @@ bool SeqnoToTimeMapping::Append(SequenceNumber seqno, uint64_t time) {
 // reset
 assert(false);
 } else {
-pairs_.push_back({seqno, time});
+pairs_.emplace_back(seqno, time);
 added = true;
 }
 }
 } else if (!enforced_) {
 // Treat like AddUnenforced and fix up below
-pairs_.push_back({seqno, time});
+pairs_.emplace_back(seqno, time);
 added = true;
 } else {
 // Out of order append attempted
@@ -29,8 +29,7 @@ using GFLAGS_NAMESPACE::ParseCommandLineFlags;
 DEFINE_bool(enable_print, false, "Print options generated to console.");
 #endif // GFLAGS

-namespace ROCKSDB_NAMESPACE {
-namespace test {
+namespace ROCKSDB_NAMESPACE::test {
 class StringLogger : public Logger {
 public:
 using Logger::Logv;

@@ -849,8 +848,7 @@ INSTANTIATE_TEST_CASE_P(
 "block_size=1024;"
 "no_block_cache=true;")));

-} // namespace test
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::test
 int main(int argc, char** argv) {
 ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
 ::testing::InitGoogleTest(&argc, argv);
@@ -55,8 +55,7 @@ void* SaveStack(int* /*num_frames*/, int /*first_frames_to_skip*/) {

 #include "port/lang.h"

-namespace ROCKSDB_NAMESPACE {
-namespace port {
+namespace ROCKSDB_NAMESPACE::port {

 namespace {

@@ -413,7 +412,6 @@ void InstallStackTraceHandler() {
 #endif
 }

-} // namespace port
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::port

 #endif
@@ -13,8 +13,7 @@
 #include "table/get_context.h"
 #include "util/coding.h"

-namespace ROCKSDB_NAMESPACE {
-namespace mock {
+namespace ROCKSDB_NAMESPACE::mock {

 KVVector MakeMockFile(std::initializer_list<KVPair> l) { return KVVector(l); }

@@ -347,5 +346,4 @@ void MockTableFactory::AssertLatestFiles(
 }
 }

-} // namespace mock
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::mock

@@ -431,7 +431,7 @@ class SstFileReaderTimestampNotPersistedTest
 sst_name_ = test::PerThreadDBPath("sst_file_ts_not_persisted");
 }

-~SstFileReaderTimestampNotPersistedTest() {}
+~SstFileReaderTimestampNotPersistedTest() = default;
 };

 TEST_F(SstFileReaderTimestampNotPersistedTest, Basic) {
@@ -7,9 +7,7 @@

 #include <array>

-namespace ROCKSDB_NAMESPACE {
-namespace secondary_cache_test_util {
+namespace ROCKSDB_NAMESPACE::secondary_cache_test_util {

 namespace {
 using TestItem = WithCacheType::TestItem;

@@ -92,6 +90,4 @@ const Cache::CacheItemHelper* WithCacheType::GetHelperFail(CacheEntryRole r) {
 return GetHelper(r, true, true);
 }

-} // namespace secondary_cache_test_util
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::secondary_cache_test_util
@@ -13,8 +13,7 @@
 #include <string>
 #include <thread>

-namespace ROCKSDB_NAMESPACE {
-namespace test {
+namespace ROCKSDB_NAMESPACE::test {

 #ifdef OS_WIN
 #include <windows.h>

@@ -103,5 +102,4 @@ bool TestRegex::Matches(const std::string& str) const {
 }
 }

-} // namespace test
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::test

@@ -34,8 +34,7 @@
 void RegisterCustomObjects(int /*argc*/, char** /*argv*/) {}
 #endif

-namespace ROCKSDB_NAMESPACE {
-namespace test {
+namespace ROCKSDB_NAMESPACE::test {

 const uint32_t kDefaultFormatVersion = BlockBasedTableOptions().format_version;
 const std::set<uint32_t> kFooterFormatVersionsToTest{

@@ -749,5 +748,4 @@ void RegisterTestLibrary(const std::string& arg) {
 ObjectRegistry::Default()->AddLibrary("test", RegisterTestObjects, arg);
 }
 }
-} // namespace test
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::test
@@ -56,8 +56,7 @@ ASSERT_FEATURE_COMPAT_HEADER();
 bool pmull_runtime_flag = false;
 #endif

-namespace ROCKSDB_NAMESPACE {
-namespace crc32c {
+namespace ROCKSDB_NAMESPACE::crc32c {

 #if defined(HAVE_POWER8) && defined(HAS_ALTIVEC)
 #ifdef __powerpc64__

@@ -1293,5 +1292,4 @@ uint32_t Crc32cCombine(uint32_t crc1, uint32_t crc2, size_t crc2len) {
 pure_crc2_with_init);
 }

-} // namespace crc32c
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::crc32c

@@ -12,8 +12,7 @@
 #include "util/coding.h"
 #include "util/random.h"

-namespace ROCKSDB_NAMESPACE {
-namespace crc32c {
+namespace ROCKSDB_NAMESPACE::crc32c {

 class CRC {};

@@ -170,8 +169,7 @@ TEST(CRC, Crc32cCombineBigSizeTest) {
 ASSERT_EQ(crc1_2, crc1_2_combine);
 }

-} // namespace crc32c
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::crc32c

 // copied from folly
 const uint64_t FNV_64_HASH_START = 14695981039346656037ULL;
@@ -7,12 +7,10 @@

 #include "util/math.h"

-namespace ROCKSDB_NAMESPACE {
-namespace detail {
+namespace ROCKSDB_NAMESPACE::detail {

 int CountTrailingZeroBitsForSmallEnumSet(uint64_t v) {
 return CountTrailingZeroBits(v);
 }

-} // namespace detail
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::detail

@@ -5,11 +5,7 @@

 #include "util/ribbon_config.h"

-namespace ROCKSDB_NAMESPACE {
-namespace ribbon {
-namespace detail {
+namespace ROCKSDB_NAMESPACE::ribbon::detail {

 // Each instantiation of this struct is sufficiently unique for configuration
 // purposes, and is only instantiated for settings where we support the

@@ -499,8 +495,4 @@ template struct BandingConfigHelper1MaybeSupported<
 template struct BandingConfigHelper1MaybeSupported<kOneIn1000, 64U, /*sm*/ true,
 /*hm*/ true, /*sup*/ true>;

-} // namespace detail
-} // namespace ribbon
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::ribbon::detail
@@ -9,9 +9,8 @@

 #include "rocksdb/slice.h"

-#include <stdio.h>

 #include <algorithm>
+#include <cstdio>

 #include "rocksdb/convenience.h"
 #include "rocksdb/slice_transform.h"

@@ -128,7 +127,7 @@ class CappedPrefixTransform : public SliceTransform {

 class NoopTransform : public SliceTransform {
 public:
-explicit NoopTransform() {}
+explicit NoopTransform() = default;

 static const char* kClassName() { return "rocksdb.Noop"; }
 const char* Name() const override { return kClassName(); }

@@ -173,7 +172,7 @@ static int RegisterBuiltinSliceTransform(ObjectLibrary& library,
 .AddNumber(":"),
 [](const std::string& uri, std::unique_ptr<const SliceTransform>* guard,
 std::string* /*errmsg*/) {
-auto colon = uri.find(":");
+auto colon = uri.find(':');
 auto len = ParseSizeT(uri.substr(colon + 1));
 guard->reset(NewFixedPrefixTransform(len));
 return guard->get();

@@ -193,7 +192,7 @@ static int RegisterBuiltinSliceTransform(ObjectLibrary& library,
 .AddNumber(":"),
 [](const std::string& uri, std::unique_ptr<const SliceTransform>* guard,
 std::string* /*errmsg*/) {
-auto colon = uri.find(":");
+auto colon = uri.find(':');
 auto len = ParseSizeT(uri.substr(colon + 1));
 guard->reset(NewCappedPrefixTransform(len));
 return guard->get();

@@ -169,8 +169,8 @@ TEST_F(PinnableSliceTest, Move) {
 // Unit test for SmallEnumSet
 class SmallEnumSetTest : public testing::Test {
 public:
-SmallEnumSetTest() {}
-~SmallEnumSetTest() {}
+SmallEnumSetTest() = default;
+~SmallEnumSetTest() = default;
 };

 TEST_F(SmallEnumSetTest, SmallEnumSetTest1) {
@@ -9,7 +9,7 @@

 #include "rocksdb/status.h"

-#include <stdio.h>
+#include <cstdio>
 #ifdef OS_WIN
 #include <string.h>
 #endif
@@ -5,13 +5,12 @@
 //
 #include "util/string_util.h"

-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>

 #include <algorithm>
+#include <cerrno>
 #include <cinttypes>
 #include <cmath>
+#include <cstdio>
+#include <cstdlib>
 #include <sstream>
 #include <string>
 #include <utility>

@@ -266,7 +265,9 @@ std::string UnescapeOptionString(const std::string& escaped_string) {
 }

 std::string trim(const std::string& str) {
-if (str.empty()) return std::string();
+if (str.empty()) {
+return std::string();
+}
 size_t start = 0;
 size_t end = str.size() - 1;
 while (isspace(str[start]) != 0 && start < end) {

@@ -346,14 +347,15 @@ uint64_t ParseUint64(const std::string& value) {

 if (endchar < value.length()) {
 char c = value[endchar];
-if (c == 'k' || c == 'K')
+if (c == 'k' || c == 'K') {
 num <<= 10LL;
-else if (c == 'm' || c == 'M')
+} else if (c == 'm' || c == 'M') {
 num <<= 20LL;
-else if (c == 'g' || c == 'G')
+} else if (c == 'g' || c == 'G') {
 num <<= 30LL;
-else if (c == 't' || c == 'T')
+} else if (c == 't' || c == 'T') {
 num <<= 40LL;
+}
 }

 return num;

@@ -371,14 +373,15 @@ int64_t ParseInt64(const std::string& value) {

 if (endchar < value.length()) {
 char c = value[endchar];
-if (c == 'k' || c == 'K')
+if (c == 'k' || c == 'K') {
 num <<= 10LL;
-else if (c == 'm' || c == 'M')
+} else if (c == 'm' || c == 'M') {
 num <<= 20LL;
-else if (c == 'g' || c == 'G')
+} else if (c == 'g' || c == 'G') {
 num <<= 30LL;
-else if (c == 't' || c == 'T')
+} else if (c == 't' || c == 'T') {
 num <<= 40LL;
+}
 }

 return num;

@@ -396,12 +399,13 @@ int ParseInt(const std::string& value) {

 if (endchar < value.length()) {
 char c = value[endchar];
-if (c == 'k' || c == 'K')
+if (c == 'k' || c == 'K') {
 num <<= 10;
-else if (c == 'm' || c == 'M')
+} else if (c == 'm' || c == 'M') {
 num <<= 20;
-else if (c == 'g' || c == 'G')
+} else if (c == 'g' || c == 'G') {
 num <<= 30;
+}
 }

 return num;
@@ -97,7 +97,7 @@ class SimulatedBackgroundTask {

 class ThreadListTest : public testing::Test {
 public:
-ThreadListTest() {}
+ThreadListTest() = default;
 };

 TEST_F(ThreadListTest, GlobalTables) {

@@ -161,7 +161,7 @@ TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) {
 // Verify the number of running threads in each pool.
 ASSERT_OK(env->GetThreadList(&thread_list));
 int running_count[ThreadStatus::NUM_THREAD_TYPES] = {0};
-for (auto thread_status : thread_list) {
+for (const auto& thread_status : thread_list) {
 if (thread_status.cf_name == "pikachu" &&
 thread_status.db_name == "running") {
 running_count[thread_status.thread_type]++;

@@ -189,7 +189,7 @@ TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) {
 for (int i = 0; i < ThreadStatus::NUM_THREAD_TYPES; ++i) {
 running_count[i] = 0;
 }
-for (auto thread_status : thread_list) {
+for (const auto& thread_status : thread_list) {
 if (thread_status.cf_name == "pikachu" &&
 thread_status.db_name == "running") {
 running_count[thread_status.thread_type]++;

@@ -204,7 +204,7 @@ TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) {
 namespace {
 void UpdateStatusCounts(const std::vector<ThreadStatus>& thread_list,
 int operation_counts[], int state_counts[]) {
-for (auto thread_status : thread_list) {
+for (const auto& thread_status : thread_list) {
 operation_counts[thread_status.operation_type]++;
 state_counts[thread_status.state_type]++;
 }
@@ -9,7 +9,7 @@

 #include "util/thread_local.h"

-#include <stdlib.h>
+#include <cstdlib>

 #include "port/likely.h"
 #include "util/mutexlock.h"

@@ -18,11 +18,10 @@
 #include <sys/syscall.h>
 #endif

-#include <stdlib.h>

 #include <algorithm>
 #include <atomic>
 #include <condition_variable>
+#include <cstdlib>
 #include <deque>
 #include <mutex>
 #include <sstream>

@@ -465,7 +464,7 @@ int ThreadPoolImpl::Impl::UnSchedule(void* arg) {

 ThreadPoolImpl::ThreadPoolImpl() : impl_(new Impl()) {}

-ThreadPoolImpl::~ThreadPoolImpl() {}
+ThreadPoolImpl::~ThreadPoolImpl() = default;

 void ThreadPoolImpl::JoinAllThreads() { impl_->JoinThreads(false); }
@@ -20,16 +20,16 @@ static const std::string kValuePlaceHolder = "value";

 class HandleTimestampSizeDifferenceTest : public testing::Test {
 public:
-HandleTimestampSizeDifferenceTest() {}
+HandleTimestampSizeDifferenceTest() = default;

 // Test handler used to collect the column family id and user keys contained
 // in a WriteBatch for test verification. And verifies the value part stays
 // the same if it's available.
 class KeyCollector : public WriteBatch::Handler {
 public:
-explicit KeyCollector() {}
+explicit KeyCollector() = default;

-~KeyCollector() override {}
+~KeyCollector() override = default;

 Status PutCF(uint32_t cf, const Slice& key, const Slice& value) override {
 if (value.compare(kValuePlaceHolder) != 0) {

@@ -90,7 +90,7 @@ class HandleTimestampSizeDifferenceTest : public testing::Test {

 private:
 Status AddKey(uint32_t cf, const Slice& key) {
-keys_.push_back(std::make_pair(cf, key));
+keys_.emplace_back(cf, key);
 return Status::OK();
 }
 std::vector<std::pair<uint32_t, const Slice>> keys_;
@@ -5,8 +5,7 @@

 #include "rocksdb/utilities/agg_merge.h"

-#include <assert.h>
+#include <cassert>

 #include <deque>
 #include <memory>
 #include <type_traits>

@@ -24,7 +23,7 @@

 namespace ROCKSDB_NAMESPACE {
 static std::unordered_map<std::string, std::unique_ptr<Aggregator>> func_map;
-const std::string kUnnamedFuncName = "";
+const std::string kUnnamedFuncName;
 const std::string kErrorFuncName = "kErrorFuncName";

 Status AddAggregator(const std::string& function_name,

@@ -37,7 +36,7 @@ Status AddAggregator(const std::string& function_name,
 return Status::OK();
 }

-AggMergeOperator::AggMergeOperator() {}
+AggMergeOperator::AggMergeOperator() = default;

 std::string EncodeAggFuncAndPayloadNoCheck(const Slice& function_name,
 const Slice& value) {

@@ -123,7 +122,7 @@ class AggMergeOperator::Accumulator {
 }
 std::swap(scratch_, aggregated_);
 values_.clear();
-values_.push_back(aggregated_);
+values_.emplace_back(aggregated_);
 func_ = my_func;
 }
 values_.push_back(my_value);

@@ -5,8 +5,7 @@

 #include "test_agg_merge.h"

-#include <assert.h>
+#include <cassert>

 #include <deque>
 #include <vector>
@@ -384,7 +384,7 @@ class BackupEngineImpl {
 BackupMeta(const BackupMeta&) = delete;
 BackupMeta& operator=(const BackupMeta&) = delete;

-~BackupMeta() {}
+~BackupMeta() = default;

 void RecordTimestamp() {
 // Best effort

@@ -639,11 +639,9 @@ class BackupEngineImpl {
 std::string db_session_id;

 CopyOrCreateWorkItem()
-: src_path(""),
-dst_path(""),
-src_temperature(Temperature::kUnknown),
+: src_temperature(Temperature::kUnknown),
 dst_temperature(Temperature::kUnknown),
-contents(""),
 src_env(nullptr),
 dst_env(nullptr),
 src_env_options(),

@@ -651,10 +649,7 @@ class BackupEngineImpl {
 rate_limiter(nullptr),
 size_limit(0),
 stats(nullptr),
-src_checksum_func_name(kUnknownFileChecksumFuncName),
-src_checksum_hex(""),
-db_id(""),
-db_session_id("") {}
+src_checksum_func_name(kUnknownFileChecksumFuncName) {}

 CopyOrCreateWorkItem(const CopyOrCreateWorkItem&) = delete;
 CopyOrCreateWorkItem& operator=(const CopyOrCreateWorkItem&) = delete;

@@ -727,12 +722,7 @@ class BackupEngineImpl {
 std::string dst_path;
 std::string dst_relative;
 BackupAfterCopyOrCreateWorkItem()
-: shared(false),
-needed_to_copy(false),
-backup_env(nullptr),
-dst_path_tmp(""),
-dst_path(""),
-dst_relative("") {}
+: shared(false), needed_to_copy(false), backup_env(nullptr) {}

 BackupAfterCopyOrCreateWorkItem(
 BackupAfterCopyOrCreateWorkItem&& o) noexcept {

@@ -773,7 +763,7 @@ class BackupEngineImpl {
 std::string from_file;
 std::string to_file;
 std::string checksum_hex;
-RestoreAfterCopyOrCreateWorkItem() : checksum_hex("") {}
+RestoreAfterCopyOrCreateWorkItem() {}
 RestoreAfterCopyOrCreateWorkItem(std::future<CopyOrCreateResult>&& _result,
 const std::string& _from_file,
 const std::string& _to_file,

@@ -874,7 +864,7 @@ class BackupEngineImplThreadSafe : public BackupEngine,
 BackupEngineImplThreadSafe(const BackupEngineOptions& options, Env* db_env,
 bool read_only = false)
 : impl_(options, db_env, read_only) {}
-~BackupEngineImplThreadSafe() override {}
+~BackupEngineImplThreadSafe() override = default;

 using BackupEngine::CreateNewBackupWithMetadata;
 IOStatus CreateNewBackupWithMetadata(const CreateBackupOptions& options,
@@ -858,8 +858,8 @@ class BackupEngineTest : public testing::Test {
 for (auto& dir : child_dirs) {
 dir = "private/" + dir;
 }
-child_dirs.push_back("shared"); // might not exist
-child_dirs.push_back("shared_checksum"); // might not exist
+child_dirs.emplace_back("shared"); // might not exist
+child_dirs.emplace_back("shared_checksum"); // might not exist
 for (auto& dir : child_dirs) {
 std::vector<std::string> children;
 test_backup_env_->GetChildren(backupdir_ + "/" + dir, &children)

@@ -927,7 +927,7 @@ class BackupEngineTest : public testing::Test {
 void DeleteLogFiles() {
 std::vector<std::string> delete_logs;
 ASSERT_OK(db_chroot_env_->GetChildren(dbname_, &delete_logs));
-for (auto f : delete_logs) {
+for (const auto& f : delete_logs) {
 uint64_t number;
 FileType type;
 bool ok = ParseFileName(f, &number, &type);

@@ -1925,7 +1925,7 @@ TEST_F(BackupEngineTest, BackupOptions) {
 ASSERT_OK(file_manager_->FileExists(OptionsPath(backupdir_, i) + name));
 ASSERT_OK(backup_chroot_env_->GetChildren(OptionsPath(backupdir_, i),
 &filenames));
-for (auto fn : filenames) {
+for (const auto& fn : filenames) {
 if (fn.compare(0, 7, "OPTIONS") == 0) {
 ASSERT_EQ(name, fn);
 }

@@ -2664,7 +2664,7 @@ TEST_F(BackupEngineTest, DeleteTmpFiles) {
 assert(false);
 }
 CloseDBAndBackupEngine();
-for (std::string file_or_dir : tmp_files_and_dirs) {
+for (const std::string& file_or_dir : tmp_files_and_dirs) {
 if (file_manager_->FileExists(file_or_dir) != Status::NotFound()) {
 FAIL() << file_or_dir << " was expected to be deleted." << cleanup_fn;
 }

@@ -2698,7 +2698,7 @@ class BackupEngineRateLimitingTestWithParam
 int /* 0 = single threaded, 1 = multi threaded*/,
 std::pair<uint64_t, uint64_t> /* limits */>> {
 public:
-BackupEngineRateLimitingTestWithParam() {}
+BackupEngineRateLimitingTestWithParam() = default;
 };

 uint64_t const MB = 1024 * 1024;

@@ -2848,7 +2848,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingVerifyBackup) {
 true /* include_file_details */));

 std::uint64_t bytes_read_during_verify_backup = 0;
-for (BackupFileInfo backup_file_info : backup_info.file_details) {
+for (const BackupFileInfo& backup_file_info : backup_info.file_details) {
 bytes_read_during_verify_backup += backup_file_info.size;
 }
 auto start_verify_backup = special_env->NowMicros();

@@ -2986,7 +2986,7 @@ class BackupEngineRateLimitingTestWithParam2
 public testing::WithParamInterface<
 std::tuple<std::pair<uint64_t, uint64_t> /* limits */>> {
 public:
-BackupEngineRateLimitingTestWithParam2() {}
+BackupEngineRateLimitingTestWithParam2() = default;
 };

 INSTANTIATE_TEST_CASE_P(

@@ -4212,7 +4212,7 @@ TEST_F(BackupEngineTest, FileTemperatures) {
 std::vector<LiveFileStorageInfo> infos;
 ASSERT_OK(
 db_->GetLiveFilesStorageInfo(LiveFilesStorageInfoOptions(), &infos));
-for (auto info : infos) {
+for (const auto& info : infos) {
 if (info.file_type == kTableFile) {
 manifest_temps.emplace(info.file_number, info.temperature);
 manifest_temp_counts[info.temperature]++;

@@ -4379,7 +4379,7 @@ TEST_F(BackupEngineTest, ExcludeFiles) {
 MaybeExcludeBackupFile* files_end) {
 for (auto* f = files_begin; f != files_end; ++f) {
 std::string s = StringSplit(f->info.relative_file, '/').back();
-s = s.substr(0, s.find("_"));
+s = s.substr(0, s.find('_'));
 int64_t num = std::strtoll(s.c_str(), nullptr, /*base*/ 10);
 // Exclude if not a match
 f->exclude_decision = (num % modulus) != remainder;
@@ -13,8 +13,7 @@
 #include "rocksdb/system_clock.h"
 #include "test_util/sync_point.h"

-namespace ROCKSDB_NAMESPACE {
-namespace blob_db {
+namespace ROCKSDB_NAMESPACE::blob_db {

 BlobIndexCompactionFilterBase::~BlobIndexCompactionFilterBase() {
 if (blob_file_) {

@@ -488,5 +487,4 @@ BlobIndexCompactionFilterFactoryGC::CreateCompactionFilter(
 std::move(user_comp_filter_from_factory), current_time, statistics()));
 }

-} // namespace blob_db
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::blob_db

@@ -11,8 +11,7 @@
 #include "logging/logging.h"
 #include "utilities/blob_db/blob_db_impl.h"

-namespace ROCKSDB_NAMESPACE {
-namespace blob_db {
+namespace ROCKSDB_NAMESPACE::blob_db {

 Status BlobDB::Open(const Options& options, const BlobDBOptions& bdb_options,
 const std::string& dbname, BlobDB** blob_db) {

@@ -20,8 +19,7 @@ Status BlobDB::Open(const Options& options, const BlobDBOptions& bdb_options,
 DBOptions db_options(options);
 ColumnFamilyOptions cf_options(options);
 std::vector<ColumnFamilyDescriptor> column_families;
-column_families.push_back(
-ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
+column_families.emplace_back(kDefaultColumnFamilyName, cf_options);
 std::vector<ColumnFamilyHandle*> handles;
 Status s = BlobDB::Open(db_options, bdb_options, dbname, column_families,
 &handles, blob_db);

@@ -108,5 +106,4 @@ void BlobDBOptions::Dump(Logger* log) const {
 disable_background_tasks);
 }

-} // namespace blob_db
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::blob_db
@@ -48,8 +48,7 @@ namespace {
 int kBlockBasedTableVersionFormat = 2;
 } // end namespace

-namespace ROCKSDB_NAMESPACE {
-namespace blob_db {
+namespace ROCKSDB_NAMESPACE::blob_db {

 bool BlobFileComparator::operator()(
 const std::shared_ptr<BlobFile>& lhs,

@@ -1461,7 +1460,6 @@ void BlobDBImpl::MultiGet(const ReadOptions& _read_options, size_t num_keys,
 if (snapshot_created) {
 db_->ReleaseSnapshot(read_options.snapshot);
 }
-return;
 }

 bool BlobDBImpl::SetSnapshotIfNeeded(ReadOptions* read_options) {

@@ -1602,8 +1600,8 @@ Status BlobDBImpl::GetRawBlobFromFile(const Slice& key, uint64_t file_number,
 } else {
 buf.reserve(static_cast<size_t>(record_size));
 s = reader->Read(IOOptions(), record_offset,
-static_cast<size_t>(record_size), &blob_record, &buf[0],
-nullptr);
+static_cast<size_t>(record_size), &blob_record,
+buf.data(), nullptr);
 }
 RecordTick(statistics_, BLOB_DB_BLOB_FILE_BYTES_READ, blob_record.size());
 }

@@ -1770,7 +1768,7 @@ std::pair<bool, int64_t> BlobDBImpl::SanityCheck(bool aborted) {

 uint64_t now = EpochNow();

-for (auto blob_file_pair : blob_files_) {
+for (const auto& blob_file_pair : blob_files_) {
 auto blob_file = blob_file_pair.second;
 std::ostringstream buf;

@@ -1930,7 +1928,7 @@ std::pair<bool, int64_t> BlobDBImpl::EvictExpiredFiles(bool aborted) {
 uint64_t now = EpochNow();
 {
 ReadLock rl(&mutex_);
-for (auto p : blob_files_) {
+for (const auto& p : blob_files_) {
 auto& blob_file = p.second;
 ReadLock file_lock(&blob_file->mutex_);
 if (blob_file->HasTTL() && !blob_file->Obsolete() &&

@@ -1977,7 +1975,7 @@ Status BlobDBImpl::SyncBlobFiles(const WriteOptions& write_options) {
 std::vector<std::shared_ptr<BlobFile>> process_files;
 {
 ReadLock rl(&mutex_);
-for (auto fitr : open_ttl_files_) {
+for (const auto& fitr : open_ttl_files_) {
 process_files.push_back(fitr);
 }
 if (open_non_ttl_file_ != nullptr) {

@@ -2006,7 +2004,9 @@ Status BlobDBImpl::SyncBlobFiles(const WriteOptions& write_options) {
 }

 std::pair<bool, int64_t> BlobDBImpl::ReclaimOpenFiles(bool aborted) {
-if (aborted) return std::make_pair(false, -1);
+if (aborted) {
+return std::make_pair(false, -1);
+}

 if (open_file_count_.load() < kOpenFilesTrigger) {
 return std::make_pair(true, -1);

@@ -2017,7 +2017,9 @@ std::pair<bool, int64_t> BlobDBImpl::ReclaimOpenFiles(bool aborted) {
 ReadLock rl(&mutex_);
 for (auto const& ent : blob_files_) {
 auto bfile = ent.second;
-if (bfile->last_access_.load() == -1) continue;
+if (bfile->last_access_.load() == -1) {
+continue;
+}

 WriteLock lockbfile_w(&bfile->mutex_);
 CloseRandomAccessLocked(bfile);

@@ -2100,7 +2102,7 @@ std::pair<bool, int64_t> BlobDBImpl::DeleteObsoleteFiles(bool aborted) {
 // put files back into obsolete if for some reason, delete failed
 if (!tobsolete.empty()) {
 WriteLock wl(&mutex_);
-for (auto bfile : tobsolete) {
+for (const auto& bfile : tobsolete) {
 blob_files_.insert(std::make_pair(bfile->BlobFileNumber(), bfile));
 obsolete_files_.push_front(bfile);
 }

@@ -2264,5 +2266,4 @@ void BlobDBImpl::TEST_ProcessCompactionJobInfo(const CompactionJobInfo& info) {

 #endif // !NDEBUG

-} // namespace blob_db
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::blob_db
@@ -12,8 +12,7 @@

 // BlobDBImpl methods to get snapshot of files, e.g. for replication.

-namespace ROCKSDB_NAMESPACE {
-namespace blob_db {
+namespace ROCKSDB_NAMESPACE::blob_db {

 Status BlobDBImpl::DisableFileDeletions() {
 // Disable base DB file deletions.

@@ -72,7 +71,7 @@ Status BlobDBImpl::GetLiveFiles(std::vector<std::string>& ret,
 return s;
 }
 ret.reserve(ret.size() + blob_files_.size());
-for (auto bfile_pair : blob_files_) {
+for (const auto& bfile_pair : blob_files_) {
 auto blob_file = bfile_pair.second;
 // Path should be relative to db_name, but begin with slash.
 ret.emplace_back(

@@ -87,7 +86,7 @@ void BlobDBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
 // Hold a lock in the beginning to avoid updates to base DB during the call
 ReadLock rl(&mutex_);
 db_->GetLiveFilesMetaData(metadata);
-for (auto bfile_pair : blob_files_) {
+for (const auto& bfile_pair : blob_files_) {
 auto blob_file = bfile_pair.second;
 LiveFileMetaData filemetadata;
 filemetadata.size = blob_file->GetFileSize();

@@ -105,5 +104,4 @@ void BlobDBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
 }
 }

-} // namespace blob_db
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::blob_db
@@ -31,8 +31,7 @@
 #include "utilities/blob_db/blob_db_impl.h"
 #include "utilities/fault_injection_env.h"

-namespace ROCKSDB_NAMESPACE {
-namespace blob_db {
+namespace ROCKSDB_NAMESPACE::blob_db {

 class BlobDBTest : public testing::Test {
 public:

@@ -607,7 +606,7 @@ TEST_F(BlobDBTest, EnableDisableCompressionGC) {
 VerifyDB(data);

 blob_files = blob_db_impl()->TEST_GetBlobFiles();
-for (auto bfile : blob_files) {
+for (const auto &bfile : blob_files) {
 ASSERT_EQ(kNoCompression, bfile->GetCompressionType());
 }

@@ -627,7 +626,7 @@ TEST_F(BlobDBTest, EnableDisableCompressionGC) {
 VerifyDB(data);

 blob_files = blob_db_impl()->TEST_GetBlobFiles();
-for (auto bfile : blob_files) {
+for (const auto &bfile : blob_files) {
 ASSERT_EQ(kSnappyCompression, bfile->GetCompressionType());
 }
 }

@@ -678,7 +677,7 @@ TEST_F(BlobDBTest, ChangeCompressionGC) {

 blob_db_impl()->TEST_DeleteObsoleteFiles();
 blob_files = blob_db_impl()->TEST_GetBlobFiles();
-for (auto bfile : blob_files) {
+for (const auto &bfile : blob_files) {
 ASSERT_EQ(kSnappyCompression, bfile->GetCompressionType());
 }

@@ -695,7 +694,7 @@ TEST_F(BlobDBTest, ChangeCompressionGC) {

 blob_db_impl()->TEST_DeleteObsoleteFiles();
 blob_files = blob_db_impl()->TEST_GetBlobFiles();
-for (auto bfile : blob_files) {
+for (const auto &bfile : blob_files) {
 ASSERT_EQ(kNoCompression, bfile->GetCompressionType());
 }

@@ -719,7 +718,7 @@ TEST_F(BlobDBTest, ChangeCompressionGC) {

 blob_db_impl()->TEST_DeleteObsoleteFiles();
 blob_files = blob_db_impl()->TEST_GetBlobFiles();
-for (auto bfile : blob_files) {
+for (const auto &bfile : blob_files) {
 ASSERT_EQ(kLZ4Compression, bfile->GetCompressionType());
 }
 }

@@ -731,8 +730,8 @@ TEST_F(BlobDBTest, MultipleWriters) {

 std::vector<port::Thread> workers;
 std::vector<std::map<std::string, std::string>> data_set(10);
-for (uint32_t i = 0; i < 10; i++)
-workers.push_back(port::Thread(
+for (uint32_t i = 0; i < 10; i++) {
+workers.emplace_back(
 [&](uint32_t id) {
 Random rnd(301 + id);
 for (int j = 0; j < 100; j++) {

@@ -747,7 +746,8 @@ TEST_F(BlobDBTest, MultipleWriters) {
 }
 }
 },
-i));
+i);
+}
 std::map<std::string, std::string> data;
 for (size_t i = 0; i < 10; i++) {
 workers[i].join();

@@ -1375,8 +1375,8 @@ TEST_F(BlobDBTest, UserCompactionFilter) {
 constexpr uint64_t kMinValueSize = 1 << 6;
 constexpr uint64_t kMaxValueSize = 1 << 8;
 constexpr uint64_t kMinBlobSize = 1 << 7;
-static_assert(kMinValueSize < kMinBlobSize, "");
-static_assert(kMaxValueSize > kMinBlobSize, "");
+static_assert(kMinValueSize < kMinBlobSize);
+static_assert(kMaxValueSize > kMinBlobSize);

 BlobDBOptions bdb_options;
 bdb_options.min_blob_size = kMinBlobSize;

@@ -1747,8 +1747,8 @@ TEST_F(BlobDBTest, GarbageCollection) {
 constexpr uint64_t kSmallValueSize = 1 << 6;
 constexpr uint64_t kLargeValueSize = 1 << 8;
 constexpr uint64_t kMinBlobSize = 1 << 7;
-static_assert(kSmallValueSize < kMinBlobSize, "");
-static_assert(kLargeValueSize > kMinBlobSize, "");
+static_assert(kSmallValueSize < kMinBlobSize);
+static_assert(kLargeValueSize > kMinBlobSize);

 constexpr size_t kBlobsPerFile = 8;
 constexpr size_t kNumBlobFiles = kNumPuts / kBlobsPerFile;

@@ -1999,7 +1999,7 @@ TEST_F(BlobDBTest, EvictExpiredFile) {
 ASSERT_EQ(0, blob_db_impl()->TEST_GetObsoleteFiles().size());
 // Make sure we don't return garbage value after blob file being evicted,
 // but the blob index still exists in the LSM tree.
-std::string val = "";
+std::string val;
 ASSERT_TRUE(blob_db_->Get(ReadOptions(), "foo", &val).IsNotFound());
 ASSERT_EQ("", val);
 }

@@ -2413,8 +2413,7 @@ TEST_F(BlobDBTest, SyncBlobFileBeforeCloseIOError) {
 ASSERT_TRUE(s.IsIOError());
 }

-} // namespace blob_db
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::blob_db

 // A black-box test for the ttl wrapper around rocksdb
 int main(int argc, char **argv) {
@@ -5,9 +5,8 @@

 #include "utilities/blob_db/blob_dump_tool.h"

-#include <stdio.h>

 #include <cinttypes>
+#include <cstdio>
 #include <iostream>
 #include <memory>
 #include <string>

@@ -21,8 +20,7 @@
 #include "util/coding.h"
 #include "util/string_util.h"

-namespace ROCKSDB_NAMESPACE {
-namespace blob_db {
+namespace ROCKSDB_NAMESPACE::blob_db {

 BlobDumpTool::BlobDumpTool()
 : reader_(nullptr), buffer_(nullptr), buffer_size_(0) {}

@@ -275,5 +273,4 @@ std::string BlobDumpTool::GetString(std::pair<T, T> p) {
 return "(" + std::to_string(p.first) + ", " + std::to_string(p.second) + ")";
 }

-} // namespace blob_db
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::blob_db
@@ -5,10 +5,9 @@
 // (found in the LICENSE.Apache file in the root directory).
 #include "utilities/blob_db/blob_file.h"

-#include <stdio.h>
-
 #include <algorithm>
 #include <cinttypes>
+#include <cstdio>
 #include <memory>

 #include "db/column_family.h"

@@ -19,9 +18,7 @@
 #include "logging/logging.h"
 #include "utilities/blob_db/blob_db_impl.h"

-namespace ROCKSDB_NAMESPACE {
-
-namespace blob_db {
+namespace ROCKSDB_NAMESPACE::blob_db {

 BlobFile::BlobFile(const BlobDBImpl* p, const std::string& bdir, uint64_t fn,
 Logger* info_log)

@@ -120,9 +117,11 @@ Status BlobFile::ReadFooter(BlobLogFooter* bf) {
 } else {
 buf.reserve(BlobLogFooter::kSize + 10);
 s = ra_file_reader_->Read(IOOptions(), footer_offset, BlobLogFooter::kSize,
-&result, &buf[0], nullptr);
+&result, buf.data(), nullptr);
+}
+if (!s.ok()) {
+return s;
 }
-if (!s.ok()) return s;
 if (result.size() != BlobLogFooter::kSize) {
 // should not happen
 return Status::IOError("EOF reached before footer");

@@ -242,7 +241,7 @@ Status BlobFile::ReadMetadata(const std::shared_ptr<FileSystem>& fs,
 } else {
 header_buf.reserve(BlobLogHeader::kSize);
 s = file_reader->Read(IOOptions(), 0, BlobLogHeader::kSize, &header_slice,
-&header_buf[0], nullptr);
+header_buf.data(), nullptr);
 }
 if (!s.ok()) {
 ROCKS_LOG_ERROR(

@@ -283,8 +282,8 @@ Status BlobFile::ReadMetadata(const std::shared_ptr<FileSystem>& fs,
 } else {
 footer_buf.reserve(BlobLogFooter::kSize);
 s = file_reader->Read(IOOptions(), file_size - BlobLogFooter::kSize,
-BlobLogFooter::kSize, &footer_slice, &footer_buf[0],
-nullptr);
+BlobLogFooter::kSize, &footer_slice,
+footer_buf.data(), nullptr);
 }
 if (!s.ok()) {
 ROCKS_LOG_ERROR(

@@ -309,5 +308,4 @@ Status BlobFile::ReadMetadata(const std::shared_ptr<FileSystem>& fs,
 return Status::OK();
 }

-} // namespace blob_db
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::blob_db
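The `&buf[0]` to `buf.data()` changes above use the non-const `std::string::data()` overload added in C++17 to obtain a writable buffer pointer, instead of forming `&buf[0]`, which is easy to misuse when the container is empty. A small illustrative sketch; `ReadIntoBuffer` is a hypothetical helper, not code from the diff:

    #include <cstddef>
    #include <cstring>
    #include <string>

    // Fill a std::string used as a byte buffer through data() rather than &buf[0].
    std::string ReadIntoBuffer(const char* src, std::size_t n) {
      std::string buf;
      buf.resize(n);
      std::memcpy(buf.data(), src, n);  // non-const data() is writable since C++17
      return buf;
    }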
@@ -13,8 +13,7 @@
 #include "utilities/cassandra/format.h"
 #include "utilities/cassandra/merge_operator.h"

-namespace ROCKSDB_NAMESPACE {
-namespace cassandra {
+namespace ROCKSDB_NAMESPACE::cassandra {
 static std::unordered_map<std::string, OptionTypeInfo>
 cassandra_filter_type_info = {
 {"purge_ttl_on_expiration",

@@ -102,5 +101,4 @@ int RegisterCassandraObjects(ObjectLibrary& library,
 size_t num_types;
 return static_cast<int>(library.GetFactoryCount(&num_types));
 }
-} // namespace cassandra
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::cassandra

@@ -11,8 +11,7 @@
 #include "utilities/cassandra/serialize.h"
 #include "utilities/cassandra/test_utils.h"

-namespace ROCKSDB_NAMESPACE {
-namespace cassandra {
+namespace ROCKSDB_NAMESPACE::cassandra {

 TEST(ColumnTest, Column) {
 char data[4] = {'d', 'a', 't', 'a'};

@@ -367,8 +366,7 @@ TEST(RowValueTest, ExpireTtlShouldConvertExpiredColumnsToTombstones) {
 compacted.ConvertExpiredColumnsToTombstones(&changed);
 EXPECT_FALSE(changed);
 }
-} // namespace cassandra
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::cassandra

 int main(int argc, char** argv) {
 ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();

@@ -18,8 +18,7 @@
 #include "utilities/cassandra/test_utils.h"
 #include "utilities/merge_operators.h"

-namespace ROCKSDB_NAMESPACE {
-namespace cassandra {
+namespace ROCKSDB_NAMESPACE::cassandra {

 // Path to the database on file system
 const std::string kDbName = test::PerThreadDBPath("cassandra_functional_test");

@@ -434,8 +433,7 @@ TEST_F(CassandraFunctionalTest, LoadCompactionFilterFactory) {
 ASSERT_TRUE(opts->purge_ttl_on_expiration);
 }

-} // namespace cassandra
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::cassandra

 int main(int argc, char** argv) {
 ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
@@ -9,8 +9,7 @@
 #include "utilities/cassandra/format.h"
 #include "utilities/cassandra/test_utils.h"

-namespace ROCKSDB_NAMESPACE {
-namespace cassandra {
+namespace ROCKSDB_NAMESPACE::cassandra {

 class RowValueMergeTest : public testing::Test {};

@@ -88,8 +87,7 @@ TEST(RowValueMergeTest, MergeWithRowTombstone) {
 EXPECT_EQ(merged.LastModifiedTime(), 17);
 }

-} // namespace cassandra
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::cassandra

 int main(int argc, char** argv) {
 ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();

@@ -6,8 +6,7 @@
 #include "test_util/testharness.h"
 #include "utilities/cassandra/serialize.h"

-namespace ROCKSDB_NAMESPACE {
-namespace cassandra {
+namespace ROCKSDB_NAMESPACE::cassandra {

 TEST(SerializeTest, SerializeI64) {
 std::string dest;

@@ -154,8 +153,7 @@ TEST(SerializeTest, DeserializeI8) {
 EXPECT_EQ(-128, Deserialize<int8_t>(dest.c_str(), offset));
 }

-} // namespace cassandra
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::cassandra

 int main(int argc, char** argv) {
 ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();

@@ -11,8 +11,7 @@

 #include "utilities/cassandra/serialize.h"

-namespace ROCKSDB_NAMESPACE {
-namespace cassandra {
+namespace ROCKSDB_NAMESPACE::cassandra {
 namespace {
 const int32_t kDefaultLocalDeletionTime = std::numeric_limits<int32_t>::max();
 const int64_t kDefaultMarkedForDeleteAt = std::numeric_limits<int64_t>::min();

@@ -363,5 +362,4 @@ RowValue RowValue::Merge(std::vector<RowValue>&& values) {
 return RowValue(std::move(columns), last_modified_time);
 }

-} // namespace cassandra
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::cassandra
@@ -5,8 +5,7 @@

 #include "merge_operator.h"

-#include <assert.h>
-
+#include <cassert>
 #include <memory>

 #include "rocksdb/merge_operator.h"

@@ -15,8 +14,7 @@
 #include "utilities/cassandra/format.h"
 #include "utilities/merge_operators.h"

-namespace ROCKSDB_NAMESPACE {
-namespace cassandra {
+namespace ROCKSDB_NAMESPACE::cassandra {
 static std::unordered_map<std::string, OptionTypeInfo>
 merge_operator_options_info = {
 {"gc_grace_period_in_seconds",

@@ -75,6 +73,4 @@ bool CassandraValueMergeOperator::PartialMergeMulti(
 return true;
 }

-} // namespace cassandra
-
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::cassandra

@@ -5,8 +5,7 @@

 #include "test_utils.h"

-namespace ROCKSDB_NAMESPACE {
-namespace cassandra {
+namespace ROCKSDB_NAMESPACE::cassandra {
 const char kData[] = {'d', 'a', 't', 'a'};
 const char kExpiringData[] = {'e', 'd', 'a', 't', 'a'};
 const int32_t kTtl = 86400;

@@ -65,5 +64,4 @@ int64_t ToMicroSeconds(int64_t seconds) { return seconds * (int64_t)1000000; }
 int32_t ToSeconds(int64_t microseconds) {
 return (int32_t)(microseconds / (int64_t)1000000);
 }
-} // namespace cassandra
-} // namespace ROCKSDB_NAMESPACE
+} // namespace ROCKSDB_NAMESPACE::cassandra
@@ -112,7 +112,7 @@ class CheckpointTest : public testing::Test {
 ColumnFamilyOptions cf_opts(options);
 size_t cfi = handles_.size();
 handles_.resize(cfi + cfs.size());
-for (auto cf : cfs) {
+for (const auto& cf : cfs) {
 ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
 }
 }

@@ -141,7 +141,7 @@ class CheckpointTest : public testing::Test {
 EXPECT_EQ(cfs.size(), options.size());
 std::vector<ColumnFamilyDescriptor> column_families;
 for (size_t i = 0; i < cfs.size(); ++i) {
-column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i]));
+column_families.emplace_back(cfs[i], options[i]);
 }
 DBOptions db_opts = DBOptions(options[0]);
 return DB::Open(db_opts, dbname_, column_families, &handles_, &db_);

@@ -507,7 +507,7 @@ TEST_F(CheckpointTest, CheckpointCF) {
 cfs = {kDefaultColumnFamilyName, "one", "two", "three", "four", "five"};
 std::vector<ColumnFamilyDescriptor> column_families;
 for (size_t i = 0; i < cfs.size(); ++i) {
-column_families.push_back(ColumnFamilyDescriptor(cfs[i], options));
+column_families.emplace_back(cfs[i], options);
 }
 ASSERT_OK(DB::Open(options, snapshot_name_, column_families, &cphandles,
 &snapshotDB));

@@ -565,7 +565,7 @@ TEST_F(CheckpointTest, CheckpointCFNoFlush) {
 cfs = {kDefaultColumnFamilyName, "one", "two", "three", "four", "five"};
 std::vector<ColumnFamilyDescriptor> column_families;
 for (size_t i = 0; i < cfs.size(); ++i) {
-column_families.push_back(ColumnFamilyDescriptor(cfs[i], options));
+column_families.emplace_back(cfs[i], options);
 }
 ASSERT_OK(DB::Open(options, snapshot_name_, column_families, &cphandles,
 &snapshotDB));

@@ -717,12 +717,9 @@ TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing2PC) {

 TransactionDB* snapshotDB;
 std::vector<ColumnFamilyDescriptor> column_families;
-column_families.push_back(
-ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions()));
-column_families.push_back(
-ColumnFamilyDescriptor("CFA", ColumnFamilyOptions()));
-column_families.push_back(
-ColumnFamilyDescriptor("CFB", ColumnFamilyOptions()));
+column_families.emplace_back(kDefaultColumnFamilyName, ColumnFamilyOptions());
+column_families.emplace_back("CFA", ColumnFamilyOptions());
+column_families.emplace_back("CFB", ColumnFamilyOptions());
 std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> cf_handles;
 ASSERT_OK(TransactionDB::Open(options, txn_db_options, snapshot_name_,
 column_families, &cf_handles, &snapshotDB));
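The push_back-to-emplace_back changes above construct the element in place from the given arguments instead of building a temporary descriptor and copying or moving it into the vector. A small illustrative sketch; the `Descriptor` type here is hypothetical, not the RocksDB ColumnFamilyDescriptor:

    #include <string>
    #include <utility>
    #include <vector>

    struct Descriptor {
      std::string name;
      int options;
      Descriptor(std::string n, int o) : name(std::move(n)), options(o) {}
    };

    void Fill(std::vector<Descriptor>& out) {
      // Builds a temporary Descriptor, then moves it into the vector.
      out.push_back(Descriptor("default", 0));
      // Forwards the arguments and constructs the element directly in place.
      out.emplace_back("cf_a", 1);
    }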
@@ -214,10 +214,11 @@ Status EnvMirror::NewSequentialFile(const std::string& f,
 Status as = a_->NewSequentialFile(f, &mf->a_, options);
 Status bs = b_->NewSequentialFile(f, &mf->b_, options);
 assert(as == bs);
-if (as.ok())
+if (as.ok()) {
 r->reset(mf);
-else
+} else {
 delete mf;
+}
 return as;
 }

@@ -231,25 +232,29 @@ Status EnvMirror::NewRandomAccessFile(const std::string& f,
 Status as = a_->NewRandomAccessFile(f, &mf->a_, options);
 Status bs = b_->NewRandomAccessFile(f, &mf->b_, options);
 assert(as == bs);
-if (as.ok())
+if (as.ok()) {
 r->reset(mf);
-else
+} else {
 delete mf;
+}
 return as;
 }

 Status EnvMirror::NewWritableFile(const std::string& f,
 std::unique_ptr<WritableFile>* r,
 const EnvOptions& options) {
-if (f.find("/proc/") == 0) return a_->NewWritableFile(f, r, options);
+if (f.find("/proc/") == 0) {
+return a_->NewWritableFile(f, r, options);
+}
 WritableFileMirror* mf = new WritableFileMirror(f, options);
 Status as = a_->NewWritableFile(f, &mf->a_, options);
 Status bs = b_->NewWritableFile(f, &mf->b_, options);
 assert(as == bs);
-if (as.ok())
+if (as.ok()) {
 r->reset(mf);
-else
+} else {
 delete mf;
+}
 return as;
 }

@@ -257,16 +262,18 @@ Status EnvMirror::ReuseWritableFile(const std::string& fname,
 const std::string& old_fname,
 std::unique_ptr<WritableFile>* r,
 const EnvOptions& options) {
-if (fname.find("/proc/") == 0)
+if (fname.find("/proc/") == 0) {
 return a_->ReuseWritableFile(fname, old_fname, r, options);
+}
 WritableFileMirror* mf = new WritableFileMirror(fname, options);
 Status as = a_->ReuseWritableFile(fname, old_fname, &mf->a_, options);
 Status bs = b_->ReuseWritableFile(fname, old_fname, &mf->b_, options);
 assert(as == bs);
-if (as.ok())
+if (as.ok()) {
 r->reset(mf);
-else
+} else {
 delete mf;
+}
 return as;
 }
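The EnvMirror hunks above add braces around single-statement if/else bodies, the readability style the modernizer enforces. A minimal illustrative sketch of the same shape; `Dispose` and its arguments are hypothetical, not code from the diff:

    #include <iostream>

    // Braced branches keep the control flow explicit even when each branch
    // is a single statement, so a later edit cannot silently fall outside it.
    void Dispose(bool ok, int* ptr) {
      if (ok) {
        std::cout << *ptr << '\n';
      } else {
        delete ptr;
      }
    }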
@@ -71,7 +71,7 @@ Status Truncate(Env* env, const std::string& filename, uint64_t length) {

 // Trim the tailing "/" in the end of `str`
 std::string TrimDirname(const std::string& str) {
-size_t found = str.find_last_not_of("/");
+size_t found = str.find_last_not_of('/');
 if (found == std::string::npos) {
 return str;
 }

@@ -528,7 +528,7 @@ Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() {
 }

 for (auto& pair : map_copy) {
-for (std::string name : pair.second) {
+for (const std::string& name : pair.second) {
 Status s = DeleteFile(pair.first + "/" + name);
 if (!s.ok()) {
 return s;
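Several hunks here and below rewrite range-for loops from `for (auto x : c)` or `for (std::string x : c)` to the const-reference form, which binds to each element in place instead of copying it on every iteration. A minimal illustrative sketch; `TotalLength` and its argument are hypothetical, not from the diff:

    #include <cstddef>
    #include <string>
    #include <vector>

    std::size_t TotalLength(const std::vector<std::string>& names) {
      std::size_t total = 0;
      // `const auto&` avoids a std::string copy per element; plain `auto` would copy.
      for (const auto& name : names) {
        total += name.size();
      }
      return total;
    }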
@@ -33,7 +33,7 @@

 namespace ROCKSDB_NAMESPACE {

-const std::string kNewFileNoOverwrite = "";
+const std::string kNewFileNoOverwrite;

 // Assume a filename, and not a directory name like "/foo/bar/"
 std::string TestFSGetDirName(const std::string filename) {

@@ -47,7 +47,7 @@ std::string TestFSGetDirName(const std::string filename) {

 // Trim the tailing "/" in the end of `str`
 std::string TestFSTrimDirname(const std::string& str) {
-size_t found = str.find_last_not_of("/");
+size_t found = str.find_last_not_of('/');
 if (found == std::string::npos) {
 return str;
 }

@@ -74,7 +74,6 @@ void CalculateTypedChecksum(const ChecksumType& checksum_type, const char* data,
 uint32_t v = XXH32(data, size, 0);
 PutFixed32(checksum, v);
 }
-return;
 }

 IOStatus FSFileState::DropUnsyncedData() {

@@ -1014,7 +1013,7 @@ IOStatus FaultInjectionTestFS::InjectThreadSpecificReadError(

 bool FaultInjectionTestFS::TryParseFileName(const std::string& file_name,
 uint64_t* number, FileType* type) {
-std::size_t found = file_name.find_last_of("/");
+std::size_t found = file_name.find_last_of('/');
 std::string file = file_name.substr(found);
 return ParseFileName(file, number, type);
 }
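The `find_last_not_of("/")`-to-`find_last_not_of('/')` changes (and the `find('.')` change further below) pass a single character instead of a one-character C string, since the search only needs one character rather than a character set. A small illustrative sketch; `TrimTrailingSlashes` is a hypothetical helper, not the RocksDB function:

    #include <string>

    // Strip trailing '/' characters from a path-like string.
    std::string TrimTrailingSlashes(const std::string& path) {
      // The char overload looks for one character; the const char* overload
      // treats its argument as a set of characters to match against.
      auto last = path.find_last_not_of('/');
      if (last == std::string::npos) {
        return path;
      }
      return path.substr(0, last + 1);
    }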
@@ -65,7 +65,7 @@ class MemoryTest : public testing::Test {
 if (db_impl != nullptr) {
 ASSERT_OK(db_impl->TEST_GetAllImmutableCFOptions(&iopts_map));
 }
-for (auto pair : iopts_map) {
+for (const auto& pair : iopts_map) {
 GetCachePointersFromTableFactory(pair.second->table_factory.get(),
 cache_set);
 }
@@ -52,7 +52,9 @@ bool SortList::PartialMergeMulti(const Slice& /*key*/,
 void SortList::MakeVector(std::vector<int>& operand, Slice slice) const {
 do {
 const char* begin = slice.data_;
-while (*slice.data_ != ',' && *slice.data_) slice.data_++;
+while (*slice.data_ != ',' && *slice.data_) {
+slice.data_++;
+}
 operand.push_back(std::stoi(std::string(begin, slice.data_)));
 } while (0 != *slice.data_++);
 }
@@ -7,8 +7,7 @@

 #include "stringappend.h"

-#include <assert.h>
-
+#include <cassert>
 #include <memory>

 #include "rocksdb/merge_operator.h"

@@ -5,8 +5,7 @@

 #include "stringappend2.h"

-#include <assert.h>
-
+#include <cassert>
 #include <memory>
 #include <string>
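Many include changes in this diff swap the C compatibility headers (`<assert.h>`, `<stdio.h>`, `<stdlib.h>`, `<ctype.h>`) for their C++ counterparts (`<cassert>`, `<cstdio>`, `<cstdlib>`, `<cctype>`), which provide the same facilities in namespace std. A minimal illustrative sketch, not code from the diff:

    #include <cassert>  // instead of <assert.h>
    #include <cstdio>   // instead of <stdio.h>

    int main() {
      int value = 42;
      assert(value > 0);                   // assert is a macro either way
      std::printf("value = %d\n", value);  // the <c...> headers declare names in std
      return 0;
    }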
@@ -5,7 +5,7 @@

 #include "rocksdb/utilities/object_registry.h"

-#include <ctype.h>
+#include <cctype>

 #include "logging/logging.h"
 #include "port/lang.h"
@@ -119,7 +119,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate1) {
 {
 std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
 it->SeekToFirst();
-for (std::string key : keys) {
+for (const std::string& key : keys) {
 ASSERT_TRUE(it->Valid());
 ASSERT_EQ(key, it->key().ToString());
 it->Next();

@@ -199,7 +199,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate2) {
 {
 std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
 it->SeekToFirst();
-for (std::string key : keys) {
+for (const std::string& key : keys) {
 ASSERT_TRUE(it->Valid());
 ASSERT_EQ(key, it->key().ToString());
 it->Next();

@@ -285,7 +285,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate3) {
 {
 std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
 it->SeekToFirst();
-for (std::string key : keys) {
+for (const std::string& key : keys) {
 ASSERT_TRUE(it->Valid());
 ASSERT_EQ(key, it->key().ToString());
 it->Next();

@@ -371,7 +371,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate4) {
 {
 std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
 it->SeekToFirst();
-for (std::string key : keys) {
+for (const std::string& key : keys) {
 ASSERT_TRUE(it->Valid());
 ASSERT_EQ(key, it->key().ToString());
 it->Next();

@@ -538,7 +538,7 @@ TEST_F(DBOptionChangeMigrationTest, CompactedSrcToUniversal) {
 {
 std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
 it->SeekToFirst();
-for (std::string key : keys) {
+for (const std::string& key : keys) {
 ASSERT_TRUE(it->Valid());
 ASSERT_EQ(key, it->key().ToString());
 it->Next();
@@ -121,8 +121,8 @@ TEST_F(OptionsUtilTest, SaveAndLoadWithCacheCheck) {

 std::vector<std::string> cf_names;
 cf_names.push_back(kDefaultColumnFamilyName);
-cf_names.push_back("cf_sample");
-cf_names.push_back("cf_plain_table_sample");
+cf_names.emplace_back("cf_sample");
+cf_names.emplace_back("cf_plain_table_sample");
 // Saving DB in file
 const std::string kFileName = "OPTIONS-LOAD_CACHE_123456";
 ASSERT_OK(PersistRocksDBOptions(WriteOptions(), db_opt, cf_names, cf_opts,

@@ -151,8 +151,8 @@ TEST_F(OptionsUtilTest, SaveAndLoadWithCacheCheck) {
 namespace {
 class DummyTableFactory : public TableFactory {
 public:
-DummyTableFactory() {}
-~DummyTableFactory() override {}
+DummyTableFactory() = default;
+~DummyTableFactory() override = default;

 const char* Name() const override { return "DummyTableFactory"; }

@@ -183,8 +183,8 @@ class DummyTableFactory : public TableFactory {

 class DummyMergeOperator : public MergeOperator {
 public:
-DummyMergeOperator() {}
-~DummyMergeOperator() override {}
+DummyMergeOperator() = default;
+~DummyMergeOperator() override = default;

 bool FullMergeV2(const MergeOperationInput& /*merge_in*/,
 MergeOperationOutput* /*merge_out*/) const override {

@@ -203,8 +203,8 @@ class DummyMergeOperator : public MergeOperator {

 class DummySliceTransform : public SliceTransform {
 public:
-DummySliceTransform() {}
-~DummySliceTransform() override {}
+DummySliceTransform() = default;
+~DummySliceTransform() override = default;

 // Return the name of this transformation.
 const char* Name() const override { return "DummySliceTransform"; }
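The `{}`-to-`= default` rewrites throughout this diff let the compiler generate the special member instead of supplying an empty user-provided body; among other things, a user-provided default constructor prevents the type from being trivially default constructible, while a defaulted one does not. A minimal illustrative sketch with hypothetical classes, not taken from the diff:

    #include <type_traits>

    struct UserProvided {
      UserProvided() {}  // user-provided: no longer trivially default constructible
    };

    struct Defaulted {
      Defaulted() = default;   // compiler-generated, stays trivial
      ~Defaulted() = default;
    };

    static_assert(!std::is_trivially_default_constructible<UserProvided>::value, "");
    static_assert(std::is_trivially_default_constructible<Defaulted>::value, "");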
@@ -78,7 +78,7 @@ bool IsCacheFile(const std::string& file) {
 // check if the file has .rc suffix
 // Unfortunately regex support across compilers is not even, so we use simple
 // string parsing
-size_t pos = file.find(".");
+size_t pos = file.find('.');
 if (pos == std::string::npos) {
 return false;
 }

@@ -97,7 +97,7 @@ Status BlockCacheTier::CleanupCacheFolder(const std::string& folder) {
 }

 // cleanup files with the patter :digi:.rc
-for (auto file : files) {
+for (const auto& file : files) {
 if (IsCacheFile(file)) {
 // cache file
 Info(opt_.log, "Removing file %s.", file.c_str());

@@ -79,7 +79,7 @@ struct CacheRecordHeader {
 };

 struct CacheRecord {
-CacheRecord() {}
+CacheRecord() = default;
 CacheRecord(const Slice& key, const Slice& val)
 : hdr_(MAGIC, static_cast<uint32_t>(key.size()),
 static_cast<uint32_t>(val.size())),
@@ -5,8 +5,7 @@
 //
 #include "utilities/persistent_cache/hash_table.h"

-#include <stdlib.h>
-
+#include <cstdlib>
 #include <iostream>
 #include <set>
 #include <string>

@@ -17,14 +16,13 @@
 #include "util/random.h"
 #include "utilities/persistent_cache/hash_table_evictable.h"

-
 namespace ROCKSDB_NAMESPACE {

 struct HashTableTest : public testing::Test {
 ~HashTableTest() override { map_.Clear(&HashTableTest::ClearNode); }

 struct Node {
-Node() {}
+Node() = default;
 explicit Node(const uint64_t key, const std::string& val = std::string())
 : key_(key), val_(val) {}

@@ -55,7 +53,7 @@ struct EvictableHashTableTest : public testing::Test {
 }

 struct Node : LRUElement<Node> {
-Node() {}
+Node() = default;
 explicit Node(const uint64_t key, const std::string& val = std::string())
 : key_(key), val_(val) {}
@@ -82,9 +82,9 @@ bool PersistentCacheTier::Erase(const Slice& /*key*/) {

 std::string PersistentCacheTier::PrintStats() {
 std::ostringstream os;
-for (auto tier_stats : Stats()) {
+for (const auto& tier_stats : Stats()) {
 os << "---- next tier -----" << std::endl;
-for (auto stat : tier_stats) {
+for (const auto& stat : tier_stats) {
 os << stat.first << ": " << stat.second << std::endl;
 }
 }
@@ -157,7 +157,7 @@ class SimCacheImpl : public SimCache {
 hit_times_(0),
 stats_(nullptr) {}

-~SimCacheImpl() override {}
+~SimCacheImpl() override = default;

 const char* Name() const override { return "SimCache"; }

@@ -175,7 +175,7 @@ TEST_F(SimCacheTest, SimCacheLogging) {
 sim_cache->StopActivityLogging();
 ASSERT_OK(sim_cache->GetActivityLoggingStatus());

-std::string file_contents = "";
+std::string file_contents;
 ASSERT_OK(ReadFileToString(env_, log_file, &file_contents));
 std::istringstream contents(file_contents);
@@ -7,10 +7,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

-#include <stdio.h>
+#include "utilities/table_properties_collectors/compact_on_deletion_collector.h"

 #include <algorithm>
 #include <cmath>
+#include <cstdio>
 #include <vector>

 #include "port/stack_trace.h"

@@ -19,7 +20,6 @@
 #include "rocksdb/utilities/table_properties_collectors.h"
 #include "test_util/testharness.h"
 #include "util/random.h"
-#include "utilities/table_properties_collectors/compact_on_deletion_collector.h"

 namespace ROCKSDB_NAMESPACE {
@@ -34,9 +34,8 @@ struct LockInfo {
 txn_ids.push_back(id);
 }
 LockInfo(const LockInfo& lock_info)
-: exclusive(lock_info.exclusive),
-txn_ids(lock_info.txn_ids),
-expiration_time(lock_info.expiration_time) {}
+= default;
 void operator=(const LockInfo& lock_info) {
 exclusive = lock_info.exclusive;
 txn_ids = lock_info.txn_ids;
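The last hunk replaces a hand-written memberwise copy constructor with `= default`, which generates exactly that memberwise copy. A minimal illustrative sketch; the `Info` struct below is hypothetical, not the RocksDB LockInfo:

    #include <cstdint>
    #include <vector>

    struct Info {
      bool exclusive = false;
      std::vector<uint64_t> ids;
      int64_t expiration = 0;

      Info() = default;
      // Equivalent to copying exclusive, ids, and expiration member by member.
      Info(const Info& other) = default;
    };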