// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "db/db_impl/db_impl.h"
#include "db/db_test_util.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "table/cuckoo/cuckoo_table_factory.h"
#include "table/cuckoo/cuckoo_table_reader.h"
#include "table/meta_blocks.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/cast_util.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

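// Test fixture that runs each test against a DB configured with the cuckoo
// table format. Tests flush memtables into cuckoo SST files and then verify
// lookups, compactions, and table properties against those files.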
class CuckooTableDBTest : public testing::Test {
 private:
  std::string dbname_;
  Env* env_;
  DB* db_;

 public:
  CuckooTableDBTest() : env_(Env::Default()) {
    dbname_ = test::PerThreadDBPath("cuckoo_table_db_test");
    EXPECT_OK(DestroyDB(dbname_, Options()));
    db_ = nullptr;
    Reopen();
  }

  ~CuckooTableDBTest() override {
    delete db_;
    EXPECT_OK(DestroyDB(dbname_, Options()));
  }

  Options CurrentOptions() {
    Options options;
    options.level_compaction_dynamic_level_bytes = false;
    options.table_factory.reset(NewCuckooTableFactory());
    options.memtable_factory.reset(NewHashLinkListRepFactory(4, 0, 3, true));
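    // The cuckoo table reader only supports mmap-based reads, so mmap reads
    // must stay enabled; the hash-based memtable above also rules out
    // concurrent memtable writes.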
    options.allow_mmap_reads = true;
    options.create_if_missing = true;
    options.allow_concurrent_memtable_write = false;
    return options;
  }

  DBImpl* dbfull() { return static_cast_with_check<DBImpl>(db_); }

  // The following util methods are copied from plain_table_db_test.
  void Reopen(Options* options = nullptr) {
    delete db_;
    db_ = nullptr;
    Options opts;
    if (options != nullptr) {
      opts = *options;
    } else {
      opts = CurrentOptions();
      opts.create_if_missing = true;
    }
    ASSERT_OK(DB::Open(opts, dbname_, &db_));
  }

  void DestroyAndReopen(Options* options) {
    assert(options);
    ASSERT_OK(db_->Close());
    delete db_;
    db_ = nullptr;
    ASSERT_OK(DestroyDB(dbname_, *options));
    Reopen(options);
  }

  Status Put(const Slice& k, const Slice& v) {
    return db_->Put(WriteOptions(), k, v);
  }

  Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }

  std::string Get(const std::string& k) {
    ReadOptions options;
    std::string result;
    Status s = db_->Get(options, k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }

  int NumTableFilesAtLevel(int level) {
    std::string property;
    EXPECT_TRUE(db_->GetProperty(
        "rocksdb.num-files-at-level" + std::to_string(level), &property));
    return atoi(property.c_str());
  }

  // Return spread of files per level
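  // as a comma-separated string, e.g. "1,2" means one file in level-0 and
  // two files in level-1.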
  std::string FilesPerLevel() {
    std::string result;
    size_t last_non_zero_offset = 0;
    for (int level = 0; level < db_->NumberLevels(); level++) {
      int f = NumTableFilesAtLevel(level);
      char buf[100];
      snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
      result += buf;
      if (f > 0) {
        last_non_zero_offset = result.size();
      }
    }
    result.resize(last_non_zero_offset);
    return result;
  }
};

TEST_F(CuckooTableDBTest, Flush) {
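  // Each flush should produce one cuckoo SST file whose entry count and
  // unique id are visible through the table properties.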
  // Try with empty DB first.
  ASSERT_TRUE(dbfull() != nullptr);
  ASSERT_EQ("NOT_FOUND", Get("key2"));

  // Add some values to db.
  Options options = CurrentOptions();
  Reopen(&options);

  ASSERT_OK(Put("key1", "v1"));
  ASSERT_OK(Put("key2", "v2"));
  ASSERT_OK(Put("key3", "v3"));
  ASSERT_OK(dbfull()->TEST_FlushMemTable());

  TablePropertiesCollection ptc;
  ASSERT_OK(static_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc));
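  // Every flushed file should expose a valid, distinct unique id through
  // its table properties.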
  VerifySstUniqueIds(ptc);
  ASSERT_EQ(1U, ptc.size());
  ASSERT_EQ(3U, ptc.begin()->second->num_entries);
  ASSERT_EQ("1", FilesPerLevel());

  ASSERT_EQ("v1", Get("key1"));
  ASSERT_EQ("v2", Get("key2"));
  ASSERT_EQ("v3", Get("key3"));
  ASSERT_EQ("NOT_FOUND", Get("key4"));

  // Now add more keys and flush.
  ASSERT_OK(Put("key4", "v4"));
  ASSERT_OK(Put("key5", "v5"));
  ASSERT_OK(Put("key6", "v6"));
  ASSERT_OK(dbfull()->TEST_FlushMemTable());

  ASSERT_OK(static_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc));
  VerifySstUniqueIds(ptc);
  ASSERT_EQ(2U, ptc.size());
  auto row = ptc.begin();
  ASSERT_EQ(3U, row->second->num_entries);
  ASSERT_EQ(3U, (++row)->second->num_entries);
  ASSERT_EQ("2", FilesPerLevel());
  ASSERT_EQ("v1", Get("key1"));
  ASSERT_EQ("v2", Get("key2"));
  ASSERT_EQ("v3", Get("key3"));
  ASSERT_EQ("v4", Get("key4"));
  ASSERT_EQ("v5", Get("key5"));
  ASSERT_EQ("v6", Get("key6"));
  ASSERT_OK(Delete("key6"));
  ASSERT_OK(Delete("key5"));
  ASSERT_OK(Delete("key4"));
  ASSERT_OK(dbfull()->TEST_FlushMemTable());
  ASSERT_OK(static_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc));
  VerifySstUniqueIds(ptc);
  ASSERT_EQ(3U, ptc.size());
  row = ptc.begin();
  ASSERT_EQ(3U, row->second->num_entries);
  ASSERT_EQ(3U, (++row)->second->num_entries);
  ASSERT_EQ(3U, (++row)->second->num_entries);
  ASSERT_EQ("3", FilesPerLevel());
  ASSERT_EQ("v1", Get("key1"));
  ASSERT_EQ("v2", Get("key2"));
  ASSERT_EQ("v3", Get("key3"));
  ASSERT_EQ("NOT_FOUND", Get("key4"));
  ASSERT_EQ("NOT_FOUND", Get("key5"));
  ASSERT_EQ("NOT_FOUND", Get("key6"));
}

TEST_F(CuckooTableDBTest, FlushWithDuplicateKeys) {
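  // Overwriting a key within one memtable must leave only the latest value
  // in the flushed cuckoo file.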
  Options options = CurrentOptions();
  Reopen(&options);
  ASSERT_OK(Put("key1", "v1"));
  ASSERT_OK(Put("key2", "v2"));
  ASSERT_OK(Put("key1", "v3"));  // Duplicate
  ASSERT_OK(dbfull()->TEST_FlushMemTable());

  TablePropertiesCollection ptc;
  ASSERT_OK(static_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc));
  VerifySstUniqueIds(ptc);
  ASSERT_EQ(1U, ptc.size());
  ASSERT_EQ(2U, ptc.begin()->second->num_entries);
  ASSERT_EQ("1", FilesPerLevel());
  ASSERT_EQ("v3", Get("key1"));
  ASSERT_EQ("v2", Get("key2"));
}

namespace {
static std::string Key(int i) {
  char buf[100];
  snprintf(buf, sizeof(buf), "key_______%06d", i);
  return std::string(buf);
}
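
// Encode i as its raw 8-byte representation; the suite only runs on
// little-endian platforms (see main()), matching test::Uint64Comparator().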
static std::string Uint64Key(uint64_t i) {
  std::string str;
  str.resize(8);
  memcpy(str.data(), static_cast<void*>(&i), 8);
  return str;
}
}  // namespace

TEST_F(CuckooTableDBTest, Uint64Comparator) {
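  // Exercise the cuckoo table with fixed-width 8-byte binary keys under
  // test::Uint64Comparator(): puts, deletes, and updates across flushes.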
  Options options = CurrentOptions();
  options.comparator = test::Uint64Comparator();
  DestroyAndReopen(&options);

  ASSERT_OK(Put(Uint64Key(1), "v1"));
  ASSERT_OK(Put(Uint64Key(2), "v2"));
  ASSERT_OK(Put(Uint64Key(3), "v3"));
  ASSERT_OK(dbfull()->TEST_FlushMemTable());

  ASSERT_EQ("v1", Get(Uint64Key(1)));
  ASSERT_EQ("v2", Get(Uint64Key(2)));
  ASSERT_EQ("v3", Get(Uint64Key(3)));
  ASSERT_EQ("NOT_FOUND", Get(Uint64Key(4)));

  // Add more keys.
  ASSERT_OK(Delete(Uint64Key(2)));  // Delete.
  ASSERT_OK(dbfull()->TEST_FlushMemTable());
  ASSERT_OK(Put(Uint64Key(3), "v0"));  // Update.
  ASSERT_OK(Put(Uint64Key(4), "v4"));
  ASSERT_OK(dbfull()->TEST_FlushMemTable());
  ASSERT_EQ("v1", Get(Uint64Key(1)));
  ASSERT_EQ("NOT_FOUND", Get(Uint64Key(2)));
  ASSERT_EQ("v0", Get(Uint64Key(3)));
  ASSERT_EQ("v4", Get(Uint64Key(4)));
}

TEST_F(CuckooTableDBTest, CompactionIntoMultipleFiles) {
  // Create a big L0 file and check it compacts into multiple files in L1.
  Options options = CurrentOptions();
  options.write_buffer_size = 270 << 10;
  // Two SST files should be created, each containing 14 keys.
  // Number of buckets will be 16. Total size ~156 KB.
  options.target_file_size_base = 160 << 10;
  Reopen(&options);

  // Write 28 values, each 10016 B ~ 10KB
  for (int idx = 0; idx < 28; ++idx) {
    ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + char(idx))));
  }
  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
  ASSERT_EQ("1", FilesPerLevel());
  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
                                        true /* disallow trivial move */));
  ASSERT_EQ("0,2", FilesPerLevel());
  for (int idx = 0; idx < 28; ++idx) {
    ASSERT_EQ(std::string(10000, 'a' + char(idx)), Get(Key(idx)));
  }
}

TEST_F(CuckooTableDBTest, SameKeyInsertedInTwoDifferentFilesAndCompacted) {
  // Insert same key twice so that they go to different SST files. Then wait
  // for compaction and check if the latest value is stored and old value
  // removed.
  Options options = CurrentOptions();
  options.write_buffer_size = 100 << 10;  // 100KB
  options.level0_file_num_compaction_trigger = 2;
  Reopen(&options);

  // Write 11 values, each 10016 B
  for (int idx = 0; idx < 11; ++idx) {
    ASSERT_OK(Put(Key(idx), std::string(10000, 'a')));
  }
  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
  ASSERT_EQ("1", FilesPerLevel());

  // Generate one more file in level-0; this should trigger level-0
  // compaction.
  for (int idx = 0; idx < 11; ++idx) {
    ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + char(idx))));
  }
  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));

  ASSERT_EQ("0,1", FilesPerLevel());
  for (int idx = 0; idx < 11; ++idx) {
    ASSERT_EQ(std::string(10000, 'a' + char(idx)), Get(Key(idx)));
  }
}

TEST_F(CuckooTableDBTest, AdaptiveTable) {
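  // The adaptive table factory picks a reader per file based on the format
  // written to disk, so one DB can mix cuckoo, plain, and block-based SSTs.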
  Options options = CurrentOptions();

  // Ensure options compatible with PlainTable
  options.prefix_extractor.reset(NewCappedPrefixTransform(8));

  // Write some keys using cuckoo table.
  options.table_factory.reset(NewCuckooTableFactory());
  Reopen(&options);

  ASSERT_OK(Put("key1", "v1"));
  ASSERT_OK(Put("key2", "v2"));
  ASSERT_OK(Put("key3", "v3"));
  ASSERT_OK(dbfull()->TEST_FlushMemTable());

  // Write some keys using plain table.
  std::shared_ptr<TableFactory> block_based_factory(
      NewBlockBasedTableFactory());
  std::shared_ptr<TableFactory> plain_table_factory(NewPlainTableFactory());
  std::shared_ptr<TableFactory> cuckoo_table_factory(NewCuckooTableFactory());
  options.create_if_missing = false;
  options.table_factory.reset(
      NewAdaptiveTableFactory(plain_table_factory, block_based_factory,
                              plain_table_factory, cuckoo_table_factory));
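  // Arguments are (table_factory_to_write, block_based_factory,
  // plain_table_factory, cuckoo_table_factory): new files are written with
  // the first factory; the others serve reads for their respective formats.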
  Reopen(&options);
  ASSERT_OK(Put("key4", "v4"));
  ASSERT_OK(Put("key1", "v5"));
  ASSERT_OK(dbfull()->TEST_FlushMemTable());

  // Write some keys using block based table.
  options.table_factory.reset(
      NewAdaptiveTableFactory(block_based_factory, block_based_factory,
                              plain_table_factory, cuckoo_table_factory));
  Reopen(&options);
  ASSERT_OK(Put("key5", "v6"));
  ASSERT_OK(Put("key2", "v7"));
  ASSERT_OK(dbfull()->TEST_FlushMemTable());

  ASSERT_EQ("v5", Get("key1"));
  ASSERT_EQ("v7", Get("key2"));
  ASSERT_EQ("v3", Get("key3"));
  ASSERT_EQ("v4", Get("key4"));
  ASSERT_EQ("v6", Get("key5"));
}
}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
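  // The cuckoo table format assumes little-endian byte order, so the whole
  // suite is skipped on big-endian platforms.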
  if (ROCKSDB_NAMESPACE::port::kLittleEndian) {
    ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
  } else {
    fprintf(stderr, "SKIPPED as Cuckoo table doesn't support Big Endian\n");
    return 0;
  }
}