// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include <algorithm>
#include <array>
#include <map>
#include <string>

#include "db/blob_index.h"
#include "db/column_family.h"
#include "db/db_impl/db_impl.h"
#include "db/flush_job.h"
#include "db/version_set.h"
#include "file/writable_file_writer.h"
#include "rocksdb/cache.h"
#include "rocksdb/write_buffer_manager.h"
#include "table/mock_table.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

// TODO(icanadi) Mock out everything else:
// 1. VersionSet
// 2. Memtable
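
// Test fixture: recovers a real VersionSet from an on-disk MANIFEST (written
// by NewDB() below) but plugs in a MockTableFactory, so flush output can be
// inspected in memory instead of being read back from SST files.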
class FlushJobTest : public testing::Test {
 public:
  FlushJobTest()
      : env_(Env::Default()),
        fs_(std::make_shared<LegacyFileSystemWrapper>(env_)),
        dbname_(test::PerThreadDBPath("flush_job_test")),
        options_(),
        db_options_(options_),
        column_family_names_({kDefaultColumnFamilyName, "foo", "bar"}),
        table_cache_(NewLRUCache(50000, 16)),
        write_buffer_manager_(db_options_.db_write_buffer_size),
        shutting_down_(false),
        mock_table_factory_(new mock::MockTableFactory()) {
    EXPECT_OK(env_->CreateDirIfMissing(dbname_));
    db_options_.db_paths.emplace_back(dbname_,
                                      std::numeric_limits<uint64_t>::max());
    db_options_.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
    // TODO(icanadi) Remove this once we mock out VersionSet
    NewDB();
    std::vector<ColumnFamilyDescriptor> column_families;
    cf_options_.table_factory = mock_table_factory_;
    for (const auto& cf_name : column_family_names_) {
      column_families.emplace_back(cf_name, cf_options_);
    }

    db_options_.env = env_;
    db_options_.fs = fs_;
    versions_.reset(new VersionSet(dbname_, &db_options_, env_options_,
                                   table_cache_.get(), &write_buffer_manager_,
                                   &write_controller_,
                                   /*block_cache_tracer=*/nullptr));
    EXPECT_OK(versions_->Recover(column_families, false));
  }
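
  // Writes a minimal DB skeleton to dbname_: an IDENTITY file, MANIFEST-000001
  // containing the initial VersionEdit plus one edit per extra column family,
  // and a CURRENT file pointing at that manifest. This gives
  // VersionSet::Recover() in the constructor something to recover from.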
  void NewDB() {
    SetIdentityFile(env_, dbname_);
    VersionEdit new_db;
    if (db_options_.write_dbid_to_manifest) {
      DBImpl* impl = new DBImpl(DBOptions(), dbname_);
      std::string db_id;
      impl->GetDbIdentityFromIdentityFile(&db_id);
      new_db.SetDBId(db_id);
    }
    new_db.SetLogNumber(0);
    new_db.SetNextFile(2);
    new_db.SetLastSequence(0);

    autovector<VersionEdit> new_cfs;
    SequenceNumber last_seq = 1;
    uint32_t cf_id = 1;
    for (size_t i = 1; i != column_family_names_.size(); ++i) {
      VersionEdit new_cf;
      new_cf.AddColumnFamily(column_family_names_[i]);
      new_cf.SetColumnFamily(cf_id++);
      new_cf.SetLogNumber(0);
      new_cf.SetNextFile(2);
      new_cf.SetLastSequence(last_seq++);
      new_cfs.emplace_back(new_cf);
    }

    const std::string manifest = DescriptorFileName(dbname_, 1);
    std::unique_ptr<WritableFile> file;
    Status s = env_->NewWritableFile(
        manifest, &file, env_->OptimizeForManifestWrite(env_options_));
    ASSERT_OK(s);
    std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
        NewLegacyWritableFileWrapper(std::move(file)), manifest, EnvOptions()));
    {
      log::Writer log(std::move(file_writer), 0, false);
      std::string record;
      new_db.EncodeTo(&record);
      s = log.AddRecord(record);

      for (const auto& e : new_cfs) {
        record.clear();
        e.EncodeTo(&record);
        s = log.AddRecord(record);
        ASSERT_OK(s);
      }
    }
    ASSERT_OK(s);
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(env_, dbname_, 1, nullptr);
  }

  Env* env_;
  std::shared_ptr<FileSystem> fs_;
  std::string dbname_;
  EnvOptions env_options_;
  Options options_;
  ImmutableDBOptions db_options_;
  const std::vector<std::string> column_family_names_;
  std::shared_ptr<Cache> table_cache_;
  WriteController write_controller_;
  WriteBufferManager write_buffer_manager_;
  ColumnFamilyOptions cf_options_;
  std::unique_ptr<VersionSet> versions_;
  InstrumentedMutex mutex_;
  std::atomic<bool> shutting_down_;
  std::shared_ptr<mock::MockTableFactory> mock_table_factory_;
};
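
// Flushing a column family that has no immutable memtables should succeed
// without doing any work.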
TEST_F(FlushJobTest, Empty) {
  JobContext job_context(0);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  EventLogger event_logger(db_options_.info_log.get());
  SnapshotChecker* snapshot_checker = nullptr;  // not relevant
  FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
                     db_options_, *cfd->GetLatestMutableCFOptions(),
                     nullptr /* memtable_id */, env_options_, versions_.get(),
                     &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
                     snapshot_checker, &job_context, nullptr, nullptr, nullptr,
                     kNoCompression, nullptr, &event_logger, false,
                     true /* sync_output_directory */,
                     true /* write_manifest */, Env::Priority::USER);
  {
    InstrumentedMutexLock l(&mutex_);
    flush_job.PickMemTable();
    ASSERT_OK(flush_job.Run());
  }
  job_context.Clean();
}
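
// Flushes a single memtable holding ordered puts, a range deletion and (in
// non-LITE builds) several blob references, then checks the resulting file
// metadata and the exact contents of the mock output file.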
TEST_F(FlushJobTest, NonEmpty) {
  JobContext job_context(0);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  auto new_mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
                                           kMaxSequenceNumber);
  new_mem->Ref();
  auto inserted_keys = mock::MakeMockFile();
  // Test data:
  //   seqno [    1,    2 ... 8998, 8999, 9000, 9001, 9002 ... 9999 ]
  //   key   [ 1001, 1002 ... 9998, 9999,    0,    1,    2 ...  999 ]
  //   range-delete "9995" -> "9999" at seqno 10000
  //   blob references with seqnos 10001..10006
  for (int i = 1; i < 10000; ++i) {
    std::string key(ToString((i + 1000) % 10000));
    std::string value("value" + key);
    new_mem->Add(SequenceNumber(i), kTypeValue, key, value);
    if ((i + 1000) % 10000 < 9995) {
      InternalKey internal_key(key, SequenceNumber(i), kTypeValue);
      inserted_keys.insert({internal_key.Encode().ToString(), value});
    }
  }

  {
    new_mem->Add(SequenceNumber(10000), kTypeRangeDeletion, "9995", "9999a");
    InternalKey internal_key("9995", SequenceNumber(10000), kTypeRangeDeletion);
    inserted_keys.insert({internal_key.Encode().ToString(), "9999a"});
  }

#ifndef ROCKSDB_LITE
  // Note: the first two blob references will not be considered when resolving
  // the oldest blob file referenced (the first one is inlined TTL, while the
  // second one is TTL and thus points to a TTL blob file).
  constexpr std::array<uint64_t, 6> blob_file_numbers{
      kInvalidBlobFileNumber, 5, 103, 17, 102, 101};
  for (size_t i = 0; i < blob_file_numbers.size(); ++i) {
    std::string key(ToString(i + 10001));
    std::string blob_index;
    if (i == 0) {
      BlobIndex::EncodeInlinedTTL(&blob_index, /* expiration */ 1234567890ULL,
                                  "foo");
    } else if (i == 1) {
      BlobIndex::EncodeBlobTTL(&blob_index, /* expiration */ 1234567890ULL,
                               blob_file_numbers[i], /* offset */ i << 10,
                               /* size */ i << 20, kNoCompression);
    } else {
      BlobIndex::EncodeBlob(&blob_index, blob_file_numbers[i],
                            /* offset */ i << 10, /* size */ i << 20,
                            kNoCompression);
    }

    const SequenceNumber seq(i + 10001);
    new_mem->Add(seq, kTypeBlobIndex, key, blob_index);

    InternalKey internal_key(key, seq, kTypeBlobIndex);
    inserted_keys.emplace_hint(inserted_keys.end(),
                               internal_key.Encode().ToString(), blob_index);
  }
#endif

  autovector<MemTable*> to_delete;
  cfd->imm()->Add(new_mem, &to_delete);
  for (auto& m : to_delete) {
    delete m;
  }

  EventLogger event_logger(db_options_.info_log.get());
  SnapshotChecker* snapshot_checker = nullptr;  // not relevant
  FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
                     db_options_, *cfd->GetLatestMutableCFOptions(),
                     nullptr /* memtable_id */, env_options_, versions_.get(),
                     &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
                     snapshot_checker, &job_context, nullptr, nullptr, nullptr,
                     kNoCompression, db_options_.statistics.get(),
                     &event_logger, true, true /* sync_output_directory */,
                     true /* write_manifest */, Env::Priority::USER);

  HistogramData hist;
  FileMetaData file_meta;
  mutex_.Lock();
  flush_job.PickMemTable();
  ASSERT_OK(flush_job.Run(nullptr, &file_meta));
  mutex_.Unlock();
  db_options_.statistics->histogramData(FLUSH_TIME, &hist);
  ASSERT_GT(hist.average, 0.0);

  ASSERT_EQ(ToString(0), file_meta.smallest.user_key().ToString());
  ASSERT_EQ("9999a", file_meta.largest.user_key().ToString());
  ASSERT_EQ(1, file_meta.fd.smallest_seqno);
#ifndef ROCKSDB_LITE
  // Blob references are only added in non-LITE builds (see above).
  ASSERT_EQ(10006, file_meta.fd.largest_seqno);
  ASSERT_EQ(17, file_meta.oldest_blob_file_number);
#else
  ASSERT_EQ(10000, file_meta.fd.largest_seqno);
#endif
  mock_table_factory_->AssertSingleFile(inserted_keys);
  job_context.Clean();
}
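
// Creates two memtables for the default column family but asks the flush job
// to flush only up to the first memtable ID, and verifies that the output
// file covers exactly the keys and sequence numbers of that first memtable.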
TEST_F(FlushJobTest, FlushMemTablesSingleColumnFamily) {
  const size_t num_mems = 2;
  const size_t num_mems_to_flush = 1;
  const size_t num_keys_per_table = 100;
  JobContext job_context(0);
  ColumnFamilyData* cfd = versions_->GetColumnFamilySet()->GetDefault();
  std::vector<uint64_t> memtable_ids;
  std::vector<MemTable*> new_mems;
  for (size_t i = 0; i != num_mems; ++i) {
    MemTable* mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
                                              kMaxSequenceNumber);
    mem->SetID(i);
    mem->Ref();
    new_mems.emplace_back(mem);
    memtable_ids.push_back(mem->GetID());

    for (size_t j = 0; j < num_keys_per_table; ++j) {
      std::string key(ToString(j + i * num_keys_per_table));
      std::string value("value" + key);
      mem->Add(SequenceNumber(j + i * num_keys_per_table), kTypeValue, key,
               value);
    }
  }

  autovector<MemTable*> to_delete;
  for (auto mem : new_mems) {
    cfd->imm()->Add(mem, &to_delete);
  }

  EventLogger event_logger(db_options_.info_log.get());
  SnapshotChecker* snapshot_checker = nullptr;  // not relevant

  assert(memtable_ids.size() == num_mems);
  uint64_t smallest_memtable_id = memtable_ids.front();
  uint64_t flush_memtable_id = smallest_memtable_id + num_mems_to_flush - 1;

  FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
                     db_options_, *cfd->GetLatestMutableCFOptions(),
                     &flush_memtable_id, env_options_, versions_.get(), &mutex_,
                     &shutting_down_, {}, kMaxSequenceNumber, snapshot_checker,
                     &job_context, nullptr, nullptr, nullptr, kNoCompression,
                     db_options_.statistics.get(), &event_logger, true,
                     true /* sync_output_directory */,
                     true /* write_manifest */, Env::Priority::USER);
  HistogramData hist;
  FileMetaData file_meta;
  mutex_.Lock();
  flush_job.PickMemTable();
  ASSERT_OK(flush_job.Run(nullptr /* prep_tracker */, &file_meta));
  mutex_.Unlock();
  db_options_.statistics->histogramData(FLUSH_TIME, &hist);
  ASSERT_GT(hist.average, 0.0);

  ASSERT_EQ(ToString(0), file_meta.smallest.user_key().ToString());
  ASSERT_EQ("99", file_meta.largest.user_key().ToString());
  ASSERT_EQ(0, file_meta.fd.smallest_seqno);
  ASSERT_EQ(SequenceNumber(num_mems_to_flush * num_keys_per_table - 1),
            file_meta.fd.largest_seqno);
  ASSERT_EQ(kInvalidBlobFileNumber, file_meta.oldest_blob_file_number);

  for (auto m : to_delete) {
    delete m;
  }
  to_delete.clear();
  job_context.Clean();
}
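
// Simulates an atomic flush across all column families: one FlushJob per
// column family is picked and run, and the results are installed together via
// InstallMemtableAtomicFlushResults. Afterwards every column family's
// immutable memtable list must be empty.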
TEST_F(FlushJobTest, FlushMemtablesMultipleColumnFamilies) {
  autovector<ColumnFamilyData*> all_cfds;
  for (auto cfd : *versions_->GetColumnFamilySet()) {
    all_cfds.push_back(cfd);
  }
  const std::vector<size_t> num_memtables = {2, 1, 3};
  assert(num_memtables.size() == column_family_names_.size());
  const size_t num_keys_per_memtable = 1000;
  JobContext job_context(0);
  std::vector<uint64_t> memtable_ids;
  std::vector<SequenceNumber> smallest_seqs;
  std::vector<SequenceNumber> largest_seqs;
  autovector<MemTable*> to_delete;
  SequenceNumber curr_seqno = 0;
  size_t k = 0;
  for (auto cfd : all_cfds) {
    smallest_seqs.push_back(curr_seqno);
    for (size_t i = 0; i != num_memtables[k]; ++i) {
      MemTable* mem = cfd->ConstructNewMemtable(
          *cfd->GetLatestMutableCFOptions(), kMaxSequenceNumber);
      mem->SetID(i);
      mem->Ref();

      for (size_t j = 0; j != num_keys_per_memtable; ++j) {
        std::string key(ToString(j + i * num_keys_per_memtable));
        std::string value("value" + key);
        mem->Add(curr_seqno++, kTypeValue, key, value);
      }

      cfd->imm()->Add(mem, &to_delete);
    }
    largest_seqs.push_back(curr_seqno - 1);
    memtable_ids.push_back(num_memtables[k++] - 1);
  }

  EventLogger event_logger(db_options_.info_log.get());
  SnapshotChecker* snapshot_checker = nullptr;  // not relevant
  std::vector<std::unique_ptr<FlushJob>> flush_jobs;
  k = 0;
  for (auto cfd : all_cfds) {
    std::vector<SequenceNumber> snapshot_seqs;
    flush_jobs.emplace_back(new FlushJob(
        dbname_, cfd, db_options_, *cfd->GetLatestMutableCFOptions(),
        &memtable_ids[k], env_options_, versions_.get(), &mutex_,
        &shutting_down_, snapshot_seqs, kMaxSequenceNumber, snapshot_checker,
        &job_context, nullptr, nullptr, nullptr, kNoCompression,
        db_options_.statistics.get(), &event_logger, true,
        false /* sync_output_directory */, false /* write_manifest */,
        Env::Priority::USER));
    k++;
  }
  HistogramData hist;
  std::vector<FileMetaData> file_metas;
  // Call reserve to avoid auto-resizing
  file_metas.reserve(flush_jobs.size());
  mutex_.Lock();
  for (auto& job : flush_jobs) {
    job->PickMemTable();
  }
  for (auto& job : flush_jobs) {
    FileMetaData meta;
    // Run will release and re-acquire mutex
    ASSERT_OK(job->Run(nullptr /* prep_tracker */, &meta));
    file_metas.emplace_back(meta);
  }
  autovector<FileMetaData*> file_meta_ptrs;
  for (auto& meta : file_metas) {
    file_meta_ptrs.push_back(&meta);
  }
  autovector<const autovector<MemTable*>*> mems_list;
  for (size_t i = 0; i != all_cfds.size(); ++i) {
    const auto& mems = flush_jobs[i]->GetMemTables();
    mems_list.push_back(&mems);
  }
  autovector<const MutableCFOptions*> mutable_cf_options_list;
  for (auto cfd : all_cfds) {
    mutable_cf_options_list.push_back(cfd->GetLatestMutableCFOptions());
  }

  // Install the flush results for all column families atomically.
  Status s = InstallMemtableAtomicFlushResults(
      nullptr /* imm_lists */, all_cfds, mutable_cf_options_list, mems_list,
      versions_.get(), &mutex_, file_meta_ptrs, &job_context.memtables_to_free,
      nullptr /* db_directory */, nullptr /* log_buffer */);
  ASSERT_OK(s);

  mutex_.Unlock();
  db_options_.statistics->histogramData(FLUSH_TIME, &hist);
  ASSERT_GT(hist.average, 0.0);
  k = 0;
  for (const auto& file_meta : file_metas) {
    ASSERT_EQ(ToString(0), file_meta.smallest.user_key().ToString());
    // "999" is the largest user key under the bytewise comparator.
    ASSERT_EQ("999", file_meta.largest.user_key().ToString());
    ASSERT_EQ(smallest_seqs[k], file_meta.fd.smallest_seqno);
    ASSERT_EQ(largest_seqs[k], file_meta.fd.largest_seqno);
    // Verify that imm is empty
    ASSERT_EQ(std::numeric_limits<uint64_t>::max(),
              all_cfds[k]->imm()->GetEarliestMemTableID());
    ASSERT_EQ(0, all_cfds[k]->imm()->GetLatestMemTableID());
    ++k;
  }

  for (auto m : to_delete) {
    delete m;
  }
  to_delete.clear();
  job_context.Clean();
}
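
// Writes multiple versions of each key and takes a random set of snapshots;
// the flushed file must contain exactly the newest version of each key plus
// every older version that is still visible to some snapshot.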
TEST_F(FlushJobTest, Snapshots) {
  JobContext job_context(0);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  auto new_mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
                                           kMaxSequenceNumber);

  std::set<SequenceNumber> snapshots_set;
  int keys = 10000;
  int max_inserts_per_keys = 8;

  Random rnd(301);
  for (int i = 0; i < keys / 2; ++i) {
    snapshots_set.insert(rnd.Uniform(keys * (max_inserts_per_keys / 2)) + 1);
  }
  // set has already removed the duplicate snapshots
  std::vector<SequenceNumber> snapshots(snapshots_set.begin(),
                                        snapshots_set.end());

  new_mem->Ref();
  SequenceNumber current_seqno = 0;
  auto inserted_keys = mock::MakeMockFile();
  for (int i = 1; i < keys; ++i) {
    std::string key(ToString(i));
    int insertions = rnd.Uniform(max_inserts_per_keys);
    for (int j = 0; j < insertions; ++j) {
      std::string value(test::RandomHumanReadableString(&rnd, 10));
      auto seqno = ++current_seqno;
      new_mem->Add(SequenceNumber(seqno), kTypeValue, key, value);
      // a key is visible only if:
      // 1. it's the last one written (j == insertions - 1)
      // 2. there's a snapshot pointing at it
      bool visible = (j == insertions - 1) ||
                     (snapshots_set.find(seqno) != snapshots_set.end());
      if (visible) {
        InternalKey internal_key(key, seqno, kTypeValue);
        inserted_keys.insert({internal_key.Encode().ToString(), value});
      }
    }
  }

  autovector<MemTable*> to_delete;
  cfd->imm()->Add(new_mem, &to_delete);
  for (auto& m : to_delete) {
    delete m;
  }

  EventLogger event_logger(db_options_.info_log.get());
  SnapshotChecker* snapshot_checker = nullptr;  // not relevant
  FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
                     db_options_, *cfd->GetLatestMutableCFOptions(),
                     nullptr /* memtable_id */, env_options_, versions_.get(),
                     &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
                     snapshot_checker, &job_context, nullptr, nullptr, nullptr,
                     kNoCompression, db_options_.statistics.get(),
                     &event_logger, true, true /* sync_output_directory */,
                     true /* write_manifest */, Env::Priority::USER);
  mutex_.Lock();
  flush_job.PickMemTable();
  ASSERT_OK(flush_job.Run());
  mutex_.Unlock();
  mock_table_factory_->AssertSingleFile(inserted_keys);
  HistogramData hist;
  db_options_.statistics->histogramData(FLUSH_TIME, &hist);
  ASSERT_GT(hist.average, 0.0);
  job_context.Clean();
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}