2020-11-11 23:09:14 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
|
|
|
//
|
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
|
|
|
#include "rocksdb/customizable.h"
|
|
|
|
|
|
|
|
#include <cctype>
|
|
|
|
#include <cinttypes>
|
|
|
|
#include <cstring>
|
|
|
|
#include <unordered_map>
|
2022-05-16 16:44:43 +00:00
|
|
|
#include <unordered_set>
|
2020-11-11 23:09:14 +00:00
|
|
|
|
2021-07-27 14:46:09 +00:00
|
|
|
#include "db/db_test_util.h"
|
2022-05-16 16:44:43 +00:00
|
|
|
#include "memory/jemalloc_nodump_allocator.h"
|
|
|
|
#include "memory/memkind_kmem_allocator.h"
|
2020-11-11 23:09:14 +00:00
|
|
|
#include "options/options_helper.h"
|
|
|
|
#include "options/options_parser.h"
|
2021-09-10 12:19:47 +00:00
|
|
|
#include "port/stack_trace.h"
|
2020-11-11 23:09:14 +00:00
|
|
|
#include "rocksdb/convenience.h"
|
2021-07-16 14:57:47 +00:00
|
|
|
#include "rocksdb/env_encryption.h"
|
2021-09-28 12:30:32 +00:00
|
|
|
#include "rocksdb/file_checksum.h"
|
2022-02-18 20:23:48 +00:00
|
|
|
#include "rocksdb/filter_policy.h"
|
2021-07-12 16:03:41 +00:00
|
|
|
#include "rocksdb/flush_block_policy.h"
|
2021-12-17 12:19:34 +00:00
|
|
|
#include "rocksdb/memory_allocator.h"
|
2021-07-06 16:17:13 +00:00
|
|
|
#include "rocksdb/secondary_cache.h"
|
2021-09-27 14:42:36 +00:00
|
|
|
#include "rocksdb/slice_transform.h"
|
2021-09-28 12:30:32 +00:00
|
|
|
#include "rocksdb/sst_partitioner.h"
|
2021-09-10 16:46:47 +00:00
|
|
|
#include "rocksdb/statistics.h"
|
2021-06-29 16:07:10 +00:00
|
|
|
#include "rocksdb/utilities/customizable_util.h"
|
2020-11-11 23:09:14 +00:00
|
|
|
#include "rocksdb/utilities/object_registry.h"
|
|
|
|
#include "rocksdb/utilities/options_type.h"
|
2022-02-18 20:23:48 +00:00
|
|
|
#include "table/block_based/filter_policy_internal.h"
|
2021-07-12 16:03:41 +00:00
|
|
|
#include "table/block_based/flush_block_policy.h"
|
2020-11-11 23:09:14 +00:00
|
|
|
#include "table/mock_table.h"
|
2021-09-21 15:53:03 +00:00
|
|
|
#include "test_util/mock_time_env.h"
|
2020-11-11 23:09:14 +00:00
|
|
|
#include "test_util/testharness.h"
|
|
|
|
#include "test_util/testutil.h"
|
2021-09-28 12:30:32 +00:00
|
|
|
#include "util/file_checksum_helper.h"
|
2021-07-16 14:57:47 +00:00
|
|
|
#include "util/string_util.h"
|
2021-08-06 15:26:23 +00:00
|
|
|
#include "utilities/compaction_filters/remove_emptyvalue_compactionfilter.h"
|
2021-12-17 12:19:34 +00:00
|
|
|
#include "utilities/memory_allocators.h"
|
2021-12-29 15:55:17 +00:00
|
|
|
#include "utilities/merge_operators/bytesxor.h"
|
|
|
|
#include "utilities/merge_operators/sortlist.h"
|
|
|
|
#include "utilities/merge_operators/string_append/stringappend.h"
|
|
|
|
#include "utilities/merge_operators/string_append/stringappend2.h"
|
2020-11-11 23:09:14 +00:00
|
|
|
|
|
|
|
#ifndef GFLAGS
|
|
|
|
bool FLAGS_enable_print = false;
|
|
|
|
#else
|
|
|
|
#include "util/gflags_compat.h"
|
|
|
|
using GFLAGS_NAMESPACE::ParseCommandLineFlags;
|
|
|
|
DEFINE_bool(enable_print, false, "Print options generated to console.");
|
|
|
|
#endif // GFLAGS
|
|
|
|
|
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2021-08-19 17:09:30 +00:00
|
|
|
namespace {
|
2020-11-11 23:09:14 +00:00
|
|
|
class StringLogger : public Logger {
|
|
|
|
public:
|
|
|
|
using Logger::Logv;
|
|
|
|
void Logv(const char* format, va_list ap) override {
|
|
|
|
char buffer[1000];
|
|
|
|
vsnprintf(buffer, sizeof(buffer), format, ap);
|
|
|
|
string_.append(buffer);
|
|
|
|
}
|
|
|
|
const std::string& str() const { return string_; }
|
|
|
|
void clear() { string_.clear(); }
|
|
|
|
|
|
|
|
private:
|
|
|
|
std::string string_;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Base Customizable used throughout these tests.  It exposes the three
// standard CreateFromString overloads (unique/shared/raw) so tests can
// exercise the object registry through each pointer flavor.
class TestCustomizable : public Customizable {
 public:
  TestCustomizable(const std::string& name) : name_(name) {}
  // Method to allow CheckedCast to work for this class
  static const char* kClassName() {
    return "TestCustomizable";
  }

  const char* Name() const override { return name_.c_str(); }
  // Type string under which instances of this hierarchy are registered.
  static const char* Type() { return "test.custom"; }
  static Status CreateFromString(const ConfigOptions& opts,
                                 const std::string& value,
                                 std::unique_ptr<TestCustomizable>* result);
  static Status CreateFromString(const ConfigOptions& opts,
                                 const std::string& value,
                                 std::shared_ptr<TestCustomizable>* result);
  static Status CreateFromString(const ConfigOptions& opts,
                                 const std::string& value,
                                 TestCustomizable** result);
  // Matches this class's own name, otherwise defers to the base class.
  bool IsInstanceOf(const std::string& name) const override {
    if (name == kClassName()) {
      return true;
    } else {
      return Customizable::IsInstanceOf(name);
    }
  }

 protected:
  const std::string name_;  // value returned by Name()
};
|
|
|
|
|
|
|
|
// Option struct registered by ACustomizable (see a_option_info).
struct AOptions {
  static const char* kName() { return "A"; }
  int i = 0;       // exposed as option "int"
  bool b = false;  // exposed as option "bool"
};
|
|
|
|
|
|
|
|
// Maps AOptions fields to option names; "int" is flagged mutable while
// "bool" carries no flags.
static std::unordered_map<std::string, OptionTypeInfo> a_option_info = {
    {"int",
     {offsetof(struct AOptions, i), OptionType::kInt,
      OptionVerificationType::kNormal, OptionTypeFlags::kMutable}},
    {"bool",
     {offsetof(struct AOptions, b), OptionType::kBoolean,
      OptionVerificationType::kNormal, OptionTypeFlags::kNone}},
};
|
2021-08-19 17:09:30 +00:00
|
|
|
|
2020-11-11 23:09:14 +00:00
|
|
|
// TestCustomizable named "A" that preserves the full id it was created
// with (e.g. "A_1") and returns it from GetId().
class ACustomizable : public TestCustomizable {
 public:
  explicit ACustomizable(const std::string& id)
      : TestCustomizable("A"), id_(id) {
    RegisterOptions(&opts_, &a_option_info);
  }
  std::string GetId() const override { return id_; }
  static const char* kClassName() { return "A"; }

 private:
  AOptions opts_;          // storage for the registered options
  const std::string id_;   // full id used to create this instance
};
|
|
|
|
|
|
|
|
// Option struct registered by BCustomizable (see b_option_info).
struct BOptions {
  std::string s;   // exposed as option "string"
  bool b = false;  // exposed as option "bool"
};
|
|
|
|
|
|
|
|
// Maps BOptions fields to option names; both options carry no flags.
static std::unordered_map<std::string, OptionTypeInfo> b_option_info = {
    {"string",
     {offsetof(struct BOptions, s), OptionType::kString,
      OptionVerificationType::kNormal, OptionTypeFlags::kNone}},
    {"bool",
     {offsetof(struct BOptions, b), OptionType::kBoolean,
      OptionVerificationType::kNormal, OptionTypeFlags::kNone}},
};
|
|
|
|
|
|
|
|
class BCustomizable : public TestCustomizable {
|
|
|
|
private:
|
|
|
|
public:
|
2021-04-26 10:12:35 +00:00
|
|
|
explicit BCustomizable(const std::string& name) : TestCustomizable(name) {
|
|
|
|
RegisterOptions(name, &opts_, &b_option_info);
|
2020-11-11 23:09:14 +00:00
|
|
|
}
|
|
|
|
static const char* kClassName() { return "B"; }
|
|
|
|
|
|
|
|
private:
|
|
|
|
BOptions opts_;
|
|
|
|
};
|
|
|
|
|
2021-08-19 17:09:30 +00:00
|
|
|
// Number of ACustomizable instances created through the "A" factory.
static int A_count = 0;

// Registers the test factories ("A"/"A_*", "B", and "S") with the library
// and returns the resulting total number of registered factories.
static int RegisterCustomTestObjects(ObjectLibrary& library,
                                     const std::string& /*arg*/) {
  // "A" matches "A" or "A_<suffix>"; each creation bumps A_count.
  library.AddFactory<TestCustomizable>(
      ObjectLibrary::PatternEntry("A", true).AddSeparator("_"),
      [](const std::string& name, std::unique_ptr<TestCustomizable>* guard,
         std::string* /* msg */) {
        guard->reset(new ACustomizable(name));
        A_count++;
        return guard->get();
      });
  library.AddFactory<TestCustomizable>(
      "B", [](const std::string& name, std::unique_ptr<TestCustomizable>* guard,
              std::string* /* msg */) {
        guard->reset(new BCustomizable(name));
        return guard->get();
      });

  // "S" returns a raw pointer without setting the guard.
  library.AddFactory<TestCustomizable>(
      "S", [](const std::string& name,
              std::unique_ptr<TestCustomizable>* /* guard */,
              std::string* /* msg */) { return new BCustomizable(name); });
  size_t num_types;  // out-param required by GetFactoryCount; value unused
  return static_cast<int>(library.GetFactoryCount(&num_types));
}
|
|
|
|
|
|
|
|
// Options struct holding a bool plus the same TestCustomizable exposed in
// each pointer flavor (unique, shared, raw); see simple_option_info.
struct SimpleOptions {
  static const char* kName() { return "simple"; }
  bool b = true;
  std::unique_ptr<TestCustomizable> cu;
  std::shared_ptr<TestCustomizable> cs;
  TestCustomizable* cp = nullptr;
};
|
|
|
|
|
|
|
|
// Option mappings for SimpleOptions.  The three customizable pointer
// options all permit null values (kAllowNull).
static std::unordered_map<std::string, OptionTypeInfo> simple_option_info = {
    {"bool",
     {offsetof(struct SimpleOptions, b), OptionType::kBoolean,
      OptionVerificationType::kNormal, OptionTypeFlags::kNone}},
    {"unique",
     OptionTypeInfo::AsCustomUniquePtr<TestCustomizable>(
         offsetof(struct SimpleOptions, cu), OptionVerificationType::kNormal,
         OptionTypeFlags::kAllowNull)},
    {"shared",
     OptionTypeInfo::AsCustomSharedPtr<TestCustomizable>(
         offsetof(struct SimpleOptions, cs), OptionVerificationType::kNormal,
         OptionTypeFlags::kAllowNull)},
    {"pointer",
     OptionTypeInfo::AsCustomRawPtr<TestCustomizable>(
         offsetof(struct SimpleOptions, cp), OptionVerificationType::kNormal,
         OptionTypeFlags::kAllowNull)},
};
|
|
|
|
|
|
|
|
// Configurable wrapper around SimpleOptions used by most tests below.
class SimpleConfigurable : public Configurable {
 private:
  SimpleOptions simple_;

 public:
  SimpleConfigurable() { RegisterOptions(&simple_, &simple_option_info); }

  // Allows a test to register an alternate option map for the same struct.
  explicit SimpleConfigurable(
      const std::unordered_map<std::string, OptionTypeInfo>* map) {
    RegisterOptions(&simple_, map);
  }
};
|
|
|
|
|
2021-08-19 17:09:30 +00:00
|
|
|
static void GetMapFromProperties(
|
|
|
|
const std::string& props,
|
|
|
|
std::unordered_map<std::string, std::string>* map) {
|
|
|
|
std::istringstream iss(props);
|
|
|
|
std::unordered_map<std::string, std::string> copy_map;
|
|
|
|
std::string line;
|
|
|
|
map->clear();
|
|
|
|
for (int line_num = 0; std::getline(iss, line); line_num++) {
|
|
|
|
std::string name;
|
|
|
|
std::string value;
|
|
|
|
ASSERT_OK(
|
|
|
|
RocksDBOptionsParser::ParseStatement(&name, &value, line, line_num));
|
|
|
|
(*map)[name] = value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
// Creates a shared TestCustomizable from its string representation via
// the registry found in config_options.
Status TestCustomizable::CreateFromString(
    const ConfigOptions& config_options, const std::string& value,
    std::shared_ptr<TestCustomizable>* result) {
  return LoadSharedObject<TestCustomizable>(config_options, value, result);
}
|
|
|
|
|
|
|
|
// Creates a unique TestCustomizable from its string representation via
// the registry found in config_options.
Status TestCustomizable::CreateFromString(
    const ConfigOptions& config_options, const std::string& value,
    std::unique_ptr<TestCustomizable>* result) {
  return LoadUniqueObject<TestCustomizable>(config_options, value, result);
}
|
|
|
|
|
|
|
|
// Creates a raw/static TestCustomizable from its string representation via
// the registry found in config_options; the caller does not own the result.
Status TestCustomizable::CreateFromString(const ConfigOptions& config_options,
                                          const std::string& value,
                                          TestCustomizable** result) {
  return LoadStaticObject<TestCustomizable>(config_options, value, result);
}
|
|
|
|
|
2020-11-11 23:09:14 +00:00
|
|
|
// Test fixture: provides a ConfigOptions with the custom test factories
// registered and PrepareOptions disabled during configuration.
class CustomizableTest : public testing::Test {
 public:
  CustomizableTest() {
    // Do not invoke PrepareOptions on objects while configuring.
    config_options_.invoke_prepare_options = false;
    config_options_.registry->AddLibrary("CustomizableTest",
                                         RegisterCustomTestObjects, "");
  }

  ConfigOptions config_options_;
};
|
|
|
|
|
|
|
|
// Tests that a Customizable can be created by:
|
|
|
|
// - a simple name
|
|
|
|
// - a XXX.id option
|
|
|
|
// - a property with a name
|
|
|
|
TEST_F(CustomizableTest, CreateByNameTest) {
  // Register a factory matching "TEST" with an "_"-separated suffix.
  ObjectLibrary::Default()->AddFactory<TestCustomizable>(
      ObjectLibrary::PatternEntry("TEST", false).AddSeparator("_"),
      [](const std::string& name, std::unique_ptr<TestCustomizable>* guard,
         std::string* /* msg */) {
        guard->reset(new TestCustomizable(name));
        return guard->get();
      });
  std::unique_ptr<Configurable> configurable(new SimpleConfigurable());
  SimpleOptions* simple = configurable->GetOptions<SimpleOptions>();
  ASSERT_NE(simple, nullptr);
  // Create via a property bag containing the id ...
  ASSERT_OK(
      configurable->ConfigureFromString(config_options_, "unique={id=TEST_1}"));
  ASSERT_NE(simple->cu, nullptr);
  ASSERT_EQ(simple->cu->GetId(), "TEST_1");
  // ... via an explicit "<option>.id" property ...
  ASSERT_OK(
      configurable->ConfigureFromString(config_options_, "unique.id=TEST_2"));
  ASSERT_NE(simple->cu, nullptr);
  ASSERT_EQ(simple->cu->GetId(), "TEST_2");
  // ... and via a bare name.
  ASSERT_OK(
      configurable->ConfigureFromString(config_options_, "unique=TEST_3"));
  ASSERT_NE(simple->cu, nullptr);
  ASSERT_EQ(simple->cu->GetId(), "TEST_3");
}
|
|
|
|
|
|
|
|
// A Customizable with no registered options serializes to just its name.
TEST_F(CustomizableTest, ToStringTest) {
  std::unique_ptr<TestCustomizable> custom(new TestCustomizable("test"));
  ASSERT_EQ(custom->ToString(config_options_), "test");
}
|
|
|
|
|
|
|
|
// Configures nested customizables from an option map and verifies that the
// serialized option string round-trips into an equivalent object.
TEST_F(CustomizableTest, SimpleConfigureTest) {
  std::unordered_map<std::string, std::string> opt_map = {
      {"unique", "id=A;int=1;bool=true"},
      {"shared", "id=B;string=s"},
  };
  std::unique_ptr<Configurable> configurable(new SimpleConfigurable());
  ASSERT_OK(configurable->ConfigureFromMap(config_options_, opt_map));
  SimpleOptions* simple = configurable->GetOptions<SimpleOptions>();
  ASSERT_NE(simple, nullptr);
  ASSERT_NE(simple->cu, nullptr);
  ASSERT_EQ(simple->cu->GetId(), "A");
  std::string opt_str;
  std::string mismatch;
  ASSERT_OK(configurable->GetOptionString(config_options_, &opt_str));
  // Round-trip: a copy configured from the serialized string must compare
  // equivalent to the original.
  std::unique_ptr<Configurable> copy(new SimpleConfigurable());
  ASSERT_OK(copy->ConfigureFromString(config_options_, opt_str));
  ASSERT_TRUE(
      configurable->AreEquivalent(config_options_, copy.get(), &mismatch));
}
|
|
|
|
|
|
|
|
// Configures nested customizables from dotted properties, then round-trips
// through a newline-delimited properties string parsed back into a map.
TEST_F(CustomizableTest, ConfigureFromPropsTest) {
  std::unordered_map<std::string, std::string> opt_map = {
      {"unique.id", "A"}, {"unique.A.int", "1"}, {"unique.A.bool", "true"},
      {"shared.id", "B"}, {"shared.B.string", "s"},
  };
  std::unique_ptr<Configurable> configurable(new SimpleConfigurable());
  ASSERT_OK(configurable->ConfigureFromMap(config_options_, opt_map));
  SimpleOptions* simple = configurable->GetOptions<SimpleOptions>();
  ASSERT_NE(simple, nullptr);
  ASSERT_NE(simple->cu, nullptr);
  ASSERT_EQ(simple->cu->GetId(), "A");
  std::string opt_str;
  std::string mismatch;
  // Serialize one statement per line so GetMapFromProperties can parse it.
  config_options_.delimiter = "\n";
  std::unordered_map<std::string, std::string> props;
  ASSERT_OK(configurable->GetOptionString(config_options_, &opt_str));
  GetMapFromProperties(opt_str, &props);
  std::unique_ptr<Configurable> copy(new SimpleConfigurable());
  ASSERT_OK(copy->ConfigureFromMap(config_options_, props));
  ASSERT_TRUE(
      configurable->AreEquivalent(config_options_, copy.get(), &mismatch));
}
|
|
|
|
|
|
|
|
// Configures nested customizables using the short dotted-property form and
// checks the unique object was created with the expected id.
TEST_F(CustomizableTest, ConfigureFromShortTest) {
  std::unordered_map<std::string, std::string> opt_map = {
      {"unique.id", "A"}, {"unique.A.int", "1"}, {"unique.A.bool", "true"},
      {"shared.id", "B"}, {"shared.B.string", "s"},
  };
  std::unique_ptr<Configurable> configurable(new SimpleConfigurable());
  ASSERT_OK(configurable->ConfigureFromMap(config_options_, opt_map));
  SimpleOptions* simple = configurable->GetOptions<SimpleOptions>();
  ASSERT_NE(simple, nullptr);
  ASSERT_NE(simple->cu, nullptr);
  ASSERT_EQ(simple->cu->GetId(), "A");
}
|
|
|
|
|
|
|
|
// Exercises AreEquivalent at the default and "loosely compatible" sanity
// levels as nested customizable options diverge or become null.
TEST_F(CustomizableTest, AreEquivalentOptionsTest) {
  std::unordered_map<std::string, std::string> opt_map = {
      {"unique", "id=A;int=1;bool=true"},
      {"shared", "id=A;int=1;bool=true"},
  };
  std::string mismatch;
  ConfigOptions config_options = config_options_;
  std::unique_ptr<Configurable> c1(new SimpleConfigurable());
  std::unique_ptr<Configurable> c2(new SimpleConfigurable());
  ASSERT_OK(c1->ConfigureFromMap(config_options, opt_map));
  ASSERT_OK(c2->ConfigureFromMap(config_options, opt_map));
  // Identically configured objects compare equivalent.
  ASSERT_TRUE(c1->AreEquivalent(config_options, c2.get(), &mismatch));
  SimpleOptions* simple = c1->GetOptions<SimpleOptions>();
  ASSERT_TRUE(
      simple->cu->AreEquivalent(config_options, simple->cs.get(), &mismatch));
  // Mutating "int" (a mutable option) breaks strict equivalence ...
  ASSERT_OK(simple->cu->ConfigureOption(config_options, "int", "2"));
  ASSERT_FALSE(
      simple->cu->AreEquivalent(config_options, simple->cs.get(), &mismatch));
  ASSERT_FALSE(c1->AreEquivalent(config_options, c2.get(), &mismatch));
  // ... but not loose equivalence.
  ConfigOptions loosely = config_options;
  loosely.sanity_level = ConfigOptions::kSanityLevelLooselyCompatible;
  ASSERT_TRUE(c1->AreEquivalent(loosely, c2.get(), &mismatch));
  ASSERT_TRUE(simple->cu->AreEquivalent(loosely, simple->cs.get(), &mismatch));

  // Replacing the shared object with a different id: loose comparison of
  // the configurables still passes, strict does not.
  ASSERT_OK(c1->ConfigureOption(config_options, "shared", "id=B;string=3"));
  ASSERT_TRUE(c1->AreEquivalent(loosely, c2.get(), &mismatch));
  ASSERT_FALSE(c1->AreEquivalent(config_options, c2.get(), &mismatch));
  ASSERT_FALSE(simple->cs->AreEquivalent(loosely, simple->cu.get(), &mismatch));
  // A null shared object on one side: loose passes, strict does not.
  simple->cs.reset();
  ASSERT_TRUE(c1->AreEquivalent(loosely, c2.get(), &mismatch));
  ASSERT_FALSE(c1->AreEquivalent(config_options, c2.get(), &mismatch));
}
|
|
|
|
|
|
|
|
// Tests that we can initialize a customizable from its options
|
|
|
|
TEST_F(CustomizableTest, ConfigureStandaloneCustomTest) {
  std::unique_ptr<TestCustomizable> base, copy;
  const auto& registry = config_options_.registry;
  ASSERT_OK(registry->NewUniqueObject<TestCustomizable>("A", &base));
  ASSERT_OK(registry->NewUniqueObject<TestCustomizable>("A", &copy));
  // Configure one instance directly, then round-trip its option string
  // into the second instance and compare.
  ASSERT_OK(base->ConfigureFromString(config_options_, "int=33;bool=true"));
  std::string opt_str;
  std::string mismatch;
  ASSERT_OK(base->GetOptionString(config_options_, &opt_str));
  ASSERT_OK(copy->ConfigureFromString(config_options_, opt_str));
  ASSERT_TRUE(base->AreEquivalent(config_options_, copy.get(), &mismatch));
}
|
|
|
|
|
|
|
|
// Tests that we fail appropriately if the pattern is not registered
|
|
|
|
TEST_F(CustomizableTest, BadNameTest) {
  // An unregistered id fails unless unsupported options are ignored.
  config_options_.ignore_unsupported_options = false;
  std::unique_ptr<Configurable> c1(new SimpleConfigurable());
  ASSERT_NOK(
      c1->ConfigureFromString(config_options_, "unique.shared.id=bad name"));
  config_options_.ignore_unsupported_options = true;
  ASSERT_OK(
      c1->ConfigureFromString(config_options_, "unique.shared.id=bad name"));
}
|
|
|
|
|
|
|
|
// Tests that we fail appropriately if a bad option is passed to the underlying
|
|
|
|
// configurable
|
|
|
|
TEST_F(CustomizableTest, BadOptionTest) {
  std::unique_ptr<Configurable> c1(new SimpleConfigurable());
  ConfigOptions ignore = config_options_;
  ignore.ignore_unknown_options = true;

  // Options for objects that do not exist, or that the target object does
  // not support, fail strictly but succeed when unknowns are ignored.
  ASSERT_NOK(c1->ConfigureFromString(config_options_, "A.int=11"));
  ASSERT_NOK(c1->ConfigureFromString(config_options_, "shared={id=B;int=1}"));
  ASSERT_OK(c1->ConfigureFromString(ignore, "shared={id=A;string=s}"));
  ASSERT_NOK(c1->ConfigureFromString(config_options_, "B.int=11"));
  ASSERT_OK(c1->ConfigureFromString(ignore, "B.int=11"));
  ASSERT_NOK(c1->ConfigureFromString(config_options_, "A.string=s"));
  ASSERT_OK(c1->ConfigureFromString(ignore, "A.string=s"));
  // Test as detached
  // NOTE(review): the trailing '}' in these strings looks like deliberately
  // malformed input -- confirm against the options parser's intent.
  ASSERT_NOK(
      c1->ConfigureFromString(config_options_, "shared.id=A;A.string=b}"));
  ASSERT_OK(c1->ConfigureFromString(ignore, "shared.id=A;A.string=s}"));
}
|
|
|
|
|
2022-02-11 13:10:10 +00:00
|
|
|
// Distinguishes the errors produced by a factory that fails (registered
// but returns nullptr with a message) from a factory that is missing
// entirely, across the ignore_unknown/ignore_unsupported settings.
// Fix: removed an unused local ObjectRegistry created via NewInstance().
TEST_F(CustomizableTest, FailingFactoryTest) {
  std::unique_ptr<Configurable> c1(new SimpleConfigurable());
  ConfigOptions ignore = config_options_;

  Status s;
  // Register a factory that always fails with "Bad Factory".
  ignore.registry->AddLibrary("failing")->AddFactory<TestCustomizable>(
      "failing",
      [](const std::string& /*uri*/,
         std::unique_ptr<TestCustomizable>* /*guard */, std::string* errmsg) {
        *errmsg = "Bad Factory";
        return nullptr;
      });

  // If we are ignoring unknown and unsupported options, will see
  // different errors for failing versus missing
  ignore.ignore_unknown_options = false;
  ignore.ignore_unsupported_options = false;
  s = c1->ConfigureFromString(ignore, "shared.id=failing");
  ASSERT_TRUE(s.IsInvalidArgument());
  s = c1->ConfigureFromString(ignore, "unique.id=failing");
  ASSERT_TRUE(s.IsInvalidArgument());
  s = c1->ConfigureFromString(ignore, "shared.id=missing");
  ASSERT_TRUE(s.IsNotSupported());
  s = c1->ConfigureFromString(ignore, "unique.id=missing");
  ASSERT_TRUE(s.IsNotSupported());

  // If we are ignoring unsupported options, will see
  // errors for failing but not missing
  ignore.ignore_unknown_options = false;
  ignore.ignore_unsupported_options = true;
  s = c1->ConfigureFromString(ignore, "shared.id=failing");
  ASSERT_TRUE(s.IsInvalidArgument());
  s = c1->ConfigureFromString(ignore, "unique.id=failing");
  ASSERT_TRUE(s.IsInvalidArgument());

  ASSERT_OK(c1->ConfigureFromString(ignore, "shared.id=missing"));
  ASSERT_OK(c1->ConfigureFromString(ignore, "unique.id=missing"));

  // If we are ignoring unknown options, will see no errors
  // for failing or missing
  ignore.ignore_unknown_options = true;
  ignore.ignore_unsupported_options = false;
  ASSERT_OK(c1->ConfigureFromString(ignore, "shared.id=failing"));
  ASSERT_OK(c1->ConfigureFromString(ignore, "unique.id=failing"));
  ASSERT_OK(c1->ConfigureFromString(ignore, "shared.id=missing"));
  ASSERT_OK(c1->ConfigureFromString(ignore, "unique.id=missing"));
}
|
|
|
|
|
2020-11-11 23:09:14 +00:00
|
|
|
// Tests that different IDs lead to different objects
TEST_F(CustomizableTest, UniqueIdTest) {
  std::unique_ptr<Configurable> base(new SimpleConfigurable());
  ASSERT_OK(base->ConfigureFromString(config_options_,
                                      "unique={id=A_1;int=1;bool=true}"));
  SimpleOptions* simple = base->GetOptions<SimpleOptions>();
  ASSERT_NE(simple, nullptr);
  ASSERT_NE(simple->cu, nullptr);
  ASSERT_EQ(simple->cu->GetId(), std::string("A_1"));
  std::string opt_str;
  std::string mismatch;
  // Round-trip the serialized options into a fresh configurable; the two
  // should compare as equivalent.
  ASSERT_OK(base->GetOptionString(config_options_, &opt_str));
  std::unique_ptr<Configurable> copy(new SimpleConfigurable());
  ASSERT_OK(copy->ConfigureFromString(config_options_, opt_str));
  ASSERT_TRUE(base->AreEquivalent(config_options_, copy.get(), &mismatch));
  // Changing only the id (A_1 -> A_2) breaks equivalence.
  ASSERT_OK(base->ConfigureFromString(config_options_,
                                      "unique={id=A_2;int=1;bool=true}"));
  ASSERT_FALSE(base->AreEquivalent(config_options_, copy.get(), &mismatch));
  ASSERT_EQ(simple->cu->GetId(), std::string("A_2"));
}
|
|
|
|
|
|
|
|
// Tests IsInstanceOf/CheckedCast: an object matches its class name and its
// base class name, but not a sibling class or its instance id.
TEST_F(CustomizableTest, IsInstanceOfTest) {
  std::shared_ptr<TestCustomizable> tc = std::make_shared<ACustomizable>("A_1");

  ASSERT_EQ(tc->GetId(), std::string("A_1"));
  ASSERT_TRUE(tc->IsInstanceOf("A"));
  ASSERT_TRUE(tc->IsInstanceOf("TestCustomizable"));
  ASSERT_FALSE(tc->IsInstanceOf("B"));
  // The id ("A_1") is not a class name, so it does not match.
  ASSERT_FALSE(tc->IsInstanceOf("A_1"));
  ASSERT_EQ(tc->CheckedCast<ACustomizable>(), tc.get());
  ASSERT_EQ(tc->CheckedCast<TestCustomizable>(), tc.get());
  ASSERT_EQ(tc->CheckedCast<BCustomizable>(), nullptr);

  // Same checks with a B instance: A and B casts are mirrored.
  tc.reset(new BCustomizable("B"));
  ASSERT_TRUE(tc->IsInstanceOf("B"));
  ASSERT_TRUE(tc->IsInstanceOf("TestCustomizable"));
  ASSERT_FALSE(tc->IsInstanceOf("A"));
  ASSERT_EQ(tc->CheckedCast<BCustomizable>(), tc.get());
  ASSERT_EQ(tc->CheckedCast<TestCustomizable>(), tc.get());
  ASSERT_EQ(tc->CheckedCast<ACustomizable>(), nullptr);
}
|
|
|
|
|
2021-06-30 21:08:19 +00:00
|
|
|
// Tests invoke_prepare_options: when set, PrepareOptions is called during
// configuration and a failing PrepareOptions aborts the configure; when not
// set, the object is created without being prepared.
TEST_F(CustomizableTest, PrepareOptionsTest) {
  static std::unordered_map<std::string, OptionTypeInfo> p_option_info = {
      {"can_prepare",
       {0, OptionType::kBoolean, OptionVerificationType::kNormal,
        OptionTypeFlags::kNone}},
  };

  // A customizable whose PrepareOptions can be made to fail via the
  // "can_prepare" option.
  class PrepareCustomizable : public TestCustomizable {
   public:
    bool can_prepare_ = true;

    PrepareCustomizable() : TestCustomizable("P") {
      RegisterOptions("Prepare", &can_prepare_, &p_option_info);
    }

    Status PrepareOptions(const ConfigOptions& opts) override {
      if (!can_prepare_) {
        return Status::InvalidArgument("Cannot Prepare");
      } else {
        return TestCustomizable::PrepareOptions(opts);
      }
    }
  };

  ObjectLibrary::Default()->AddFactory<TestCustomizable>(
      "P",
      [](const std::string& /*name*/, std::unique_ptr<TestCustomizable>* guard,
         std::string* /* msg */) {
        guard->reset(new PrepareCustomizable());
        return guard->get();
      });

  std::unique_ptr<Configurable> base(new SimpleConfigurable());
  // "prepared" invokes PrepareOptions as part of ConfigureFromString.
  ConfigOptions prepared(config_options_);
  prepared.invoke_prepare_options = true;

  ASSERT_OK(base->ConfigureFromString(
      prepared, "unique=A_1; shared={id=B;string=s}; pointer.id=S"));
  SimpleOptions* simple = base->GetOptions<SimpleOptions>();
  ASSERT_NE(simple, nullptr);
  ASSERT_NE(simple->cu, nullptr);
  ASSERT_NE(simple->cs, nullptr);
  ASSERT_NE(simple->cp, nullptr);
  // cp is a raw (static) pointer option; it must be freed manually.
  delete simple->cp;
  base.reset(new SimpleConfigurable());
  // Same configuration without invoke_prepare_options.
  ASSERT_OK(base->ConfigureFromString(
      config_options_, "unique=A_1; shared={id=B;string=s}; pointer.id=S"));

  simple = base->GetOptions<SimpleOptions>();
  ASSERT_NE(simple, nullptr);
  ASSERT_NE(simple->cu, nullptr);
  ASSERT_NE(simple->cs, nullptr);
  ASSERT_NE(simple->cp, nullptr);

  ASSERT_OK(base->PrepareOptions(config_options_));
  delete simple->cp;
  base.reset(new SimpleConfigurable());
  simple = base->GetOptions<SimpleOptions>();
  ASSERT_NE(simple, nullptr);

  // Preparing fails => configure fails and no object is stored.
  ASSERT_NOK(
      base->ConfigureFromString(prepared, "unique={id=P; can_prepare=false}"));
  ASSERT_EQ(simple->cu, nullptr);

  ASSERT_OK(
      base->ConfigureFromString(prepared, "unique={id=P; can_prepare=true}"));
  ASSERT_NE(simple->cu, nullptr);

  // Without invoke_prepare_options the object is created regardless;
  // preparing it afterwards succeeds/fails based on can_prepare.
  ASSERT_OK(base->ConfigureFromString(config_options_,
                                      "unique={id=P; can_prepare=true}"));
  ASSERT_NE(simple->cu, nullptr);
  ASSERT_OK(simple->cu->PrepareOptions(prepared));

  ASSERT_OK(base->ConfigureFromString(config_options_,
                                      "unique={id=P; can_prepare=false}"));
  ASSERT_NE(simple->cu, nullptr);
  ASSERT_NOK(simple->cu->PrepareOptions(prepared));
}
|
|
|
|
|
2021-08-19 17:09:30 +00:00
|
|
|
namespace {
|
2020-11-11 23:09:14 +00:00
|
|
|
// Option map exposing a single shared Customizable named "inner";
// kStringNameOnly controls how the nested object is serialized.
static std::unordered_map<std::string, OptionTypeInfo> inner_option_info = {
    {"inner",
     OptionTypeInfo::AsCustomSharedPtr<TestCustomizable>(
         0, OptionVerificationType::kNormal, OptionTypeFlags::kStringNameOnly)}
};
|
|
|
|
|
2021-11-30 21:22:27 +00:00
|
|
|
// Options struct holding the wrapped Customizable used by InnerCustomizable.
struct InnerOptions {
  static const char* kName() { return "InnerOptions"; }
  std::shared_ptr<Customizable> inner;
};
|
|
|
|
|
2021-06-17 15:38:30 +00:00
|
|
|
class InnerCustomizable : public Customizable {
|
|
|
|
public:
|
2021-11-30 21:22:27 +00:00
|
|
|
explicit InnerCustomizable(const std::shared_ptr<Customizable>& w) {
|
|
|
|
iopts_.inner = w;
|
|
|
|
RegisterOptions(&iopts_, &inner_option_info);
|
|
|
|
}
|
2021-06-17 15:38:30 +00:00
|
|
|
static const char* kClassName() { return "Inner"; }
|
2021-11-30 21:22:27 +00:00
|
|
|
const char* Name() const override { return kClassName(); }
|
|
|
|
|
2021-06-17 15:38:30 +00:00
|
|
|
bool IsInstanceOf(const std::string& name) const override {
|
|
|
|
if (name == kClassName()) {
|
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
return Customizable::IsInstanceOf(name);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
protected:
|
2021-11-30 21:22:27 +00:00
|
|
|
const Customizable* Inner() const override { return iopts_.inner.get(); }
|
2021-06-17 15:38:30 +00:00
|
|
|
|
|
|
|
private:
|
2021-11-30 21:22:27 +00:00
|
|
|
InnerOptions iopts_;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Options blob owned by WrappedCustomizable1; retrievable by name via
// GetOptions<WrappedOptions1>().
struct WrappedOptions1 {
  static const char* kName() { return "WrappedOptions1"; }
  int i = 42;
};
|
|
|
|
|
|
|
|
// Wrapper that registers its own options via RegisterOptions (with a null
// OptionTypeInfo map, so only the named blob is exposed, not string options).
class WrappedCustomizable1 : public InnerCustomizable {
 public:
  explicit WrappedCustomizable1(const std::shared_ptr<Customizable>& w)
      : InnerCustomizable(w) {
    RegisterOptions(&wopts_, nullptr);
  }
  const char* Name() const override { return kClassName(); }
  static const char* kClassName() { return "Wrapped1"; }

 private:
  WrappedOptions1 wopts_;
};
|
|
|
|
|
2021-11-30 21:22:27 +00:00
|
|
|
// Options blob owned by WrappedCustomizable2; exposed through an overridden
// GetOptionsPtr rather than RegisterOptions.
struct WrappedOptions2 {
  static const char* kName() { return "WrappedOptions2"; }
  std::string s = "42";
};
|
2021-06-17 15:38:30 +00:00
|
|
|
class WrappedCustomizable2 : public InnerCustomizable {
|
|
|
|
public:
|
|
|
|
explicit WrappedCustomizable2(const std::shared_ptr<Customizable>& w)
|
|
|
|
: InnerCustomizable(w) {}
|
2021-11-30 21:22:27 +00:00
|
|
|
const void* GetOptionsPtr(const std::string& name) const override {
|
|
|
|
if (name == WrappedOptions2::kName()) {
|
|
|
|
return &wopts_;
|
|
|
|
} else {
|
|
|
|
return InnerCustomizable::GetOptionsPtr(name);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-17 15:38:30 +00:00
|
|
|
const char* Name() const override { return kClassName(); }
|
|
|
|
static const char* kClassName() { return "Wrapped2"; }
|
2021-11-30 21:22:27 +00:00
|
|
|
|
|
|
|
private:
|
|
|
|
WrappedOptions2 wopts_;
|
2021-06-17 15:38:30 +00:00
|
|
|
};
|
2021-08-19 17:09:30 +00:00
|
|
|
} // namespace
|
2021-06-17 15:38:30 +00:00
|
|
|
|
|
|
|
// Tests CheckedCast through wrapper chains: a cast that does not match the
// outer object is retried against its Inner() object.
TEST_F(CustomizableTest, WrappedInnerTest) {
  std::shared_ptr<TestCustomizable> ac =
      std::make_shared<TestCustomizable>("A");

  // Unwrapped object: only matches its own types.
  ASSERT_TRUE(ac->IsInstanceOf("A"));
  ASSERT_TRUE(ac->IsInstanceOf("TestCustomizable"));
  ASSERT_EQ(ac->CheckedCast<TestCustomizable>(), ac.get());
  ASSERT_EQ(ac->CheckedCast<InnerCustomizable>(), nullptr);
  ASSERT_EQ(ac->CheckedCast<WrappedCustomizable1>(), nullptr);
  ASSERT_EQ(ac->CheckedCast<WrappedCustomizable2>(), nullptr);
  std::shared_ptr<Customizable> wc1 =
      std::make_shared<WrappedCustomizable1>(ac);

  // One level of wrapping: casting to the inner type falls through to ac.
  ASSERT_TRUE(wc1->IsInstanceOf(WrappedCustomizable1::kClassName()));
  ASSERT_EQ(wc1->CheckedCast<WrappedCustomizable1>(), wc1.get());
  ASSERT_EQ(wc1->CheckedCast<WrappedCustomizable2>(), nullptr);
  ASSERT_EQ(wc1->CheckedCast<InnerCustomizable>(), wc1.get());
  ASSERT_EQ(wc1->CheckedCast<TestCustomizable>(), ac.get());

  // Two levels: wc2 -> wc1 -> ac are all reachable through the chain.
  std::shared_ptr<Customizable> wc2 =
      std::make_shared<WrappedCustomizable2>(wc1);
  ASSERT_TRUE(wc2->IsInstanceOf(WrappedCustomizable2::kClassName()));
  ASSERT_EQ(wc2->CheckedCast<WrappedCustomizable2>(), wc2.get());
  ASSERT_EQ(wc2->CheckedCast<WrappedCustomizable1>(), wc1.get());
  ASSERT_EQ(wc2->CheckedCast<InnerCustomizable>(), wc2.get());
  ASSERT_EQ(wc2->CheckedCast<TestCustomizable>(), ac.get());
}
|
|
|
|
|
2021-11-30 21:22:27 +00:00
|
|
|
// Tests GetOptions lookup through wrapper chains: options registered on the
// inner object are visible from the wrappers, but each wrapper's own options
// are visible only on that wrapper.
TEST_F(CustomizableTest, CustomizableInnerTest) {
  std::shared_ptr<Customizable> c =
      std::make_shared<InnerCustomizable>(std::make_shared<ACustomizable>("a"));
  std::shared_ptr<Customizable> wc1 = std::make_shared<WrappedCustomizable1>(c);
  std::shared_ptr<Customizable> wc2 = std::make_shared<WrappedCustomizable2>(c);
  auto inner = c->GetOptions<InnerOptions>();
  ASSERT_NE(inner, nullptr);

  // The inner ACustomizable's options are reachable from both wrappers.
  auto aopts = c->GetOptions<AOptions>();
  ASSERT_NE(aopts, nullptr);
  ASSERT_EQ(aopts, wc1->GetOptions<AOptions>());
  ASSERT_EQ(aopts, wc2->GetOptions<AOptions>());
  // WrappedOptions1 belongs to wc1 only.
  auto w1opts = wc1->GetOptions<WrappedOptions1>();
  ASSERT_NE(w1opts, nullptr);
  ASSERT_EQ(c->GetOptions<WrappedOptions1>(), nullptr);
  ASSERT_EQ(wc2->GetOptions<WrappedOptions1>(), nullptr);

  // WrappedOptions2 belongs to wc2 only.
  auto w2opts = wc2->GetOptions<WrappedOptions2>();
  ASSERT_NE(w2opts, nullptr);
  ASSERT_EQ(c->GetOptions<WrappedOptions2>(), nullptr);
  ASSERT_EQ(wc1->GetOptions<WrappedOptions2>(), nullptr);
}
|
|
|
|
|
2021-08-26 00:46:31 +00:00
|
|
|
// Tests that copying a Customizable copies its state at copy time and that
// the copy's counters do not advance when the original is used afterwards.
TEST_F(CustomizableTest, CopyObjectTest) {
  // Counts how many times PrepareOptions/ValidateOptions have been invoked.
  class CopyCustomizable : public Customizable {
   public:
    CopyCustomizable() : prepared_(0), validated_(0) {}
    const char* Name() const override { return "CopyCustomizable"; }

    Status PrepareOptions(const ConfigOptions& options) override {
      prepared_++;
      return Customizable::PrepareOptions(options);
    }
    Status ValidateOptions(const DBOptions& db_opts,
                           const ColumnFamilyOptions& cf_opts) const override {
      validated_++;
      return Customizable::ValidateOptions(db_opts, cf_opts);
    }
    int prepared_;
    // mutable so the const ValidateOptions override can count calls.
    mutable int validated_;
  };

  CopyCustomizable c1;
  ConfigOptions config_options;
  Options options;

  ASSERT_OK(c1.PrepareOptions(config_options));
  ASSERT_OK(c1.ValidateOptions(options, options));
  ASSERT_EQ(c1.prepared_, 1);
  ASSERT_EQ(c1.validated_, 1);
  // The copy snapshots the counters; further calls on c1 affect only c1.
  CopyCustomizable c2 = c1;
  ASSERT_OK(c1.PrepareOptions(config_options));
  ASSERT_OK(c1.ValidateOptions(options, options));
  ASSERT_EQ(c2.prepared_, 1);
  ASSERT_EQ(c2.validated_, 1);
  ASSERT_EQ(c1.prepared_, 2);
  ASSERT_EQ(c1.validated_, 2);
}
|
|
|
|
|
2020-11-11 23:09:14 +00:00
|
|
|
// Tests ConfigOptions::depth: shallow serialization prints just the nested
// object's name ("inner=a;"); detailed serialization prints more.
TEST_F(CustomizableTest, TestStringDepth) {
  ConfigOptions shallow = config_options_;
  std::unique_ptr<Configurable> c(
      new InnerCustomizable(std::make_shared<ACustomizable>("a")));
  std::string opt_str;
  shallow.depth = ConfigOptions::Depth::kDepthShallow;
  ASSERT_OK(c->GetOptionString(shallow, &opt_str));
  ASSERT_EQ(opt_str, "inner=a;");
  shallow.depth = ConfigOptions::Depth::kDepthDetailed;
  ASSERT_OK(c->GetOptionString(shallow, &opt_str));
  ASSERT_NE(opt_str, "inner=a;");
}
|
|
|
|
|
|
|
|
// Tests that we only get a new customizable when it changes
TEST_F(CustomizableTest, NewUniqueCustomizableTest) {
  std::unique_ptr<Configurable> base(new SimpleConfigurable());
  // A_count is a file-level counter incremented when an "A" is constructed.
  A_count = 0;
  ASSERT_OK(base->ConfigureFromString(config_options_,
                                      "unique={id=A_1;int=1;bool=true}"));
  SimpleOptions* simple = base->GetOptions<SimpleOptions>();
  ASSERT_NE(simple, nullptr);
  ASSERT_NE(simple->cu, nullptr);
  ASSERT_EQ(A_count, 1);  // Created one A
  ASSERT_OK(base->ConfigureFromString(config_options_,
                                      "unique={id=A_1;int=1;bool=false}"));
  ASSERT_EQ(A_count, 2);  // Create another A_1
  // Empty id clears the object without constructing anything new.
  ASSERT_OK(base->ConfigureFromString(config_options_, "unique={id=}"));
  ASSERT_EQ(simple->cu, nullptr);
  ASSERT_EQ(A_count, 2);
  ASSERT_OK(base->ConfigureFromString(config_options_,
                                      "unique={id=A_2;int=1;bool=false}"));
  ASSERT_EQ(A_count, 3);  // Created another A
  // All of the "empty"/"nullptr" spellings clear the pointer and do not
  // create any new objects.
  ASSERT_OK(base->ConfigureFromString(config_options_, "unique.id="));
  ASSERT_EQ(simple->cu, nullptr);
  ASSERT_OK(base->ConfigureFromString(config_options_, "unique=nullptr"));
  ASSERT_EQ(simple->cu, nullptr);
  ASSERT_OK(base->ConfigureFromString(config_options_, "unique.id=nullptr"));
  ASSERT_EQ(simple->cu, nullptr);
  ASSERT_EQ(A_count, 3);
}
|
|
|
|
|
2021-08-19 17:09:30 +00:00
|
|
|
// Tests every spelling of "empty"/"nullptr" for a unique_ptr customizable
// option: each one resets a previously-set object back to nullptr.
TEST_F(CustomizableTest, NewEmptyUniqueTest) {
  std::unique_ptr<Configurable> base(new SimpleConfigurable());
  SimpleOptions* simple = base->GetOptions<SimpleOptions>();
  ASSERT_EQ(simple->cu, nullptr);
  simple->cu.reset(new BCustomizable("B"));

  ASSERT_OK(base->ConfigureFromString(config_options_, "unique={id=}"));
  ASSERT_EQ(simple->cu, nullptr);
  simple->cu.reset(new BCustomizable("B"));

  ASSERT_OK(base->ConfigureFromString(config_options_, "unique={id=nullptr}"));
  ASSERT_EQ(simple->cu, nullptr);
  simple->cu.reset(new BCustomizable("B"));

  ASSERT_OK(base->ConfigureFromString(config_options_, "unique.id="));
  ASSERT_EQ(simple->cu, nullptr);
  simple->cu.reset(new BCustomizable("B"));

  ASSERT_OK(base->ConfigureFromString(config_options_, "unique=nullptr"));
  ASSERT_EQ(simple->cu, nullptr);
  simple->cu.reset(new BCustomizable("B"));

  ASSERT_OK(base->ConfigureFromString(config_options_, "unique.id=nullptr"));
  ASSERT_EQ(simple->cu, nullptr);
}
|
|
|
|
|
|
|
|
// Tests every spelling of "empty"/"nullptr" for a shared_ptr customizable
// option: each one resets a previously-set object back to nullptr.
TEST_F(CustomizableTest, NewEmptySharedTest) {
  std::unique_ptr<Configurable> base(new SimpleConfigurable());

  SimpleOptions* simple = base->GetOptions<SimpleOptions>();
  ASSERT_NE(simple, nullptr);
  ASSERT_EQ(simple->cs, nullptr);
  simple->cs.reset(new BCustomizable("B"));

  ASSERT_OK(base->ConfigureFromString(config_options_, "shared={id=}"));
  ASSERT_NE(simple, nullptr);
  ASSERT_EQ(simple->cs, nullptr);
  simple->cs.reset(new BCustomizable("B"));

  ASSERT_OK(base->ConfigureFromString(config_options_, "shared={id=nullptr}"));
  ASSERT_EQ(simple->cs, nullptr);
  simple->cs.reset(new BCustomizable("B"));

  ASSERT_OK(base->ConfigureFromString(config_options_, "shared.id="));
  ASSERT_EQ(simple->cs, nullptr);
  simple->cs.reset(new BCustomizable("B"));

  ASSERT_OK(base->ConfigureFromString(config_options_, "shared.id=nullptr"));
  ASSERT_EQ(simple->cs, nullptr);
  simple->cs.reset(new BCustomizable("B"));

  ASSERT_OK(base->ConfigureFromString(config_options_, "shared=nullptr"));
  ASSERT_EQ(simple->cs, nullptr);
}
|
|
|
|
|
|
|
|
// Tests every spelling of "empty"/"nullptr" for a raw-pointer (static)
// customizable option: each one leaves the pointer nullptr.
TEST_F(CustomizableTest, NewEmptyStaticTest) {
  std::unique_ptr<Configurable> base(new SimpleConfigurable());
  ASSERT_OK(base->ConfigureFromString(config_options_, "pointer={id=}"));
  SimpleOptions* simple = base->GetOptions<SimpleOptions>();
  ASSERT_NE(simple, nullptr);
  ASSERT_EQ(simple->cp, nullptr);
  ASSERT_OK(base->ConfigureFromString(config_options_, "pointer={id=nullptr}"));
  ASSERT_EQ(simple->cp, nullptr);

  ASSERT_OK(base->ConfigureFromString(config_options_, "pointer="));
  ASSERT_EQ(simple->cp, nullptr);
  ASSERT_OK(base->ConfigureFromString(config_options_, "pointer=nullptr"));
  ASSERT_EQ(simple->cp, nullptr);

  ASSERT_OK(base->ConfigureFromString(config_options_, "pointer.id="));
  ASSERT_EQ(simple->cp, nullptr);
  ASSERT_OK(base->ConfigureFromString(config_options_, "pointer.id=nullptr"));
  ASSERT_EQ(simple->cp, nullptr);
}
|
|
|
|
|
|
|
|
namespace {
// Option map for a vector of shared TestCustomizable objects; each element
// is itself treated as a custom shared pointer option.
static std::unordered_map<std::string, OptionTypeInfo> vector_option_info = {
    {"vector",
     OptionTypeInfo::Vector<std::shared_ptr<TestCustomizable>>(
         0, OptionVerificationType::kNormal,
         OptionTypeFlags::kNone,
         OptionTypeInfo::AsCustomSharedPtr<TestCustomizable>(
             0, OptionVerificationType::kNormal, OptionTypeFlags::kNone))},
};
// SimpleConfigurable extended with a configurable vector option ("vector").
class VectorConfigurable : public SimpleConfigurable {
 public:
  VectorConfigurable() { RegisterOptions("vector", &cv, &vector_option_info); }
  std::vector<std::shared_ptr<TestCustomizable>> cv;
};
}  // namespace
|
|
|
|
|
|
|
|
// Tests that a vector of customizables serializes and round-trips into an
// equivalent configurable.
TEST_F(CustomizableTest, VectorConfigTest) {
  VectorConfigurable orig, copy;
  std::shared_ptr<TestCustomizable> c1, c2;
  ASSERT_OK(TestCustomizable::CreateFromString(config_options_, "A", &c1));
  ASSERT_OK(TestCustomizable::CreateFromString(config_options_, "B", &c2));
  orig.cv.push_back(c1);
  orig.cv.push_back(c2);
  ASSERT_OK(orig.ConfigureFromString(config_options_, "unique=A2"));
  std::string opt_str, mismatch;
  ASSERT_OK(orig.GetOptionString(config_options_, &opt_str));
  ASSERT_OK(copy.ConfigureFromString(config_options_, opt_str));
  ASSERT_TRUE(orig.AreEquivalent(config_options_, &copy, &mismatch));
}
|
|
|
|
|
|
|
|
TEST_F(CustomizableTest, NoNameTest) {
  // If Customizables are created without names, they are not
  // part of the serialization (since they cannot be recreated)
  VectorConfigurable orig, copy;
  auto sopts = orig.GetOptions<SimpleOptions>();
  auto copts = copy.GetOptions<SimpleOptions>();
  sopts->cu.reset(new ACustomizable(""));
  // One nameless element and one named element in the vector.
  orig.cv.push_back(std::make_shared<ACustomizable>(""));
  orig.cv.push_back(std::make_shared<ACustomizable>("A_1"));
  std::string opt_str, mismatch;
  ASSERT_OK(orig.GetOptionString(config_options_, &opt_str));
  ASSERT_OK(copy.ConfigureFromString(config_options_, opt_str));
  // Only the named element ("A_1") survives the round trip.
  ASSERT_EQ(copy.cv.size(), 1U);
  ASSERT_EQ(copy.cv[0]->GetId(), "A_1");
  // The nameless unique customizable was not serialized either.
  ASSERT_EQ(copts->cu, nullptr);
}
|
|
|
|
|
|
|
|
|
2020-11-11 23:09:14 +00:00
|
|
|
// Tests LoadSharedObject/LoadUniqueObject/LoadStaticObject with an id that
// has no registered factory: strict mode fails; ignore_unsupported_options
// succeeds but leaves the result null, even with extra (bad) options.
TEST_F(CustomizableTest, IgnoreUnknownObjects) {
  ConfigOptions ignore = config_options_;
  std::shared_ptr<TestCustomizable> shared;
  std::unique_ptr<TestCustomizable> unique;
  TestCustomizable* pointer = nullptr;
  ignore.ignore_unsupported_options = false;
  ASSERT_NOK(LoadSharedObject<TestCustomizable>(ignore, "Unknown", &shared));
  ASSERT_NOK(LoadUniqueObject<TestCustomizable>(ignore, "Unknown", &unique));
  ASSERT_NOK(LoadStaticObject<TestCustomizable>(ignore, "Unknown", &pointer));
  ASSERT_EQ(shared.get(), nullptr);
  ASSERT_EQ(unique.get(), nullptr);
  ASSERT_EQ(pointer, nullptr);
  ignore.ignore_unsupported_options = true;
  ASSERT_OK(LoadSharedObject<TestCustomizable>(ignore, "Unknown", &shared));
  ASSERT_OK(LoadUniqueObject<TestCustomizable>(ignore, "Unknown", &unique));
  ASSERT_OK(LoadStaticObject<TestCustomizable>(ignore, "Unknown", &pointer));
  ASSERT_EQ(shared.get(), nullptr);
  ASSERT_EQ(unique.get(), nullptr);
  ASSERT_EQ(pointer, nullptr);
  // The "id=" form behaves the same as the bare name.
  ASSERT_OK(LoadSharedObject<TestCustomizable>(ignore, "id=Unknown", &shared));
  ASSERT_OK(LoadUniqueObject<TestCustomizable>(ignore, "id=Unknown", &unique));
  ASSERT_OK(LoadStaticObject<TestCustomizable>(ignore, "id=Unknown", &pointer));
  ASSERT_EQ(shared.get(), nullptr);
  ASSERT_EQ(unique.get(), nullptr);
  ASSERT_EQ(pointer, nullptr);
  // Extra options on an unknown id are ignored as well.
  ASSERT_OK(LoadSharedObject<TestCustomizable>(ignore, "id=Unknown;option=bad",
                                               &shared));
  ASSERT_OK(LoadUniqueObject<TestCustomizable>(ignore, "id=Unknown;option=bad",
                                               &unique));
  ASSERT_OK(LoadStaticObject<TestCustomizable>(ignore, "id=Unknown;option=bad",
                                               &pointer));
  ASSERT_EQ(shared.get(), nullptr);
  ASSERT_EQ(unique.get(), nullptr);
  ASSERT_EQ(pointer, nullptr);
}
|
|
|
|
|
2021-07-16 22:04:29 +00:00
|
|
|
// Tests a pattern-entry factory with an empty separator ("Z" followed by
// anything): the entire URL-like string, including '=' ';' and '?', becomes
// the object's id.
TEST_F(CustomizableTest, URLFactoryTest) {
  std::unique_ptr<TestCustomizable> unique;
  config_options_.registry->AddLibrary("URL")->AddFactory<TestCustomizable>(
      ObjectLibrary::PatternEntry("Z", false).AddSeparator(""),
      [](const std::string& name, std::unique_ptr<TestCustomizable>* guard,
         std::string* /* msg */) {
        // The full matched name (not just "Z") becomes the id.
        guard->reset(new TestCustomizable(name));
        return guard->get();
      });

  ConfigOptions ignore = config_options_;
  // NOTE(review): the original set ignore_unsupported_options=false twice on
  // consecutive lines; the redundant duplicate statement was removed.
  ignore.ignore_unsupported_options = false;
  ASSERT_OK(TestCustomizable::CreateFromString(ignore, "Z=1;x=y", &unique));
  ASSERT_NE(unique, nullptr);
  ASSERT_EQ(unique->GetId(), "Z=1;x=y");
  ASSERT_OK(TestCustomizable::CreateFromString(ignore, "Z;x=y", &unique));
  ASSERT_NE(unique, nullptr);
  ASSERT_EQ(unique->GetId(), "Z;x=y");
  unique.reset();
  ASSERT_OK(TestCustomizable::CreateFromString(ignore, "Z=1?x=y", &unique));
  ASSERT_NE(unique, nullptr);
  ASSERT_EQ(unique->GetId(), "Z=1?x=y");
}
|
|
|
|
|
2021-02-19 18:25:39 +00:00
|
|
|
// Tests OptionTypeFlags::kMutable: when mutable_options_only is set, only
// options flagged kMutable (and mutable options nested inside immutable
// objects) may be changed or serialized.
TEST_F(CustomizableTest, MutableOptionsTest) {
  static std::unordered_map<std::string, OptionTypeInfo> mutable_option_info = {
      {"mutable",
       OptionTypeInfo::AsCustomSharedPtr<TestCustomizable>(
           0, OptionVerificationType::kNormal, OptionTypeFlags::kMutable)}};
  static std::unordered_map<std::string, OptionTypeInfo> immutable_option_info =
      {{"immutable",
        OptionTypeInfo::AsCustomSharedPtr<TestCustomizable>(
            0, OptionVerificationType::kNormal, OptionTypeFlags::kAllowNull)}};

  // Holds one mutable and one immutable nested TestCustomizable.
  class MutableCustomizable : public Customizable {
   private:
    std::shared_ptr<TestCustomizable> mutable_;
    std::shared_ptr<TestCustomizable> immutable_;

   public:
    MutableCustomizable() {
      RegisterOptions("mutable", &mutable_, &mutable_option_info);
      RegisterOptions("immutable", &immutable_, &immutable_option_info);
    }
    const char* Name() const override { return "MutableCustomizable"; }
  };
  MutableCustomizable mc, mc2;
  std::string mismatch;
  std::string opt_str;

  ConfigOptions options = config_options_;
  ASSERT_OK(mc.ConfigureOption(options, "mutable", "{id=B;}"));
  // Mutable-only serialization round-trips into an equivalent object.
  options.mutable_options_only = true;
  ASSERT_OK(mc.GetOptionString(options, &opt_str));
  ASSERT_OK(mc2.ConfigureFromString(options, opt_str));
  ASSERT_TRUE(mc.AreEquivalent(options, &mc2, &mismatch));

  options.mutable_options_only = false;
  ASSERT_OK(mc.ConfigureOption(options, "immutable", "{id=A; int=10}"));
  auto* mm = mc.GetOptions<std::shared_ptr<TestCustomizable>>("mutable");
  auto* im = mc.GetOptions<std::shared_ptr<TestCustomizable>>("immutable");
  ASSERT_NE(mm, nullptr);
  ASSERT_NE(mm->get(), nullptr);
  ASSERT_NE(im, nullptr);
  ASSERT_NE(im->get(), nullptr);

  // Now only deal with mutable options
  options.mutable_options_only = true;

  // Setting nested immutable customizable options fails
  ASSERT_NOK(mc.ConfigureOption(options, "immutable", "{id=B;}"));
  ASSERT_NOK(mc.ConfigureOption(options, "immutable.id", "B"));
  ASSERT_NOK(mc.ConfigureOption(options, "immutable.bool", "true"));
  ASSERT_NOK(mc.ConfigureOption(options, "immutable", "bool=true"));
  ASSERT_NOK(mc.ConfigureOption(options, "immutable", "{int=11;bool=true}"));
  auto* im_a = im->get()->GetOptions<AOptions>("A");
  ASSERT_NE(im_a, nullptr);
  ASSERT_EQ(im_a->i, 10);
  ASSERT_EQ(im_a->b, false);

  // Setting nested mutable customizable options succeeds but the object did
  // not change
  ASSERT_OK(mc.ConfigureOption(options, "immutable.int", "11"));
  ASSERT_EQ(im_a->i, 11);
  ASSERT_EQ(im_a, im->get()->GetOptions<AOptions>("A"));

  // The mutable configurable itself can be changed
  ASSERT_OK(mc.ConfigureOption(options, "mutable.id", "A"));
  ASSERT_OK(mc.ConfigureOption(options, "mutable", "A"));
  ASSERT_OK(mc.ConfigureOption(options, "mutable", "{id=A}"));
  ASSERT_OK(mc.ConfigureOption(options, "mutable", "{bool=true}"));

  // The Nested options in the mutable object can be changed
  ASSERT_OK(mc.ConfigureOption(options, "mutable", "{bool=true}"));
  auto* mm_a = mm->get()->GetOptions<AOptions>("A");
  ASSERT_EQ(mm_a->b, true);
  ASSERT_OK(mc.ConfigureOption(options, "mutable", "{int=22;bool=false}"));
  mm_a = mm->get()->GetOptions<AOptions>("A");
  ASSERT_EQ(mm_a->i, 22);
  ASSERT_EQ(mm_a->b, false);

  // Only the mutable options should get serialized
  options.mutable_options_only = false;
  ASSERT_OK(mc.GetOptionString(options, &opt_str));
  // Change an immutable option; it will differ only when comparing all
  // options, not when comparing mutable options only.
  ASSERT_OK(mc.ConfigureOption(options, "immutable", "{id=B;}"));
  options.mutable_options_only = true;

  ASSERT_OK(mc.GetOptionString(options, &opt_str));
  ASSERT_OK(mc2.ConfigureFromString(options, opt_str));
  ASSERT_TRUE(mc.AreEquivalent(options, &mc2, &mismatch));
  options.mutable_options_only = false;
  ASSERT_FALSE(mc.AreEquivalent(options, &mc2, &mismatch));
  ASSERT_EQ(mismatch, "immutable");
}
|
2021-09-10 12:19:47 +00:00
|
|
|
|
|
|
|
// Tests LoadManagedObject: loading the same id returns the same shared
// instance; different ids (or reloading after all references are dropped)
// produce distinct instances.
TEST_F(CustomizableTest, CustomManagedObjects) {
  std::shared_ptr<TestCustomizable> object1, object2;
  ASSERT_OK(LoadManagedObject<TestCustomizable>(
      config_options_, "id=A_1;int=1;bool=true", &object1));
  ASSERT_NE(object1, nullptr);
  // Loading "A_1" again returns the identical managed instance.
  ASSERT_OK(
      LoadManagedObject<TestCustomizable>(config_options_, "A_1", &object2));
  ASSERT_EQ(object1, object2);
  auto* opts = object2->GetOptions<AOptions>("A");
  ASSERT_NE(opts, nullptr);
  ASSERT_EQ(opts->i, 1);
  ASSERT_EQ(opts->b, true);
  // A different id yields a different object.
  ASSERT_OK(
      LoadManagedObject<TestCustomizable>(config_options_, "A_2", &object2));
  ASSERT_NE(object1, object2);
  object1.reset();
  // After the last reference is dropped, reloading "A_1" creates a fresh
  // object with the new option values.
  ASSERT_OK(LoadManagedObject<TestCustomizable>(
      config_options_, "id=A_1;int=2;bool=false", &object1));
  opts = object1->GetOptions<AOptions>("A");
  ASSERT_NE(opts, nullptr);
  ASSERT_EQ(opts->i, 2);
  ASSERT_EQ(opts->b, false);
}
|
|
|
|
|
|
|
|
// Exercises the ObjectRegistry's managed-object lifecycle with a local
// Customizable class whose instances carry individually generated ids
// (GenerateIndividualId), registered via PatternEntry::AsIndividualId.
// Verifies: lookup by name/individual-id returns the managed instance,
// ListManagedObjects tracks live objects, and releasing all shared_ptr
// references removes an object from the registry.
TEST_F(CustomizableTest, CreateManagedObjects) {
  class ManagedCustomizable : public Customizable {
   public:
    static const char* Type() { return "ManagedCustomizable"; }
    static const char* kClassName() { return "Managed"; }
    const char* Name() const override { return kClassName(); }
    // Each instance reports its own generated id, not the class name.
    std::string GetId() const override { return id_; }
    ManagedCustomizable() { id_ = GenerateIndividualId(); }
    static Status CreateFromString(
        const ConfigOptions& opts, const std::string& value,
        std::shared_ptr<ManagedCustomizable>* result) {
      // Routes creation through the registry's managed-object cache.
      return LoadManagedObject<ManagedCustomizable>(opts, value, result);
    }

   private:
    std::string id_;
  };

  // Register a factory that matches both the class name and any
  // individual-id form of it ("Managed@<addr>#<counter>").
  config_options_.registry->AddLibrary("Managed")
      ->AddFactory<ManagedCustomizable>(
          ObjectLibrary::PatternEntry::AsIndividualId(
              ManagedCustomizable::kClassName()),
          [](const std::string& /*name*/,
             std::unique_ptr<ManagedCustomizable>* guard,
             std::string* /* msg */) {
            guard->reset(new ManagedCustomizable());
            return guard->get();
          });

  std::shared_ptr<ManagedCustomizable> mc1, mc2, mc3, obj;
  // Create a "deadbeef" customizable
  std::string deadbeef =
      std::string(ManagedCustomizable::kClassName()) + "@0xdeadbeef#0001";
  ASSERT_OK(
      ManagedCustomizable::CreateFromString(config_options_, deadbeef, &mc1));
  // Create an object with the base/class name
  ASSERT_OK(ManagedCustomizable::CreateFromString(
      config_options_, ManagedCustomizable::kClassName(), &mc2));
  // Creating another with the base name returns a different object
  ASSERT_OK(ManagedCustomizable::CreateFromString(
      config_options_, ManagedCustomizable::kClassName(), &mc3));
  // At this point, there should be 4 managed objects (deadbeef, mc1, 2, and 3)
  std::vector<std::shared_ptr<ManagedCustomizable>> objects;
  ASSERT_OK(config_options_.registry->ListManagedObjects(&objects));
  ASSERT_EQ(objects.size(), 4U);
  objects.clear();
  // Three separate object, none of them equal
  ASSERT_NE(mc1, mc2);
  ASSERT_NE(mc1, mc3);
  ASSERT_NE(mc2, mc3);

  // Creating another object with "deadbeef" object
  ASSERT_OK(
      ManagedCustomizable::CreateFromString(config_options_, deadbeef, &obj));
  ASSERT_EQ(mc1, obj);
  // Create another with the IDs of the instances
  ASSERT_OK(ManagedCustomizable::CreateFromString(config_options_, mc1->GetId(),
                                                  &obj));
  ASSERT_EQ(mc1, obj);
  ASSERT_OK(ManagedCustomizable::CreateFromString(config_options_, mc2->GetId(),
                                                  &obj));
  ASSERT_EQ(mc2, obj);
  ASSERT_OK(ManagedCustomizable::CreateFromString(config_options_, mc3->GetId(),
                                                  &obj));
  ASSERT_EQ(mc3, obj);

  // Now get rid of deadbeef. 2 Objects left (m2+m3)
  mc1.reset();
  ASSERT_EQ(
      config_options_.registry->GetManagedObject<ManagedCustomizable>(deadbeef),
      nullptr);
  ASSERT_OK(config_options_.registry->ListManagedObjects(&objects));
  ASSERT_EQ(objects.size(), 2U);
  objects.clear();

  // Associate deadbeef with #2
  ASSERT_OK(config_options_.registry->SetManagedObject(deadbeef, mc2));
  ASSERT_OK(
      ManagedCustomizable::CreateFromString(config_options_, deadbeef, &obj));
  ASSERT_EQ(mc2, obj);
  obj.reset();

  // Get the ID of mc2 and then reset it. 1 Object left
  std::string mc2id = mc2->GetId();
  mc2.reset();
  // After the last reference dies, the registry no longer knows the id.
  ASSERT_EQ(
      config_options_.registry->GetManagedObject<ManagedCustomizable>(mc2id),
      nullptr);
  ASSERT_OK(config_options_.registry->ListManagedObjects(&objects));
  ASSERT_EQ(objects.size(), 1U);
  objects.clear();

  // Create another object with the old mc2id.
  ASSERT_OK(
      ManagedCustomizable::CreateFromString(config_options_, mc2id, &mc2));
  ASSERT_OK(
      ManagedCustomizable::CreateFromString(config_options_, mc2id, &obj));
  ASSERT_EQ(mc2, obj);

  // For good measure, create another deadbeef object
  ASSERT_OK(
      ManagedCustomizable::CreateFromString(config_options_, deadbeef, &mc1));
  ASSERT_OK(
      ManagedCustomizable::CreateFromString(config_options_, deadbeef, &obj));
  ASSERT_EQ(mc1, obj);
}
|
|
|
|
|
2020-11-11 23:09:14 +00:00
|
|
|
|
2021-08-19 17:09:30 +00:00
|
|
|
namespace {
|
2021-07-06 16:17:13 +00:00
|
|
|
class TestSecondaryCache : public SecondaryCache {
|
|
|
|
public:
|
|
|
|
static const char* kClassName() { return "Test"; }
|
2021-07-12 16:03:41 +00:00
|
|
|
const char* Name() const override { return kClassName(); }
|
Major Cache refactoring, CPU efficiency improvement (#10975)
Summary:
This is several refactorings bundled into one to avoid having to incrementally re-modify uses of Cache several times. Overall, there are breaking changes to Cache class, and it becomes more of low-level interface for implementing caches, especially block cache. New internal APIs make using Cache cleaner than before, and more insulated from block cache evolution. Hopefully, this is the last really big block cache refactoring, because of rather effectively decoupling the implementations from the uses. This change also removes the EXPERIMENTAL designation on the SecondaryCache support in Cache. It seems reasonably mature at this point but still subject to change/evolution (as I warn in the API docs for Cache).
The high-level motivation for this refactoring is to minimize code duplication / compounding complexity in adding SecondaryCache support to HyperClockCache (in a later PR). Other benefits listed below.
* static_cast lines of code +29 -35 (net removed 6)
* reinterpret_cast lines of code +6 -32 (net removed 26)
## cache.h and secondary_cache.h
* Always use CacheItemHelper with entries instead of just a Deleter. There are several motivations / justifications:
* Simpler for implementations to deal with just one Insert and one Lookup.
* Simpler and more efficient implementation because we don't have to track which entries are using helpers and which are using deleters
* Gets rid of hack to classify cache entries by their deleter. Instead, the CacheItemHelper includes a CacheEntryRole. This simplifies a lot of code (cache_entry_roles.h almost eliminated). Fixes https://github.com/facebook/rocksdb/issues/9428.
* Makes it trivial to adjust SecondaryCache behavior based on kind of block (e.g. don't re-compress filter blocks).
* It is arguably less convenient for many direct users of Cache, but direct users of Cache are now rare with introduction of typed_cache.h (below).
* I considered and rejected an alternative approach in which we reduce customizability by assuming each secondary cache compatible value starts with a Slice referencing the uncompressed block contents (already true or mostly true), but we apparently intend to stack secondary caches. Saving an entry from a compressed secondary to a lower tier requires custom handling offered by SaveToCallback, etc.
* Make CreateCallback part of the helper and introduce CreateContext to work with it (alternative to https://github.com/facebook/rocksdb/issues/10562). This cleans up the interface while still allowing context to be provided for loading/parsing values into primary cache. This model works for async lookup in BlockBasedTable reader (reader owns a CreateContext) under the assumption that it always waits on secondary cache operations to finish. (Otherwise, the CreateContext could be destroyed while async operation depending on it continues.) This likely contributes most to the observed performance improvement because it saves an std::function backed by a heap allocation.
* Use char* for serialized data, e.g. in SaveToCallback, where void* was confusingly used. (We use `char*` for serialized byte data all over RocksDB, with many advantages over `void*`. `memcpy` etc. are legacy APIs that should not be mimicked.)
* Add a type alias Cache::ObjectPtr = void*, so that we can better indicate the intent of the void* when it is to be the object associated with a Cache entry. Related: started (but did not complete) a refactoring to move away from "value" of a cache entry toward "object" or "obj". (It is confusing to call Cache a key-value store (like DB) when it is really storing arbitrary in-memory objects, not byte strings.)
* Remove unnecessary key param from DeleterFn. This is good for efficiency in HyperClockCache, which does not directly store the cache key in memory. (Alternative to https://github.com/facebook/rocksdb/issues/10774)
* Add allocator to Cache DeleterFn. This is a kind of future-proofing change in case we get more serious about using the Cache allocator for memory tracked by the Cache. Right now, only the uncompressed block contents are allocated using the allocator, and a pointer to that allocator is saved as part of the cached object so that the deleter can use it. (See CacheAllocationPtr.) If in the future we are able to "flatten out" our Cache objects some more, it would be good not to have to track the allocator as part of each object.
* Removes legacy `ApplyToAllCacheEntries` and changes `ApplyToAllEntries` signature for Deleter->CacheItemHelper change.
## typed_cache.h
Adds various "typed" interfaces to the Cache as internal APIs, so that most uses of Cache can use simple type safe code without casting and without explicit deleters, etc. Almost all of the non-test, non-glue code uses of Cache have been migrated. (Follow-up work: CompressedSecondaryCache deserves deeper attention to migrate.) This change expands RocksDB's internal usage of metaprogramming and SFINAE (https://en.cppreference.com/w/cpp/language/sfinae).
The existing usages of Cache are divided up at a high level into these new interfaces. See updated existing uses of Cache for examples of how these are used.
* PlaceholderCacheInterface - Used for making cache reservations, with entries that have a charge but no value.
* BasicTypedCacheInterface<TValue> - Used for primary cache storage of objects of type TValue, which can be cleaned up with std::default_delete<TValue>. The role is provided by TValue::kCacheEntryRole or given in an optional template parameter.
* FullTypedCacheInterface<TValue, TCreateContext> - Used for secondary cache compatible storage of objects of type TValue. In addition to BasicTypedCacheInterface constraints, we require TValue::ContentSlice() to return persistable data. This simplifies usage for the normal case of simple secondary cache compatibility (can give you a Slice to the data already in memory). In addition to TCreateContext performing the role of Cache::CreateContext, it is also expected to provide a factory function for creating TValue.
* For each of these, there's a "Shared" version (e.g. FullTypedSharedCacheInterface) that holds a shared_ptr to the Cache, rather than assuming external ownership by holding only a raw `Cache*`.
These interfaces introduce specific handle types for each interface instantiation, so that it's easy to see what kind of object is controlled by a handle. (Ultimately, this might not be worth the extra complexity, but it seems OK so far.)
Note: I attempted to make the cache 'charge' automatically inferred from the cache object type, such as by expecting an ApproximateMemoryUsage() function, but this is not so clean because there are cases where we need to compute the charge ahead of time and don't want to re-compute it.
## block_cache.h
This header is essentially the replacement for the old block_like_traits.h. It includes various things to support block cache access with typed_cache.h for block-based table.
## block_based_table_reader.cc
Before this change, accessing the block cache here was an awkward mix of static polymorphism (template TBlocklike) and switch-case on a dynamic BlockType value. This change mostly unifies on static polymorphism, relying on minor hacks in block_cache.h to distinguish variants of Block. We still check BlockType in some places (especially for stats, which could be improved in follow-up work) but at least the BlockType is a static constant from the template parameter. (No more awkward partial redundancy between static and dynamic info.) This likely contributes to the overall performance improvement, but hasn't been tested in isolation.
The other key source of simplification here is a more unified system of creating block cache objects: for directly populating from primary cache and for promotion from secondary cache. Both use BlockCreateContext, for context and for factory functions.
## block_based_table_builder.cc, cache_dump_load_impl.cc
Before this change, warming caches was super ugly code. Both of these source files had switch statements to basically transition from the dynamic BlockType world to the static TBlocklike world. None of that mess is needed anymore as there's a new, untyped WarmInCache function that handles all the details just as promotion from SecondaryCache would. (Fixes `TODO akanksha: Dedup below code` in block_based_table_builder.cc.)
## Everything else
Mostly just updating Cache users to use new typed APIs when reasonably possible, or changed Cache APIs when not.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10975
Test Plan:
tests updated
Performance test setup similar to https://github.com/facebook/rocksdb/issues/10626 (by cache size, LRUCache when not "hyper" for HyperClockCache):
34MB 1thread base.hyper -> kops/s: 0.745 io_bytes/op: 2.52504e+06 miss_ratio: 0.140906 max_rss_mb: 76.4844
34MB 1thread new.hyper -> kops/s: 0.751 io_bytes/op: 2.5123e+06 miss_ratio: 0.140161 max_rss_mb: 79.3594
34MB 1thread base -> kops/s: 0.254 io_bytes/op: 1.36073e+07 miss_ratio: 0.918818 max_rss_mb: 45.9297
34MB 1thread new -> kops/s: 0.252 io_bytes/op: 1.36157e+07 miss_ratio: 0.918999 max_rss_mb: 44.1523
34MB 32thread base.hyper -> kops/s: 7.272 io_bytes/op: 2.88323e+06 miss_ratio: 0.162532 max_rss_mb: 516.602
34MB 32thread new.hyper -> kops/s: 7.214 io_bytes/op: 2.99046e+06 miss_ratio: 0.168818 max_rss_mb: 518.293
34MB 32thread base -> kops/s: 3.528 io_bytes/op: 1.35722e+07 miss_ratio: 0.914691 max_rss_mb: 264.926
34MB 32thread new -> kops/s: 3.604 io_bytes/op: 1.35744e+07 miss_ratio: 0.915054 max_rss_mb: 264.488
233MB 1thread base.hyper -> kops/s: 53.909 io_bytes/op: 2552.35 miss_ratio: 0.0440566 max_rss_mb: 241.984
233MB 1thread new.hyper -> kops/s: 62.792 io_bytes/op: 2549.79 miss_ratio: 0.044043 max_rss_mb: 241.922
233MB 1thread base -> kops/s: 1.197 io_bytes/op: 2.75173e+06 miss_ratio: 0.103093 max_rss_mb: 241.559
233MB 1thread new -> kops/s: 1.199 io_bytes/op: 2.73723e+06 miss_ratio: 0.10305 max_rss_mb: 240.93
233MB 32thread base.hyper -> kops/s: 1298.69 io_bytes/op: 2539.12 miss_ratio: 0.0440307 max_rss_mb: 371.418
233MB 32thread new.hyper -> kops/s: 1421.35 io_bytes/op: 2538.75 miss_ratio: 0.0440307 max_rss_mb: 347.273
233MB 32thread base -> kops/s: 9.693 io_bytes/op: 2.77304e+06 miss_ratio: 0.103745 max_rss_mb: 569.691
233MB 32thread new -> kops/s: 9.75 io_bytes/op: 2.77559e+06 miss_ratio: 0.103798 max_rss_mb: 552.82
1597MB 1thread base.hyper -> kops/s: 58.607 io_bytes/op: 1449.14 miss_ratio: 0.0249324 max_rss_mb: 1583.55
1597MB 1thread new.hyper -> kops/s: 69.6 io_bytes/op: 1434.89 miss_ratio: 0.0247167 max_rss_mb: 1584.02
1597MB 1thread base -> kops/s: 60.478 io_bytes/op: 1421.28 miss_ratio: 0.024452 max_rss_mb: 1589.45
1597MB 1thread new -> kops/s: 63.973 io_bytes/op: 1416.07 miss_ratio: 0.0243766 max_rss_mb: 1589.24
1597MB 32thread base.hyper -> kops/s: 1436.2 io_bytes/op: 1357.93 miss_ratio: 0.0235353 max_rss_mb: 1692.92
1597MB 32thread new.hyper -> kops/s: 1605.03 io_bytes/op: 1358.04 miss_ratio: 0.023538 max_rss_mb: 1702.78
1597MB 32thread base -> kops/s: 280.059 io_bytes/op: 1350.34 miss_ratio: 0.023289 max_rss_mb: 1675.36
1597MB 32thread new -> kops/s: 283.125 io_bytes/op: 1351.05 miss_ratio: 0.0232797 max_rss_mb: 1703.83
Almost uniformly improving over base revision, especially for hot paths with HyperClockCache, up to 12% higher throughput seen (1597MB, 32thread, hyper). The improvement for that is likely coming from much simplified code for providing context for secondary cache promotion (CreateCallback/CreateContext), and possibly from less branching in block_based_table_reader. And likely a small improvement from not reconstituting key for DeleterFn.
Reviewed By: anand1976
Differential Revision: D42417818
Pulled By: pdillinger
fbshipit-source-id: f86bfdd584dce27c028b151ba56818ad14f7a432
2023-01-11 22:20:40 +00:00
|
|
|
Status Insert(const Slice& /*key*/, Cache::ObjectPtr /*value*/,
|
2021-07-06 16:17:13 +00:00
|
|
|
const Cache::CacheItemHelper* /*helper*/) override {
|
|
|
|
return Status::NotSupported();
|
|
|
|
}
|
|
|
|
std::unique_ptr<SecondaryCacheResultHandle> Lookup(
|
Major Cache refactoring, CPU efficiency improvement (#10975)
Summary:
This is several refactorings bundled into one to avoid having to incrementally re-modify uses of Cache several times. Overall, there are breaking changes to Cache class, and it becomes more of low-level interface for implementing caches, especially block cache. New internal APIs make using Cache cleaner than before, and more insulated from block cache evolution. Hopefully, this is the last really big block cache refactoring, because of rather effectively decoupling the implementations from the uses. This change also removes the EXPERIMENTAL designation on the SecondaryCache support in Cache. It seems reasonably mature at this point but still subject to change/evolution (as I warn in the API docs for Cache).
The high-level motivation for this refactoring is to minimize code duplication / compounding complexity in adding SecondaryCache support to HyperClockCache (in a later PR). Other benefits listed below.
* static_cast lines of code +29 -35 (net removed 6)
* reinterpret_cast lines of code +6 -32 (net removed 26)
## cache.h and secondary_cache.h
* Always use CacheItemHelper with entries instead of just a Deleter. There are several motivations / justifications:
* Simpler for implementations to deal with just one Insert and one Lookup.
* Simpler and more efficient implementation because we don't have to track which entries are using helpers and which are using deleters
* Gets rid of hack to classify cache entries by their deleter. Instead, the CacheItemHelper includes a CacheEntryRole. This simplifies a lot of code (cache_entry_roles.h almost eliminated). Fixes https://github.com/facebook/rocksdb/issues/9428.
* Makes it trivial to adjust SecondaryCache behavior based on kind of block (e.g. don't re-compress filter blocks).
* It is arguably less convenient for many direct users of Cache, but direct users of Cache are now rare with introduction of typed_cache.h (below).
* I considered and rejected an alternative approach in which we reduce customizability by assuming each secondary cache compatible value starts with a Slice referencing the uncompressed block contents (already true or mostly true), but we apparently intend to stack secondary caches. Saving an entry from a compressed secondary to a lower tier requires custom handling offered by SaveToCallback, etc.
* Make CreateCallback part of the helper and introduce CreateContext to work with it (alternative to https://github.com/facebook/rocksdb/issues/10562). This cleans up the interface while still allowing context to be provided for loading/parsing values into primary cache. This model works for async lookup in BlockBasedTable reader (reader owns a CreateContext) under the assumption that it always waits on secondary cache operations to finish. (Otherwise, the CreateContext could be destroyed while async operation depending on it continues.) This likely contributes most to the observed performance improvement because it saves an std::function backed by a heap allocation.
* Use char* for serialized data, e.g. in SaveToCallback, where void* was confusingly used. (We use `char*` for serialized byte data all over RocksDB, with many advantages over `void*`. `memcpy` etc. are legacy APIs that should not be mimicked.)
* Add a type alias Cache::ObjectPtr = void*, so that we can better indicate the intent of the void* when it is to be the object associated with a Cache entry. Related: started (but did not complete) a refactoring to move away from "value" of a cache entry toward "object" or "obj". (It is confusing to call Cache a key-value store (like DB) when it is really storing arbitrary in-memory objects, not byte strings.)
* Remove unnecessary key param from DeleterFn. This is good for efficiency in HyperClockCache, which does not directly store the cache key in memory. (Alternative to https://github.com/facebook/rocksdb/issues/10774)
* Add allocator to Cache DeleterFn. This is a kind of future-proofing change in case we get more serious about using the Cache allocator for memory tracked by the Cache. Right now, only the uncompressed block contents are allocated using the allocator, and a pointer to that allocator is saved as part of the cached object so that the deleter can use it. (See CacheAllocationPtr.) If in the future we are able to "flatten out" our Cache objects some more, it would be good not to have to track the allocator as part of each object.
* Removes legacy `ApplyToAllCacheEntries` and changes `ApplyToAllEntries` signature for Deleter->CacheItemHelper change.
## typed_cache.h
Adds various "typed" interfaces to the Cache as internal APIs, so that most uses of Cache can use simple type safe code without casting and without explicit deleters, etc. Almost all of the non-test, non-glue code uses of Cache have been migrated. (Follow-up work: CompressedSecondaryCache deserves deeper attention to migrate.) This change expands RocksDB's internal usage of metaprogramming and SFINAE (https://en.cppreference.com/w/cpp/language/sfinae).
The existing usages of Cache are divided up at a high level into these new interfaces. See updated existing uses of Cache for examples of how these are used.
* PlaceholderCacheInterface - Used for making cache reservations, with entries that have a charge but no value.
* BasicTypedCacheInterface<TValue> - Used for primary cache storage of objects of type TValue, which can be cleaned up with std::default_delete<TValue>. The role is provided by TValue::kCacheEntryRole or given in an optional template parameter.
* FullTypedCacheInterface<TValue, TCreateContext> - Used for secondary cache compatible storage of objects of type TValue. In addition to BasicTypedCacheInterface constraints, we require TValue::ContentSlice() to return persistable data. This simplifies usage for the normal case of simple secondary cache compatibility (can give you a Slice to the data already in memory). In addition to TCreateContext performing the role of Cache::CreateContext, it is also expected to provide a factory function for creating TValue.
* For each of these, there's a "Shared" version (e.g. FullTypedSharedCacheInterface) that holds a shared_ptr to the Cache, rather than assuming external ownership by holding only a raw `Cache*`.
These interfaces introduce specific handle types for each interface instantiation, so that it's easy to see what kind of object is controlled by a handle. (Ultimately, this might not be worth the extra complexity, but it seems OK so far.)
Note: I attempted to make the cache 'charge' automatically inferred from the cache object type, such as by expecting an ApproximateMemoryUsage() function, but this is not so clean because there are cases where we need to compute the charge ahead of time and don't want to re-compute it.
## block_cache.h
This header is essentially the replacement for the old block_like_traits.h. It includes various things to support block cache access with typed_cache.h for block-based table.
## block_based_table_reader.cc
Before this change, accessing the block cache here was an awkward mix of static polymorphism (template TBlocklike) and switch-case on a dynamic BlockType value. This change mostly unifies on static polymorphism, relying on minor hacks in block_cache.h to distinguish variants of Block. We still check BlockType in some places (especially for stats, which could be improved in follow-up work) but at least the BlockType is a static constant from the template parameter. (No more awkward partial redundancy between static and dynamic info.) This likely contributes to the overall performance improvement, but hasn't been tested in isolation.
The other key source of simplification here is a more unified system of creating block cache objects: for directly populating from primary cache and for promotion from secondary cache. Both use BlockCreateContext, for context and for factory functions.
## block_based_table_builder.cc, cache_dump_load_impl.cc
Before this change, warming caches was super ugly code. Both of these source files had switch statements to basically transition from the dynamic BlockType world to the static TBlocklike world. None of that mess is needed anymore as there's a new, untyped WarmInCache function that handles all the details just as promotion from SecondaryCache would. (Fixes `TODO akanksha: Dedup below code` in block_based_table_builder.cc.)
## Everything else
Mostly just updating Cache users to use new typed APIs when reasonably possible, or changed Cache APIs when not.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10975
Test Plan:
tests updated
Performance test setup similar to https://github.com/facebook/rocksdb/issues/10626 (by cache size, LRUCache when not "hyper" for HyperClockCache):
34MB 1thread base.hyper -> kops/s: 0.745 io_bytes/op: 2.52504e+06 miss_ratio: 0.140906 max_rss_mb: 76.4844
34MB 1thread new.hyper -> kops/s: 0.751 io_bytes/op: 2.5123e+06 miss_ratio: 0.140161 max_rss_mb: 79.3594
34MB 1thread base -> kops/s: 0.254 io_bytes/op: 1.36073e+07 miss_ratio: 0.918818 max_rss_mb: 45.9297
34MB 1thread new -> kops/s: 0.252 io_bytes/op: 1.36157e+07 miss_ratio: 0.918999 max_rss_mb: 44.1523
34MB 32thread base.hyper -> kops/s: 7.272 io_bytes/op: 2.88323e+06 miss_ratio: 0.162532 max_rss_mb: 516.602
34MB 32thread new.hyper -> kops/s: 7.214 io_bytes/op: 2.99046e+06 miss_ratio: 0.168818 max_rss_mb: 518.293
34MB 32thread base -> kops/s: 3.528 io_bytes/op: 1.35722e+07 miss_ratio: 0.914691 max_rss_mb: 264.926
34MB 32thread new -> kops/s: 3.604 io_bytes/op: 1.35744e+07 miss_ratio: 0.915054 max_rss_mb: 264.488
233MB 1thread base.hyper -> kops/s: 53.909 io_bytes/op: 2552.35 miss_ratio: 0.0440566 max_rss_mb: 241.984
233MB 1thread new.hyper -> kops/s: 62.792 io_bytes/op: 2549.79 miss_ratio: 0.044043 max_rss_mb: 241.922
233MB 1thread base -> kops/s: 1.197 io_bytes/op: 2.75173e+06 miss_ratio: 0.103093 max_rss_mb: 241.559
233MB 1thread new -> kops/s: 1.199 io_bytes/op: 2.73723e+06 miss_ratio: 0.10305 max_rss_mb: 240.93
233MB 32thread base.hyper -> kops/s: 1298.69 io_bytes/op: 2539.12 miss_ratio: 0.0440307 max_rss_mb: 371.418
233MB 32thread new.hyper -> kops/s: 1421.35 io_bytes/op: 2538.75 miss_ratio: 0.0440307 max_rss_mb: 347.273
233MB 32thread base -> kops/s: 9.693 io_bytes/op: 2.77304e+06 miss_ratio: 0.103745 max_rss_mb: 569.691
233MB 32thread new -> kops/s: 9.75 io_bytes/op: 2.77559e+06 miss_ratio: 0.103798 max_rss_mb: 552.82
1597MB 1thread base.hyper -> kops/s: 58.607 io_bytes/op: 1449.14 miss_ratio: 0.0249324 max_rss_mb: 1583.55
1597MB 1thread new.hyper -> kops/s: 69.6 io_bytes/op: 1434.89 miss_ratio: 0.0247167 max_rss_mb: 1584.02
1597MB 1thread base -> kops/s: 60.478 io_bytes/op: 1421.28 miss_ratio: 0.024452 max_rss_mb: 1589.45
1597MB 1thread new -> kops/s: 63.973 io_bytes/op: 1416.07 miss_ratio: 0.0243766 max_rss_mb: 1589.24
1597MB 32thread base.hyper -> kops/s: 1436.2 io_bytes/op: 1357.93 miss_ratio: 0.0235353 max_rss_mb: 1692.92
1597MB 32thread new.hyper -> kops/s: 1605.03 io_bytes/op: 1358.04 miss_ratio: 0.023538 max_rss_mb: 1702.78
1597MB 32thread base -> kops/s: 280.059 io_bytes/op: 1350.34 miss_ratio: 0.023289 max_rss_mb: 1675.36
1597MB 32thread new -> kops/s: 283.125 io_bytes/op: 1351.05 miss_ratio: 0.0232797 max_rss_mb: 1703.83
Almost uniformly improving over base revision, especially for hot paths with HyperClockCache, up to 12% higher throughput seen (1597MB, 32thread, hyper). The improvement for that is likely coming from much simplified code for providing context for secondary cache promotion (CreateCallback/CreateContext), and possibly from less branching in block_based_table_reader. And likely a small improvement from not reconstituting key for DeleterFn.
Reviewed By: anand1976
Differential Revision: D42417818
Pulled By: pdillinger
fbshipit-source-id: f86bfdd584dce27c028b151ba56818ad14f7a432
2023-01-11 22:20:40 +00:00
|
|
|
const Slice& /*key*/, const Cache::CacheItemHelper* /*helper*/,
|
|
|
|
Cache::CreateContext* /*create_context*/, bool /*wait*/,
|
2023-03-15 19:08:17 +00:00
|
|
|
bool /*advise_erase*/, bool& kept_in_sec_cache) override {
|
|
|
|
kept_in_sec_cache = true;
|
2021-07-06 16:17:13 +00:00
|
|
|
return nullptr;
|
|
|
|
}
|
Avoid recompressing cold block in CompressedSecondaryCache (#10527)
Summary:
**Summary:**
When a block is firstly `Lookup` from the secondary cache, we just insert a dummy block in the primary cache (charging the actual size of the block) and don’t erase the block from the secondary cache. A standalone handle is returned from `Lookup`. Only if the block is hit again, we erase it from the secondary cache and add it into the primary cache.
When a block is firstly evicted from the primary cache to the secondary cache, we just insert a dummy block (size 0) in the secondary cache. When the block is evicted again, it is treated as a hot block and is inserted into the secondary cache.
**Implementation Details**
Add a new state of LRUHandle: The handle is never inserted into the LRUCache (both hash table and LRU list) and it doesn't experience the above three states. The entry can be freed when refs becomes 0. (refs >= 1 && in_cache == false && IS_STANDALONE == true)
The behaviors of `LRUCacheShard::Lookup()` are updated if the secondary_cache is CompressedSecondaryCache:
1. If a handle is found in primary cache:
1.1. If the handle's value is not nullptr, it is returned immediately.
1.2. If the handle's value is nullptr, this means the handle is a dummy one. For a dummy handle, if it was retrieved from secondary cache, it may still exist in secondary cache.
- 1.2.1. If no valid handle can be `Lookup` from secondary cache, return nullptr.
- 1.2.2. If the handle from secondary cache is valid, erase it from the secondary cache and add it into the primary cache.
2. If a handle is not found in primary cache:
2.1. If no valid handle can be `Lookup` from secondary cache, return nullptr.
2.2. If the handle from secondary cache is valid, insert a dummy block in the primary cache (charging the actual size of the block) and return a standalone handle.
The behaviors of `LRUCacheShard::Promote()` are updated as follows:
1. If `e->sec_handle` has value, one of the following steps can happen:
1.1. Insert a dummy handle and return a standalone handle to caller when `secondary_cache_` is `CompressedSecondaryCache` and e is a standalone handle.
1.2. Insert the item into the primary cache and return the handle to caller.
1.3. Exception handling.
3. If `e->sec_handle` has no value, mark the item as not in cache and charge the cache as its only metadata that'll shortly be released.
The behavior of `CompressedSecondaryCache::Insert()` is updated:
1. If a block is evicted from the primary cache for the first time, a dummy item is inserted.
4. If a dummy item is found for a block, the block is inserted into the secondary cache.
The behavior of `CompressedSecondaryCache:::Lookup()` is updated:
1. If a handle is not found or it is a dummy item, a nullptr is returned.
2. If `erase_handle` is true, the handle is erased.
The behaviors of `LRUCacheShard::Release()` are adjusted for the standalone handles.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10527
Test Plan:
1. stress tests.
5. unit tests.
6. CPU profiling for db_bench.
Reviewed By: siying
Differential Revision: D38747613
Pulled By: gitbw95
fbshipit-source-id: 74a1eba7e1957c9affb2bd2ae3e0194584fa6eca
2022-09-08 02:00:27 +00:00
|
|
|
|
|
|
|
// This mock secondary cache does not support force-erasing entries.
bool SupportForceErase() const override { return false; }
|
|
|
|
|
2021-07-06 16:17:13 +00:00
|
|
|
// No-op: the mock keeps no per-key state to erase.
void Erase(const Slice& /*key*/) override {}
|
|
|
|
|
|
|
|
// Wait for a collection of handles to become ready.
// No-op in this mock: lookups complete synchronously.
void WaitAll(std::vector<SecondaryCacheResultHandle*> /*handles*/) override {}
|
|
|
|
|
|
|
|
// The mock exposes no configuration worth printing.
std::string GetPrintableOptions() const override { return ""; }
|
|
|
|
};
|
|
|
|
|
2021-09-10 16:46:47 +00:00
|
|
|
class TestStatistics : public StatisticsImpl {
|
|
|
|
public:
|
|
|
|
TestStatistics() : StatisticsImpl(nullptr) {}
|
|
|
|
const char* Name() const override { return kClassName(); }
|
|
|
|
static const char* kClassName() { return "Test"; }
|
|
|
|
};
|
|
|
|
|
|
|
|
class TestFlushBlockPolicyFactory : public FlushBlockPolicyFactory {
|
|
|
|
public:
|
|
|
|
TestFlushBlockPolicyFactory() {}
|
|
|
|
|
|
|
|
static const char* kClassName() { return "TestFlushBlockPolicyFactory"; }
|
|
|
|
const char* Name() const override { return kClassName(); }
|
|
|
|
|
|
|
|
FlushBlockPolicy* NewFlushBlockPolicy(
|
|
|
|
const BlockBasedTableOptions& /*table_options*/,
|
|
|
|
const BlockBuilder& /*data_block_builder*/) const override {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2021-09-27 14:42:36 +00:00
|
|
|
class MockSliceTransform : public SliceTransform {
|
|
|
|
public:
|
|
|
|
const char* Name() const override { return kClassName(); }
|
|
|
|
static const char* kClassName() { return "Mock"; }
|
|
|
|
|
|
|
|
Slice Transform(const Slice& /*key*/) const override { return Slice(); }
|
|
|
|
|
|
|
|
bool InDomain(const Slice& /*key*/) const override { return false; }
|
|
|
|
|
|
|
|
bool InRange(const Slice& /*key*/) const override { return false; }
|
|
|
|
};
|
|
|
|
|
2021-12-17 12:19:34 +00:00
|
|
|
class MockMemoryAllocator : public BaseMemoryAllocator {
|
|
|
|
public:
|
|
|
|
static const char* kClassName() { return "MockMemoryAllocator"; }
|
|
|
|
const char* Name() const override { return kClassName(); }
|
|
|
|
};
|
|
|
|
|
2021-07-16 14:57:47 +00:00
|
|
|
class MockEncryptionProvider : public EncryptionProvider {
|
|
|
|
public:
|
|
|
|
explicit MockEncryptionProvider(const std::string& id) : id_(id) {}
|
2021-12-29 15:55:17 +00:00
|
|
|
static const char* kClassName() { return "Mock"; }
|
|
|
|
const char* Name() const override { return kClassName(); }
|
2021-07-16 14:57:47 +00:00
|
|
|
size_t GetPrefixLength() const override { return 0; }
|
|
|
|
Status CreateNewPrefix(const std::string& /*fname*/, char* /*prefix*/,
|
|
|
|
size_t /*prefixLength*/) const override {
|
|
|
|
return Status::NotSupported();
|
|
|
|
}
|
|
|
|
|
|
|
|
Status AddCipher(const std::string& /*descriptor*/, const char* /*cipher*/,
|
|
|
|
size_t /*len*/, bool /*for_write*/) override {
|
|
|
|
return Status::NotSupported();
|
|
|
|
}
|
|
|
|
|
|
|
|
Status CreateCipherStream(
|
|
|
|
const std::string& /*fname*/, const EnvOptions& /*options*/,
|
|
|
|
Slice& /*prefix*/,
|
|
|
|
std::unique_ptr<BlockAccessCipherStream>* /*result*/) override {
|
|
|
|
return Status::NotSupported();
|
|
|
|
}
|
|
|
|
Status ValidateOptions(const DBOptions& db_opts,
|
|
|
|
const ColumnFamilyOptions& cf_opts) const override {
|
|
|
|
if (EndsWith(id_, "://test")) {
|
|
|
|
return EncryptionProvider::ValidateOptions(db_opts, cf_opts);
|
|
|
|
} else {
|
|
|
|
return Status::InvalidArgument("MockProvider not initialized");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
std::string id_;
|
|
|
|
};
|
|
|
|
|
|
|
|
class MockCipher : public BlockCipher {
|
|
|
|
public:
|
|
|
|
const char* Name() const override { return "Mock"; }
|
|
|
|
size_t BlockSize() override { return 0; }
|
|
|
|
Status Encrypt(char* /*data*/) override { return Status::NotSupported(); }
|
|
|
|
Status Decrypt(char* data) override { return Encrypt(data); }
|
|
|
|
};
|
2021-11-02 16:06:02 +00:00
|
|
|
|
|
|
|
class DummyFileSystem : public FileSystemWrapper {
|
|
|
|
public:
|
|
|
|
explicit DummyFileSystem(const std::shared_ptr<FileSystem>& t)
|
|
|
|
: FileSystemWrapper(t) {}
|
|
|
|
static const char* kClassName() { return "DummyFileSystem"; }
|
|
|
|
const char* Name() const override { return kClassName(); }
|
|
|
|
};
|
|
|
|
|
2021-07-12 16:03:41 +00:00
|
|
|
|
2021-09-28 12:30:32 +00:00
|
|
|
|
|
|
|
class MockTablePropertiesCollectorFactory
|
|
|
|
: public TablePropertiesCollectorFactory {
|
|
|
|
private:
|
|
|
|
public:
|
|
|
|
TablePropertiesCollector* CreateTablePropertiesCollector(
|
|
|
|
TablePropertiesCollectorFactory::Context /*context*/) override {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
static const char* kClassName() { return "Mock"; }
|
|
|
|
const char* Name() const override { return kClassName(); }
|
|
|
|
};
|
|
|
|
|
|
|
|
class MockSstPartitionerFactory : public SstPartitionerFactory {
|
|
|
|
public:
|
|
|
|
static const char* kClassName() { return "Mock"; }
|
|
|
|
const char* Name() const override { return kClassName(); }
|
|
|
|
std::unique_ptr<SstPartitioner> CreatePartitioner(
|
|
|
|
const SstPartitioner::Context& /* context */) const override {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
class MockFileChecksumGenFactory : public FileChecksumGenFactory {
|
|
|
|
public:
|
|
|
|
static const char* kClassName() { return "Mock"; }
|
|
|
|
const char* Name() const override { return kClassName(); }
|
|
|
|
std::unique_ptr<FileChecksumGenerator> CreateFileChecksumGenerator(
|
|
|
|
const FileChecksumGenContext& /*context*/) override {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2022-02-18 20:23:48 +00:00
|
|
|
class MockFilterPolicy : public FilterPolicy {
|
|
|
|
public:
|
|
|
|
static const char* kClassName() { return "MockFilterPolicy"; }
|
|
|
|
const char* Name() const override { return kClassName(); }
|
2022-03-23 17:00:54 +00:00
|
|
|
const char* CompatibilityName() const override { return Name(); }
|
2022-02-18 20:23:48 +00:00
|
|
|
FilterBitsBuilder* GetBuilderWithContext(
|
|
|
|
const FilterBuildingContext&) const override {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
FilterBitsReader* GetFilterBitsReader(
|
|
|
|
const Slice& /*contents*/) const override {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2021-05-11 13:45:49 +00:00
|
|
|
// Registers every file-local mock/test Customizable class with `library` so
// that the LoadCustomizableTest cases can create them by name. Returns the
// number of factories now in the library (GetFactoryCount); `num_types` is
// only an out-parameter required by that call.
static int RegisterLocalObjects(ObjectLibrary& library,
                                const std::string& /*arg*/) {
  size_t num_types;
  library.AddFactory<TableFactory>(
      mock::MockTableFactory::kClassName(),
      [](const std::string& /*uri*/, std::unique_ptr<TableFactory>* guard,
         std::string* /* errmsg */) {
        guard->reset(new mock::MockTableFactory());
        return guard->get();
      });
  library.AddFactory<EventListener>(
      OnFileDeletionListener::kClassName(),
      [](const std::string& /*uri*/, std::unique_ptr<EventListener>* guard,
         std::string* /* errmsg */) {
        guard->reset(new OnFileDeletionListener());
        return guard->get();
      });
  library.AddFactory<EventListener>(
      FlushCounterListener::kClassName(),
      [](const std::string& /*uri*/, std::unique_ptr<EventListener>* guard,
         std::string* /* errmsg */) {
        guard->reset(new FlushCounterListener());
        return guard->get();
      });
  // Load any locally defined objects here
  library.AddFactory<const SliceTransform>(
      MockSliceTransform::kClassName(),
      [](const std::string& /*uri*/,
         std::unique_ptr<const SliceTransform>* guard,
         std::string* /* errmsg */) {
        guard->reset(new MockSliceTransform());
        return guard->get();
      });
  library.AddFactory<Statistics>(
      TestStatistics::kClassName(),
      [](const std::string& /*uri*/, std::unique_ptr<Statistics>* guard,
         std::string* /* errmsg */) {
        guard->reset(new TestStatistics());
        return guard->get();
      });

  // The Mock EncryptionProvider is only valid for URIs of the form
  // "Mock://test" (a PatternEntry with the required suffix).
  library.AddFactory<EncryptionProvider>(
      ObjectLibrary::PatternEntry(MockEncryptionProvider::kClassName(), true)
          .AddSuffix("://test"),
      [](const std::string& uri, std::unique_ptr<EncryptionProvider>* guard,
         std::string* /* errmsg */) {
        guard->reset(new MockEncryptionProvider(uri));
        return guard->get();
      });
  library.AddFactory<BlockCipher>(
      "Mock",
      [](const std::string& /*uri*/, std::unique_ptr<BlockCipher>* guard,
         std::string* /* errmsg */) {
        guard->reset(new MockCipher());
        return guard->get();
      });
  library.AddFactory<MemoryAllocator>(
      MockMemoryAllocator::kClassName(),
      [](const std::string& /*uri*/, std::unique_ptr<MemoryAllocator>* guard,
         std::string* /* errmsg */) {
        guard->reset(new MockMemoryAllocator());
        return guard->get();
      });
  library.AddFactory<FlushBlockPolicyFactory>(
      TestFlushBlockPolicyFactory::kClassName(),
      [](const std::string& /*uri*/,
         std::unique_ptr<FlushBlockPolicyFactory>* guard,
         std::string* /* errmsg */) {
        guard->reset(new TestFlushBlockPolicyFactory());
        return guard->get();
      });

  library.AddFactory<SecondaryCache>(
      TestSecondaryCache::kClassName(),
      [](const std::string& /*uri*/, std::unique_ptr<SecondaryCache>* guard,
         std::string* /* errmsg */) {
        guard->reset(new TestSecondaryCache());
        return guard->get();
      });

  // Note: constructed with a null wrapped file system; tests only create it.
  library.AddFactory<FileSystem>(
      DummyFileSystem::kClassName(),
      [](const std::string& /*uri*/, std::unique_ptr<FileSystem>* guard,
         std::string* /* errmsg */) {
        guard->reset(new DummyFileSystem(nullptr));
        return guard->get();
      });

  library.AddFactory<SstPartitionerFactory>(
      MockSstPartitionerFactory::kClassName(),
      [](const std::string& /*uri*/,
         std::unique_ptr<SstPartitionerFactory>* guard,
         std::string* /* errmsg */) {
        guard->reset(new MockSstPartitionerFactory());
        return guard->get();
      });

  library.AddFactory<FileChecksumGenFactory>(
      MockFileChecksumGenFactory::kClassName(),
      [](const std::string& /*uri*/,
         std::unique_ptr<FileChecksumGenFactory>* guard,
         std::string* /* errmsg */) {
        guard->reset(new MockFileChecksumGenFactory());
        return guard->get();
      });

  library.AddFactory<TablePropertiesCollectorFactory>(
      MockTablePropertiesCollectorFactory::kClassName(),
      [](const std::string& /*uri*/,
         std::unique_ptr<TablePropertiesCollectorFactory>* guard,
         std::string* /* errmsg */) {
        guard->reset(new MockTablePropertiesCollectorFactory());
        return guard->get();
      });

  library.AddFactory<const FilterPolicy>(
      MockFilterPolicy::kClassName(),
      [](const std::string& /*uri*/, std::unique_ptr<const FilterPolicy>* guard,
         std::string* /* errmsg */) {
        guard->reset(new MockFilterPolicy());
        return guard->get();
      });

  return static_cast<int>(library.GetFactoryCount(&num_types));
}
|
2021-08-19 17:09:30 +00:00
|
|
|
} // namespace
|
2021-05-11 13:45:49 +00:00
|
|
|
|
|
|
|
// Fixture for the Load*Test cases below. Provides helpers that (1) verify a
// mock class name cannot be created before local/test libraries are
// registered, (2) enumerate every builtin and plugin factory name for a type
// and verify each can be instantiated, and (3) create objects by name with
// instance-of checks.
class LoadCustomizableTest : public testing::Test {
 public:
  LoadCustomizableTest() {
    // Fail fast on unknown options and skip PrepareOptions so stubs that
    // return nullptr/NotSupported can still be "created" by name.
    config_options_.ignore_unsupported_options = false;
    config_options_.invoke_prepare_options = false;
  }
  // Adds the test-object and file-local object libraries to the registry.
  // Always returns true so callers can use it inside `if (...)`.
  bool RegisterTests(const std::string& arg) {
    config_options_.registry->AddLibrary("custom-tests",
                                         test::RegisterTestObjects, arg);
    config_options_.registry->AddLibrary("local-tests", RegisterLocalObjects,
                                         arg);
    return true;
  }

  // Creates a static (raw-pointer) customizable T by name and, on success,
  // checks the result is non-null and IsInstanceOf(name). Optionally deletes
  // the created object (for types the caller would otherwise leak).
  template <typename T, typename U>
  Status TestCreateStatic(const std::string& name, U** result,
                          bool delete_result = false) {
    Status s = T::CreateFromString(config_options_, name, result);
    if (s.ok()) {
      EXPECT_NE(*result, nullptr);
      EXPECT_TRUE(*result != nullptr && (*result)->IsInstanceOf(name));
    }
    if (delete_result) {
      delete *result;
      *result = nullptr;
    }
    return s;
  }

  // Creates a shared customizable by name into *object, expecting success,
  // a non-null result, and IsInstanceOf(name).
  template <typename T, typename U>
  std::shared_ptr<U> ExpectCreateShared(const std::string& name,
                                        std::shared_ptr<U>* object) {
    EXPECT_OK(T::CreateFromString(config_options_, name, object));
    EXPECT_NE(object->get(), nullptr);
    EXPECT_TRUE(object->get()->IsInstanceOf(name));
    return *object;
  }

  // Convenience overload that supplies its own result holder.
  template <typename T>
  std::shared_ptr<T> ExpectCreateShared(const std::string& name) {
    std::shared_ptr<T> result;
    return ExpectCreateShared<T>(name, &result);
  }

  // Verifies `mock` cannot be created before registration, then attempts to
  // create every name in `expected` plus every builtin and plugin factory
  // name for T. `alt` may supply alternate spellings to retry (e.g. names
  // that require a suffix argument). Names that still fail are appended to
  // *failed; the first failing status is returned.
  template <typename T, typename U>
  Status TestExpectedBuiltins(
      const std::string& mock, const std::unordered_set<std::string>& expected,
      std::shared_ptr<U>* object, std::vector<std::string>* failed,
      const std::function<std::vector<std::string>(const std::string&)>& alt =
          nullptr) {
    std::unordered_set<std::string> factories = expected;
    // The mock name must not resolve until RegisterTests() is called.
    Status s = T::CreateFromString(config_options_, mock, object);
    EXPECT_NOK(s);
    std::vector<std::string> builtins;
    ObjectLibrary::Default()->GetFactoryNames(T::Type(), &builtins);
    factories.insert(builtins.begin(), builtins.end());
    Status result;
    int created = 0;
    for (const auto& name : factories) {
      created++;
      s = T::CreateFromString(config_options_, name, object);
      if (!s.ok() && alt != nullptr) {
        // Retry with alternate spellings until one succeeds.
        for (const auto& alt_name : alt(name)) {
          s = T::CreateFromString(config_options_, alt_name, object);
          if (s.ok()) {
            break;
          }
        }
      }
      if (!s.ok()) {
        result = s;
        failed->push_back(name);
      } else {
        EXPECT_NE(object->get(), nullptr);
        EXPECT_TRUE(object->get()->IsInstanceOf(name));
      }
    }
    // Also try any plugin names registered beyond the default builtins.
    std::vector<std::string> plugins;
    ObjectRegistry::Default()->GetFactoryNames(T::Type(), &plugins);
    if (plugins.size() > builtins.size()) {
      for (const auto& name : plugins) {
        if (factories.find(name) == factories.end()) {
          created++;
          s = T::CreateFromString(config_options_, name, object);
          if (!s.ok() && alt != nullptr) {
            for (const auto& alt_name : alt(name)) {
              s = T::CreateFromString(config_options_, alt_name, object);
              if (s.ok()) {
                break;
              }
            }
          }
          if (!s.ok()) {
            failed->push_back(name);
            if (result.ok()) {
              result = s;
            }
            printf("%s: Failed creating plugin[%s]: %s\n", T::Type(),
                   name.c_str(), s.ToString().c_str());
          } else if (object->get() == nullptr ||
                     !object->get()->IsInstanceOf(name)) {
            failed->push_back(name);
            printf("%s: Invalid plugin[%s]\n", T::Type(), name.c_str());
          }
        }
      }
    }
    printf("%s: Created %d (expected+builtins+plugins %d+%d+%d) %d Failed\n",
           T::Type(), created, (int)expected.size(),
           (int)(factories.size() - expected.size()),
           (int)(plugins.size() - builtins.size()), (int)failed->size());
    return result;
  }

  // Shared-pointer wrapper around TestExpectedBuiltins with at most one
  // expected extra name. When `failed` is null, asserts there are no
  // failures instead of reporting them.
  template <typename T>
  Status TestSharedBuiltins(const std::string& mock,
                            const std::string& expected,
                            std::vector<std::string>* failed = nullptr) {
    std::unordered_set<std::string> values;
    if (!expected.empty()) {
      values.insert(expected);
    }
    std::shared_ptr<T> object;
    if (failed != nullptr) {
      return TestExpectedBuiltins<T>(mock, values, &object, failed);
    } else {
      std::vector<std::string> failures;
      Status s = TestExpectedBuiltins<T>(mock, values, &object, &failures);
      EXPECT_EQ(0U, failures.size());
      return s;
    }
  }

  // Raw-pointer analogue of TestExpectedBuiltins for statically managed
  // customizables; optionally deletes each created object.
  template <typename T, typename U>
  Status TestStaticBuiltins(const std::string& mock, U** object,
                            const std::unordered_set<std::string>& expected,
                            std::vector<std::string>* failed,
                            bool delete_objects = false) {
    std::unordered_set<std::string> factories = expected;
    // The mock name must not resolve until RegisterTests() is called.
    Status s = TestCreateStatic<T>(mock, object, delete_objects);
    EXPECT_NOK(s);
    std::vector<std::string> builtins;
    ObjectLibrary::Default()->GetFactoryNames(T::Type(), &builtins);
    factories.insert(builtins.begin(), builtins.end());
    int created = 0;
    Status result;
    for (const auto& name : factories) {
      created++;
      s = TestCreateStatic<T>(name, object, delete_objects);
      if (!s.ok()) {
        result = s;
        failed->push_back(name);
      }
    }
    // Also try any plugin names registered beyond the default builtins.
    std::vector<std::string> plugins;
    ObjectRegistry::Default()->GetFactoryNames(T::Type(), &plugins);
    if (plugins.size() > builtins.size()) {
      for (const auto& name : plugins) {
        if (factories.find(name) == factories.end()) {
          created++;
          s = T::CreateFromString(config_options_, name, object);
          if (!s.ok() || *object == nullptr ||
              !((*object)->IsInstanceOf(name))) {
            failed->push_back(name);
            if (result.ok() && !s.ok()) {
              result = s;
            }
            printf("%s: Failed creating plugin[%s]: %s\n", T::Type(),
                   name.c_str(), s.ToString().c_str());
          }
          if (delete_objects) {
            delete *object;
            *object = nullptr;
          }
        }
      }
    }
    printf("%s: Created %d (expected+builtins+plugins %d+%d+%d) %d Failed\n",
           T::Type(), created, (int)expected.size(),
           (int)(factories.size() - expected.size()),
           (int)(plugins.size() - builtins.size()), (int)failed->size());
    return result;
  }

 protected:
  DBOptions db_opts_;
  ColumnFamilyOptions cf_opts_;
  ConfigOptions config_options_;
};
|
|
|
|
|
|
|
|
// Verifies all builtin TableFactory names load, that the block-based factory
// can be set via an options string, and that the mock factory only loads
// after the local test libraries are registered.
TEST_F(LoadCustomizableTest, LoadTableFactoryTest) {
  ASSERT_OK(
      TestSharedBuiltins<TableFactory>(mock::MockTableFactory::kClassName(),
                                       TableFactory::kBlockBasedTableName()));
  std::string opts_str = "table_factory=";
  ASSERT_OK(GetColumnFamilyOptionsFromString(
      config_options_, cf_opts_,
      opts_str + TableFactory::kBlockBasedTableName(), &cf_opts_));
  ASSERT_NE(cf_opts_.table_factory.get(), nullptr);
  ASSERT_STREQ(cf_opts_.table_factory->Name(),
               TableFactory::kBlockBasedTableName());
  if (RegisterTests("Test")) {
    ExpectCreateShared<TableFactory>(mock::MockTableFactory::kClassName());
    ASSERT_OK(GetColumnFamilyOptionsFromString(
        config_options_, cf_opts_,
        opts_str + mock::MockTableFactory::kClassName(), &cf_opts_));
    ASSERT_NE(cf_opts_.table_factory.get(), nullptr);
    ASSERT_STREQ(cf_opts_.table_factory->Name(),
                 mock::MockTableFactory::kClassName());
  }
}
|
2021-06-11 13:21:55 +00:00
|
|
|
|
2021-11-02 16:06:02 +00:00
|
|
|
// Verifies builtin FileSystem names load and that the locally registered
// DummyFileSystem is a distinct instance from the default file system.
TEST_F(LoadCustomizableTest, LoadFileSystemTest) {
  ASSERT_OK(TestSharedBuiltins<FileSystem>(DummyFileSystem::kClassName(),
                                           FileSystem::kDefaultName()));
  if (RegisterTests("Test")) {
    auto fs = ExpectCreateShared<FileSystem>(DummyFileSystem::kClassName());
    ASSERT_FALSE(fs->IsInstanceOf(FileSystem::kDefaultName()));
  }
}
|
|
|
|
|
2021-07-06 16:17:13 +00:00
|
|
|
// Verifies builtin SecondaryCache names load and that TestSecondaryCache is
// only creatable once the local test libraries are registered.
TEST_F(LoadCustomizableTest, LoadSecondaryCacheTest) {
  ASSERT_OK(
      TestSharedBuiltins<SecondaryCache>(TestSecondaryCache::kClassName(), ""));
  if (RegisterTests("Test")) {
    ExpectCreateShared<SecondaryCache>(TestSecondaryCache::kClassName());
  }
}
|
|
|
|
|
2021-09-28 12:30:32 +00:00
|
|
|
// Verifies builtin SstPartitionerFactory names load and that the "Mock"
// factory is only creatable after registration.
TEST_F(LoadCustomizableTest, LoadSstPartitionerFactoryTest) {
  ASSERT_OK(TestSharedBuiltins<SstPartitionerFactory>(
      "Mock", SstPartitionerFixedPrefixFactory::kClassName()));
  if (RegisterTests("Test")) {
    ExpectCreateShared<SstPartitionerFactory>("Mock");
  }
}
|
|
|
|
|
|
|
|
// Verifies builtin FileChecksumGenFactory names load and that the "Mock"
// factory is only creatable after registration.
TEST_F(LoadCustomizableTest, LoadChecksumGenFactoryTest) {
  ASSERT_OK(TestSharedBuiltins<FileChecksumGenFactory>("Mock", ""));
  if (RegisterTests("Test")) {
    ExpectCreateShared<FileChecksumGenFactory>("Mock");
  }
}
|
|
|
|
|
|
|
|
// Verifies builtin TablePropertiesCollectorFactory names load and that the
// mock factory is only creatable after registration.
TEST_F(LoadCustomizableTest, LoadTablePropertiesCollectorFactoryTest) {
  ASSERT_OK(TestSharedBuiltins<TablePropertiesCollectorFactory>(
      MockTablePropertiesCollectorFactory::kClassName(), ""));
  if (RegisterTests("Test")) {
    ExpectCreateShared<TablePropertiesCollectorFactory>(
        MockTablePropertiesCollectorFactory::kClassName());
  }
}
|
|
|
|
|
2021-06-11 13:21:55 +00:00
|
|
|
// Verifies the static bytewise/reverse-bytewise comparators load by name and
// that the test comparator requires the test libraries to be registered.
TEST_F(LoadCustomizableTest, LoadComparatorTest) {
  const Comparator* bytewise = BytewiseComparator();
  const Comparator* reverse = ReverseBytewiseComparator();
  const Comparator* result = nullptr;
  std::unordered_set<std::string> expected = {bytewise->Name(),
                                              reverse->Name()};
  std::vector<std::string> failures;
  ASSERT_OK(TestStaticBuiltins<Comparator>(
      test::SimpleSuffixReverseComparator::kClassName(), &result, expected,
      &failures));
  if (RegisterTests("Test")) {
    ASSERT_OK(TestCreateStatic<Comparator>(
        test::SimpleSuffixReverseComparator::kClassName(), &result));
  }
}
|
2021-05-11 13:45:49 +00:00
|
|
|
|
2021-09-27 14:42:36 +00:00
|
|
|
// Verifies builtin SliceTransform names load (fixed/capped prefixes need a
// ":<len>" or ".<len>" suffix, supplied via the alt-name callback), that the
// dotted forms map onto their short names, and that the "Mock" transform is
// only creatable after registration.
TEST_F(LoadCustomizableTest, LoadSliceTransformFactoryTest) {
  std::shared_ptr<const SliceTransform> result;
  std::vector<std::string> failures;
  std::unordered_set<std::string> expected = {"rocksdb.Noop", "fixed",
                                              "rocksdb.FixedPrefix", "capped",
                                              "rocksdb.CappedPrefix"};
  ASSERT_OK(TestExpectedBuiltins<SliceTransform>(
      "Mock", expected, &result, &failures, [](const std::string& name) {
        // Retry prefix transforms with an explicit length suffix.
        std::vector<std::string> names = {name + ":22", name + ".22"};
        return names;
      }));
  ASSERT_OK(SliceTransform::CreateFromString(
      config_options_, "rocksdb.FixedPrefix.22", &result));
  ASSERT_NE(result.get(), nullptr);
  ASSERT_TRUE(result->IsInstanceOf("fixed"));
  ASSERT_OK(SliceTransform::CreateFromString(
      config_options_, "rocksdb.CappedPrefix.22", &result));
  ASSERT_NE(result.get(), nullptr);
  ASSERT_TRUE(result->IsInstanceOf("capped"));
  if (RegisterTests("Test")) {
    ExpectCreateShared<SliceTransform>("Mock", &result);
  }
}
|
|
|
|
|
2021-09-10 16:46:47 +00:00
|
|
|
// Verifies Statistics loading: the default/empty name yields BasicStatistics,
// "Test" only resolves after registration, and nested statistics can be
// configured via "id=...;inner=..." in either direction.
TEST_F(LoadCustomizableTest, LoadStatisticsTest) {
  ASSERT_OK(TestSharedBuiltins<Statistics>(TestStatistics::kClassName(),
                                           "BasicStatistics"));
  // Empty will create a default BasicStatistics
  ASSERT_OK(
      Statistics::CreateFromString(config_options_, "", &db_opts_.statistics));
  ASSERT_NE(db_opts_.statistics, nullptr);
  ASSERT_STREQ(db_opts_.statistics->Name(), "BasicStatistics");

  // Unregistered name fails; builtin name succeeds.
  ASSERT_NOK(GetDBOptionsFromString(config_options_, db_opts_,
                                    "statistics=Test", &db_opts_));
  ASSERT_OK(GetDBOptionsFromString(config_options_, db_opts_,
                                   "statistics=BasicStatistics", &db_opts_));
  ASSERT_NE(db_opts_.statistics, nullptr);
  ASSERT_STREQ(db_opts_.statistics->Name(), "BasicStatistics");

  if (RegisterTests("test")) {
    auto stats = ExpectCreateShared<Statistics>(TestStatistics::kClassName());

    ASSERT_OK(GetDBOptionsFromString(config_options_, db_opts_,
                                     "statistics=Test", &db_opts_));
    ASSERT_NE(db_opts_.statistics, nullptr);
    ASSERT_STREQ(db_opts_.statistics->Name(), TestStatistics::kClassName());

    // Outer "Test" statistics wrapping an inner BasicStatistics.
    ASSERT_OK(GetDBOptionsFromString(
        config_options_, db_opts_, "statistics={id=Test;inner=BasicStatistics}",
        &db_opts_));
    ASSERT_NE(db_opts_.statistics, nullptr);
    ASSERT_STREQ(db_opts_.statistics->Name(), TestStatistics::kClassName());
    auto* inner = db_opts_.statistics->GetOptions<std::shared_ptr<Statistics>>(
        "StatisticsOptions");
    ASSERT_NE(inner, nullptr);
    ASSERT_NE(inner->get(), nullptr);
    ASSERT_STREQ(inner->get()->Name(), "BasicStatistics");

    // And the reverse nesting: BasicStatistics wrapping a "Test" inner.
    ASSERT_OK(Statistics::CreateFromString(
        config_options_, "id=BasicStatistics;inner=Test", &stats));
    ASSERT_NE(stats, nullptr);
    ASSERT_STREQ(stats->Name(), "BasicStatistics");
    inner = stats->GetOptions<std::shared_ptr<Statistics>>("StatisticsOptions");
    ASSERT_NE(inner, nullptr);
    ASSERT_NE(inner->get(), nullptr);
    ASSERT_STREQ(inner->get()->Name(), TestStatistics::kClassName());
  }
}
|
|
|
|
|
2021-09-08 14:45:59 +00:00
|
|
|
// Verifies builtin MemTableRepFactory names load (tolerating the known
// "cuckoo" registration that cannot be created) and that the special test
// factory requires registration.
TEST_F(LoadCustomizableTest, LoadMemTableRepFactoryTest) {
  std::unordered_set<std::string> expected = {
      SkipListFactory::kClassName(),
      SkipListFactory::kNickName(),
  };

  std::vector<std::string> failures;
  std::shared_ptr<MemTableRepFactory> factory;
  Status s = TestExpectedBuiltins<MemTableRepFactory>(
      "SpecialSkipListFactory", expected, &factory, &failures);
  // There is a "cuckoo" factory registered that we expect to fail. Ignore the
  // error if this is the one
  if (s.ok() || failures.size() > 1 || failures[0] != "cuckoo") {
    ASSERT_OK(s);
  }
  if (RegisterTests("Test")) {
    ExpectCreateShared<MemTableRepFactory>("SpecialSkipListFactory");
  }
}
|
|
|
|
|
2021-08-06 15:26:23 +00:00
|
|
|
// Verifies every builtin MergeOperator name/nickname loads and that the
// "Changling" test operator requires registration.
TEST_F(LoadCustomizableTest, LoadMergeOperatorTest) {
  std::shared_ptr<MergeOperator> result;
  std::vector<std::string> failed;
  std::unordered_set<std::string> expected = {
      "put", "put_v1", "PutOperator", "uint64add", "UInt64AddOperator",
      "max", "MaxOperator",
  };
  expected.insert({
      StringAppendOperator::kClassName(),
      StringAppendOperator::kNickName(),
      StringAppendTESTOperator::kClassName(),
      StringAppendTESTOperator::kNickName(),
      SortList::kClassName(),
      SortList::kNickName(),
      BytesXOROperator::kClassName(),
      BytesXOROperator::kNickName(),
  });

  ASSERT_OK(TestExpectedBuiltins<MergeOperator>("Changling", expected, &result,
                                                &failed));
  if (RegisterTests("Test")) {
    ExpectCreateShared<MergeOperator>("Changling");
  }
}
|
|
|
|
|
|
|
|
// Verifies builtin CompactionFilterFactory names load and that "Changling"
// requires registration.
TEST_F(LoadCustomizableTest, LoadCompactionFilterFactoryTest) {
  ASSERT_OK(TestSharedBuiltins<CompactionFilterFactory>("Changling", ""));
  if (RegisterTests("Test")) {
    ExpectCreateShared<CompactionFilterFactory>("Changling");
  }
}
|
|
|
|
|
|
|
|
// Verifies builtin (static) CompactionFilter names load; created filters are
// deleted by the helper since the caller owns them here.
TEST_F(LoadCustomizableTest, LoadCompactionFilterTest) {
  const CompactionFilter* result = nullptr;
  std::vector<std::string> failures;
  ASSERT_OK(TestStaticBuiltins<CompactionFilter>("Changling", &result, {},
                                                 &failures, true));
  if (RegisterTests("Test")) {
    ASSERT_OK(TestCreateStatic<CompactionFilter>("Changling", &result, true));
  }
}
|
|
|
|
|
2021-07-27 14:46:09 +00:00
|
|
|
// Verifies builtin EventListener names load and that both locally registered
// test listeners are creatable after registration.
TEST_F(LoadCustomizableTest, LoadEventListenerTest) {
  ASSERT_OK(TestSharedBuiltins<EventListener>(
      OnFileDeletionListener::kClassName(), ""));
  if (RegisterTests("Test")) {
    ExpectCreateShared<EventListener>(OnFileDeletionListener::kClassName());
    ExpectCreateShared<EventListener>(FlushCounterListener::kClassName());
  }
}
|
|
|
|
|
2021-07-16 14:57:47 +00:00
|
|
|
// Verifies EncryptionProvider loading: builtins load (the "1://test" pattern
// entry is the only tolerated failure), a bare "CTR" provider fails
// ValidateOptions until created via "CTR://test", and the Mock provider
// follows the same initialized/uninitialized pattern after registration.
TEST_F(LoadCustomizableTest, LoadEncryptionProviderTest) {
  std::vector<std::string> failures;
  std::shared_ptr<EncryptionProvider> result;
  ASSERT_OK(
      TestExpectedBuiltins<EncryptionProvider>("Mock", {}, &result, &failures));
  if (!failures.empty()) {
    ASSERT_EQ(failures[0], "1://test");
    ASSERT_EQ(failures.size(), 1U);
  }

  // A bare CTR provider is not valid until constructed via a "://test" URI.
  result = ExpectCreateShared<EncryptionProvider>("CTR");
  ASSERT_NOK(result->ValidateOptions(db_opts_, cf_opts_));
  ASSERT_OK(EncryptionProvider::CreateFromString(config_options_, "CTR://test",
                                                 &result));
  ASSERT_NE(result, nullptr);
  ASSERT_STREQ(result->Name(), "CTR");
  ASSERT_OK(result->ValidateOptions(db_opts_, cf_opts_));

  if (RegisterTests("Test")) {
    ExpectCreateShared<EncryptionProvider>("Mock");
    ASSERT_OK(EncryptionProvider::CreateFromString(config_options_,
                                                   "Mock://test", &result));
    ASSERT_NE(result, nullptr);
    ASSERT_STREQ(result->Name(), "Mock");
    ASSERT_OK(result->ValidateOptions(db_opts_, cf_opts_));
  }
}
|
|
|
|
|
|
|
|
// Verifies builtin BlockCipher names (e.g. ROT13) load and that the "Mock"
// cipher requires registration.
TEST_F(LoadCustomizableTest, LoadEncryptionCipherTest) {
  ASSERT_OK(TestSharedBuiltins<BlockCipher>("Mock", "ROT13"));
  if (RegisterTests("Test")) {
    ExpectCreateShared<BlockCipher>("Mock");
  }
}
|
|
|
|
|
2021-09-21 15:53:03 +00:00
|
|
|
// Verifies builtin SystemClock names load and that the registered mock clock
// is distinct from the default clock.
TEST_F(LoadCustomizableTest, LoadSystemClockTest) {
  ASSERT_OK(TestSharedBuiltins<SystemClock>(MockSystemClock::kClassName(),
                                            SystemClock::kDefaultName()));
  if (RegisterTests("Test")) {
    auto result =
        ExpectCreateShared<SystemClock>(MockSystemClock::kClassName());
    ASSERT_FALSE(result->IsInstanceOf(SystemClock::kDefaultName()));
  }
}
|
|
|
|
|
2021-12-17 12:19:34 +00:00
|
|
|
// Verifies MemoryAllocator loading. Some built-in allocators are only
// available when their backing library is compiled in, so load failures are
// tolerated for exactly those names and cross-checked against IsSupported().
TEST_F(LoadCustomizableTest, LoadMemoryAllocatorTest) {
  std::vector<std::string> load_failures;
  Status status = TestSharedBuiltins<MemoryAllocator>(
      MockMemoryAllocator::kClassName(), DefaultMemoryAllocator::kClassName(),
      &load_failures);
  if (load_failures.empty()) {
    ASSERT_OK(status);
  } else {
    ASSERT_NOK(status);
    for (const auto& name : load_failures) {
      if (name == JemallocNodumpAllocator::kClassName()) {
        // Only allowed to fail when jemalloc support is absent from the build.
        ASSERT_FALSE(JemallocNodumpAllocator::IsSupported());
      } else if (name == MemkindKmemAllocator::kClassName()) {
        // Only allowed to fail when memkind support is absent from the build.
        ASSERT_FALSE(MemkindKmemAllocator::IsSupported());
      } else {
        // Unexpected name: report it rather than failing outright.
        printf("BYPASSED: %s -- %s\n", name.c_str(),
               status.ToString().c_str());
      }
    }
  }
  if (RegisterTests("Test")) {
    // After registration, the mock allocator must be creatable.
    ExpectCreateShared<MemoryAllocator>(MockMemoryAllocator::kClassName());
  }
}
|
|
|
|
|
2022-02-18 20:23:48 +00:00
|
|
|
// Verifies FilterPolicy loading: built-in policies (read-only builtin, Bloom,
// Ribbon — by class name and nickname, with ":bits_per_key" style arguments),
// the registered mock policy, and filter_policy configuration embedded in a
// BlockBasedTable options string.
TEST_F(LoadCustomizableTest, LoadFilterPolicyTest) {
  const std::string kAutoBloom = BloomFilterPolicy::kClassName();
  const std::string kAutoRibbon = RibbonFilterPolicy::kClassName();
  std::shared_ptr<const FilterPolicy> result;
  std::vector<std::string> failures;
  std::unordered_set<std::string> expected = {
      ReadOnlyBuiltinFilterPolicy::kClassName(),
  };

  expected.insert({
      kAutoBloom,
      BloomFilterPolicy::kNickName(),
      kAutoRibbon,
      RibbonFilterPolicy::kNickName(),
  });
  // Each builtin is also probed with a ":1.234" argument appended to its name.
  ASSERT_OK(TestExpectedBuiltins<const FilterPolicy>(
      "Mock", expected, &result, &failures, [](const std::string& name) {
        std::vector<std::string> names = {name + ":1.234"};
        return names;
      }));
  // Bloom with explicit bits-per-key and use-block-based-builder arguments.
  // NOTE: the original test repeated this exact stanza twice back-to-back;
  // the verbatim duplicate added no coverage and has been removed.
  ASSERT_OK(FilterPolicy::CreateFromString(
      config_options_, kAutoBloom + ":1.234:false", &result));
  ASSERT_NE(result.get(), nullptr);
  ASSERT_TRUE(result->IsInstanceOf(kAutoBloom));
  // Ribbon with a negative (bloom-before-level) second argument...
  ASSERT_OK(FilterPolicy::CreateFromString(config_options_,
                                           kAutoRibbon + ":1.234:-1", &result));
  ASSERT_NE(result.get(), nullptr);
  ASSERT_TRUE(result->IsInstanceOf(kAutoRibbon));
  // ...and with a positive one.
  ASSERT_OK(FilterPolicy::CreateFromString(config_options_,
                                           kAutoRibbon + ":1.234:56", &result));
  ASSERT_NE(result.get(), nullptr);
  ASSERT_TRUE(result->IsInstanceOf(kAutoRibbon));

  if (RegisterTests("Test")) {
    ExpectCreateShared<FilterPolicy>(MockFilterPolicy::kClassName(), &result);
  }

  // Exercise filter_policy as a nested option of BlockBasedTable.
  std::shared_ptr<TableFactory> table;
  std::string table_opts = "id=BlockBasedTable; filter_policy=";
  // "nullptr" explicitly clears the policy.
  ASSERT_OK(TableFactory::CreateFromString(config_options_,
                                           table_opts + "nullptr", &table));
  ASSERT_NE(table.get(), nullptr);
  auto bbto = table->GetOptions<BlockBasedTableOptions>();
  ASSERT_NE(bbto, nullptr);
  ASSERT_EQ(bbto->filter_policy.get(), nullptr);
  // A builtin policy name populates the nested option.
  ASSERT_OK(TableFactory::CreateFromString(
      config_options_, table_opts + ReadOnlyBuiltinFilterPolicy::kClassName(),
      &table));
  bbto = table->GetOptions<BlockBasedTableOptions>();
  ASSERT_NE(bbto, nullptr);
  ASSERT_NE(bbto->filter_policy.get(), nullptr);
  ASSERT_STREQ(bbto->filter_policy->Name(),
               ReadOnlyBuiltinFilterPolicy::kClassName());
  // The registered mock policy also works as a nested option.
  ASSERT_OK(TableFactory::CreateFromString(
      config_options_, table_opts + MockFilterPolicy::kClassName(), &table));
  bbto = table->GetOptions<BlockBasedTableOptions>();
  ASSERT_NE(bbto, nullptr);
  ASSERT_NE(bbto->filter_policy.get(), nullptr);
  ASSERT_TRUE(
      bbto->filter_policy->IsInstanceOf(MockFilterPolicy::kClassName()));
}
|
|
|
|
|
2021-07-12 16:03:41 +00:00
|
|
|
// Verifies FlushBlockPolicyFactory loading: the two built-in factories
// (BySize, EveryKey), the empty-name default, the registered test factory,
// and flush_block_policy_factory configured through a BlockBasedTable
// options string.
TEST_F(LoadCustomizableTest, LoadFlushBlockPolicyFactoryTest) {
  std::shared_ptr<FlushBlockPolicyFactory> result;
  std::shared_ptr<TableFactory> table;
  std::vector<std::string> failed_names;
  std::unordered_set<std::string> builtin_names = {
      FlushBlockBySizePolicyFactory::kClassName(),
      FlushBlockEveryKeyPolicyFactory::kClassName(),
  };

  ASSERT_OK(TestExpectedBuiltins<FlushBlockPolicyFactory>(
      TestFlushBlockPolicyFactory::kClassName(), builtin_names, &result,
      &failed_names));

  // An empty policy name creates a BySize policy
  ASSERT_OK(
      FlushBlockPolicyFactory::CreateFromString(config_options_, "", &result));
  ASSERT_NE(result, nullptr);
  ASSERT_STREQ(result->Name(), FlushBlockBySizePolicyFactory::kClassName());

  // Exercise the factory as a nested option of BlockBasedTable.
  std::string table_opts = "id=BlockBasedTable; flush_block_policy_factory=";
  ASSERT_OK(TableFactory::CreateFromString(
      config_options_,
      table_opts + FlushBlockEveryKeyPolicyFactory::kClassName(), &table));
  auto bbto = table->GetOptions<BlockBasedTableOptions>();
  ASSERT_NE(bbto, nullptr);
  ASSERT_NE(bbto->flush_block_policy_factory.get(), nullptr);
  ASSERT_STREQ(bbto->flush_block_policy_factory->Name(),
               FlushBlockEveryKeyPolicyFactory::kClassName());
  if (RegisterTests("Test")) {
    // Once registered, the test factory loads both directly...
    ExpectCreateShared<FlushBlockPolicyFactory>(
        TestFlushBlockPolicyFactory::kClassName());
    // ...and as a nested BlockBasedTable option.
    ASSERT_OK(TableFactory::CreateFromString(
        config_options_, table_opts + TestFlushBlockPolicyFactory::kClassName(),
        &table));
    bbto = table->GetOptions<BlockBasedTableOptions>();
    ASSERT_NE(bbto, nullptr);
    ASSERT_NE(bbto->flush_block_policy_factory.get(), nullptr);
    ASSERT_STREQ(bbto->flush_block_policy_factory->Name(),
                 TestFlushBlockPolicyFactory::kClassName());
  }
}
|
|
|
|
|
2020-11-11 23:09:14 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
|
|
|
int main(int argc, char** argv) {
|
|
|
|
::testing::InitGoogleTest(&argc, argv);
|
2021-09-10 12:19:47 +00:00
|
|
|
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
|
2020-11-11 23:09:14 +00:00
|
|
|
#ifdef GFLAGS
|
|
|
|
ParseCommandLineFlags(&argc, &argv, true);
|
|
|
|
#endif // GFLAGS
|
|
|
|
return RUN_ALL_TESTS();
|
|
|
|
}
|