mirror of https://github.com/facebook/rocksdb.git
CLANG Tidy
Summary: Closes https://github.com/facebook/rocksdb/pull/2502
Differential Revision: D5326498
Pulled By: siying
fbshipit-source-id: 2f0ac6dc6ca5ddb23cecf67a278c086e52646714

commit e517bfa2c2 (parent dc3d2e4d21)
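Every hunk below applies the same mechanical cleanup: a destructor or member function that already overrides a base-class function drops the redundant `virtual` keyword and states `override` alone, which matches what clang-tidy's `modernize-use-override` check automates. A minimal sketch of the pattern, using an illustrative Base/Widget pair that is not part of RocksDB:

// Before the cleanup: `virtual` is restated on functions that already override.
class Base {
 public:
  virtual ~Base() {}
  virtual int Value() const { return 0; }
};

class WidgetBefore : public Base {
 public:
  virtual ~WidgetBefore() {}                        // overrides, but only says `virtual`
  virtual int Value() const override { return 1; }  // `virtual` is redundant with `override`
};

// After the cleanup: `override` alone documents the intent, and the compiler
// still rejects any signature that does not actually override a base function.
class WidgetAfter : public Base {
 public:
  ~WidgetAfter() override {}
  int Value() const override { return 1; }
};

A typical way to apply the rewrite mechanically (shown as an example invocation, not taken from this commit) is `clang-tidy -checks='-*,modernize-use-override' -fix` run over the affected translation units.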
@@ -900,7 +900,7 @@ class EnvWrapper : public Env {
  public:
   // Initialize an EnvWrapper that delegates all calls to *t
   explicit EnvWrapper(Env* t) : target_(t) { }
-  virtual ~EnvWrapper();
+  ~EnvWrapper() override;
 
   // Return the target to which this Env forwards all calls
   Env* target() const { return target_; }
@@ -935,8 +935,8 @@ class EnvWrapper : public Env {
                              const EnvOptions& options) override {
     return target_->NewRandomRWFile(fname, result, options);
   }
-  virtual Status NewDirectory(const std::string& name,
-                              unique_ptr<Directory>* result) override {
+  Status NewDirectory(const std::string& name,
+                      unique_ptr<Directory>* result) override {
     return target_->NewDirectory(name, result);
   }
   Status FileExists(const std::string& f) override {
@@ -998,15 +998,14 @@ class EnvWrapper : public Env {
     return target_->StartThread(f, a);
   }
   void WaitForJoin() override { return target_->WaitForJoin(); }
-  virtual unsigned int GetThreadPoolQueueLen(
-      Priority pri = LOW) const override {
+  unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const override {
     return target_->GetThreadPoolQueueLen(pri);
   }
-  virtual Status GetTestDirectory(std::string* path) override {
+  Status GetTestDirectory(std::string* path) override {
     return target_->GetTestDirectory(path);
   }
-  virtual Status NewLogger(const std::string& fname,
-                           shared_ptr<Logger>* result) override {
+  Status NewLogger(const std::string& fname,
+                   shared_ptr<Logger>* result) override {
     return target_->NewLogger(fname, result);
   }
   uint64_t NowMicros() override { return target_->NowMicros(); }
@@ -1098,10 +1097,10 @@ class WritableFileWrapper : public WritableFile {
     return target_->InvalidateCache(offset, length);
   }
 
-  virtual void SetPreallocationBlockSize(size_t size) override {
+  void SetPreallocationBlockSize(size_t size) override {
     target_->SetPreallocationBlockSize(size);
   }
-  virtual void PrepareWrite(size_t offset, size_t len) override {
+  void PrepareWrite(size_t offset, size_t len) override {
     target_->PrepareWrite(offset, len);
   }
 
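As a usage sketch of the EnvWrapper interface touched above, assuming the constructor, target(), and NewLogger() declarations visible in these hunks; CountingEnv and its counter are illustrative, not part of RocksDB:

#include <atomic>
#include <memory>
#include <string>

#include "rocksdb/env.h"

// Illustrative subclass: count how many Logger objects get created, and
// forward everything else to the wrapped Env through EnvWrapper's defaults.
class CountingEnv : public rocksdb::EnvWrapper {
 public:
  explicit CountingEnv(rocksdb::Env* base) : EnvWrapper(base) {}

  rocksdb::Status NewLogger(const std::string& fname,
                            std::shared_ptr<rocksdb::Logger>* result) override {
    logger_count_.fetch_add(1, std::memory_order_relaxed);
    return target()->NewLogger(fname, result);  // delegate to the wrapped Env
  }

  uint64_t logger_count() const { return logger_count_.load(); }

 private:
  std::atomic<uint64_t> logger_count_{0};
};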
@@ -48,11 +48,9 @@ class FlushBlockBySizePolicyFactory : public FlushBlockPolicyFactory {
  public:
   FlushBlockBySizePolicyFactory() {}
 
-  virtual const char* Name() const override {
-    return "FlushBlockBySizePolicyFactory";
-  }
+  const char* Name() const override { return "FlushBlockBySizePolicyFactory"; }
 
-  virtual FlushBlockPolicy* NewFlushBlockPolicy(
+  FlushBlockPolicy* NewFlushBlockPolicy(
       const BlockBasedTableOptions& table_options,
       const BlockBuilder& data_block_builder) const override;
 
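A sketch of wiring the factory above into a table configuration, assuming the flush_block_policy_factory field of BlockBasedTableOptions and NewBlockBasedTableFactory() from rocksdb/table.h; the helper name is illustrative:

#include <memory>

#include "rocksdb/flush_block_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

// Illustrative helper: build Options that explicitly select the size-based
// block flush policy declared above.
rocksdb::Options MakeOptionsWithSizeFlushPolicy() {
  rocksdb::BlockBasedTableOptions table_options;
  table_options.flush_block_policy_factory =
      std::make_shared<rocksdb::FlushBlockBySizePolicyFactory>();

  rocksdb::Options options;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  return options;
}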
@@ -188,7 +188,7 @@ class MergeOperator {
 // The simpler, associative merge operator.
 class AssociativeMergeOperator : public MergeOperator {
  public:
-  virtual ~AssociativeMergeOperator() {}
+  ~AssociativeMergeOperator() override {}
 
   // Gives the client a way to express the read -> modify -> write semantics
   // key: (IN) The key that's associated with this merge operation.
@@ -212,14 +212,12 @@ class AssociativeMergeOperator : public MergeOperator {
 
  private:
   // Default implementations of the MergeOperator functions
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override;
+  bool FullMergeV2(const MergeOperationInput& merge_in,
+                   MergeOperationOutput* merge_out) const override;
 
-  virtual bool PartialMerge(const Slice& key,
-                            const Slice& left_operand,
-                            const Slice& right_operand,
-                            std::string* new_value,
-                            Logger* logger) const override;
+  bool PartialMerge(const Slice& key, const Slice& left_operand,
+                    const Slice& right_operand, std::string* new_value,
+                    Logger* logger) const override;
 };
 
 }  // namespace rocksdb
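A sketch of a concrete operator built on the AssociativeMergeOperator interface above; it assumes the single Merge(key, existing_value, value, new_value, logger) hook that subclasses implement, and the 8-byte counter encoding is illustrative:

#include <cstdint>
#include <cstring>
#include <string>

#include "rocksdb/merge_operator.h"
#include "rocksdb/slice.h"

// Illustrative operator: treat values as 8-byte counters and add operands.
class UInt64AddOperator : public rocksdb::AssociativeMergeOperator {
 public:
  bool Merge(const rocksdb::Slice& /*key*/,
             const rocksdb::Slice* existing_value,
             const rocksdb::Slice& value,
             std::string* new_value,
             rocksdb::Logger* /*logger*/) const override {
    uint64_t sum = 0;
    if (existing_value != nullptr &&
        existing_value->size() == sizeof(uint64_t)) {
      std::memcpy(&sum, existing_value->data(), sizeof(uint64_t));
    }
    uint64_t operand = 0;
    if (value.size() == sizeof(uint64_t)) {
      std::memcpy(&operand, value.data(), sizeof(uint64_t));
    }
    sum += operand;
    new_value->assign(reinterpret_cast<const char*>(&sum), sizeof(uint64_t));
    return true;  // merge handled; the private FullMergeV2/PartialMerge above stay default
  }

  const char* Name() const override { return "UInt64AddOperator"; }
};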
@@ -39,9 +39,9 @@ class SimCache : public Cache {
  public:
   SimCache() {}
 
-  virtual ~SimCache() {}
+  ~SimCache() override {}
 
-  virtual const char* Name() const override { return "SimCache"; }
+  const char* Name() const override { return "SimCache"; }
 
   // returns the maximum configured capacity of the simcache for simulation
   virtual size_t GetSimCapacity() const = 0;
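A minimal sketch of using the SimCache interface above; it assumes the NewSimCache() factory from rocksdb/utilities/sim_cache.h, which wraps a real cache and simulates a (possibly larger) capacity for hit-rate studies, and the sizes chosen here are illustrative:

#include <memory>

#include "rocksdb/cache.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/sim_cache.h"

// Illustrative wiring: back the block cache with 64 MB of real capacity while
// simulating how a 1 GB cache would have behaved.
void UseSimCache(rocksdb::BlockBasedTableOptions* table_options) {
  std::shared_ptr<rocksdb::Cache> real_cache = rocksdb::NewLRUCache(64 << 20);
  std::shared_ptr<rocksdb::SimCache> sim_cache = rocksdb::NewSimCache(
      real_cache, 1 << 30 /* sim_capacity */, 6 /* num_shard_bits */);
  table_options->block_cache = sim_cache;  // behaves like any other Cache
}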
@@ -148,7 +148,7 @@ class TransactionDB : public StackableDB {
       StackableDB* db, const TransactionDBOptions& txn_db_options,
       const std::vector<size_t>& compaction_enabled_cf_indices,
       const std::vector<ColumnFamilyHandle*>& handles, TransactionDB** dbptr);
-  virtual ~TransactionDB() {}
+  ~TransactionDB() override {}
 
   // Starts a new Transaction.
   //
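A sketch of opening and using a TransactionDB, assuming the TransactionDB::Open(options, txn_db_options, dbname, dbptr) factory and the Transaction API from rocksdb/utilities/transaction_db.h; the path, key, and value are illustrative:

#include <cassert>
#include <string>

#include "rocksdb/options.h"
#include "rocksdb/utilities/transaction.h"
#include "rocksdb/utilities/transaction_db.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::TransactionDBOptions txn_db_options;

  rocksdb::TransactionDB* db = nullptr;
  rocksdb::Status s = rocksdb::TransactionDB::Open(
      options, txn_db_options, "/tmp/txn_example_db", &db);
  assert(s.ok());

  // Stage a write inside a transaction and commit it atomically.
  rocksdb::Transaction* txn = db->BeginTransaction(rocksdb::WriteOptions());
  s = txn->Put("key", "value");
  assert(s.ok());
  s = txn->Commit();
  assert(s.ok());

  delete txn;
  delete db;
  return 0;
}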
@@ -97,7 +97,7 @@ class WriteBatchWithIndex : public WriteBatchBase {
       size_t reserved_bytes = 0, bool overwrite_key = false,
       size_t max_bytes = 0);
 
-  virtual ~WriteBatchWithIndex();
+  ~WriteBatchWithIndex() override;
 
   using WriteBatchBase::Put;
   Status Put(ColumnFamilyHandle* column_family, const Slice& key,
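A sketch of what the indexed batch adds over a plain WriteBatch, assuming the GetFromBatch() helper from rocksdb/utilities/write_batch_with_index.h; the key, value, and function name are illustrative:

#include <string>

#include "rocksdb/options.h"
#include "rocksdb/utilities/write_batch_with_index.h"

// Illustrative check: a staged write is readable back from the indexed batch
// before anything is committed to a DB.
bool StagedValueVisible() {
  rocksdb::WriteBatchWithIndex batch;
  batch.Put("key", "staged-value");

  std::string value;
  rocksdb::Status s =
      batch.GetFromBatch(rocksdb::DBOptions(), "key", &value);
  return s.ok() && value == "staged-value";
}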
@@ -61,7 +61,7 @@ struct SavePoint {
 class WriteBatch : public WriteBatchBase {
  public:
   explicit WriteBatch(size_t reserved_bytes = 0, size_t max_bytes = 0);
-  ~WriteBatch();
+  ~WriteBatch() override;
 
   using WriteBatchBase::Put;
   // Store the mapping "key->value" in the database.
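A sketch of the atomic-update pattern WriteBatch exists for, assuming an already-open rocksdb::DB*; the keys, values, and helper name are illustrative:

#include "rocksdb/db.h"
#include "rocksdb/write_batch.h"

// Illustrative helper: stage several operations and apply them in one
// atomic Write() call.
rocksdb::Status AtomicUpdate(rocksdb::DB* db) {
  rocksdb::WriteBatch batch;
  batch.Put("key1", "value1");  // staged, not yet visible
  batch.Delete("key2");         // staged delete
  return db->Write(rocksdb::WriteOptions(), &batch);
}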
@@ -62,8 +62,7 @@ public class WriteBatchThreadedTest {
         try (final WriteBatch wb = new WriteBatch();
              final WriteOptions w_opt = new WriteOptions()) {
           for (int i = offset; i < offset + 100; i++) {
-            wb.put(ByteBuffer.allocate(4).putInt(i).array(),
-                "parallel rocks test".getBytes());
+            wb.put(ByteBuffer.allocate(4).putInt(i).array(), "parallel rocks test".getBytes());
           }
           db.write(w_opt, wb);
         }