2013-10-16 21:59:46 +00:00
|
|
|
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under the BSD-style license found in the
|
|
|
|
// LICENSE file in the root directory of this source tree. An additional grant
|
|
|
|
// of patent rights can be found in the PATENTS file in the same directory.
|
|
|
|
//
|
2011-03-18 22:37:00 +00:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
2012-09-24 21:01:01 +00:00
|
|
|
#include <algorithm>
|
2012-11-26 21:56:45 +00:00
|
|
|
#include <set>
|
2013-11-18 05:58:16 +00:00
|
|
|
#include <unistd.h>
|
2012-11-26 21:56:45 +00:00
|
|
|
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/db.h"
|
|
|
|
#include "rocksdb/filter_policy.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
#include "db/db_impl.h"
|
|
|
|
#include "db/filename.h"
|
|
|
|
#include "db/version_set.h"
|
|
|
|
#include "db/write_batch_internal.h"
|
2013-08-25 05:48:51 +00:00
|
|
|
#include "db/db_statistics.h"
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/cache.h"
|
|
|
|
#include "rocksdb/compaction_filter.h"
|
|
|
|
#include "rocksdb/env.h"
|
2013-10-29 00:54:09 +00:00
|
|
|
#include "rocksdb/table.h"
|
2012-04-17 15:36:46 +00:00
|
|
|
#include "util/hash.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
#include "util/logging.h"
|
2011-09-01 19:08:02 +00:00
|
|
|
#include "util/mutexlock.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
#include "util/testharness.h"
|
|
|
|
#include "util/testutil.h"
|
2013-03-21 22:59:47 +00:00
|
|
|
#include "utilities/merge_operators.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
namespace rocksdb {
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-11-01 17:50:08 +00:00
|
|
|
static bool SnappyCompressionSupported(const CompressionOptions& options) {
|
|
|
|
std::string out;
|
|
|
|
Slice in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
|
|
|
|
return port::Snappy_Compress(options, in.data(), in.size(), &out);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool ZlibCompressionSupported(const CompressionOptions& options) {
|
|
|
|
std::string out;
|
|
|
|
Slice in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
|
|
|
|
return port::Zlib_Compress(options, in.data(), in.size(), &out);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool BZip2CompressionSupported(const CompressionOptions& options) {
|
|
|
|
std::string out;
|
|
|
|
Slice in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
|
|
|
|
return port::BZip2_Compress(options, in.data(), in.size(), &out);
|
|
|
|
}
|
2012-10-28 06:13:17 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Build and return a random string of exactly `len` bytes drawn from `rnd`.
static std::string RandomString(Random* rnd, int len) {
  std::string result;
  test::RandomString(rnd, len, &result);
  return result;
}
|
|
|
|
|
2012-11-06 20:02:18 +00:00
|
|
|
namespace anon {
|
2012-04-17 15:36:46 +00:00
|
|
|
class AtomicCounter {
|
|
|
|
private:
|
|
|
|
port::Mutex mu_;
|
|
|
|
int count_;
|
|
|
|
public:
|
|
|
|
AtomicCounter() : count_(0) { }
|
|
|
|
void Increment() {
|
|
|
|
MutexLock l(&mu_);
|
|
|
|
count_++;
|
|
|
|
}
|
|
|
|
int Read() {
|
|
|
|
MutexLock l(&mu_);
|
|
|
|
return count_;
|
|
|
|
}
|
|
|
|
void Reset() {
|
|
|
|
MutexLock l(&mu_);
|
|
|
|
count_ = 0;
|
|
|
|
}
|
|
|
|
};
|
2013-07-28 18:53:08 +00:00
|
|
|
|
2012-04-17 15:36:46 +00:00
|
|
|
}
|
|
|
|
|
2011-06-22 02:36:45 +00:00
|
|
|
// Special Env used to delay background operations
//
// Wraps a real Env and exposes a set of fault-injection switches (the
// AtomicPointer members below). Each switch is "on" while it holds a
// non-nullptr value and "off" while it holds nullptr, so tests can flip
// behavior at runtime without extra locking.
class SpecialEnv : public EnvWrapper {
 public:
  // sstable Sync() calls are blocked while this pointer is non-nullptr.
  port::AtomicPointer delay_sstable_sync_;

  // Simulate no-space errors while this pointer is non-nullptr.
  port::AtomicPointer no_space_;

  // Simulate non-writable file system while this pointer is non-nullptr
  port::AtomicPointer non_writable_;

  // Force sync of manifest files to fail while this pointer is non-nullptr
  port::AtomicPointer manifest_sync_error_;

  // Force write to manifest files to fail while this pointer is non-nullptr
  port::AtomicPointer manifest_write_error_;

  // Force write to log files to fail while this pointer is non-nullptr
  port::AtomicPointer log_write_error_;

  // When true, files returned by NewRandomAccessFile() are wrapped so
  // that every Read() increments random_read_counter_.
  bool count_random_reads_;
  anon::AtomicCounter random_read_counter_;

  // Number of times SleepForMicroseconds() has been called on this Env.
  anon::AtomicCounter sleep_counter_;

  explicit SpecialEnv(Env* base) : EnvWrapper(base) {
    // All fault-injection switches start out disabled.
    delay_sstable_sync_.Release_Store(nullptr);
    no_space_.Release_Store(nullptr);
    non_writable_.Release_Store(nullptr);
    count_random_reads_ = false;
    manifest_sync_error_.Release_Store(nullptr);
    manifest_write_error_.Release_Store(nullptr);
    log_write_error_.Release_Store(nullptr);
  }

  // Creates a writable file via the wrapped Env and, on success, wraps the
  // result in a fault-injecting shim selected from the file name. The
  // strstr checks run in order: ".sst", then "MANIFEST", then "log".
  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
                         const EnvOptions& soptions) {
    // Shim for table files: honors no_space_ and delay_sstable_sync_.
    class SSTableFile : public WritableFile {
     private:
      SpecialEnv* env_;
      unique_ptr<WritableFile> base_;

     public:
      SSTableFile(SpecialEnv* env, unique_ptr<WritableFile>&& base)
          : env_(env),
            base_(std::move(base)) {
      }
      Status Append(const Slice& data) {
        if (env_->no_space_.Acquire_Load() != nullptr) {
          // Drop writes on the floor
          return Status::OK();
        } else {
          return base_->Append(data);
        }
      }
      Status Close() { return base_->Close(); }
      Status Flush() { return base_->Flush(); }
      Status Sync() {
        // Block (sleeping in 100ms slices) until the test clears the latch.
        while (env_->delay_sstable_sync_.Acquire_Load() != nullptr) {
          env_->SleepForMicroseconds(100000);
        }
        return base_->Sync();
      }
    };
    // Shim for MANIFEST files: honors manifest_write_error_ and
    // manifest_sync_error_.
    class ManifestFile : public WritableFile {
     private:
      SpecialEnv* env_;
      unique_ptr<WritableFile> base_;
     public:
      ManifestFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
          : env_(env), base_(std::move(b)) { }
      Status Append(const Slice& data) {
        if (env_->manifest_write_error_.Acquire_Load() != nullptr) {
          return Status::IOError("simulated writer error");
        } else {
          return base_->Append(data);
        }
      }
      Status Close() { return base_->Close(); }
      Status Flush() { return base_->Flush(); }
      Status Sync() {
        if (env_->manifest_sync_error_.Acquire_Load() != nullptr) {
          return Status::IOError("simulated sync error");
        } else {
          return base_->Sync();
        }
      }
    };
    // Shim for log files: honors log_write_error_.
    class LogFile : public WritableFile {
     private:
      SpecialEnv* env_;
      unique_ptr<WritableFile> base_;
     public:
      LogFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
          : env_(env), base_(std::move(b)) { }
      Status Append(const Slice& data) {
        if (env_->log_write_error_.Acquire_Load() != nullptr) {
          return Status::IOError("simulated writer error");
        } else {
          return base_->Append(data);
        }
      }
      Status Close() { return base_->Close(); }
      Status Flush() { return base_->Flush(); }
      Status Sync() { return base_->Sync(); }
    };

    // Refuse to create any file at all while non_writable_ is set.
    if (non_writable_.Acquire_Load() != nullptr) {
      return Status::IOError("simulated write error");
    }

    Status s = target()->NewWritableFile(f, r, soptions);
    if (s.ok()) {
      if (strstr(f.c_str(), ".sst") != nullptr) {
        r->reset(new SSTableFile(this, std::move(*r)));
      } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
        r->reset(new ManifestFile(this, std::move(*r)));
      } else if (strstr(f.c_str(), "log") != nullptr) {
        r->reset(new LogFile(this, std::move(*r)));
      }
    }
    return s;
  }

  // Opens a random-access file via the wrapped Env and, when
  // count_random_reads_ is set, wraps it so every Read() bumps
  // random_read_counter_.
  Status NewRandomAccessFile(const std::string& f,
                             unique_ptr<RandomAccessFile>* r,
                             const EnvOptions& soptions) {
    class CountingFile : public RandomAccessFile {
     private:
      unique_ptr<RandomAccessFile> target_;
      anon::AtomicCounter* counter_;
     public:
      CountingFile(unique_ptr<RandomAccessFile>&& target,
                   anon::AtomicCounter* counter)
          : target_(std::move(target)), counter_(counter) {
      }
      virtual Status Read(uint64_t offset, size_t n, Slice* result,
                          char* scratch) const {
        counter_->Increment();
        return target_->Read(offset, n, result, scratch);
      }
    };

    Status s = target()->NewRandomAccessFile(f, r, soptions);
    if (s.ok() && count_random_reads_) {
      r->reset(new CountingFile(std::move(*r), &random_read_counter_));
    }
    return s;
  }

  // Counts each sleep request before delegating to the wrapped Env.
  virtual void SleepForMicroseconds(int micros) {
    sleep_counter_.Increment();
    target()->SleepForMicroseconds(micros);
  }
};
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
class DBTest {
|
2012-04-17 15:36:46 +00:00
|
|
|
private:
|
|
|
|
const FilterPolicy* filter_policy_;
|
|
|
|
|
2013-08-23 06:10:02 +00:00
|
|
|
protected:
|
2012-04-17 15:36:46 +00:00
|
|
|
// Sequence of option configurations to try
|
|
|
|
enum OptionConfig {
|
|
|
|
kDefault,
|
2013-08-23 06:10:02 +00:00
|
|
|
kVectorRep,
|
2013-03-21 22:59:47 +00:00
|
|
|
kMergePut,
|
2012-04-17 15:36:46 +00:00
|
|
|
kFilter,
|
2012-08-27 06:45:35 +00:00
|
|
|
kUncompressed,
|
2012-06-23 02:30:03 +00:00
|
|
|
kNumLevel_3,
|
2012-09-06 00:44:13 +00:00
|
|
|
kDBLogDir,
|
2013-10-01 21:46:52 +00:00
|
|
|
kWalDir,
|
2013-01-11 01:18:50 +00:00
|
|
|
kManifestFileSize,
|
2013-02-28 22:09:30 +00:00
|
|
|
kCompactOnFlush,
|
2013-03-02 20:56:04 +00:00
|
|
|
kPerfOptions,
|
2013-07-06 01:49:18 +00:00
|
|
|
kDeletesFilterFirst,
|
2013-12-03 20:42:15 +00:00
|
|
|
kHashSkipList,
|
2013-08-02 18:46:47 +00:00
|
|
|
kUniversalCompaction,
|
2013-09-02 06:23:40 +00:00
|
|
|
kCompressedBlockCache,
|
2012-04-17 15:36:46 +00:00
|
|
|
kEnd
|
|
|
|
};
|
|
|
|
int option_config_;
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
public:
|
|
|
|
std::string dbname_;
|
2011-06-22 02:36:45 +00:00
|
|
|
SpecialEnv* env_;
|
2011-03-18 22:37:00 +00:00
|
|
|
DB* db_;
|
|
|
|
|
|
|
|
Options last_options_;
|
|
|
|
|
2013-08-02 18:46:47 +00:00
|
|
|
// Skip some options, as they may not be applicable to a specific test.
|
|
|
|
// To add more skip constants, use values 4, 8, 16, etc.
|
|
|
|
enum OptionSkip {
|
|
|
|
kNoSkip = 0,
|
|
|
|
kSkipDeletesFilterFirst = 1,
|
2013-10-18 01:33:18 +00:00
|
|
|
kSkipUniversalCompaction = 2,
|
|
|
|
kSkipMergePut = 4
|
2013-08-02 18:46:47 +00:00
|
|
|
};
|
|
|
|
|
2012-04-17 15:36:46 +00:00
|
|
|
DBTest() : option_config_(kDefault),
|
|
|
|
env_(new SpecialEnv(Env::Default())) {
|
|
|
|
filter_policy_ = NewBloomFilterPolicy(10);
|
2011-03-18 22:37:00 +00:00
|
|
|
dbname_ = test::TmpDir() + "/db_test";
|
2013-10-04 17:21:03 +00:00
|
|
|
ASSERT_OK(DestroyDB(dbname_, Options()));
|
2013-03-01 02:04:58 +00:00
|
|
|
db_ = nullptr;
|
2011-03-18 22:37:00 +00:00
|
|
|
Reopen();
|
|
|
|
}
|
|
|
|
|
|
|
|
~DBTest() {
|
|
|
|
delete db_;
|
2013-10-11 01:02:10 +00:00
|
|
|
ASSERT_OK(DestroyDB(dbname_, Options()));
|
2011-06-22 02:36:45 +00:00
|
|
|
delete env_;
|
2012-04-17 15:36:46 +00:00
|
|
|
delete filter_policy_;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Switch to a fresh database with the next option configuration to
|
|
|
|
// test. Return false if there are no more configurations to test.
|
2013-08-02 18:46:47 +00:00
|
|
|
bool ChangeOptions(int skip_mask = kNoSkip) {
|
2012-08-27 06:45:35 +00:00
|
|
|
option_config_++;
|
2013-08-02 18:46:47 +00:00
|
|
|
|
|
|
|
// skip some options
|
|
|
|
if (skip_mask & kSkipDeletesFilterFirst &&
|
|
|
|
option_config_ == kDeletesFilterFirst) {
|
|
|
|
option_config_++;
|
|
|
|
}
|
|
|
|
if (skip_mask & kSkipUniversalCompaction &&
|
|
|
|
option_config_ == kUniversalCompaction) {
|
|
|
|
option_config_++;
|
|
|
|
}
|
2013-10-18 01:33:18 +00:00
|
|
|
if (skip_mask & kSkipMergePut && option_config_ == kMergePut) {
|
|
|
|
option_config_++;
|
|
|
|
}
|
2012-08-27 06:45:35 +00:00
|
|
|
if (option_config_ >= kEnd) {
|
2013-10-04 17:21:03 +00:00
|
|
|
Destroy(&last_options_);
|
2012-04-17 15:36:46 +00:00
|
|
|
return false;
|
|
|
|
} else {
|
|
|
|
DestroyAndReopen();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-08-07 22:20:41 +00:00
|
|
|
// Switch between different compaction styles (we have only 2 now).
|
2013-10-01 21:46:52 +00:00
|
|
|
bool ChangeCompactOptions(Options* prev_options = nullptr) {
|
2013-08-07 22:20:41 +00:00
|
|
|
if (option_config_ == kDefault) {
|
|
|
|
option_config_ = kUniversalCompaction;
|
2013-10-01 21:46:52 +00:00
|
|
|
if (prev_options == nullptr) {
|
|
|
|
prev_options = &last_options_;
|
|
|
|
}
|
|
|
|
Destroy(prev_options);
|
|
|
|
TryReopen();
|
2013-08-07 22:20:41 +00:00
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-04-17 15:36:46 +00:00
|
|
|
// Return the current option configuration.
|
|
|
|
Options CurrentOptions() {
|
|
|
|
Options options;
|
|
|
|
switch (option_config_) {
|
2013-12-03 20:42:15 +00:00
|
|
|
case kHashSkipList:
|
|
|
|
options.memtable_factory.reset(
|
|
|
|
NewHashSkipListRepFactory(NewFixedPrefixTransform(1)));
|
2013-08-23 06:10:02 +00:00
|
|
|
break;
|
2013-03-21 22:59:47 +00:00
|
|
|
case kMergePut:
|
2013-08-20 20:35:28 +00:00
|
|
|
options.merge_operator = MergeOperators::CreatePutOperator();
|
2013-03-21 22:59:47 +00:00
|
|
|
break;
|
2012-04-17 15:36:46 +00:00
|
|
|
case kFilter:
|
|
|
|
options.filter_policy = filter_policy_;
|
|
|
|
break;
|
2012-08-27 06:45:35 +00:00
|
|
|
case kUncompressed:
|
2012-09-06 00:44:13 +00:00
|
|
|
options.compression = kNoCompression;
|
|
|
|
break;
|
2012-06-23 02:30:03 +00:00
|
|
|
case kNumLevel_3:
|
2012-09-06 00:44:13 +00:00
|
|
|
options.num_levels = 3;
|
|
|
|
break;
|
|
|
|
case kDBLogDir:
|
|
|
|
options.db_log_dir = test::TmpDir();
|
|
|
|
break;
|
2013-10-01 21:46:52 +00:00
|
|
|
case kWalDir:
|
|
|
|
options.wal_dir = "/tmp/wal";
|
|
|
|
break;
|
2013-01-11 01:18:50 +00:00
|
|
|
case kManifestFileSize:
|
|
|
|
options.max_manifest_file_size = 50; // 50 bytes
|
2013-02-28 22:09:30 +00:00
|
|
|
case kCompactOnFlush:
|
2013-10-04 17:21:03 +00:00
|
|
|
options.purge_redundant_kvs_while_flush =
|
|
|
|
!options.purge_redundant_kvs_while_flush;
|
2013-03-02 20:56:04 +00:00
|
|
|
break;
|
|
|
|
case kPerfOptions:
|
2013-08-05 22:43:49 +00:00
|
|
|
options.hard_rate_limit = 2.0;
|
|
|
|
options.rate_limit_delay_max_milliseconds = 2;
|
2013-03-02 20:56:04 +00:00
|
|
|
// TODO -- test more options
|
|
|
|
break;
|
2013-07-06 01:49:18 +00:00
|
|
|
case kDeletesFilterFirst:
|
2013-07-12 23:56:52 +00:00
|
|
|
options.filter_deletes = true;
|
2013-07-06 01:49:18 +00:00
|
|
|
break;
|
2013-08-23 06:10:02 +00:00
|
|
|
case kVectorRep:
|
2013-09-25 05:23:19 +00:00
|
|
|
options.memtable_factory.reset(new VectorRepFactory(100));
|
2013-08-23 06:10:02 +00:00
|
|
|
break;
|
2013-08-02 18:46:47 +00:00
|
|
|
case kUniversalCompaction:
|
|
|
|
options.compaction_style = kCompactionStyleUniversal;
|
|
|
|
break;
|
2013-09-02 06:23:40 +00:00
|
|
|
case kCompressedBlockCache:
|
|
|
|
options.block_cache_compressed = NewLRUCache(8*1024*1024);
|
|
|
|
break;
|
2012-04-17 15:36:46 +00:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return options;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
DBImpl* dbfull() {
|
|
|
|
return reinterpret_cast<DBImpl*>(db_);
|
|
|
|
}
|
|
|
|
|
2013-03-01 02:04:58 +00:00
|
|
|
void Reopen(Options* options = nullptr) {
|
2011-03-18 22:37:00 +00:00
|
|
|
ASSERT_OK(TryReopen(options));
|
|
|
|
}
|
|
|
|
|
2012-04-17 15:36:46 +00:00
|
|
|
void Close() {
|
|
|
|
delete db_;
|
2013-03-01 02:04:58 +00:00
|
|
|
db_ = nullptr;
|
2012-04-17 15:36:46 +00:00
|
|
|
}
|
|
|
|
|
2013-03-01 02:04:58 +00:00
|
|
|
void DestroyAndReopen(Options* options = nullptr) {
|
2013-10-04 17:21:03 +00:00
|
|
|
//Destroy using last options
|
|
|
|
Destroy(&last_options_);
|
|
|
|
ASSERT_OK(TryReopen(options));
|
|
|
|
}
|
|
|
|
|
|
|
|
void Destroy(Options* options) {
|
2011-03-18 22:37:00 +00:00
|
|
|
delete db_;
|
2013-03-01 02:04:58 +00:00
|
|
|
db_ = nullptr;
|
2013-10-11 01:02:10 +00:00
|
|
|
ASSERT_OK(DestroyDB(dbname_, *options));
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2012-08-18 07:26:50 +00:00
|
|
|
Status PureReopen(Options* options, DB** db) {
|
|
|
|
return DB::Open(*options, dbname_, db);
|
|
|
|
}
|
|
|
|
|
2013-10-01 21:46:52 +00:00
|
|
|
Status TryReopen(Options* options = nullptr) {
|
2011-03-18 22:37:00 +00:00
|
|
|
delete db_;
|
2013-03-01 02:04:58 +00:00
|
|
|
db_ = nullptr;
|
2011-03-18 22:37:00 +00:00
|
|
|
Options opts;
|
2013-03-01 02:04:58 +00:00
|
|
|
if (options != nullptr) {
|
2011-03-18 22:37:00 +00:00
|
|
|
opts = *options;
|
|
|
|
} else {
|
2012-04-17 15:36:46 +00:00
|
|
|
opts = CurrentOptions();
|
2011-03-18 22:37:00 +00:00
|
|
|
opts.create_if_missing = true;
|
|
|
|
}
|
|
|
|
last_options_ = opts;
|
|
|
|
|
|
|
|
return DB::Open(opts, dbname_, &db_);
|
|
|
|
}
|
|
|
|
|
In-place updates for equal keys and similar sized values
Summary:
Currently for each put, a fresh memory is allocated, and a new entry is added to the memtable with a new sequence number irrespective of whether the key already exists in the memtable. This diff is an attempt to update the value inplace for existing keys. It currently handles a very simple case:
1. Key already exists in the current memtable. Does not inplace update values in immutable memtable or snapshot
2. Latest value type is a 'put' ie kTypeValue
3. New value size is less than existing value, to avoid reallocating memory
TODO: For a put of an existing key, deallocate memory take by values, for other value types till a kTypeValue is found, ie. remove kTypeMerge.
TODO: Update the transaction log, to allow consistent reload of the memtable.
Test Plan: Added a unit test verifying the inplace update. But some other unit tests broken due to invalid sequence number checks. WIll fix them next.
Reviewers: xinyaohu, sumeet, haobo, dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D12423
Automatic commit by arc
2013-08-19 21:12:47 +00:00
|
|
|
Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions()) {
|
2013-03-21 22:59:47 +00:00
|
|
|
if (kMergePut == option_config_ ) {
|
In-place updates for equal keys and similar sized values
Summary:
Currently for each put, a fresh memory is allocated, and a new entry is added to the memtable with a new sequence number irrespective of whether the key already exists in the memtable. This diff is an attempt to update the value inplace for existing keys. It currently handles a very simple case:
1. Key already exists in the current memtable. Does not inplace update values in immutable memtable or snapshot
2. Latest value type is a 'put' ie kTypeValue
3. New value size is less than existing value, to avoid reallocating memory
TODO: For a put of an existing key, deallocate memory take by values, for other value types till a kTypeValue is found, ie. remove kTypeMerge.
TODO: Update the transaction log, to allow consistent reload of the memtable.
Test Plan: Added a unit test verifying the inplace update. But some other unit tests broken due to invalid sequence number checks. WIll fix them next.
Reviewers: xinyaohu, sumeet, haobo, dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D12423
Automatic commit by arc
2013-08-19 21:12:47 +00:00
|
|
|
return db_->Merge(wo, k, v);
|
2013-03-21 22:59:47 +00:00
|
|
|
} else {
|
In-place updates for equal keys and similar sized values
Summary:
Currently for each put, a fresh memory is allocated, and a new entry is added to the memtable with a new sequence number irrespective of whether the key already exists in the memtable. This diff is an attempt to update the value inplace for existing keys. It currently handles a very simple case:
1. Key already exists in the current memtable. Does not inplace update values in immutable memtable or snapshot
2. Latest value type is a 'put' ie kTypeValue
3. New value size is less than existing value, to avoid reallocating memory
TODO: For a put of an existing key, deallocate memory take by values, for other value types till a kTypeValue is found, ie. remove kTypeMerge.
TODO: Update the transaction log, to allow consistent reload of the memtable.
Test Plan: Added a unit test verifying the inplace update. But some other unit tests broken due to invalid sequence number checks. WIll fix them next.
Reviewers: xinyaohu, sumeet, haobo, dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D12423
Automatic commit by arc
2013-08-19 21:12:47 +00:00
|
|
|
return db_->Put(wo, k, v);
|
2013-03-21 22:59:47 +00:00
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Status Delete(const std::string& k) {
|
2011-04-12 19:38:58 +00:00
|
|
|
return db_->Delete(WriteOptions(), k);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2013-03-01 02:04:58 +00:00
|
|
|
std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
|
2011-03-18 22:37:00 +00:00
|
|
|
ReadOptions options;
|
2013-09-02 06:23:40 +00:00
|
|
|
options.verify_checksums = true;
|
2011-03-18 22:37:00 +00:00
|
|
|
options.snapshot = snapshot;
|
|
|
|
std::string result;
|
|
|
|
Status s = db_->Get(options, k, &result);
|
|
|
|
if (s.IsNotFound()) {
|
|
|
|
result = "NOT_FOUND";
|
|
|
|
} else if (!s.ok()) {
|
|
|
|
result = s.ToString();
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2011-10-31 17:22:06 +00:00
|
|
|
// Return a string that contains all key,value pairs in order,
|
|
|
|
// formatted like "(k1->v1)(k2->v2)".
|
|
|
|
std::string Contents() {
|
|
|
|
std::vector<std::string> forward;
|
|
|
|
std::string result;
|
|
|
|
Iterator* iter = db_->NewIterator(ReadOptions());
|
|
|
|
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
|
|
|
|
std::string s = IterStatus(iter);
|
|
|
|
result.push_back('(');
|
|
|
|
result.append(s);
|
|
|
|
result.push_back(')');
|
|
|
|
forward.push_back(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check reverse iteration results are the reverse of forward results
|
2012-11-06 20:02:18 +00:00
|
|
|
unsigned int matched = 0;
|
2011-10-31 17:22:06 +00:00
|
|
|
for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
|
|
|
|
ASSERT_LT(matched, forward.size());
|
|
|
|
ASSERT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
|
|
|
|
matched++;
|
|
|
|
}
|
|
|
|
ASSERT_EQ(matched, forward.size());
|
|
|
|
|
|
|
|
delete iter;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
std::string AllEntriesFor(const Slice& user_key) {
|
|
|
|
Iterator* iter = dbfull()->TEST_NewInternalIterator();
|
|
|
|
InternalKey target(user_key, kMaxSequenceNumber, kTypeValue);
|
|
|
|
iter->Seek(target.Encode());
|
|
|
|
std::string result;
|
|
|
|
if (!iter->status().ok()) {
|
|
|
|
result = iter->status().ToString();
|
|
|
|
} else {
|
|
|
|
result = "[ ";
|
|
|
|
bool first = true;
|
|
|
|
while (iter->Valid()) {
|
2013-11-17 21:52:55 +00:00
|
|
|
ParsedInternalKey ikey(Slice(), 0, kTypeValue);
|
2011-03-18 22:37:00 +00:00
|
|
|
if (!ParseInternalKey(iter->key(), &ikey)) {
|
|
|
|
result += "CORRUPTED";
|
|
|
|
} else {
|
2012-04-17 15:36:46 +00:00
|
|
|
if (last_options_.comparator->Compare(ikey.user_key, user_key) != 0) {
|
2011-03-18 22:37:00 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (!first) {
|
|
|
|
result += ", ";
|
|
|
|
}
|
|
|
|
first = false;
|
|
|
|
switch (ikey.type) {
|
|
|
|
case kTypeValue:
|
|
|
|
result += iter->value().ToString();
|
|
|
|
break;
|
2013-03-21 22:59:47 +00:00
|
|
|
case kTypeMerge:
|
|
|
|
// keep it the same as kTypeValue for testing kMergePut
|
|
|
|
result += iter->value().ToString();
|
|
|
|
break;
|
2011-03-18 22:37:00 +00:00
|
|
|
case kTypeDeletion:
|
|
|
|
result += "DEL";
|
|
|
|
break;
|
2013-08-14 23:32:46 +00:00
|
|
|
case kTypeLogData:
|
|
|
|
assert(false);
|
|
|
|
break;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
iter->Next();
|
|
|
|
}
|
|
|
|
if (!first) {
|
|
|
|
result += " ";
|
|
|
|
}
|
|
|
|
result += "]";
|
|
|
|
}
|
|
|
|
delete iter;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
int NumTableFilesAtLevel(int level) {
|
2011-04-12 19:38:58 +00:00
|
|
|
std::string property;
|
2011-03-18 22:37:00 +00:00
|
|
|
ASSERT_TRUE(
|
2013-10-05 05:32:05 +00:00
|
|
|
db_->GetProperty("rocksdb.num-files-at-level" + NumberToString(level),
|
2011-04-12 19:38:58 +00:00
|
|
|
&property));
|
|
|
|
return atoi(property.c_str());
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2011-06-22 02:36:45 +00:00
|
|
|
int TotalTableFiles() {
|
|
|
|
int result = 0;
|
2012-06-23 02:30:03 +00:00
|
|
|
for (int level = 0; level < db_->NumberLevels(); level++) {
|
2011-06-22 02:36:45 +00:00
|
|
|
result += NumTableFilesAtLevel(level);
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2011-10-05 23:30:28 +00:00
|
|
|
// Return spread of files per level
|
|
|
|
std::string FilesPerLevel() {
|
|
|
|
std::string result;
|
|
|
|
int last_non_zero_offset = 0;
|
2012-06-23 02:30:03 +00:00
|
|
|
for (int level = 0; level < db_->NumberLevels(); level++) {
|
2011-10-05 23:30:28 +00:00
|
|
|
int f = NumTableFilesAtLevel(level);
|
|
|
|
char buf[100];
|
|
|
|
snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
|
|
|
|
result += buf;
|
|
|
|
if (f > 0) {
|
|
|
|
last_non_zero_offset = result.size();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
result.resize(last_non_zero_offset);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2012-01-25 22:56:52 +00:00
|
|
|
int CountFiles() {
|
|
|
|
std::vector<std::string> files;
|
|
|
|
env_->GetChildren(dbname_, &files);
|
2013-10-01 21:46:52 +00:00
|
|
|
|
|
|
|
std::vector<std::string> logfiles;
|
|
|
|
if (dbname_ != last_options_.wal_dir) {
|
|
|
|
env_->GetChildren(last_options_.wal_dir, &logfiles);
|
|
|
|
}
|
|
|
|
|
|
|
|
return static_cast<int>(files.size() + logfiles.size());
|
2012-01-25 22:56:52 +00:00
|
|
|
}
|
|
|
|
|
2012-10-29 18:12:24 +00:00
|
|
|
int CountLiveFiles() {
|
|
|
|
std::vector<std::string> files;
|
|
|
|
uint64_t manifest_file_size;
|
|
|
|
db_->GetLiveFiles(files, &manifest_file_size);
|
|
|
|
return files.size();
|
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
uint64_t Size(const Slice& start, const Slice& limit) {
|
|
|
|
Range r(start, limit);
|
|
|
|
uint64_t size;
|
|
|
|
db_->GetApproximateSizes(&r, 1, &size);
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
2011-03-22 18:32:49 +00:00
|
|
|
void Compact(const Slice& start, const Slice& limit) {
|
2011-10-05 23:30:28 +00:00
|
|
|
db_->CompactRange(&start, &limit);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Do n memtable compactions, each of which produces an sstable
|
|
|
|
// covering the range [small,large].
|
|
|
|
void MakeTables(int n, const std::string& small, const std::string& large) {
|
|
|
|
for (int i = 0; i < n; i++) {
|
|
|
|
Put(small, "begin");
|
|
|
|
Put(large, "end");
|
2013-10-14 22:12:15 +00:00
|
|
|
dbfull()->TEST_FlushMemTable();
|
2011-03-22 18:32:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-06-22 02:36:45 +00:00
|
|
|
// Prevent pushing of new sstables into deeper levels by adding
|
|
|
|
// tables that cover a specified range to all levels.
|
|
|
|
void FillLevels(const std::string& smallest, const std::string& largest) {
|
2012-06-23 02:30:03 +00:00
|
|
|
MakeTables(db_->NumberLevels(), smallest, largest);
|
2011-06-22 02:36:45 +00:00
|
|
|
}
|
|
|
|
|
2011-03-22 18:32:49 +00:00
|
|
|
void DumpFileCounts(const char* label) {
|
|
|
|
fprintf(stderr, "---\n%s:\n", label);
|
|
|
|
fprintf(stderr, "maxoverlap: %lld\n",
|
|
|
|
static_cast<long long>(
|
|
|
|
dbfull()->TEST_MaxNextLevelOverlappingBytes()));
|
2012-06-23 02:30:03 +00:00
|
|
|
for (int level = 0; level < db_->NumberLevels(); level++) {
|
2011-03-22 18:32:49 +00:00
|
|
|
int num = NumTableFilesAtLevel(level);
|
|
|
|
if (num > 0) {
|
|
|
|
fprintf(stderr, " level %3d : %d files\n", level, num);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2011-03-25 20:27:43 +00:00
|
|
|
|
2011-10-05 23:30:28 +00:00
|
|
|
std::string DumpSSTableList() {
|
|
|
|
std::string property;
|
2013-10-05 05:32:05 +00:00
|
|
|
db_->GetProperty("rocksdb.sstables", &property);
|
2011-10-05 23:30:28 +00:00
|
|
|
return property;
|
|
|
|
}
|
|
|
|
|
2011-03-25 20:27:43 +00:00
|
|
|
std::string IterStatus(Iterator* iter) {
|
|
|
|
std::string result;
|
|
|
|
if (iter->Valid()) {
|
|
|
|
result = iter->key().ToString() + "->" + iter->value().ToString();
|
|
|
|
} else {
|
|
|
|
result = "(invalid)";
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
2013-03-21 22:12:35 +00:00
|
|
|
|
|
|
|
Options OptionsForLogIterTest() {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.create_if_missing = true;
|
|
|
|
options.WAL_ttl_seconds = 1000;
|
|
|
|
return options;
|
|
|
|
}
|
|
|
|
|
|
|
|
std::unique_ptr<TransactionLogIterator> OpenTransactionLogIter(
|
2013-10-25 02:09:02 +00:00
|
|
|
const SequenceNumber seq) {
|
2013-03-21 22:12:35 +00:00
|
|
|
unique_ptr<TransactionLogIterator> iter;
|
|
|
|
Status status = dbfull()->GetUpdatesSince(seq, &iter);
|
2013-10-04 17:21:03 +00:00
|
|
|
ASSERT_OK(status);
|
2013-03-21 22:12:35 +00:00
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
return std::move(iter);
|
|
|
|
}
|
|
|
|
|
|
|
|
std::string DummyString(size_t len, char c = 'a') {
|
|
|
|
return std::string(len, c);
|
|
|
|
}
|
2013-10-18 01:33:18 +00:00
|
|
|
|
|
|
|
void VerifyIterLast(std::string expected_key) {
|
|
|
|
Iterator* iter = db_->NewIterator(ReadOptions());
|
|
|
|
iter->SeekToLast();
|
|
|
|
ASSERT_EQ(IterStatus(iter), expected_key);
|
|
|
|
delete iter;
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
};
|
|
|
|
|
2013-11-17 07:44:39 +00:00
|
|
|
// Produce the canonical fixed-width test key for index i, in the form
// "key%06d" (e.g. 7 -> "key000007").
static std::string Key(int i) {
  char formatted[100];
  snprintf(formatted, sizeof(formatted), "key%06d", i);
  return std::string(formatted);
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// A freshly opened DB is non-null and contains no keys, under every
// option configuration exercised by ChangeOptions().
TEST(DBTest, Empty) {
  do {
    ASSERT_TRUE(db_ != nullptr);
    ASSERT_EQ("NOT_FOUND", Get("foo"));
  } while (ChangeOptions());
}
|
|
|
|
|
|
|
|
// Basic Put/Get round-trips, including overwriting an existing key,
// under every option configuration.
TEST(DBTest, ReadWrite) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_OK(Put("bar", "v2"));
    ASSERT_OK(Put("foo", "v3"));  // overwrite
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
  } while (ChangeOptions());
}
|
|
|
|
|
2013-11-13 06:46:51 +00:00
|
|
|
// Make sure that when options.block_cache is set, after a new table is
|
|
|
|
// created its index/filter blocks are added to block cache.
|
|
|
|
TEST(DBTest, IndexAndFilterBlocksOfNewTableAddedToCache) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
std::unique_ptr<const FilterPolicy> filter_policy(NewBloomFilterPolicy(20));
|
|
|
|
options.filter_policy = filter_policy.get();
|
|
|
|
options.create_if_missing = true;
|
|
|
|
options.statistics = rocksdb::CreateDBStatistics();
|
|
|
|
DestroyAndReopen(&options);
|
|
|
|
|
|
|
|
ASSERT_OK(db_->Put(WriteOptions(), "key", "val"));
|
|
|
|
// Create a new talbe.
|
|
|
|
dbfull()->Flush(FlushOptions());
|
|
|
|
|
|
|
|
// index/filter blocks added to block cache right after table creation.
|
|
|
|
ASSERT_EQ(1,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_INDEX_MISS));
|
|
|
|
ASSERT_EQ(1,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
|
|
|
|
ASSERT_EQ(2, /* only index/filter were added */
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
|
|
|
|
ASSERT_EQ(0,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_DATA_MISS));
|
|
|
|
|
|
|
|
// Make sure filter block is in cache.
|
|
|
|
std::string value;
|
|
|
|
ReadOptions ropt;
|
|
|
|
db_->KeyMayExist(ReadOptions(), "key", &value);
|
|
|
|
|
|
|
|
// Miss count should remain the same.
|
|
|
|
ASSERT_EQ(1,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
|
|
|
|
ASSERT_EQ(1,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT));
|
|
|
|
|
|
|
|
db_->KeyMayExist(ReadOptions(), "key", &value);
|
|
|
|
ASSERT_EQ(1,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
|
|
|
|
ASSERT_EQ(2,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT));
|
|
|
|
|
|
|
|
// Make sure index block is in cache.
|
|
|
|
auto index_block_hit =
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT);
|
|
|
|
value = Get("key");
|
|
|
|
ASSERT_EQ(1,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
|
|
|
|
ASSERT_EQ(index_block_hit + 1,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT));
|
|
|
|
|
|
|
|
value = Get("key");
|
|
|
|
ASSERT_EQ(1,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
|
|
|
|
ASSERT_EQ(index_block_hit + 2,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT));
|
|
|
|
}
|
|
|
|
|
2013-01-24 18:54:26 +00:00
|
|
|
// Reopening a DB with fewer levels than it already contains must fail with
// a Corruption status; restoring a sufficient level count must succeed.
TEST(DBTest, LevelLimitReopen) {
  Options options = CurrentOptions();
  Reopen(&options);

  // Write 1MB values until files reach level 2, so the DB needs >1 level.
  const std::string value(1024 * 1024, ' ');
  int i = 0;
  while (NumTableFilesAtLevel(2) == 0) {
    ASSERT_OK(Put(Key(i++), value));
  }

  options.num_levels = 1;
  options.max_bytes_for_level_multiplier_additional.resize(1, 1);
  Status s = TryReopen(&options);
  ASSERT_EQ(s.IsCorruption(), true);
  ASSERT_EQ(s.ToString(),
            "Corruption: VersionEdit: db already has "
            "more levels than options.num_levels");

  options.num_levels = 10;
  options.max_bytes_for_level_multiplier_additional.resize(10, 1);
  ASSERT_OK(TryReopen(&options));
}
|
|
|
|
|
2013-01-15 22:05:42 +00:00
|
|
|
// Verifies WritableFile block preallocation bookkeeping: blocks are only
// reserved as data is appended, in whole-block increments.
TEST(DBTest, Preallocation) {
  const std::string src = dbname_ + "/alloc_test";
  unique_ptr<WritableFile> srcfile;
  const EnvOptions soptions;
  ASSERT_OK(env_->NewWritableFile(src, &srcfile, soptions));
  srcfile->SetPreallocationBlockSize(1024 * 1024);

  // No writes should mean no preallocation
  size_t block_size, last_allocated_block;
  srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
  ASSERT_EQ(last_allocated_block, 0UL);

  // Small write should preallocate one block
  srcfile->Append("test");
  srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
  ASSERT_EQ(last_allocated_block, 1UL);

  // Write an entire preallocation block, make sure we increased by two.
  std::string buf(block_size, ' ');
  srcfile->Append(buf);
  srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
  ASSERT_EQ(last_allocated_block, 2UL);

  // Write five more blocks at once, ensure we're where we need to be.
  buf = std::string(block_size * 5, ' ');
  srcfile->Append(buf);
  srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
  ASSERT_EQ(last_allocated_block, 7UL);
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Put, overwrite, then Delete of a key; the final Get must report NOT_FOUND.
TEST(DBTest, PutDeleteGet) {
  do {
    ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    ASSERT_OK(db_->Delete(WriteOptions(), "foo"));
    ASSERT_EQ("NOT_FOUND", Get("foo"));
  } while (ChangeOptions());
}
|
|
|
|
|
2013-11-13 06:46:51 +00:00
|
|
|
|
2011-06-22 02:36:45 +00:00
|
|
|
// A key must remain readable while its memtable has been frozen (made
// immutable) but not yet flushed, which is forced here by blocking sstable
// sync and overflowing the small write buffer.
TEST(DBTest, GetFromImmutableLayer) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 100000;  // Small write buffer
    Reopen(&options);

    ASSERT_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));

    env_->delay_sstable_sync_.Release_Store(env_);  // Block sync calls
    Put("k1", std::string(100000, 'x'));            // Fill memtable
    Put("k2", std::string(100000, 'y'));            // Trigger compaction
    ASSERT_EQ("v1", Get("foo"));
    env_->delay_sstable_sync_.Release_Store(nullptr);  // Release sync calls
  } while (ChangeOptions());
}
|
|
|
|
|
|
|
|
// A key flushed out of the memtable must still be readable from the
// resulting sstable (version set).
TEST(DBTest, GetFromVersions) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_EQ("v1", Get("foo"));
  } while (ChangeOptions());
}
|
|
|
|
|
|
|
|
// Reads through a snapshot must see the value as of snapshot creation,
// both before and after the newer value is flushed to an sstable.
TEST(DBTest, GetSnapshot) {
  do {
    // Try with both a short key and a long key
    for (int i = 0; i < 2; i++) {
      std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
      ASSERT_OK(Put(key, "v1"));
      const Snapshot* s1 = db_->GetSnapshot();
      ASSERT_OK(Put(key, "v2"));
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      dbfull()->TEST_FlushMemTable();
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      db_->ReleaseSnapshot(s1);
    }
  } while (ChangeOptions());
}
|
|
|
|
|
|
|
|
TEST(DBTest, GetLevel0Ordering) {
  do {
    // Check that we process level-0 files in correct order. The code
    // below generates two level-0 files where the earlier one comes
    // before the later one in the level-0 file list since the earlier
    // one has a smaller "smallest" key.
    ASSERT_OK(Put("bar", "b"));
    ASSERT_OK(Put("foo", "v1"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_OK(Put("foo", "v2"));
    dbfull()->TEST_FlushMemTable();
    // The newer level-0 file must win for "foo".
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}
|
|
|
|
|
|
|
|
// A newer value in the memtable (and later flushed) must shadow an older
// value that compaction has already pushed to a lower level.
TEST(DBTest, GetOrderedByLevels) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    Compact("a", "z");
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_OK(Put("foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}
|
|
|
|
|
|
|
|
// With multiple disjoint files in a non-level-0 level, Get must locate the
// file whose key range covers the requested key.
TEST(DBTest, GetPicksCorrectFile) {
  do {
    // Arrange to have multiple files in a non-level-0 level.
    ASSERT_OK(Put("a", "va"));
    Compact("a", "b");
    ASSERT_OK(Put("x", "vx"));
    Compact("x", "y");
    ASSERT_OK(Put("f", "vf"));
    Compact("f", "g");
    ASSERT_EQ("va", Get("a"));
    ASSERT_EQ("vf", Get("f"));
    ASSERT_EQ("vx", Get("x"));
  } while (ChangeOptions());
}
|
|
|
|
|
2011-09-01 19:08:02 +00:00
|
|
|
TEST(DBTest, GetEncountersEmptyLevel) {
  do {
    // Arrange for the following to happen:
    //   * sstable A in level 0
    //   * nothing in level 1
    //   * sstable B in level 2
    // Then do enough Get() calls to arrange for an automatic compaction
    // of sstable A.  A bug would cause the compaction to be marked as
    // occurring at level 1 (instead of the correct level 0).

    // Step 1: First place sstables in levels 0 and 2
    int compaction_count = 0;
    while (NumTableFilesAtLevel(0) == 0 ||
           NumTableFilesAtLevel(2) == 0) {
      ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
      compaction_count++;
      Put("a", "begin");
      Put("z", "end");
      dbfull()->TEST_FlushMemTable();
    }

    // Step 2: clear level 1 if necessary.
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    ASSERT_EQ(NumTableFilesAtLevel(0), 1);
    ASSERT_EQ(NumTableFilesAtLevel(1), 0);
    ASSERT_EQ(NumTableFilesAtLevel(2), 1);

    // Step 3: read a bunch of times
    for (int i = 0; i < 1000; i++) {
      ASSERT_EQ("NOT_FOUND", Get("missing"));
    }

    // Step 4: Wait for compaction to finish
    env_->SleepForMicroseconds(1000000);

    // The level-0 file must not have been picked up by a (mislabeled)
    // seek-triggered compaction.
    ASSERT_EQ(NumTableFilesAtLevel(0), 1);  // XXX
  } while (ChangeOptions(kSkipUniversalCompaction));
}
|
|
|
|
|
2013-07-26 19:57:01 +00:00
|
|
|
// KeyMayExist can lead to a few false positives, but not false negatives.
|
|
|
|
// To make test deterministic, use a much larger number of bits per key-20 than
|
|
|
|
// bits in the key, so that false positives are eliminated
|
2013-07-06 01:49:18 +00:00
|
|
|
TEST(DBTest, KeyMayExist) {
|
|
|
|
do {
|
2013-07-26 19:57:01 +00:00
|
|
|
ReadOptions ropts;
|
|
|
|
std::string value;
|
2013-07-06 01:49:18 +00:00
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.filter_policy = NewBloomFilterPolicy(20);
|
2013-10-04 04:49:15 +00:00
|
|
|
options.statistics = rocksdb::CreateDBStatistics();
|
2013-07-06 01:49:18 +00:00
|
|
|
Reopen(&options);
|
|
|
|
|
2013-07-26 19:57:01 +00:00
|
|
|
ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value));
|
2013-07-06 01:49:18 +00:00
|
|
|
|
|
|
|
ASSERT_OK(db_->Put(WriteOptions(), "a", "b"));
|
2013-07-26 19:57:01 +00:00
|
|
|
bool value_found = false;
|
|
|
|
ASSERT_TRUE(db_->KeyMayExist(ropts, "a", &value, &value_found));
|
|
|
|
ASSERT_TRUE(value_found);
|
|
|
|
ASSERT_EQ("b", value);
|
2013-07-06 01:49:18 +00:00
|
|
|
|
|
|
|
dbfull()->Flush(FlushOptions());
|
2013-07-26 19:57:01 +00:00
|
|
|
value.clear();
|
2013-08-25 05:48:51 +00:00
|
|
|
|
|
|
|
long numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
|
2013-11-13 06:46:51 +00:00
|
|
|
long cache_added =
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
|
2013-07-26 19:57:01 +00:00
|
|
|
ASSERT_TRUE(db_->KeyMayExist(ropts, "a", &value, &value_found));
|
2013-08-25 20:40:02 +00:00
|
|
|
ASSERT_TRUE(!value_found);
|
2013-08-25 05:48:51 +00:00
|
|
|
// assert that no new files were opened and no new blocks were
|
|
|
|
// read into block cache.
|
|
|
|
ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
|
2013-11-13 06:46:51 +00:00
|
|
|
ASSERT_EQ(cache_added,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
|
2013-07-06 01:49:18 +00:00
|
|
|
|
|
|
|
ASSERT_OK(db_->Delete(WriteOptions(), "a"));
|
2013-08-25 05:48:51 +00:00
|
|
|
|
|
|
|
numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
|
2013-11-13 06:46:51 +00:00
|
|
|
cache_added =
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
|
2013-07-26 19:57:01 +00:00
|
|
|
ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value));
|
2013-08-25 05:48:51 +00:00
|
|
|
ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
|
2013-11-13 06:46:51 +00:00
|
|
|
ASSERT_EQ(cache_added,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
|
2013-07-06 01:49:18 +00:00
|
|
|
|
|
|
|
dbfull()->Flush(FlushOptions());
|
|
|
|
dbfull()->CompactRange(nullptr, nullptr);
|
2013-08-25 05:48:51 +00:00
|
|
|
|
|
|
|
numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
|
2013-11-13 06:46:51 +00:00
|
|
|
cache_added =
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
|
2013-07-26 19:57:01 +00:00
|
|
|
ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value));
|
2013-08-25 05:48:51 +00:00
|
|
|
ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
|
2013-11-13 06:46:51 +00:00
|
|
|
ASSERT_EQ(cache_added,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
|
2013-07-06 01:49:18 +00:00
|
|
|
|
|
|
|
ASSERT_OK(db_->Delete(WriteOptions(), "c"));
|
2013-08-25 05:48:51 +00:00
|
|
|
|
|
|
|
numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
|
2013-11-13 06:46:51 +00:00
|
|
|
cache_added =
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
|
2013-07-26 19:57:01 +00:00
|
|
|
ASSERT_TRUE(!db_->KeyMayExist(ropts, "c", &value));
|
2013-08-25 05:48:51 +00:00
|
|
|
ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
|
2013-11-13 06:46:51 +00:00
|
|
|
ASSERT_EQ(cache_added,
|
|
|
|
options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
|
2013-07-11 21:05:31 +00:00
|
|
|
|
|
|
|
delete options.filter_policy;
|
2013-07-06 01:49:18 +00:00
|
|
|
} while (ChangeOptions());
|
|
|
|
}
|
|
|
|
|
2013-08-25 05:48:51 +00:00
|
|
|
// A kBlockCacheTier (non-blocking) iterator must serve data that is in the
// memtable or block cache, and must return Incomplete — without doing any
// file I/O — when the data would require a disk read.
TEST(DBTest, NonBlockingIteration) {
  do {
    ReadOptions non_blocking_opts, regular_opts;
    Options options = CurrentOptions();
    options.statistics = rocksdb::CreateDBStatistics();
    non_blocking_opts.read_tier = kBlockCacheTier;
    Reopen(&options);
    // write one kv to the database.
    ASSERT_OK(db_->Put(WriteOptions(), "a", "b"));

    // scan using non-blocking iterator. We should find it because
    // it is in memtable.
    Iterator* iter = db_->NewIterator(non_blocking_opts);
    int count = 0;
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      ASSERT_OK(iter->status());
      count++;
    }
    ASSERT_EQ(count, 1);
    delete iter;

    // flush memtable to storage. Now, the key should not be in the
    // memtable neither in the block cache.
    dbfull()->Flush(FlushOptions());

    // verify that a non-blocking iterator does not find any
    // kvs. Neither does it do any IOs to storage.
    long numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
    long cache_added =
        options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
    iter = db_->NewIterator(non_blocking_opts);
    count = 0;
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      count++;
    }
    ASSERT_EQ(count, 0);
    ASSERT_TRUE(iter->status().IsIncomplete());
    ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
    ASSERT_EQ(cache_added,
              options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
    delete iter;

    // read in the specified block via a regular get
    ASSERT_EQ(Get("a"), "b");

    // verify that we can find it via a non-blocking scan
    numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
    cache_added =
        options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
    iter = db_->NewIterator(non_blocking_opts);
    count = 0;
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      ASSERT_OK(iter->status());
      count++;
    }
    ASSERT_EQ(count, 1);
    ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
    ASSERT_EQ(cache_added,
              options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
    delete iter;

  } while (ChangeOptions());
}
|
|
|
|
|
2013-07-12 23:56:52 +00:00
|
|
|
// A delete is skipped for key if KeyMayExist(key) returns False
|
|
|
|
// Tests Writebatch consistency and proper delete behaviour
|
|
|
|
TEST(DBTest, FilterDeletes) {
|
2013-08-07 22:20:41 +00:00
|
|
|
do {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.filter_policy = NewBloomFilterPolicy(20);
|
|
|
|
options.filter_deletes = true;
|
|
|
|
Reopen(&options);
|
|
|
|
WriteBatch batch;
|
|
|
|
|
|
|
|
batch.Delete("a");
|
|
|
|
dbfull()->Write(WriteOptions(), &batch);
|
|
|
|
ASSERT_EQ(AllEntriesFor("a"), "[ ]"); // Delete skipped
|
|
|
|
batch.Clear();
|
|
|
|
|
|
|
|
batch.Put("a", "b");
|
|
|
|
batch.Delete("a");
|
|
|
|
dbfull()->Write(WriteOptions(), &batch);
|
|
|
|
ASSERT_EQ(Get("a"), "NOT_FOUND");
|
|
|
|
ASSERT_EQ(AllEntriesFor("a"), "[ DEL, b ]"); // Delete issued
|
|
|
|
batch.Clear();
|
|
|
|
|
|
|
|
batch.Delete("c");
|
|
|
|
batch.Put("c", "d");
|
|
|
|
dbfull()->Write(WriteOptions(), &batch);
|
|
|
|
ASSERT_EQ(Get("c"), "d");
|
|
|
|
ASSERT_EQ(AllEntriesFor("c"), "[ d ]"); // Delete skipped
|
|
|
|
batch.Clear();
|
|
|
|
|
|
|
|
dbfull()->Flush(FlushOptions()); // A stray Flush
|
|
|
|
|
|
|
|
batch.Delete("c");
|
|
|
|
dbfull()->Write(WriteOptions(), &batch);
|
|
|
|
ASSERT_EQ(AllEntriesFor("c"), "[ DEL, d ]"); // Delete issued
|
|
|
|
batch.Clear();
|
2013-07-12 23:56:52 +00:00
|
|
|
|
2013-08-07 22:20:41 +00:00
|
|
|
delete options.filter_policy;
|
|
|
|
} while (ChangeCompactOptions());
|
2013-07-12 23:56:52 +00:00
|
|
|
}
|
|
|
|
|
2011-03-25 20:27:43 +00:00
|
|
|
// Every positioning operation on an iterator over an empty DB must leave
// the iterator invalid.
TEST(DBTest, IterEmpty) {
  do {
    Iterator* iter = db_->NewIterator(ReadOptions());

    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    iter->Seek("foo");
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    delete iter;
  } while (ChangeCompactOptions());
}
|
|
|
|
|
|
|
|
// Iterator behavior on a single-entry DB: all seek variants land on the one
// entry, and stepping past either end invalidates the iterator.
TEST(DBTest, IterSingle) {
  do {
    ASSERT_OK(Put("a", "va"));
    Iterator* iter = db_->NewIterator(ReadOptions());

    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    // Seek to a target before the entry lands on the entry.
    iter->Seek("");
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    iter->Seek("a");
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    // Seek past the only entry is invalid.
    iter->Seek("b");
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    delete iter;
  } while (ChangeCompactOptions());
}
|
|
|
|
|
|
|
|
// Iterator behavior over three entries: forward/backward traversal, seeks,
// direction switches, and snapshot isolation from concurrent writes.
TEST(DBTest, IterMulti) {
  do {
    ASSERT_OK(Put("a", "va"));
    ASSERT_OK(Put("b", "vb"));
    ASSERT_OK(Put("c", "vc"));
    Iterator* iter = db_->NewIterator(ReadOptions());

    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "b->vb");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "b->vb");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    // Seek lands on the first entry >= target.
    iter->Seek("");
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Seek("a");
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Seek("ax");
    ASSERT_EQ(IterStatus(iter), "b->vb");
    iter->Seek("b");
    ASSERT_EQ(IterStatus(iter), "b->vb");
    iter->Seek("z");
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    // Switch from reverse to forward
    iter->SeekToLast();
    iter->Prev();
    iter->Prev();
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "b->vb");

    // Switch from forward to reverse
    iter->SeekToFirst();
    iter->Next();
    iter->Next();
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "b->vb");

    // Make sure iter stays at snapshot
    ASSERT_OK(Put("a",  "va2"));
    ASSERT_OK(Put("a2", "va3"));
    ASSERT_OK(Put("b",  "vb2"));
    ASSERT_OK(Put("c",  "vc2"));
    ASSERT_OK(Delete("b"));
    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "b->vb");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "b->vb");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    delete iter;
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-07-28 18:53:08 +00:00
|
|
|
// Check that we can skip over a run of user keys
// by using reseek rather than sequential scan
TEST(DBTest, IterReseek) {
  Options options = CurrentOptions();
  // A reseek is triggered once more than this many versions of the same
  // user key have to be skipped sequentially.
  options.max_sequential_skip_in_iterations = 3;
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();
  DestroyAndReopen(&options);

  // insert two keys with same userkey and verify that
  // reseek is not invoked. For each of these test cases,
  // verify that we can find the next key "b".
  ASSERT_OK(Put("a", "one"));
  ASSERT_OK(Put("a", "two"));
  ASSERT_OK(Put("b", "bone"));
  Iterator* iter = db_->NewIterator(ReadOptions());
  iter->SeekToFirst();
  ASSERT_EQ(options.statistics.get()->getTickerCount(
      NUMBER_OF_RESEEKS_IN_ITERATION), 0);
  ASSERT_EQ(IterStatus(iter), "a->two");
  iter->Next();
  ASSERT_EQ(options.statistics.get()->getTickerCount(
      NUMBER_OF_RESEEKS_IN_ITERATION), 0);
  ASSERT_EQ(IterStatus(iter), "b->bone");
  delete iter;

  // insert a total of three keys with same userkey and verify
  // that reseek is still not invoked.
  ASSERT_OK(Put("a", "three"));
  iter = db_->NewIterator(ReadOptions());
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->three");
  iter->Next();
  ASSERT_EQ(options.statistics.get()->getTickerCount(
      NUMBER_OF_RESEEKS_IN_ITERATION), 0);
  ASSERT_EQ(IterStatus(iter), "b->bone");
  delete iter;

  // insert a total of four keys with same userkey and verify
  // that reseek is invoked.
  ASSERT_OK(Put("a", "four"));
  iter = db_->NewIterator(ReadOptions());
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->four");
  ASSERT_EQ(options.statistics.get()->getTickerCount(
      NUMBER_OF_RESEEKS_IN_ITERATION), 0);
  iter->Next();
  ASSERT_EQ(options.statistics.get()->getTickerCount(
      NUMBER_OF_RESEEKS_IN_ITERATION), 1);
  ASSERT_EQ(IterStatus(iter), "b->bone");
  delete iter;

  // Testing reverse iterator
  // At this point, we have three versions of "a" and one version of "b".
  // The reseek statistics is already at 1.
  int num_reseeks = (int)options.statistics.get()->getTickerCount(
      NUMBER_OF_RESEEKS_IN_ITERATION);

  // Insert another version of b and assert that reseek is not invoked
  ASSERT_OK(Put("b", "btwo"));
  iter = db_->NewIterator(ReadOptions());
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "b->btwo");
  ASSERT_EQ(options.statistics.get()->getTickerCount(
      NUMBER_OF_RESEEKS_IN_ITERATION), num_reseeks);
  iter->Prev();
  ASSERT_EQ(options.statistics.get()->getTickerCount(
      NUMBER_OF_RESEEKS_IN_ITERATION), num_reseeks+1);
  ASSERT_EQ(IterStatus(iter), "a->four");
  delete iter;

  // insert two more versions of b. This makes a total of 4 versions
  // of b and 4 versions of a.
  ASSERT_OK(Put("b", "bthree"));
  ASSERT_OK(Put("b", "bfour"));
  iter = db_->NewIterator(ReadOptions());
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "b->bfour");
  ASSERT_EQ(options.statistics.get()->getTickerCount(
      NUMBER_OF_RESEEKS_IN_ITERATION), num_reseeks + 2);
  iter->Prev();

  // the previous Prev call should have invoked reseek
  ASSERT_EQ(options.statistics.get()->getTickerCount(
      NUMBER_OF_RESEEKS_IN_ITERATION), num_reseeks + 3);
  ASSERT_EQ(IterStatus(iter), "a->four");
  delete iter;
}
|
|
|
|
|
2011-03-25 20:27:43 +00:00
|
|
|
// Forward and backward iteration over a mix of tiny and 100KB values.
TEST(DBTest, IterSmallAndLargeMix) {
  do {
    ASSERT_OK(Put("a", "va"));
    ASSERT_OK(Put("b", std::string(100000, 'b')));
    ASSERT_OK(Put("c", "vc"));
    ASSERT_OK(Put("d", std::string(100000, 'd')));
    ASSERT_OK(Put("e", std::string(100000, 'e')));

    Iterator* iter = db_->NewIterator(ReadOptions());

    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "(invalid)");

    delete iter;
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2011-08-16 01:21:01 +00:00
|
|
|
// An iterator must skip a deleted key when stepping backward from a
// later key.
TEST(DBTest, IterMultiWithDelete) {
  do {
    ASSERT_OK(Put("a", "va"));
    ASSERT_OK(Put("b", "vb"));
    ASSERT_OK(Put("c", "vc"));
    ASSERT_OK(Delete("b"));
    ASSERT_EQ("NOT_FOUND", Get("b"));

    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->Seek("c");
    ASSERT_EQ(IterStatus(iter), "c->vc");
    if (!CurrentOptions().merge_operator) {
      // TODO: merge operator does not support backward iteration yet
      iter->Prev();
      // Prev skips the deleted "b" and lands on "a".
      ASSERT_EQ(IterStatus(iter), "a->va");
    }
    delete iter;
  } while (ChangeOptions());
}
|
|
|
|
|
2013-10-18 01:33:18 +00:00
|
|
|
// Repeatedly delete the largest remaining key and verify that SeekToLast()
// (via VerifyIterLast) finds the new last entry each time, exercising
// reverse iteration over accumulating tombstones.
TEST(DBTest, IterPrevMaxSkip) {
  do {
    // Write every key twice so each key has multiple versions to skip.
    // Check the write statuses: the original silently ignored them, which
    // would make the iterator assertions below misleading on failure.
    for (int i = 0; i < 2; i++) {
      ASSERT_OK(db_->Put(WriteOptions(), "key1", "v1"));
      ASSERT_OK(db_->Put(WriteOptions(), "key2", "v2"));
      ASSERT_OK(db_->Put(WriteOptions(), "key3", "v3"));
      ASSERT_OK(db_->Put(WriteOptions(), "key4", "v4"));
      ASSERT_OK(db_->Put(WriteOptions(), "key5", "v5"));
    }

    VerifyIterLast("key5->v5");

    ASSERT_OK(db_->Delete(WriteOptions(), "key5"));
    VerifyIterLast("key4->v4");

    ASSERT_OK(db_->Delete(WriteOptions(), "key4"));
    VerifyIterLast("key3->v3");

    ASSERT_OK(db_->Delete(WriteOptions(), "key3"));
    VerifyIterLast("key2->v2");

    ASSERT_OK(db_->Delete(WriteOptions(), "key2"));
    VerifyIterLast("key1->v1");

    ASSERT_OK(db_->Delete(WriteOptions(), "key1"));
    VerifyIterLast("(invalid)");
  } while (ChangeOptions(kSkipMergePut));
}
|
|
|
|
|
2013-09-28 18:39:08 +00:00
|
|
|
// An iterator created with a snapshot must not observe writes made after
// the snapshot was taken ("key100"/"key101" stay invisible).
TEST(DBTest, IterWithSnapshot) {
  do {
    ASSERT_OK(Put("key1", "val1"));
    ASSERT_OK(Put("key2", "val2"));
    ASSERT_OK(Put("key3", "val3"));
    ASSERT_OK(Put("key4", "val4"));
    ASSERT_OK(Put("key5", "val5"));

    const Snapshot* snap = db_->GetSnapshot();
    ReadOptions read_options;
    read_options.snapshot = snap;
    Iterator* it = db_->NewIterator(read_options);

    // Put more values after the snapshot
    ASSERT_OK(Put("key100", "val100"));
    ASSERT_OK(Put("key101", "val101"));

    it->Seek("key5");
    ASSERT_EQ(IterStatus(it), "key5->val5");
    if (!CurrentOptions().merge_operator) {
      // TODO: merge operator does not support backward iteration yet
      it->Prev();
      ASSERT_EQ(IterStatus(it), "key4->val4");
      it->Prev();
      ASSERT_EQ(IterStatus(it), "key3->val3");

      it->Next();
      ASSERT_EQ(IterStatus(it), "key4->val4");
      it->Next();
      ASSERT_EQ(IterStatus(it), "key5->val5");
      // "key100"/"key101" are newer than the snapshot, so the iterator ends.
      it->Next();
      ASSERT_TRUE(!it->Valid());
    }
    db_->ReleaseSnapshot(snap);
    delete it;
  } while (ChangeOptions());
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Verify that committed writes survive a database reopen, and that the
// reopened database keeps accepting new writes and overwrites correctly.
TEST(DBTest, Recover) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("baz", "v5"));

    Reopen();
    ASSERT_EQ("v1", Get("foo"));

    // Read again after recovery (exercises the read path a second time).
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v5", Get("baz"));
    ASSERT_OK(Put("bar", "v2"));
    ASSERT_OK(Put("foo", "v3"));

    Reopen();
    // The latest overwrite of "foo" must win after recovery.
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_OK(Put("foo", "v4"));
    ASSERT_EQ("v4", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ("v5", Get("baz"));
  } while (ChangeOptions());
}
|
|
|
|
|
2012-08-17 23:06:05 +00:00
|
|
|
// Repeated reopens must keep rolling the log without losing previously
// written data or corrupting the database state.
TEST(DBTest, RollLog) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("baz", "v5"));

    Reopen();
    for (int attempt = 0; attempt < 10; attempt++) {
      Reopen();
    }
    ASSERT_OK(Put("foo", "v4"));
    for (int attempt = 0; attempt < 10; attempt++) {
      Reopen();
    }
  } while (ChangeOptions());
}
|
|
|
|
|
2012-07-05 20:39:28 +00:00
|
|
|
// Mix WAL-enabled and WAL-disabled writes and verify both are visible
// after each Reopen(). Note: disableWAL writes are still expected to
// survive reopen here — presumably the orderly shutdown flushes the
// memtable; the test only asserts the observable outcome.
// (The unused `Options options = CurrentOptions();` local from the
// original was removed; all reopens here take no options argument.)
TEST(DBTest, WAL) {
  do {
    WriteOptions writeOpt = WriteOptions();
    writeOpt.disableWAL = true;
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v1"));
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v1"));

    Reopen();
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v1", Get("bar"));

    // One write through the WAL, one bypassing it.
    writeOpt.disableWAL = false;
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v2"));
    writeOpt.disableWAL = true;
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v2"));

    Reopen();
    // Both value's should be present.
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ("v2", Get("foo"));

    // Same pattern with the WAL/non-WAL order swapped.
    writeOpt.disableWAL = true;
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v3"));
    writeOpt.disableWAL = false;
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v3"));

    Reopen();
    // again both values should be present.
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_EQ("v3", Get("bar"));
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2012-08-18 07:26:50 +00:00
|
|
|
// A second DB instance must fail to open a database directory that is
// already locked by the instance held open by this test fixture.
TEST(DBTest, CheckLock) {
  do {
    DB* localdb;
    Options options = CurrentOptions();
    ASSERT_OK(TryReopen(&options));

    // second open should fail
    Status second_open = PureReopen(&options, &localdb);
    ASSERT_TRUE(!second_open.ok());
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-09-05 00:24:35 +00:00
|
|
|
// With several write buffers allowed (and a merge threshold of 3),
// manual flushes interleaved with WAL-disabled writes must not lose data.
TEST(DBTest, FlushMultipleMemtable) {
  do {
    Options options = CurrentOptions();
    WriteOptions writeOpt = WriteOptions();
    writeOpt.disableWAL = true;
    options.max_write_buffer_number = 4;
    options.min_write_buffer_number_to_merge = 3;
    Reopen(&options);
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v1"));
    // Check the flush status (the original ignored it); with WAL disabled
    // a failed flush would silently drop "foo".
    ASSERT_OK(dbfull()->Flush(FlushOptions()));
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v1"));

    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v1", Get("bar"));
    ASSERT_OK(dbfull()->Flush(FlushOptions()));
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-10-04 22:17:54 +00:00
|
|
|
// Verify the "rocksdb.num-immutable-mem-table" property: each memtable-
// filling write converts the previous memtable to immutable (count goes
// 0 -> 1 -> 2), and a flush drains them back to 0.
TEST(DBTest, NumImmutableMemTable) {
  do {
    Options options = CurrentOptions();
    WriteOptions writeOpt = WriteOptions();
    writeOpt.disableWAL = true;
    options.max_write_buffer_number = 4;
    options.min_write_buffer_number_to_merge = 3;
    // Each big_value below exactly fills one write buffer.
    options.write_buffer_size = 1000000;
    Reopen(&options);

    std::string big_value(1000000, 'x');
    std::string num;

    // First fill: the active memtable is full but not yet swapped out.
    ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
    ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "0");

    // Second fill: previous memtable becomes immutable.
    ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
    ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "1");

    // Third fill: two immutable memtables now pending.
    ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
    ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "2");

    // Flush writes all pending memtables out; the count returns to zero.
    dbfull()->Flush(FlushOptions());
    ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "0");
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2012-07-06 18:42:09 +00:00
|
|
|
// Verify that an explicit Flush() persists WAL-disabled writes across
// reopens, in three combinations of WAL-on/WAL-off write ordering.
// NOTE(review): the local `options` is never used; kept byte-identical
// here since this edit only adds comments.
TEST(DBTest, FLUSH) {
  do {
    Options options = CurrentOptions();
    WriteOptions writeOpt = WriteOptions();
    writeOpt.disableWAL = true;
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v1"));
    // this will now also flush the last 2 writes
    dbfull()->Flush(FlushOptions());
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v1"));

    Reopen();
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v1", Get("bar"));

    // Both writes bypass the WAL; the explicit flush must persist them.
    writeOpt.disableWAL = true;
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v2"));
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v2"));
    dbfull()->Flush(FlushOptions());

    Reopen();
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ("v2", Get("foo"));

    // Mixed: "bar" bypasses the WAL, "foo" goes through it.
    writeOpt.disableWAL = false;
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v3"));
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v3"));
    dbfull()->Flush(FlushOptions());

    Reopen();
    // 'foo' should be there because its put
    // has WAL enabled.
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_EQ("v3", Get("bar"));
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Recovery must succeed when the most recent log file is empty: two
// back-to-back reopens leave an empty WAL, after which a write and a
// further reopen must still recover the latest value.
TEST(DBTest, RecoveryWithEmptyLog) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("foo", "v2"));
    Reopen();
    Reopen();
    ASSERT_OK(Put("foo", "v3"));
    Reopen();
    ASSERT_EQ("v3", Get("foo"));
  } while (ChangeOptions());
}
|
|
|
|
|
2011-06-22 02:36:45 +00:00
|
|
|
// Check that writes done during a memtable compaction are recovered
|
|
|
|
// if the database is shutdown during the memtable compaction.
|
|
|
|
// Writes made while a memtable compaction is in flight must be recovered
// if the database shuts down mid-compaction (see comment above the test).
TEST(DBTest, RecoverDuringMemtableCompaction) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    // Small enough that the 10MB value below overflows a single memtable.
    options.write_buffer_size = 1000000;
    Reopen(&options);

    // Trigger a long memtable compaction and reopen the database during it
    ASSERT_OK(Put("foo", "v1"));                         // Goes to 1st log file
    ASSERT_OK(Put("big1", std::string(10000000, 'x')));  // Fills memtable
    ASSERT_OK(Put("big2", std::string(1000, 'y')));      // Triggers compaction
    ASSERT_OK(Put("bar", "v2"));                         // Goes to new log file

    // Reopen while the background work may still be pending; every write
    // above must survive regardless of which log file it landed in.
    Reopen(&options);
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ(std::string(10000000, 'x'), Get("big1"));
    ASSERT_EQ(std::string(1000, 'y'), Get("big2"));
  } while (ChangeOptions());
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// A tiny write buffer must force minor compactions (memtable flushes)
// while writing, producing new table files; all data stays readable
// before and after a reopen.
TEST(DBTest, MinorCompactionsHappen) {
  do {
    Options options = CurrentOptions();
    options.write_buffer_size = 10000;
    Reopen(&options);

    const int kNumKeys = 500;

    int tables_before = TotalTableFiles();
    for (int i = 0; i < kNumKeys; i++) {
      ASSERT_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
    }
    int tables_after = TotalTableFiles();
    // Writing ~500KB through a 10KB buffer must have flushed new tables.
    ASSERT_GT(tables_after, tables_before);

    for (int i = 0; i < kNumKeys; i++) {
      ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
    }

    Reopen();

    for (int i = 0; i < kNumKeys; i++) {
      ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
    }
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-01-11 01:18:50 +00:00
|
|
|
// With a tiny max_manifest_file_size, every LogAndApply (flush, reopen)
// must roll over to a new manifest file while keeping data readable.
TEST(DBTest, ManifestRollOver) {
  do {
    Options options = CurrentOptions();
    options.max_manifest_file_size = 10;  // 10 bytes
    Reopen(&options);
    {
      ASSERT_OK(Put("manifest_key1", std::string(1000, '1')));
      ASSERT_OK(Put("manifest_key2", std::string(1000, '2')));
      ASSERT_OK(Put("manifest_key3", std::string(1000, '3')));
      uint64_t manifest_before_flush =
          dbfull()->TEST_Current_Manifest_FileNo();
      dbfull()->Flush(FlushOptions());  // This should trigger LogAndApply.
      uint64_t manifest_after_flush =
          dbfull()->TEST_Current_Manifest_FileNo();
      ASSERT_GT(manifest_after_flush, manifest_before_flush);
      Reopen(&options);
      // check if a new manifest file got inserted or not.
      ASSERT_GT(dbfull()->TEST_Current_Manifest_FileNo(),
                manifest_after_flush);
      ASSERT_EQ(std::string(1000, '1'), Get("manifest_key1"));
      ASSERT_EQ(std::string(1000, '2'), Get("manifest_key2"));
      ASSERT_EQ(std::string(1000, '3'), Get("manifest_key3"));
    }
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-10-18 21:50:54 +00:00
|
|
|
// The IDENTITY file must be stable across reopens, but regenerated (with
// new contents) if it is deleted while the DB is closed.
// Fixes vs. original: the last comment said "id1 should NOT match id2"
// although the assertion compares id3, and `id1.ToString(0)` passed a
// stray 0 to Slice::ToString's hex flag — plain ToString() is intended.
TEST(DBTest, IdentityAcrossRestarts) {
  do {
    std::string idfilename = IdentityFileName(dbname_);
    unique_ptr<SequentialFile> idfile;
    const EnvOptions soptions;
    ASSERT_OK(env_->NewSequentialFile(idfilename, &idfile, soptions));
    char buffer1[100];
    Slice id1;
    ASSERT_OK(idfile->Read(100, &id1, buffer1));

    Options options = CurrentOptions();
    Reopen(&options);
    char buffer2[100];
    Slice id2;
    ASSERT_OK(env_->NewSequentialFile(idfilename, &idfile, soptions));
    ASSERT_OK(idfile->Read(100, &id2, buffer2));
    // id1 should match id2 because identity was not regenerated
    ASSERT_EQ(id1.ToString(), id2.ToString());

    ASSERT_OK(env_->DeleteFile(idfilename));
    Reopen(&options);
    char buffer3[100];
    Slice id3;
    ASSERT_OK(env_->NewSequentialFile(idfilename, &idfile, soptions));
    ASSERT_OK(idfile->Read(100, &id3, buffer3));
    // id1 should NOT match id3 because identity was regenerated
    ASSERT_NE(id1.ToString(), id3.ToString());
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Recovery of a large log with a small write buffer must split the log's
// contents into multiple level-0 table files during replay.
TEST(DBTest, RecoverWithLargeLog) {
  do {
    {
      Options options = CurrentOptions();
      Reopen(&options);
      // ~400KB of data stays in the log/memtable: no table files yet.
      ASSERT_OK(Put("big1", std::string(200000, '1')));
      ASSERT_OK(Put("big2", std::string(200000, '2')));
      ASSERT_OK(Put("small3", std::string(10, '3')));
      ASSERT_OK(Put("small4", std::string(10, '4')));
      ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    }

    // Make sure that if we re-open with a small write buffer size that
    // we flush table files in the middle of a large log file.
    Options options = CurrentOptions();
    options.write_buffer_size = 100000;
    Reopen(&options);
    ASSERT_EQ(NumTableFilesAtLevel(0), 3);
    ASSERT_EQ(std::string(200000, '1'), Get("big1"));
    ASSERT_EQ(std::string(200000, '2'), Get("big2"));
    ASSERT_EQ(std::string(10, '3'), Get("small3"));
    ASSERT_EQ(std::string(10, '4'), Get("small4"));
    ASSERT_GT(NumTableFilesAtLevel(0), 1);
  } while (ChangeCompactOptions());
}
|
|
|
|
|
|
|
|
// Compacting a large level-0 file must split its output into multiple
// level-1 files, with all values still readable afterwards.
TEST(DBTest, CompactionsGenerateMultipleFiles) {
  Options options = CurrentOptions();
  options.write_buffer_size = 100000000;  // Large write buffer
  Reopen(&options);

  Random rnd(301);

  // Write 8MB (80 values, each 100K)
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  std::vector<std::string> values;
  for (int i = 0; i < 80; i++) {
    std::string value = RandomString(&rnd, 100000);
    ASSERT_OK(Put(Key(i), value));
    values.push_back(value);
  }

  // Reopening moves updates to level-0
  Reopen(&options);
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);

  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_GT(NumTableFilesAtLevel(1), 1);
  for (int i = 0; i < 80; i++) {
    ASSERT_EQ(Get(Key(i)), values[i]);
  }
}
|
|
|
|
|
2012-06-23 02:30:03 +00:00
|
|
|
// Writing exactly level0_file_num_compaction_trigger level-0 files must
// trigger a level-0 -> level-1 compaction that consumes them all.
TEST(DBTest, CompactionTrigger) {
  Options options = CurrentOptions();
  options.write_buffer_size = 100<<10; //100KB
  options.num_levels = 3;
  // Flushes land in level 0 directly (no push to a deeper level).
  options.max_mem_compaction_level = 0;
  options.level0_file_num_compaction_trigger = 3;
  Reopen(&options);

  Random rnd(301);

  // Fill level 0 up to one file below the trigger.
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger - 1;
       num++) {
    std::vector<std::string> values;
    // Write 120KB (12 values, each 10K)
    for (int i = 0; i < 12; i++) {
      values.push_back(RandomString(&rnd, 10000));
      ASSERT_OK(Put(Key(i), values[i]));
    }
    dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
  }

  //generate one more file in level-0, and should trigger level-0 compaction
  std::vector<std::string> values;
  for (int i = 0; i < 12; i++) {
    values.push_back(RandomString(&rnd, 10000));
    ASSERT_OK(Put(Key(i), values[i]));
  }
  dbfull()->TEST_WaitForCompact();

  // The compaction consumed every level-0 file into one level-1 file.
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
}
|
|
|
|
|
2013-08-07 22:20:41 +00:00
|
|
|
// End-to-end check of universal compaction's file-picking behavior across
// five stages. All output stays in level 0; the stage comments track the
// expected relative file sizes (a memtable flush counts as size 1).
TEST(DBTest, UniversalCompactionTrigger) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100<<10; //100KB
  // trigger compaction if there are >= 4 files
  options.level0_file_num_compaction_trigger = 4;
  Reopen(&options);

  Random rnd(301);
  int key_idx = 0;

  // Stage 1:
  // Generate a set of files at level 0, but don't trigger level-0
  // compaction.
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger-1;
       num++) {
    // Write 120KB (12 values, each 10K)
    for (int i = 0; i < 12; i++) {
      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
  }

  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  for (int i = 0; i < 12; i++) {
    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
    key_idx++;
  }
  dbfull()->TEST_WaitForCompact();
  // Suppose each file flushed from mem table has size 1. Now we compact
  // (level0_file_num_compaction_trigger+1)=4 files and should have a big
  // file of size 4.
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  for (int i = 1; i < options.num_levels ; i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }

  // Stage 2:
  // Now we have one file at level 0, with size 4. We also have some data in
  // mem table. Let's continue generating new files at level 0, but don't
  // trigger level-0 compaction.
  // First, clean up memtable before inserting new data. This will generate
  // a level-0 file, with size around 0.4 (according to previously written
  // data amount).
  dbfull()->Flush(FlushOptions());
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger-3;
       num++) {
    // Write 120KB (12 values, each 10K)
    for (int i = 0; i < 12; i++) {
      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
  }

  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  for (int i = 0; i < 12; i++) {
    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
    key_idx++;
  }
  dbfull()->TEST_WaitForCompact();
  // Before compaction, we have 4 files at level 0, with size 4, 0.4, 1, 1.
  // After compaction, we should have 2 files, with size 4, 2.4.
  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
  for (int i = 1; i < options.num_levels ; i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }

  // Stage 3:
  // Now we have 2 files at level 0, with size 4 and 2.4. Continue
  // generating new files at level 0.
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger-3;
       num++) {
    // Write 120KB (12 values, each 10K)
    for (int i = 0; i < 12; i++) {
      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
  }

  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  for (int i = 0; i < 12; i++) {
    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
    key_idx++;
  }
  dbfull()->TEST_WaitForCompact();
  // Before compaction, we have 4 files at level 0, with size 4, 2.4, 1, 1.
  // After compaction, we should have 3 files, with size 4, 2.4, 2.
  ASSERT_EQ(NumTableFilesAtLevel(0), 3);
  for (int i = 1; i < options.num_levels ; i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }

  // Stage 4:
  // Now we have 3 files at level 0, with size 4, 2.4, 2. Let's generate a
  // new file of size 1.
  for (int i = 0; i < 12; i++) {
    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
    key_idx++;
  }
  dbfull()->TEST_WaitForCompact();
  // Level-0 compaction is triggered, but no file will be picked up.
  ASSERT_EQ(NumTableFilesAtLevel(0), 4);
  for (int i = 1; i < options.num_levels ; i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }

  // Stage 5:
  // Now we have 4 files at level 0, with size 4, 2.4, 2, 1. Let's generate
  // a new file of size 1.
  for (int i = 0; i < 12; i++) {
    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
    key_idx++;
  }
  dbfull()->TEST_WaitForCompact();
  // All files at level 0 will be compacted into a single one.
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  for (int i = 1; i < options.num_levels ; i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }
}
|
|
|
|
|
2013-09-09 23:06:10 +00:00
|
|
|
// Universal compaction must fire on the size-amplification criterion
// (not the size-ratio one) and collapse level 0 into a single file.
TEST(DBTest, UniversalCompactionSizeAmplification) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100<<10; //100KB
  options.level0_file_num_compaction_trigger = 3;

  // Trigger compaction if size amplification exceeds 110%
  options.compaction_options_universal.
      max_size_amplification_percent = 110;
  Reopen(&options);

  Random rnd(301);
  int key_idx = 0;

  // Generate two files in Level 0. Both files are approx the same size.
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger-1;
       num++) {
    // Write 120KB (12 values, each 10K)
    for (int i = 0; i < 12; i++) {
      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
  }
  ASSERT_EQ(NumTableFilesAtLevel(0), 2);

  // Flush whatever is remaining in memtable. This is typically
  // small, which should not trigger size ratio based compaction
  // but will instead trigger size amplification.
  dbfull()->Flush(FlushOptions());

  dbfull()->TEST_WaitForCompact();

  // Verify that size amplification did occur
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
}
|
|
|
|
|
2013-10-02 23:20:17 +00:00
|
|
|
// With compression disabled for universal compaction
// (compression_size_percent = -1), hitting the file-count trigger must
// compact all level-0 files into one.
TEST(DBTest, UniversalCompactionOptions) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100<<10;  // 100KB
  options.level0_file_num_compaction_trigger = 4;
  options.num_levels = 1;
  options.compaction_options_universal.compression_size_percent = -1;
  Reopen(&options);

  Random rnd(301);
  int next_key = 0;

  for (int file_num = 0;
       file_num < options.level0_file_num_compaction_trigger;
       file_num++) {
    // Write 120KB (12 values, each 10K)
    for (int i = 0; i < 12; i++) {
      ASSERT_OK(Put(Key(next_key), RandomString(&rnd, 10000)));
      next_key++;
    }
    dbfull()->TEST_WaitForFlushMemTable();

    // File count grows one per flush until the trigger is reached.
    if (file_num < options.level0_file_num_compaction_trigger - 1) {
      ASSERT_EQ(NumTableFilesAtLevel(0), file_num + 1);
    }
  }

  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  for (int i = 1; i < options.num_levels ; i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }
}
|
|
|
|
|
2013-11-27 21:32:56 +00:00
|
|
|
#if defined(SNAPPY) && defined(ZLIB) && defined(BZIP2)
|
|
|
|
// Exercise the three block-cache configurations (uncompressed only,
// compressed only, both) and verify via statistics tickers that the
// expected cache code paths were hit.
TEST(DBTest, CompressedCache) {
  int num_iter = 80;

  // Run this test three iterations.
  // Iteration 1: only a uncompressed block cache
  // Iteration 2: only a compressed block cache
  // Iteration 3: both block cache and compressed cache
  for (int iter = 0; iter < 3; iter++) {
    Options options = CurrentOptions();
    options.write_buffer_size = 64*1024;        // small write buffer
    options.statistics = rocksdb::CreateDBStatistics();

    switch (iter) {
      case 0:
        // only uncompressed block cache
        options.block_cache = NewLRUCache(8*1024);
        options.block_cache_compressed = nullptr;
        break;
      case 1:
        // no block cache, only compressed cache
        options.no_block_cache = true;
        options.block_cache = nullptr;
        options.block_cache_compressed = NewLRUCache(8*1024);
        break;
      case 2:
        // both compressed and uncompressed block cache
        options.block_cache = NewLRUCache(1024);
        options.block_cache_compressed = NewLRUCache(8*1024);
        break;
      default:
        ASSERT_TRUE(false);
    }
    Reopen(&options);

    Random rnd(301);

    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    std::vector<std::string> values;
    std::string str;
    for (int i = 0; i < num_iter; i++) {
      // Reuse each random value for 4 consecutive keys so blocks compress
      // well, giving the compressed cache something to do.
      if (i % 4 == 0) {        // high compression ratio
        str = RandomString(&rnd, 1000);
      }
      values.push_back(str);
      ASSERT_OK(Put(Key(i), values[i]));
    }

    // flush all data from memtable so that reads are from block cache
    dbfull()->Flush(FlushOptions());

    for (int i = 0; i < num_iter; i++) {
      ASSERT_EQ(Get(Key(i)), values[i]);
    }

    // check that we triggered the appropriate code paths in the cache
    switch (iter) {
      case 0:
        // only uncompressed block cache
        ASSERT_GT(options.statistics.get()->getTickerCount(BLOCK_CACHE_MISS),
                  0);
        ASSERT_EQ(options.statistics.get()->getTickerCount
                  (BLOCK_CACHE_COMPRESSED_MISS), 0);
        break;
      case 1:
        // no block cache, only compressed cache
        ASSERT_EQ(options.statistics.get()->getTickerCount(BLOCK_CACHE_MISS),
                  0);
        ASSERT_GT(options.statistics.get()->getTickerCount
                  (BLOCK_CACHE_COMPRESSED_MISS), 0);
        break;
      case 2:
        // both compressed and uncompressed block cache
        ASSERT_GT(options.statistics.get()->getTickerCount(BLOCK_CACHE_MISS),
                  0);
        ASSERT_GT(options.statistics.get()->getTickerCount
                  (BLOCK_CACHE_COMPRESSED_MISS), 0);
        break;
      default:
        ASSERT_TRUE(false);
    }
  }
}
|
|
|
|
|
2013-11-17 07:44:39 +00:00
|
|
|
// Return a random string of length `len` generated with a 0.8 compression
// ratio target, so that compression-dependent tests get compressible data.
static std::string CompressibleString(Random* rnd, int len) {
  std::string result;
  test::CompressibleString(rnd, 0.8, len, &result);
  return result;
}
|
|
|
|
|
2013-10-17 20:33:39 +00:00
|
|
|
// With compression_size_percent = 70, universal compaction compresses
// output while the newest data stays within 70% of total size, and stops
// compressing once it exceeds that share. Size bounds below assume the
// ~0.8 compression ratio produced by CompressibleString.
TEST(DBTest, UniversalCompactionCompressRatio1) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100<<10; //100KB
  options.level0_file_num_compaction_trigger = 2;
  options.num_levels = 1;
  options.compaction_options_universal.compression_size_percent = 70;
  Reopen(&options);

  Random rnd(301);
  int key_idx = 0;

  // The first compaction (2) is compressed.
  for (int num = 0; num < 2; num++) {
    // Write 120KB (12 values, each 10K)
    for (int i = 0; i < 12; i++) {
      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    dbfull()->TEST_WaitForCompact();
  }
  // Total on-disk size must reflect compression (< 90% of raw size).
  ASSERT_LT((int ) dbfull()->TEST_GetLevel0TotalSize(), 120000 * 2 * 0.9);

  // The second compaction (4) is compressed
  for (int num = 0; num < 2; num++) {
    // Write 120KB (12 values, each 10K)
    for (int i = 0; i < 12; i++) {
      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    dbfull()->TEST_WaitForCompact();
  }
  ASSERT_LT((int ) dbfull()->TEST_GetLevel0TotalSize(), 120000 * 4 * 0.9);

  // The third compaction (2 4) is compressed since this time it is
  // (1 1 3.2) and 3.2/5.2 doesn't reach ratio.
  for (int num = 0; num < 2; num++) {
    // Write 120KB (12 values, each 10K)
    for (int i = 0; i < 12; i++) {
      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    dbfull()->TEST_WaitForCompact();
  }
  ASSERT_LT((int ) dbfull()->TEST_GetLevel0TotalSize(), 120000 * 6 * 0.9);

  // When we start for the compaction up to (2 4 8), the latest
  // compressed is not compressed.
  for (int num = 0; num < 8; num++) {
    // Write 120KB (12 values, each 10K)
    for (int i = 0; i < 12; i++) {
      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    dbfull()->TEST_WaitForCompact();
  }
  // Now part of the data is uncompressed, so the total size exceeds what
  // full compression would produce.
  ASSERT_GT((int) dbfull()->TEST_GetLevel0TotalSize(),
            120000 * 12 * 0.8 + 110000 * 2);
}
|
|
|
|
|
|
|
|
// With compression_size_percent = 95, essentially all universal-compaction
// output is compressed, so even after many ~120KB batches the level-0 total
// stays below the mostly-uncompressed bound.
TEST(DBTest, UniversalCompactionCompressRatio2) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100 << 10;  // 100KB
  options.level0_file_num_compaction_trigger = 2;
  options.num_levels = 1;
  options.compaction_options_universal.compression_size_percent = 95;
  Reopen(&options);

  Random rnd(301);
  int key_idx = 0;

  // Drive the (2 4 8) compaction progression; the latest output remains
  // compressed given the generous size ratio.
  for (int file = 0; file < 14; file++) {
    // Each batch is ~120KB: 12 values of 10K each.
    for (int i = 0; i < 12; i++) {
      ASSERT_OK(Put(Key(key_idx++), CompressibleString(&rnd, 10000)));
    }
    dbfull()->TEST_WaitForFlushMemTable();
    dbfull()->TEST_WaitForCompact();
  }
  ASSERT_LT((int) dbfull()->TEST_GetLevel0TotalSize(),
            120000 * 12 * 0.8 + 110000 * 2);
}
|
2013-11-17 07:44:39 +00:00
|
|
|
#endif
|
2013-10-17 20:33:39 +00:00
|
|
|
|
2013-09-04 20:13:08 +00:00
|
|
|
// Verifies the supported migration path from level-style to universal-style
// compaction: a multi-level DB cannot be reopened directly in universal mode
// (universal compaction uses only one level), but it can be migrated by first
// compacting everything into a single level-0 file.
TEST(DBTest, ConvertCompactionStyle) {
  Random rnd(301);
  int max_key_level_insert = 200;
  int max_key_universal_insert = 600;

  // Stage 1: generate a db with level compaction
  Options options = CurrentOptions();
  options.write_buffer_size = 100<<10; //100KB
  options.num_levels = 4;
  options.level0_file_num_compaction_trigger = 3;
  options.max_bytes_for_level_base = 500<<10; // 500KB
  options.max_bytes_for_level_multiplier = 1;
  options.target_file_size_base = 200<<10; // 200KB
  options.target_file_size_multiplier = 1;
  Reopen(&options);

  for (int i = 0; i <= max_key_level_insert; i++) {
    // each value is 10K
    ASSERT_OK(Put(Key(i), RandomString(&rnd, 10000)));
  }
  dbfull()->Flush(FlushOptions());
  dbfull()->TEST_WaitForCompact();

  // The small level sizes above force data beyond level 0, which is the
  // precondition for the reopen failure tested in stage 2.
  ASSERT_GT(TotalTableFiles(), 1);
  int non_level0_num_files = 0;
  for (int i = 1; i < dbfull()->NumberLevels(); i++) {
    non_level0_num_files += NumTableFilesAtLevel(i);
  }
  ASSERT_GT(non_level0_num_files, 0);

  // Stage 2: reopen with universal compaction - should fail
  options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  Status s = TryReopen(&options);
  ASSERT_TRUE(s.IsInvalidArgument());

  // Stage 3: compact into a single file and move the file to level 0
  options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.target_file_size_base = INT_MAX;
  options.target_file_size_multiplier = 1;
  options.max_bytes_for_level_base = INT_MAX;
  options.max_bytes_for_level_multiplier = 1;
  Reopen(&options);

  dbfull()->CompactRange(nullptr, nullptr,
                         true /* reduce level */,
                         0 /* reduce to level 0 */);

  // After the range compaction, exactly one file remains and it lives in
  // level 0 -- the layout universal compaction requires.
  for (int i = 0; i < dbfull()->NumberLevels(); i++) {
    int num = NumTableFilesAtLevel(i);
    if (i == 0) {
      ASSERT_EQ(num, 1);
    } else {
      ASSERT_EQ(num, 0);
    }
  }

  // Stage 4: re-open in universal compaction style and do some db operations
  options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100<<10; //100KB
  options.level0_file_num_compaction_trigger = 3;
  Reopen(&options);

  // Overwrite the upper half of the level-style keys and extend the key
  // space, so the final scan covers data written under both styles.
  for (int i = max_key_level_insert / 2; i <= max_key_universal_insert; i++) {
    ASSERT_OK(Put(Key(i), RandomString(&rnd, 10000)));
  }
  dbfull()->Flush(FlushOptions());
  dbfull()->TEST_WaitForCompact();

  // Universal compaction must keep everything in level 0.
  for (int i = 1; i < dbfull()->NumberLevels(); i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }

  // verify keys inserted in both level compaction style and universal
  // compaction style
  std::string keys_in_db;
  Iterator* iter = dbfull()->NewIterator(ReadOptions());
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    keys_in_db.append(iter->key().ToString());
    keys_in_db.push_back(',');
  }
  delete iter;

  std::string expected_keys;
  for (int i = 0; i <= max_key_universal_insert; i++) {
    expected_keys.append(Key(i));
    expected_keys.push_back(',');
  }

  ASSERT_EQ(keys_in_db, expected_keys);
}
|
|
|
|
|
2012-11-01 17:50:08 +00:00
|
|
|
// Fills level-0 one file at a time until one file short of
// level0_file_num_compaction_trigger, then writes one more file and verifies
// that the triggered level-0 compaction leaves a single level-1 file.
void MinLevelHelper(DBTest* self, Options& options) {
  Random rnd(301);

  for (int file_num = 0;
       file_num < options.level0_file_num_compaction_trigger - 1;
       file_num++) {
    // Write 120KB (12 values, each 10K); each batch becomes one L0 file.
    std::vector<std::string> batch;
    for (int k = 0; k < 12; k++) {
      batch.push_back(RandomString(&rnd, 10000));
      ASSERT_OK(self->Put(Key(k), batch[k]));
    }
    self->dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(self->NumTableFilesAtLevel(0), file_num + 1);
  }

  // One more file reaches the trigger and should start a level-0 compaction.
  std::vector<std::string> batch;
  for (int k = 0; k < 12; k++) {
    batch.push_back(RandomString(&rnd, 10000));
    ASSERT_OK(self->Put(Key(k), batch[k]));
  }
  self->dbfull()->TEST_WaitForCompact();

  ASSERT_EQ(self->NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(self->NumTableFilesAtLevel(1), 1);
}
|
|
|
|
|
2012-11-16 20:55:21 +00:00
|
|
|
// returns false if the calling-Test should be skipped
|
|
|
|
bool MinLevelToCompress(CompressionType& type, Options& options, int wbits,
|
2012-11-01 17:50:08 +00:00
|
|
|
int lev, int strategy) {
|
2012-11-05 06:04:14 +00:00
|
|
|
fprintf(stderr, "Test with compression options : window_bits = %d, level = %d, strategy = %d}\n", wbits, lev, strategy);
|
2012-11-01 17:50:08 +00:00
|
|
|
options.write_buffer_size = 100<<10; //100KB
|
|
|
|
options.num_levels = 3;
|
|
|
|
options.max_mem_compaction_level = 0;
|
|
|
|
options.level0_file_num_compaction_trigger = 3;
|
2012-10-28 06:13:17 +00:00
|
|
|
options.create_if_missing = true;
|
2012-11-01 17:50:08 +00:00
|
|
|
|
|
|
|
if (SnappyCompressionSupported(CompressionOptions(wbits, lev, strategy))) {
|
|
|
|
type = kSnappyCompression;
|
|
|
|
fprintf(stderr, "using snappy\n");
|
|
|
|
} else if (ZlibCompressionSupported(
|
|
|
|
CompressionOptions(wbits, lev, strategy))) {
|
|
|
|
type = kZlibCompression;
|
|
|
|
fprintf(stderr, "using zlib\n");
|
|
|
|
} else if (BZip2CompressionSupported(
|
|
|
|
CompressionOptions(wbits, lev, strategy))) {
|
|
|
|
type = kBZip2Compression;
|
|
|
|
fprintf(stderr, "using bzip2\n");
|
|
|
|
} else {
|
|
|
|
fprintf(stderr, "skipping test, compression disabled\n");
|
2012-11-16 20:55:21 +00:00
|
|
|
return false;
|
2012-11-01 17:50:08 +00:00
|
|
|
}
|
2013-01-24 18:54:26 +00:00
|
|
|
options.compression_per_level.resize(options.num_levels);
|
2012-10-28 06:13:17 +00:00
|
|
|
|
|
|
|
// do not compress L0
|
|
|
|
for (int i = 0; i < 1; i++) {
|
|
|
|
options.compression_per_level[i] = kNoCompression;
|
|
|
|
}
|
|
|
|
for (int i = 1; i < options.num_levels; i++) {
|
|
|
|
options.compression_per_level[i] = type;
|
|
|
|
}
|
2012-11-16 20:55:21 +00:00
|
|
|
return true;
|
2012-11-01 17:50:08 +00:00
|
|
|
}
|
2013-08-07 22:20:41 +00:00
|
|
|
|
2012-11-01 17:50:08 +00:00
|
|
|
// Exercises per-level compression with degenerate compression options
// (window_bits = -14, level = -1): first with only L0 uncompressed, then
// with both L0 and L1 uncompressed.
TEST(DBTest, MinLevelToCompress1) {
  Options options = CurrentOptions();
  CompressionType type;
  // Skip entirely when no compression library is compiled in.
  if (!MinLevelToCompress(type, options, -14, -1, 0)) {
    return;
  }
  Reopen(&options);
  MinLevelHelper(this, options);

  // Second pass: leave both L0 and L1 uncompressed, compress the rest.
  options.compression_per_level[0] = kNoCompression;
  options.compression_per_level[1] = kNoCompression;
  for (int level = 2; level < options.num_levels; level++) {
    options.compression_per_level[level] = type;
  }
  DestroyAndReopen(&options);
  MinLevelHelper(this, options);
}
|
|
|
|
|
|
|
|
// Same as MinLevelToCompress1 but with a positive window_bits (15),
// exercising the other sign of the window-bits option.
TEST(DBTest, MinLevelToCompress2) {
  Options options = CurrentOptions();
  CompressionType type;
  // Skip entirely when no compression library is compiled in.
  if (!MinLevelToCompress(type, options, 15, -1, 0)) {
    return;
  }
  Reopen(&options);
  MinLevelHelper(this, options);

  // Second pass: leave both L0 and L1 uncompressed, compress the rest.
  options.compression_per_level[0] = kNoCompression;
  options.compression_per_level[1] = kNoCompression;
  for (int level = 2; level < options.num_levels; level++) {
    options.compression_per_level[level] = type;
  }
  DestroyAndReopen(&options);
  MinLevelHelper(this, options);
}
|
2012-10-28 06:13:17 +00:00
|
|
|
|
2011-07-15 00:20:57 +00:00
|
|
|
// Repeatedly overwriting a single key must not grow the file count without
// bound: obsolete versions should be collapsed by compaction.
TEST(DBTest, RepeatedWritesToSameKey) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 100000;  // Small write buffer
    Reopen(&options);

    // We must have at most one file per level except for level-0,
    // which may have up to kL0_StopWritesTrigger files.
    const int kMaxFiles =
        dbfull()->NumberLevels() + dbfull()->Level0StopWriteTrigger();

    Random rnd(301);
    // Each value spans two memtables, guaranteeing frequent flushes.
    std::string big_value = RandomString(&rnd, 2 * options.write_buffer_size);
    for (int iteration = 0; iteration < 5 * kMaxFiles; iteration++) {
      Put("key", big_value);
      ASSERT_LE(TotalTableFiles(), kMaxFiles);
    }
  } while (ChangeCompactOptions());
}
|
|
|
|
|
In-place updates for equal keys and similar sized values
Summary:
Currently for each put, a fresh memory is allocated, and a new entry is added to the memtable with a new sequence number irrespective of whether the key already exists in the memtable. This diff is an attempt to update the value inplace for existing keys. It currently handles a very simple case:
1. Key already exists in the current memtable. Does not inplace update values in immutable memtable or snapshot
2. Latest value type is a 'put' ie kTypeValue
3. New value size is less than existing value, to avoid reallocating memory
TODO: For a put of an existing key, deallocate memory take by values, for other value types till a kTypeValue is found, ie. remove kTypeMerge.
TODO: Update the transaction log, to allow consistent reload of the memtable.
Test Plan: Added a unit test verifying the inplace update. But some other unit tests broken due to invalid sequence number checks. WIll fix them next.
Reviewers: xinyaohu, sumeet, haobo, dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D12423
Automatic commit by arc
2013-08-19 21:12:47 +00:00
|
|
|
// With inplace_update_support enabled, overwriting a key with a value that
// is no larger than the existing one should update the memtable entry in
// place (one entry, original sequence number); growing values cannot be
// updated in place and fall back to normal puts with fresh sequence numbers.
TEST(DBTest, InPlaceUpdate) {
  do {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.inplace_update_support = true;
    options.env = env_;
    options.write_buffer_size = 100000;

    // Update key with values of smaller size
    Reopen(&options);
    int numValues = 10;
    // Counting down makes each successive value one byte shorter, so every
    // overwrite is eligible for in-place update.
    for (int i = numValues; i > 0; i--) {
      std::string value = DummyString(i, 'a');
      ASSERT_OK(Put("key", value));
      ASSERT_EQ(value, Get("key"));
    }

    // Walk the internal (raw memtable) iterator to inspect entry count and
    // sequence numbers directly.
    int count = 0;
    Iterator* iter = dbfull()->TEST_NewInternalIterator();
    iter->SeekToFirst();
    ASSERT_EQ(iter->status().ok(), true);
    while (iter->Valid()) {
      ParsedInternalKey ikey(Slice(), 0, kTypeValue);
      // Poison the sequence field so a failed parse cannot masquerade as a
      // legitimate value.
      ikey.sequence = -1;
      ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
      count++;
      // All updates with the same sequence number.
      ASSERT_EQ(ikey.sequence, (unsigned)1);
      iter->Next();
    }
    // Only 1 instance for that key.
    ASSERT_EQ(count, 1);
    delete iter;

    // Update key with values of larger size
    DestroyAndReopen(&options);
    numValues = 10;
    // Counting up makes each value one byte longer, so in-place update is
    // never possible and each overwrite must be a fresh entry.
    for (int i = 0; i < numValues; i++) {
      std::string value = DummyString(i, 'a');
      ASSERT_OK(Put("key", value));
      ASSERT_EQ(value, Get("key"));
    }

    count = 0;
    iter = dbfull()->TEST_NewInternalIterator();
    iter->SeekToFirst();
    ASSERT_EQ(iter->status().ok(), true);
    // Internal iteration yields newest entries first, so sequence numbers
    // should descend from numValues down to 1.
    int seq = numValues;
    while (iter->Valid()) {
      ParsedInternalKey ikey(Slice(), 0, kTypeValue);
      ikey.sequence = -1;
      ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
      count++;
      // No inplace updates. All updates are puts with new seq number
      ASSERT_EQ(ikey.sequence, (unsigned)seq--);
      iter->Next();
    }
    // All 10 updates exist in the internal iterator
    ASSERT_EQ(count, numValues);
    delete iter;

  } while (ChangeCompactOptions());
}
|
|
|
|
|
2012-10-29 08:13:41 +00:00
|
|
|
// This is a static filter used for filtering
// kvs during the compaction process.
// Number of key/value pairs the test compaction filters have visited;
// tests reset it before each compaction and assert on it afterwards.
static int cfilter_count;
// Replacement value that ChangeFilter writes for every key it visits.
static std::string NEW_VALUE = "NewValue";
|
2013-05-12 09:36:59 +00:00
|
|
|
|
|
|
|
class KeepFilter : public CompactionFilter {
|
|
|
|
public:
|
|
|
|
virtual bool Filter(int level, const Slice& key,
|
|
|
|
const Slice& value, std::string* new_value,
|
|
|
|
bool* value_changed) const override {
|
|
|
|
cfilter_count++;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual const char* Name() const override {
|
|
|
|
return "KeepFilter";
|
|
|
|
}
|
|
|
|
|
|
|
|
};
|
|
|
|
|
|
|
|
class DeleteFilter : public CompactionFilter {
|
|
|
|
public:
|
|
|
|
virtual bool Filter(int level, const Slice& key,
|
|
|
|
const Slice& value, std::string* new_value,
|
|
|
|
bool* value_changed) const override {
|
|
|
|
cfilter_count++;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual const char* Name() const override {
|
|
|
|
return "DeleteFilter";
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
class ChangeFilter : public CompactionFilter {
|
|
|
|
public:
|
2013-07-06 01:49:18 +00:00
|
|
|
explicit ChangeFilter(int argv) : argv_(argv) {}
|
2013-05-12 09:36:59 +00:00
|
|
|
|
|
|
|
virtual bool Filter(int level, const Slice& key,
|
|
|
|
const Slice& value, std::string* new_value,
|
|
|
|
bool* value_changed) const override {
|
|
|
|
assert(argv_ == 100);
|
|
|
|
assert(new_value != nullptr);
|
|
|
|
*new_value = NEW_VALUE;
|
|
|
|
*value_changed = true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual const char* Name() const override {
|
|
|
|
return "ChangeFilter";
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
const int argv_;
|
|
|
|
};
|
2012-10-29 08:13:41 +00:00
|
|
|
|
2013-08-13 17:56:20 +00:00
|
|
|
class KeepFilterFactory : public CompactionFilterFactory {
|
|
|
|
public:
|
|
|
|
virtual std::unique_ptr<CompactionFilter>
|
2013-10-27 06:01:26 +00:00
|
|
|
CreateCompactionFilter(const CompactionFilter::Context& context) override {
|
2013-08-13 17:56:20 +00:00
|
|
|
return std::unique_ptr<CompactionFilter>(new KeepFilter());
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual const char* Name() const override {
|
|
|
|
return "KeepFilterFactory";
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
class DeleteFilterFactory : public CompactionFilterFactory {
|
|
|
|
public:
|
|
|
|
virtual std::unique_ptr<CompactionFilter>
|
2013-10-27 06:01:26 +00:00
|
|
|
CreateCompactionFilter(const CompactionFilter::Context& context) override {
|
2013-08-13 17:56:20 +00:00
|
|
|
return std::unique_ptr<CompactionFilter>(new DeleteFilter());
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual const char* Name() const override {
|
|
|
|
return "DeleteFilterFactory";
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
class ChangeFilterFactory : public CompactionFilterFactory {
|
|
|
|
public:
|
|
|
|
explicit ChangeFilterFactory(int argv) : argv_(argv) {}
|
|
|
|
|
|
|
|
virtual std::unique_ptr<CompactionFilter>
|
2013-10-27 06:01:26 +00:00
|
|
|
CreateCompactionFilter(const CompactionFilter::Context& context) override {
|
2013-08-13 17:56:20 +00:00
|
|
|
return std::unique_ptr<CompactionFilter>(new ChangeFilter(argv_));
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual const char* Name() const override {
|
|
|
|
return "ChangeFilterFactory";
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
const int argv_;
|
|
|
|
};
|
|
|
|
|
2012-10-29 08:13:41 +00:00
|
|
|
// End-to-end exercise of the compaction-filter hook: a KeepFilter must see
// every key at every compacted level without changing the data, and a
// DeleteFilter must be able to empty the database entirely.
TEST(DBTest, CompactionFilter) {
  Options options = CurrentOptions();
  options.num_levels = 3;
  options.max_mem_compaction_level = 0;
  options.compaction_filter_factory = std::make_shared<KeepFilterFactory>();
  Reopen(&options);

  // Write 100K keys, these are written to a few files in L0.
  const std::string value(10, 'x');
  for (int i = 0; i < 100000; i++) {
    char key[100];
    snprintf(key, sizeof(key), "B%010d", i);
    Put(key, value);
  }
  dbfull()->TEST_FlushMemTable();

  // Push all files to the highest level L2. Verify that
  // the compaction is each level invokes the filter for
  // all the keys in that level.
  cfilter_count = 0;
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  ASSERT_EQ(cfilter_count, 100000);
  cfilter_count = 0;
  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
  ASSERT_EQ(cfilter_count, 100000);

  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_NE(NumTableFilesAtLevel(2), 0);
  cfilter_count = 0;

  // All the files are in the lowest level.
  // Verify that all but the 100001st record
  // has sequence number zero. The 100001st record
  // is at the tip of this snapshot and cannot
  // be zeroed out.
  // TODO: figure out sequence number squashtoo
  int count = 0;
  int total = 0;
  Iterator* iter = dbfull()->TEST_NewInternalIterator();
  iter->SeekToFirst();
  ASSERT_OK(iter->status());
  while (iter->Valid()) {
    ParsedInternalKey ikey(Slice(), 0, kTypeValue);
    // Poison the sequence field so a failed parse cannot look legitimate.
    ikey.sequence = -1;
    ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
    total++;
    if (ikey.sequence != 0) {
      count++;
    }
    iter->Next();
  }
  ASSERT_EQ(total, 100000);
  ASSERT_EQ(count, 1);
  delete iter;

  // overwrite all the 100K keys once again.
  for (int i = 0; i < 100000; i++) {
    char key[100];
    snprintf(key, sizeof(key), "B%010d", i);
    Put(key, value);
  }
  dbfull()->TEST_FlushMemTable();

  // push all files to the highest level L2. This
  // means that all keys should pass at least once
  // via the compaction filter
  cfilter_count = 0;
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  ASSERT_EQ(cfilter_count, 100000);
  cfilter_count = 0;
  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
  ASSERT_EQ(cfilter_count, 100000);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_NE(NumTableFilesAtLevel(2), 0);

  // create a new database with the compaction
  // filter in such a way that it deletes all keys
  options.compaction_filter_factory = std::make_shared<DeleteFilterFactory>();
  options.create_if_missing = true;
  DestroyAndReopen(&options);

  // write all the keys once again.
  for (int i = 0; i < 100000; i++) {
    char key[100];
    snprintf(key, sizeof(key), "B%010d", i);
    Put(key, value);
  }
  dbfull()->TEST_FlushMemTable();
  ASSERT_NE(NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2), 0);

  // Push all files to the highest level L2. This
  // triggers the compaction filter to delete all keys,
  // verify that at the end of the compaction process,
  // nothing is left.
  cfilter_count = 0;
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  ASSERT_EQ(cfilter_count, 100000);
  cfilter_count = 0;
  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
  // Level-1 is already empty after the first pass, so the filter sees
  // nothing on the second pass.
  ASSERT_EQ(cfilter_count, 0);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);

  // Scan the entire database to ensure that nothing is left
  iter = db_->NewIterator(ReadOptions());
  iter->SeekToFirst();
  count = 0;
  while (iter->Valid()) {
    count++;
    iter->Next();
  }
  ASSERT_EQ(count, 0);
  delete iter;

  // The sequence number of the remaining record
  // is not zeroed out even though it is at the
  // level Lmax because this record is at the tip
  // TODO: remove the following or design a different
  // test
  count = 0;
  iter = dbfull()->TEST_NewInternalIterator();
  iter->SeekToFirst();
  ASSERT_OK(iter->status());
  while (iter->Valid()) {
    ParsedInternalKey ikey(Slice(), 0, kTypeValue);
    ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
    ASSERT_NE(ikey.sequence, (unsigned)0);
    count++;
    iter->Next();
  }
  ASSERT_EQ(count, 0);
  delete iter;
}
|
|
|
|
|
|
|
|
// Verifies that a compaction filter can rewrite values: after pushing all
// data through ChangeFilter twice, every key must read back as NEW_VALUE.
TEST(DBTest, CompactionFilterWithValueChange) {
  do {
    Options options = CurrentOptions();
    options.num_levels = 3;
    options.max_mem_compaction_level = 0;
    options.compaction_filter_factory =
      std::make_shared<ChangeFilterFactory>(100);
    Reopen(&options);

    // Write 100K+1 keys, these are written to a few files
    // in L0. We do this so that the current snapshot points
    // to the 100001 key.The compaction filter is not invoked
    // on keys that are visible via a snapshot because we
    // anyways cannot delete it.
    const std::string value(10, 'x');
    for (int i = 0; i < 100001; i++) {
      char key[100];
      snprintf(key, sizeof(key), "B%010d", i);
      Put(key, value);
    }

    // push all files to lower levels
    dbfull()->TEST_FlushMemTable();
    dbfull()->TEST_CompactRange(0, nullptr, nullptr);
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);

    // re-write all data again
    for (int i = 0; i < 100001; i++) {
      char key[100];
      snprintf(key, sizeof(key), "B%010d", i);
      Put(key, value);
    }

    // push all files to lower levels. This should
    // invoke the compaction filter for all 100000 keys.
    dbfull()->TEST_FlushMemTable();
    dbfull()->TEST_CompactRange(0, nullptr, nullptr);
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);

    // verify that all keys now have the new value that
    // was set by the compaction process.
    for (int i = 0; i < 100000; i++) {
      char key[100];
      snprintf(key, sizeof(key), "B%010d", i);
      std::string newvalue = Get(key);
      ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
    }
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2011-03-22 18:32:49 +00:00
|
|
|
// After a sparse update touching three distant key prefixes, compaction must
// not produce a file that overlaps an excessive amount of next-level data
// (which would make future compactions of that file very expensive).
TEST(DBTest, SparseMerge) {
  do {
    Options options = CurrentOptions();
    options.compression = kNoCompression;
    Reopen(&options);

    FillLevels("A", "Z");

    // Suppose there is:
    //    small amount of data with prefix A
    //    large amount of data with prefix B
    //    small amount of data with prefix C
    // and that recent updates have made small changes to all three prefixes.
    // Check that we do not do a compaction that merges all of B in one shot.
    const std::string value(1000, 'x');
    Put("A", "va");
    // Write approximately 100MB of "B" values
    for (int i = 0; i < 100000; i++) {
      char key[100];
      snprintf(key, sizeof(key), "B%010d", i);
      Put(key, value);
    }
    Put("C", "vc");
    dbfull()->TEST_FlushMemTable();
    dbfull()->TEST_CompactRange(0, nullptr, nullptr);

    // Make sparse update
    Put("A",    "va2");
    Put("B100", "bvalue2");
    Put("C",    "vc2");
    dbfull()->TEST_FlushMemTable();

    // Compactions should not cause us to create a situation where
    // a file overlaps too much data at the next level.
    ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
    dbfull()->TEST_CompactRange(0, nullptr, nullptr);
    ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Returns true iff `val` lies within the inclusive range [low, high];
// logs the offending value to stderr when it does not.
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  const bool in_range = (val >= low) && (val <= high);
  if (!in_range) {
    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
            (unsigned long long)(val),
            (unsigned long long)(low),
            (unsigned long long)(high));
  }
  return in_range;
}
|
|
|
|
|
|
|
|
// Checks GetApproximateSizes(): empty DB reports zero, memtable-only data
// reports zero, and once data is in table files the reported range sizes
// track the raw value sizes within a small metadata allowance -- including
// across reopens and partial compactions.
TEST(DBTest, ApproximateSizes) {
  do {
    Options options = CurrentOptions();
    options.write_buffer_size = 100000000;        // Large write buffer
    options.compression = kNoCompression;
    DestroyAndReopen();

    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));
    Reopen(&options);
    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));

    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    const int N = 80;
    static const int S1 = 100000;
    static const int S2 = 105000;       // Allow some expansion from metadata
    Random rnd(301);
    for (int i = 0; i < N; i++) {
      ASSERT_OK(Put(Key(i), RandomString(&rnd, S1)));
    }

    // 0 because GetApproximateSizes() does not account for memtable space
    ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));

    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
      Reopen(&options);

      // Compact one 10-key chunk per pass and re-verify every range size
      // bound in between, so sizes stay correct mid-compaction.
      for (int compact_start = 0; compact_start < N; compact_start += 10) {
        for (int i = 0; i < N; i += 10) {
          ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i));
          ASSERT_TRUE(Between(Size("", Key(i)+".suffix"), S1*(i+1), S2*(i+1)));
          ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10));
        }
        ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50));
        ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50));

        std::string cstart_str = Key(compact_start);
        std::string cend_str = Key(compact_start + 9);
        Slice cstart = cstart_str;
        Slice cend = cend_str;
        dbfull()->TEST_CompactRange(0, &cstart, &cend);
      }

      ASSERT_EQ(NumTableFilesAtLevel(0), 0);
      ASSERT_GT(NumTableFilesAtLevel(1), 0);
    }
  } while (ChangeOptions(kSkipUniversalCompaction));
}
|
|
|
|
|
|
|
|
TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
  do {
    Options options = CurrentOptions();
    options.compression = kNoCompression;
    Reopen();

    // Mix small (10K), large (100K/300K) and repeated values.
    Random rnd(301);
    std::string big1 = RandomString(&rnd, 100000);
    ASSERT_OK(Put(Key(0), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(1), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(2), big1));
    ASSERT_OK(Put(Key(3), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(4), big1));
    ASSERT_OK(Put(Key(5), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000)));
    ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000)));

    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
      Reopen(&options);

      // Cumulative prefix sizes must track the values written above.
      ASSERT_TRUE(Between(Size("", Key(0)), 0, 0));
      ASSERT_TRUE(Between(Size("", Key(1)), 10000, 11000));
      ASSERT_TRUE(Between(Size("", Key(2)), 20000, 21000));
      ASSERT_TRUE(Between(Size("", Key(3)), 120000, 121000));
      ASSERT_TRUE(Between(Size("", Key(4)), 130000, 131000));
      ASSERT_TRUE(Between(Size("", Key(5)), 230000, 231000));
      ASSERT_TRUE(Between(Size("", Key(6)), 240000, 241000));
      ASSERT_TRUE(Between(Size("", Key(7)), 540000, 541000));
      ASSERT_TRUE(Between(Size("", Key(8)), 550000, 560000));

      ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));

      dbfull()->TEST_CompactRange(0, nullptr, nullptr);
    }
  } while (ChangeOptions());
}
|
|
|
|
|
|
|
|
TEST(DBTest, IteratorPinsRef) {
  do {
    Put("foo", "hello");

    // Get iterator that will yield the current contents of the DB.
    Iterator* it = db_->NewIterator(ReadOptions());

    // Write enough to force compactions while the iterator is live.
    Put("foo", "newvalue1");
    for (int i = 0; i < 100; i++) {
      ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v')));  // 100K values
    }
    Put("foo", "newvalue2");

    // The iterator must still see the snapshot taken at creation time.
    it->SeekToFirst();
    ASSERT_TRUE(it->Valid());
    ASSERT_EQ("foo", it->key().ToString());
    ASSERT_EQ("hello", it->value().ToString());
    it->Next();
    ASSERT_TRUE(!it->Valid());
    delete it;
  } while (ChangeCompactOptions());
}
|
|
|
|
|
|
|
|
TEST(DBTest, Snapshot) {
  do {
    // Take a snapshot after each of the first three writes.
    Put("foo", "v1");
    const Snapshot* s1 = db_->GetSnapshot();
    Put("foo", "v2");
    const Snapshot* s2 = db_->GetSnapshot();
    Put("foo", "v3");
    const Snapshot* s3 = db_->GetSnapshot();

    Put("foo", "v4");
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v3", Get("foo", s3));
    ASSERT_EQ("v4", Get("foo"));

    // Releasing a snapshot must not disturb the remaining ones.
    db_->ReleaseSnapshot(s3);
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));

    db_->ReleaseSnapshot(s1);
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));

    db_->ReleaseSnapshot(s2);
    ASSERT_EQ("v4", Get("foo"));
  } while (ChangeOptions());
}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-04-17 15:36:46 +00:00
|
|
|
TEST(DBTest, HiddenValuesAreRemoved) {
  do {
    Random rnd(301);
    FillLevels("a", "z");

    std::string big = RandomString(&rnd, 50000);
    Put("foo", big);
    Put("pastfoo", "v");
    const Snapshot* snapshot = db_->GetSnapshot();
    Put("foo", "tiny");
    Put("pastfoo2", "v2");  // Advance sequence number one more

    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_GT(NumTableFilesAtLevel(0), 0);

    // While the snapshot is held, the big value must remain visible.
    ASSERT_EQ(big, Get("foo", snapshot));
    ASSERT_TRUE(Between(Size("", "pastfoo"), 50000, 60000));
    db_->ReleaseSnapshot(snapshot);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
    Slice x("x");
    dbfull()->TEST_CompactRange(0, nullptr, &x);
    // After compaction the obsolete big value is gone.
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    ASSERT_GE(NumTableFilesAtLevel(1), 1);
    dbfull()->TEST_CompactRange(1, nullptr, &x);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");

    ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));
  } while (ChangeOptions(kSkipUniversalCompaction));
}
|
|
|
|
|
2012-11-27 05:16:21 +00:00
|
|
|
TEST(DBTest, CompactBetweenSnapshots) {
  do {
    Random rnd(301);
    FillLevels("a", "z");

    // Six overwrites of "foo", with snapshots after the 1st and 4th.
    Put("foo", "first");
    const Snapshot* snapshot1 = db_->GetSnapshot();
    Put("foo", "second");
    Put("foo", "third");
    Put("foo", "fourth");
    const Snapshot* snapshot2 = db_->GetSnapshot();
    Put("foo", "fifth");
    Put("foo", "sixth");

    // All entries (including duplicates) exist
    // before any compaction is triggered.
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ("sixth", Get("foo"));
    ASSERT_EQ("fourth", Get("foo", snapshot2));
    ASSERT_EQ("first", Get("foo", snapshot1));
    ASSERT_EQ(AllEntriesFor("foo"),
              "[ sixth, fifth, fourth, third, second, first ]");

    // After a compaction, "second", "third" and "fifth" should
    // be removed
    FillLevels("a", "z");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ("sixth", Get("foo"));
    ASSERT_EQ("fourth", Get("foo", snapshot2));
    ASSERT_EQ("first", Get("foo", snapshot1));
    ASSERT_EQ(AllEntriesFor("foo"), "[ sixth, fourth, first ]");

    // after we release the snapshot1, only two values left
    db_->ReleaseSnapshot(snapshot1);
    FillLevels("a", "z");
    dbfull()->CompactRange(nullptr, nullptr);

    // We have only one valid snapshot snapshot2. Since snapshot1 is
    // not valid anymore, "first" should be removed by a compaction.
    ASSERT_EQ("sixth", Get("foo"));
    ASSERT_EQ("fourth", Get("foo", snapshot2));
    ASSERT_EQ(AllEntriesFor("foo"), "[ sixth, fourth ]");

    // after we release the snapshot2, only one value should be left
    db_->ReleaseSnapshot(snapshot2);
    FillLevels("a", "z");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ("sixth", Get("foo"));
    ASSERT_EQ(AllEntriesFor("foo"), "[ sixth ]");
  } while (ChangeOptions());
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
TEST(DBTest, DeletionMarkers1) {
  Put("foo", "v1");
  ASSERT_OK(dbfull()->TEST_FlushMemTable());
  const int last = dbfull()->MaxMemCompactionLevel();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level

  // Place a table at level last-1 to prevent merging with preceding mutation
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_FlushMemTable();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);

  Delete("foo");
  Put("foo", "v2");
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
  ASSERT_OK(dbfull()->TEST_FlushMemTable());  // Moves to level last-2
  if (CurrentOptions().purge_redundant_kvs_while_flush) {
    ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
  } else {
    ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
  }
  Slice z("z");
  dbfull()->TEST_CompactRange(last - 2, nullptr, &z);
  // DEL eliminated, but v1 remains because we aren't compacting that level
  // (DEL can be eliminated because v2 hides v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed. (as is v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
}
|
|
|
|
|
|
|
|
TEST(DBTest, DeletionMarkers2) {
  Put("foo", "v1");
  ASSERT_OK(dbfull()->TEST_FlushMemTable());
  const int last = dbfull()->MaxMemCompactionLevel();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level

  // Place a table at level last-1 to prevent merging with preceding mutation
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_FlushMemTable();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);

  // Unlike DeletionMarkers1, the deletion is NOT followed by a new value.
  Delete("foo");
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  ASSERT_OK(dbfull()->TEST_FlushMemTable());  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr);
  // DEL kept: "last" file overlaps
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed. (as is v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
}
|
|
|
|
|
2011-10-05 23:30:28 +00:00
|
|
|
TEST(DBTest, OverlapInLevel0) {
  do {
    int tmp = dbfull()->MaxMemCompactionLevel();
    ASSERT_EQ(tmp, 2) << "Fix test to match config";

    // Fill levels 1 and 2 to disable the pushing of new memtables to
    // levels > 0.
    ASSERT_OK(Put("100", "v100"));
    ASSERT_OK(Put("999", "v999"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_OK(Delete("100"));
    ASSERT_OK(Delete("999"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_EQ("0,1,1", FilesPerLevel());

    // Make files spanning the following ranges in level-0:
    //  files[0]  200 .. 900
    //  files[1]  300 .. 500
    // Note that files are sorted by smallest key.
    ASSERT_OK(Put("300", "v300"));
    ASSERT_OK(Put("500", "v500"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_OK(Put("200", "v200"));
    ASSERT_OK(Put("600", "v600"));
    ASSERT_OK(Put("900", "v900"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_EQ("2,1,1", FilesPerLevel());

    // Compact away the placeholder files we created initially
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    dbfull()->TEST_CompactRange(2, nullptr, nullptr);
    ASSERT_EQ("2", FilesPerLevel());

    // Do a memtable compaction.  Before bug-fix, the compaction would
    // not detect the overlap with level-0 files and would incorrectly place
    // the deletion in a deeper level.
    ASSERT_OK(Delete("600"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_EQ("3", FilesPerLevel());
    ASSERT_EQ("NOT_FOUND", Get("600"));
  } while (ChangeOptions(kSkipUniversalCompaction));
}
|
|
|
|
|
2011-10-31 17:22:06 +00:00
|
|
|
TEST(DBTest, L0_CompactionBug_Issue44_a) {
  do {
    // Reproduce LevelDB issue 44: a specific sequence of writes,
    // deletes and reopens must not resurrect a deleted key.
    Reopen();
    ASSERT_OK(Put("b", "v"));
    Reopen();
    ASSERT_OK(Delete("b"));
    ASSERT_OK(Delete("a"));
    Reopen();
    ASSERT_OK(Delete("a"));
    Reopen();
    ASSERT_OK(Put("a", "v"));
    Reopen();
    Reopen();
    ASSERT_EQ("(a->v)", Contents());
    env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
    ASSERT_EQ("(a->v)", Contents());
  } while (ChangeCompactOptions());
}
|
|
|
|
|
|
|
|
TEST(DBTest, L0_CompactionBug_Issue44_b) {
  do {
    // Second reproduction of LevelDB issue 44 with an empty key in play.
    Reopen();
    Put("", "");
    Reopen();
    Delete("e");
    Put("", "");
    Reopen();
    Put("c", "cv");
    Reopen();
    Put("", "");
    Reopen();
    Put("", "");
    env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
    Reopen();
    Put("d", "dv");
    Reopen();
    Put("", "");
    Reopen();
    Delete("d");
    Delete("b");
    Reopen();
    ASSERT_EQ("(->)(c->cv)", Contents());
    env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
    ASSERT_EQ("(->)(c->cv)", Contents());
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
TEST(DBTest, ComparatorCheck) {
  // A comparator that behaves like the default one but reports a
  // different name; reopening with it must be rejected.
  class NewComparator : public Comparator {
   public:
    virtual const char* Name() const { return "rocksdb.NewComparator"; }
    virtual int Compare(const Slice& a, const Slice& b) const {
      return BytewiseComparator()->Compare(a, b);
    }
    virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
      BytewiseComparator()->FindShortestSeparator(s, l);
    }
    virtual void FindShortSuccessor(std::string* key) const {
      BytewiseComparator()->FindShortSuccessor(key);
    }
  };
  Options new_options;
  NewComparator cmp;
  do {
    new_options = CurrentOptions();
    new_options.comparator = &cmp;
    Status s = TryReopen(&new_options);
    // The open must fail and the error must mention the comparator.
    ASSERT_TRUE(!s.ok());
    ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos)
        << s.ToString();
  } while (ChangeCompactOptions(&new_options));
}
|
|
|
|
|
2011-10-31 17:22:06 +00:00
|
|
|
TEST(DBTest, CustomComparator) {
  // Orders keys of the form "[<number>]" numerically, so e.g. "[10]"
  // and "[0xa]" are equal keys.
  class NumberComparator : public Comparator {
   public:
    virtual const char* Name() const { return "test.NumberComparator"; }
    virtual int Compare(const Slice& a, const Slice& b) const {
      return ToNumber(a) - ToNumber(b);
    }
    virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
      ToNumber(*s);  // Check format
      ToNumber(l);   // Check format
    }
    virtual void FindShortSuccessor(std::string* key) const {
      ToNumber(*key);  // Check format
    }

   private:
    static int ToNumber(const Slice& x) {
      // Check that there are no extra characters.
      ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
          << EscapeString(x);
      int val;
      char ignored;
      ASSERT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
          << EscapeString(x);
      return val;
    }
  };
  Options new_options;
  NumberComparator cmp;
  do {
    new_options = CurrentOptions();
    new_options.create_if_missing = true;
    new_options.comparator = &cmp;
    new_options.filter_policy = nullptr;   // Cannot use bloom filters
    new_options.write_buffer_size = 1000;  // Compact more often
    DestroyAndReopen(&new_options);
    ASSERT_OK(Put("[10]", "ten"));
    ASSERT_OK(Put("[0x14]", "twenty"));
    for (int i = 0; i < 2; i++) {
      // Decimal and hex spellings of the same number are the same key.
      ASSERT_EQ("ten", Get("[10]"));
      ASSERT_EQ("ten", Get("[0xa]"));
      ASSERT_EQ("twenty", Get("[20]"));
      ASSERT_EQ("twenty", Get("[0x14]"));
      ASSERT_EQ("NOT_FOUND", Get("[15]"));
      ASSERT_EQ("NOT_FOUND", Get("[0xf]"));
      Compact("[0]", "[9999]");
    }

    for (int run = 0; run < 2; run++) {
      for (int i = 0; i < 1000; i++) {
        char buf[100];
        snprintf(buf, sizeof(buf), "[%d]", i * 10);
        ASSERT_OK(Put(buf, buf));
      }
      Compact("[0]", "[1000000]");
    }
  } while (ChangeCompactOptions(&new_options));
}
|
|
|
|
|
2011-10-05 23:30:28 +00:00
|
|
|
TEST(DBTest, ManualCompaction) {
  ASSERT_EQ(dbfull()->MaxMemCompactionLevel(), 2)
      << "Need to update this test to match kMaxMemCompactLevel";

  MakeTables(3, "p", "q");
  ASSERT_EQ("1,1,1", FilesPerLevel());

  // Compaction range falls before files
  Compact("", "c");
  ASSERT_EQ("1,1,1", FilesPerLevel());

  // Compaction range falls after files
  Compact("r", "z");
  ASSERT_EQ("1,1,1", FilesPerLevel());

  // Compaction range overlaps files
  Compact("p1", "p9");
  ASSERT_EQ("0,0,1", FilesPerLevel());

  // Populate a different range
  MakeTables(3, "c", "e");
  ASSERT_EQ("1,1,2", FilesPerLevel());

  // Compact just the new range
  Compact("b", "f");
  ASSERT_EQ("0,0,2", FilesPerLevel());

  // Compact all
  MakeTables(1, "a", "z");
  ASSERT_EQ("0,1,2", FilesPerLevel());
  db_->CompactRange(nullptr, nullptr);
  ASSERT_EQ("0,0,1", FilesPerLevel());
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
TEST(DBTest, DBOpen_Options) {
  std::string dbname = test::TmpDir() + "/db_options_test";
  ASSERT_OK(DestroyDB(dbname, Options()));

  // Does not exist, and create_if_missing == false: error
  DB* db = nullptr;
  Options opts;
  opts.create_if_missing = false;
  Status s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
  ASSERT_TRUE(db == nullptr);

  // Does not exist, and create_if_missing == true: OK
  opts.create_if_missing = true;
  s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);

  delete db;
  db = nullptr;

  // Does exist, and error_if_exists == true: error
  opts.create_if_missing = false;
  opts.error_if_exists = true;
  s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
  ASSERT_TRUE(db == nullptr);

  // Does exist, and error_if_exists == false: OK
  opts.create_if_missing = true;
  opts.error_if_exists = false;
  s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);

  delete db;
  db = nullptr;
}
|
|
|
|
|
2012-10-29 22:25:01 +00:00
|
|
|
TEST(DBTest, DBOpen_Change_NumLevels) {
  std::string dbname = test::TmpDir() + "/db_change_num_levels";
  ASSERT_OK(DestroyDB(dbname, Options()));
  Options opts;
  Status s;
  DB* db = nullptr;
  opts.create_if_missing = true;
  s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);
  db->Put(WriteOptions(), "a", "123");
  db->Put(WriteOptions(), "b", "234");
  db->CompactRange(nullptr, nullptr);
  delete db;
  db = nullptr;

  // Reopening an existing DB with a smaller num_levels must fail
  // with a corruption error rather than silently dropping levels.
  opts.create_if_missing = false;
  opts.num_levels = 2;
  s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "Corruption") != nullptr);
  ASSERT_TRUE(db == nullptr);
}
|
|
|
|
|
2012-12-17 19:26:59 +00:00
|
|
|
TEST(DBTest, DestroyDBMetaDatabase) {
  std::string dbname = test::TmpDir() + "/db_meta";
  std::string metadbname = MetaDatabaseName(dbname, 0);
  std::string metametadbname = MetaDatabaseName(metadbname, 0);

  // Destroy previous versions if they exist. Using the long way.
  ASSERT_OK(DestroyDB(metametadbname, Options()));
  ASSERT_OK(DestroyDB(metadbname, Options()));
  ASSERT_OK(DestroyDB(dbname, Options()));

  // Setup databases
  Options opts;
  opts.create_if_missing = true;
  DB* db = nullptr;
  ASSERT_OK(DB::Open(opts, dbname, &db));
  delete db;
  db = nullptr;
  ASSERT_OK(DB::Open(opts, metadbname, &db));
  delete db;
  db = nullptr;
  ASSERT_OK(DB::Open(opts, metametadbname, &db));
  delete db;
  db = nullptr;

  // Delete databases
  ASSERT_OK(DestroyDB(dbname, Options()));

  // Check if deletion worked.
  opts.create_if_missing = false;
  ASSERT_TRUE(!(DB::Open(opts, dbname, &db)).ok());
  ASSERT_TRUE(!(DB::Open(opts, metadbname, &db)).ok());
  ASSERT_TRUE(!(DB::Open(opts, metametadbname, &db)).ok());
}
|
|
|
|
|
2012-01-25 22:56:52 +00:00
|
|
|
// Check that number of files does not grow when we are out of space
|
|
|
|
// Check that number of files does not grow when we are out of space
TEST(DBTest, NoSpace) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    Reopen(&options);

    ASSERT_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    Compact("a", "z");
    const int num_files = CountFiles();
    env_->no_space_.Release_Store(env_);  // Force out-of-space errors
    env_->sleep_counter_.Reset();
    for (int i = 0; i < 5; i++) {
      for (int level = 0; level < dbfull()->NumberLevels() - 1; level++) {
        dbfull()->TEST_CompactRange(level, nullptr, nullptr);
      }
    }
    env_->no_space_.Release_Store(nullptr);
    ASSERT_LT(CountFiles(), num_files + 3);

    // Check that compaction attempts slept after errors
    ASSERT_GE(env_->sleep_counter_.Read(), 5);
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-08-07 22:20:41 +00:00
|
|
|
TEST(DBTest, NonWritableFileSystem) {
  do {
    Options options = CurrentOptions();
    options.write_buffer_size = 1000;
    options.env = env_;
    Reopen(&options);
    ASSERT_OK(Put("foo", "v1"));
    env_->non_writable_.Release_Store(env_);  // Force errors for new files
    std::string big(100000, 'x');
    int errors = 0;
    for (int i = 0; i < 20; i++) {
      // Each large Put forces a flush, which should fail while the
      // filesystem is non-writable.
      if (!Put("foo", big).ok()) {
        errors++;
        env_->SleepForMicroseconds(100000);
      }
    }
    ASSERT_GT(errors, 0);
    env_->non_writable_.Release_Store(nullptr);
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-01-08 20:00:13 +00:00
|
|
|
TEST(DBTest, ManifestWriteError) {
  // Test for the following problem:
  // (a) Compaction produces file F
  // (b) Log record containing F is written to MANIFEST file, but Sync() fails
  // (c) GC deletes F
  // (d) After reopening DB, reads fail since deleted F is named in log record

  // We iterate twice.  In the second iteration, everything is the
  // same except the log record never makes it to the MANIFEST file.
  for (int iter = 0; iter < 2; iter++) {
    port::AtomicPointer* error_type = (iter == 0)
        ? &env_->manifest_sync_error_
        : &env_->manifest_write_error_;

    // Insert foo=>bar mapping
    Options options = CurrentOptions();
    options.env = env_;
    options.create_if_missing = true;
    options.error_if_exists = false;
    DestroyAndReopen(&options);
    ASSERT_OK(Put("foo", "bar"));
    ASSERT_EQ("bar", Get("foo"));

    // Memtable compaction (will succeed)
    dbfull()->TEST_FlushMemTable();
    ASSERT_EQ("bar", Get("foo"));
    const int last = dbfull()->MaxMemCompactionLevel();
    ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo=>bar is now in last level

    // Merging compaction (will fail)
    error_type->Release_Store(env_);
    dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
    ASSERT_EQ("bar", Get("foo"));

    // Recovery: should not lose data
    error_type->Release_Store(nullptr);
    Reopen(&options);
    ASSERT_EQ("bar", Get("foo"));
  }
}
|
|
|
|
|
2013-10-28 19:36:02 +00:00
|
|
|
TEST(DBTest, PutFailsParanoid) {
  // Test the following:
  // (a) A random put fails in paranoid mode (simulate by sync fail)
  // (b) All other puts have to fail, even if writes would succeed
  // (c) All of that should happen ONLY if paranoid_checks = true

  Options options = CurrentOptions();
  options.env = env_;
  options.create_if_missing = true;
  options.error_if_exists = false;
  options.paranoid_checks = true;
  DestroyAndReopen(&options);
  Status s;

  ASSERT_OK(Put("foo", "bar"));
  ASSERT_OK(Put("foo1", "bar1"));
  // simulate error
  env_->log_write_error_.Release_Store(env_);
  s = Put("foo2", "bar2");
  ASSERT_TRUE(!s.ok());
  env_->log_write_error_.Release_Store(nullptr);
  s = Put("foo3", "bar3");
  // the next put should fail, too
  ASSERT_TRUE(!s.ok());
  // but we're still able to read
  ASSERT_EQ("bar", Get("foo"));

  // do the same thing with paranoid checks off
  options.paranoid_checks = false;
  DestroyAndReopen(&options);

  ASSERT_OK(Put("foo", "bar"));
  ASSERT_OK(Put("foo1", "bar1"));
  // simulate error
  env_->log_write_error_.Release_Store(env_);
  s = Put("foo2", "bar2");
  ASSERT_TRUE(!s.ok());
  env_->log_write_error_.Release_Store(nullptr);
  s = Put("foo3", "bar3");
  // the next put should NOT fail
  ASSERT_TRUE(s.ok());
}
|
|
|
|
|
2012-01-25 22:56:52 +00:00
|
|
|
TEST(DBTest, FilesDeletedAfterCompaction) {
  do {
    ASSERT_OK(Put("foo", "v2"));
    Compact("a", "z");
    const int num_files = CountLiveFiles();
    // Repeated overwrite+compact cycles must not leak live files.
    for (int i = 0; i < 10; i++) {
      ASSERT_OK(Put("foo", "v2"));
      Compact("a", "z");
    }
    ASSERT_EQ(CountLiveFiles(), num_files);
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2012-04-17 15:36:46 +00:00
|
|
|
// Exercises the bloom filter: lookups of present keys should need
// roughly one sstable read each, and lookups of missing keys should
// almost never touch disk because the filter rejects them.
TEST(DBTest, BloomFilter) {
  do {
    env_->count_random_reads_ = true;
    Options options = CurrentOptions();
    options.env = env_;
    options.no_block_cache = true;
    options.filter_policy = NewBloomFilterPolicy(10);
    Reopen(&options);

    // Populate multiple layers: a big compacted sstable plus a small
    // overlapping one produced by the second round of puts.
    const int N = 10000;
    for (int i = 0; i < N; i++) {
      ASSERT_OK(Put(Key(i), Key(i)));
    }
    Compact("a", "z");
    for (int i = 0; i < N; i += 100) {
      ASSERT_OK(Put(Key(i), Key(i)));
    }
    dbfull()->TEST_FlushMemTable();

    // Prevent auto compactions triggered by seeks
    env_->delay_sstable_sync_.Release_Store(env_);

    // Lookup present keys. Should rarely read from small sstable.
    env_->random_read_counter_.Reset();
    for (int i = 0; i < N; i++) {
      ASSERT_EQ(Key(i), Get(Key(i)));
    }
    int reads = env_->random_read_counter_.Read();
    fprintf(stderr, "%d present => %d reads\n", N, reads);
    ASSERT_GE(reads, N);
    ASSERT_LE(reads, N + 2*N/100);

    // Lookup missing keys. Should rarely read from either sstable.
    // (BUG FIX: the original comment said "present keys" here, but the
    // loop below queries keys that were never written.)
    env_->random_read_counter_.Reset();
    for (int i = 0; i < N; i++) {
      ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing"));
    }
    reads = env_->random_read_counter_.Read();
    fprintf(stderr, "%d missing => %d reads\n", N, reads);
    ASSERT_LE(reads, 3*N/100);

    env_->delay_sstable_sync_.Release_Store(nullptr);
    Close();
    delete options.filter_policy;
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2012-09-15 00:11:35 +00:00
|
|
|
// Takes a file-level snapshot with DisableFileDeletions/GetLiveFiles,
// copies the files to a side directory, verifies the copy opens as a
// consistent DB, then checks that later writes grow (but do not
// replace) the MANIFEST.
// BUG FIX: the two "release file snapshot" sites called
// DisableFileDeletions() a second time instead of EnableFileDeletions(),
// so file deletions were never actually re-enabled.
TEST(DBTest, SnapshotFiles) {
  do {
    Options options = CurrentOptions();
    const EnvOptions soptions;
    options.write_buffer_size = 100000000;       // Large write buffer
    Reopen(&options);

    Random rnd(301);

    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    std::vector<std::string> values;
    for (int i = 0; i < 80; i++) {
      values.push_back(RandomString(&rnd, 100000));
      ASSERT_OK(Put(Key(i), values[i]));
    }

    // assert that nothing makes it to disk yet.
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);

    // get a file snapshot
    uint64_t manifest_number = 0;
    uint64_t manifest_size = 0;
    std::vector<std::string> files;
    dbfull()->DisableFileDeletions();
    dbfull()->GetLiveFiles(files, &manifest_size);

    // CURRENT, MANIFEST, *.sst files
    ASSERT_EQ(files.size(), 3U);

    uint64_t number = 0;
    FileType type;

    // copy these files to a new snapshot directory
    std::string snapdir = dbname_ + ".snapdir/";
    std::string mkdir = "mkdir -p " + snapdir;
    ASSERT_EQ(system(mkdir.c_str()), 0);

    for (unsigned int i = 0; i < files.size(); i++) {
      // our clients require that GetLiveFiles returns
      // files with "/" as first character!
      ASSERT_EQ(files[i][0], '/');
      std::string src = dbname_ + files[i];
      std::string dest = snapdir + files[i];

      uint64_t size;
      ASSERT_OK(env_->GetFileSize(src, &size));

      // record the number and the size of the
      // latest manifest file
      if (ParseFileName(files[i].substr(1), &number, &type)) {
        if (type == kDescriptorFile) {
          if (number > manifest_number) {
            manifest_number = number;
            ASSERT_GE(size, manifest_size);
            size = manifest_size;  // copy only valid MANIFEST data
          }
        }
      }
      unique_ptr<SequentialFile> srcfile;
      ASSERT_OK(env_->NewSequentialFile(src, &srcfile, soptions));
      unique_ptr<WritableFile> destfile;
      ASSERT_OK(env_->NewWritableFile(dest, &destfile, soptions));

      char buffer[4096];
      Slice slice;
      while (size > 0) {
        uint64_t one = std::min(uint64_t(sizeof(buffer)), size);
        ASSERT_OK(srcfile->Read(one, &slice, buffer));
        ASSERT_OK(destfile->Append(slice));
        size -= slice.size();
      }
      ASSERT_OK(destfile->Close());
    }

    // release file snapshot (was erroneously DisableFileDeletions()).
    dbfull()->EnableFileDeletions();

    // overwrite one key, this key should not appear in the snapshot
    std::vector<std::string> extras;
    for (unsigned int i = 0; i < 1; i++) {
      extras.push_back(RandomString(&rnd, 100000));
      ASSERT_OK(Put(Key(i), extras[i]));
    }

    // verify that data in the snapshot are correct
    Options opts;
    DB* snapdb;
    opts.create_if_missing = false;
    Status stat = DB::Open(opts, snapdir, &snapdb);
    ASSERT_OK(stat);

    ReadOptions roptions;
    std::string val;
    for (unsigned int i = 0; i < 80; i++) {
      stat = snapdb->Get(roptions, Key(i), &val);
      ASSERT_EQ(values[i].compare(val), 0);
    }
    delete snapdb;

    // look at the new live files after we added an 'extra' key
    // and after we took the first snapshot.
    uint64_t new_manifest_number = 0;
    uint64_t new_manifest_size = 0;
    std::vector<std::string> newfiles;
    dbfull()->DisableFileDeletions();
    dbfull()->GetLiveFiles(newfiles, &new_manifest_size);

    // find the new manifest file. assert that this manifest file is
    // the same one as in the previous snapshot. But its size should be
    // larger because we added an extra key after taking the
    // previous shapshot.
    for (unsigned int i = 0; i < newfiles.size(); i++) {
      std::string src = dbname_ + "/" + newfiles[i];
      // record the lognumber and the size of the
      // latest manifest file
      if (ParseFileName(newfiles[i].substr(1), &number, &type)) {
        if (type == kDescriptorFile) {
          if (number > new_manifest_number) {
            uint64_t size;
            new_manifest_number = number;
            ASSERT_OK(env_->GetFileSize(src, &size));
            ASSERT_GE(size, new_manifest_size);
          }
        }
      }
    }
    ASSERT_EQ(manifest_number, new_manifest_number);
    ASSERT_GT(new_manifest_size, manifest_size);

    // release file snapshot (was erroneously DisableFileDeletions()).
    dbfull()->EnableFileDeletions();
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-02-28 22:09:30 +00:00
|
|
|
// Verifies that redundant key-value entries (overwritten values and
// deletion markers for keys with no older live version) are purged at
// memtable-flush time when purge_redundant_kvs_while_flush is set —
// but never when a live snapshot could still observe them.
// DOC FIX: the final scenario was mislabeled "Case 5" (duplicate);
// renumbered to Case 6.
TEST(DBTest, CompactOnFlush) {
  do {
    Options options = CurrentOptions();
    options.purge_redundant_kvs_while_flush = true;
    options.disable_auto_compactions = true;
    Reopen(&options);

    Put("foo", "v1");
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ v1 ]");

    // Write two new keys
    Put("a", "begin");
    Put("z", "end");
    dbfull()->TEST_FlushMemTable();

    // Case 1: Delete followed by a put
    Delete("foo");
    Put("foo", "v2");
    ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");

    // After the current memtable is flushed, the DEL should
    // have been removed
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");

    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");

    // Case 2: Delete followed by another delete
    Delete("foo");
    Delete("foo");
    ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, DEL, v2 ]");
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v2 ]");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ(AllEntriesFor("foo"), "[ ]");

    // Case 3: Put followed by a delete
    Put("foo", "v3");
    Delete("foo");
    ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v3 ]");
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ DEL ]");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ(AllEntriesFor("foo"), "[ ]");

    // Case 4: Put followed by another Put
    Put("foo", "v4");
    Put("foo", "v5");
    ASSERT_EQ(AllEntriesFor("foo"), "[ v5, v4 ]");
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ v5 ]");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ(AllEntriesFor("foo"), "[ v5 ]");

    // clear database
    Delete("foo");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ(AllEntriesFor("foo"), "[ ]");

    // Case 5: Put followed by snapshot followed by another Put
    // Both puts should remain.
    Put("foo", "v6");
    const Snapshot* snapshot = db_->GetSnapshot();
    Put("foo", "v7");
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ v7, v6 ]");
    db_->ReleaseSnapshot(snapshot);

    // clear database
    Delete("foo");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ(AllEntriesFor("foo"), "[ ]");

    // Case 6: snapshot followed by a put followed by another Put
    // Only the last put should remain.
    const Snapshot* snapshot1 = db_->GetSnapshot();
    Put("foo", "v8");
    Put("foo", "v9");
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ v9 ]");
    db_->ReleaseSnapshot(snapshot1);
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-05-06 18:41:01 +00:00
|
|
|
// Returns the file numbers of all live WAL files (type kLogFile) found
// directly under `path`, in directory-listing order. GetChildren/parse
// failures simply yield fewer entries — acceptable for a test helper.
std::vector<std::uint64_t> ListLogFiles(Env* env, const std::string& path) {
  std::vector<std::string> files;
  std::vector<uint64_t> log_files;
  env->GetChildren(path, &files);
  uint64_t number;
  FileType type;
  for (size_t i = 0; i < files.size(); ++i) {
    if (ParseFileName(files[i], &number, &type) && type == kLogFile) {
      log_files.push_back(number);
    }
  }
  // Return by value: NRVO/implicit move applies. The original wrapped
  // this in std::move(), which pessimizes by inhibiting copy elision.
  return log_files;
}
|
|
|
|
|
2013-11-07 02:46:28 +00:00
|
|
|
// TEST : Create DB with a ttl and no size limit.
// Put some keys. Count the log files present in the DB just after insert.
// Re-open db. Causes deletion/archival to take place.
// Assert that the files moved under "/archive".
// Reopen db with small ttl.
// Assert that archive was removed.
TEST(DBTest, WALArchivalTtl) {
  do {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.WAL_ttl_seconds = 1000;
    DestroyAndReopen(&options);

    std::string archive_dir = ArchivalDirectory(dbname_);

    for (int round = 0; round < 10; ++round) {
      for (int j = 0; j < 10; ++j) {
        ASSERT_OK(Put(Key(10 * round + j), DummyString(1024)));
      }

      std::vector<uint64_t> live_wals = ListLogFiles(env_, dbname_);

      options.create_if_missing = false;
      Reopen(&options);

      // Every WAL that was live before the reopen must now be archived.
      std::vector<uint64_t> archived_now = ListLogFiles(env_, archive_dir);
      std::set<uint64_t> archived_set(archived_now.begin(),
                                      archived_now.end());
      for (auto& wal : live_wals) {
        ASSERT_TRUE(archived_set.find(wal) != archived_set.end());
      }
    }

    std::vector<uint64_t> archived = ListLogFiles(env_, archive_dir);
    ASSERT_TRUE(archived.size() > 0);

    // Shrink the TTL, wait past it, and reopen: the archive must be
    // emptied out.
    options.WAL_ttl_seconds = 1;
    env_->SleepForMicroseconds(2 * 1000 * 1000);
    Reopen(&options);

    archived = ListLogFiles(env_, archive_dir);
    ASSERT_TRUE(archived.empty());
  } while (ChangeCompactOptions());
}
|
2012-09-27 08:05:38 +00:00
|
|
|
|
2013-11-07 02:46:28 +00:00
|
|
|
uint64_t GetLogDirSize(std::string dir_path, SpecialEnv* env) {
|
|
|
|
uint64_t dir_size = 0;
|
|
|
|
std::vector<std::string> files;
|
|
|
|
env->GetChildren(dir_path, &files);
|
|
|
|
for (auto& f : files) {
|
|
|
|
uint64_t number;
|
|
|
|
FileType type;
|
|
|
|
if (ParseFileName(f, &number, &type) && type == kLogFile) {
|
|
|
|
std::string const file_path = dir_path + "/" + f;
|
|
|
|
uint64_t file_size;
|
|
|
|
env->GetFileSize(file_path, &file_size);
|
|
|
|
dir_size += file_size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return dir_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
// TEST : Create DB with huge size limit and no ttl.
// Put some keys. Count the archived log files present in the DB
// just after insert. Assert that there are many enough.
// Change size limit. Re-open db.
// Assert that archive is not greater than WAL_size_limit_MB.
// Set ttl and time_to_check_ to small values. Re-open db.
// Assert that there are no archived logs left.
TEST(DBTest, WALArchivalSizeLimit) {
  do {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.WAL_ttl_seconds = 0;
    options.WAL_size_limit_MB = 1000;

    DestroyAndReopen(&options);
    for (int i = 0; i < 128 * 128; ++i) {
      ASSERT_OK(Put(Key(i), DummyString(1024)));
    }
    Reopen(&options);

    std::string archive_dir = ArchivalDirectory(dbname_);
    std::vector<std::uint64_t> archived = ListLogFiles(env_, archive_dir);
    ASSERT_TRUE(archived.size() > 2);

    // Tighten the size limit and force a purge pass: the archive must
    // shrink below the configured cap.
    options.WAL_size_limit_MB = 8;
    Reopen(&options);
    dbfull()->TEST_PurgeObsoleteteWAL();

    uint64_t archive_size = GetLogDirSize(archive_dir, env_);
    ASSERT_TRUE(archive_size <= options.WAL_size_limit_MB * 1024 * 1024);

    // With a tiny TTL (and check interval) the archive empties entirely.
    options.WAL_ttl_seconds = 1;
    dbfull()->TEST_SetDefaultTimeToCheck(1);
    env_->SleepForMicroseconds(2 * 1000 * 1000);
    Reopen(&options);
    dbfull()->TEST_PurgeObsoleteteWAL();

    archived = ListLogFiles(env_, archive_dir);
    ASSERT_TRUE(archived.empty());
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-10-25 02:09:02 +00:00
|
|
|
// Drains `iter`, storing the number of batches seen into `count` and
// asserting that sequence numbers are strictly increasing and that the
// iterator stays healthy after each step. Returns the sequence number
// of the last batch read (default-constructed BatchResult's sequence
// when the iterator was never valid).
SequenceNumber ReadRecords(
  std::unique_ptr<TransactionLogIterator>& iter,
  int& count) {
  count = 0;
  SequenceNumber prev_seq = 0;
  BatchResult res;
  while (iter->Valid()) {
    res = iter->GetBatch();
    // Sequence numbers must advance monotonically across batches.
    ASSERT_TRUE(res.sequence > prev_seq);
    ++count;
    prev_seq = res.sequence;
    ASSERT_OK(iter->status());
    iter->Next();
  }
  return res.sequence;
}
|
|
|
|
|
|
|
|
void ExpectRecords(
|
|
|
|
const int expected_no_records,
|
|
|
|
std::unique_ptr<TransactionLogIterator>& iter) {
|
|
|
|
int num_records;
|
|
|
|
ReadRecords(iter, num_records);
|
|
|
|
ASSERT_EQ(num_records, expected_no_records);
|
2013-03-21 22:12:35 +00:00
|
|
|
}
|
|
|
|
|
2012-11-30 01:28:37 +00:00
|
|
|
// Basic GetUpdatesSince coverage: three puts yield three records, and
// after a reopen plus three more puts an iterator from sequence 0
// replays all six.
TEST(DBTest, TransactionLogIterator) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    Put("key1", DummyString(1024));
    Put("key2", DummyString(1024));
    Put("key2", DummyString(1024));
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3U);
    {
      auto iter = OpenTransactionLogIter(0);
      ExpectRecords(3, iter);
    }
    Reopen(&options);
    env_->SleepForMicroseconds(2 * 1000 * 1000);
    {
      Put("key4", DummyString(1024));
      Put("key5", DummyString(1024));
      Put("key6", DummyString(1024));
    }
    {
      auto iter = OpenTransactionLogIter(0);
      ExpectRecords(6, iter);
    }
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-03-18 21:50:59 +00:00
|
|
|
// The iterator must skip over zero-record WAL files (created by
// back-to-back reopens) and still return every real record.
TEST(DBTest, TransactionLogIteratorMoveOverZeroFiles) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    // Do a plain Reopen.
    Put("key1", DummyString(1024));
    // Two reopens should create a zero record WAL file.
    Reopen(&options);
    Reopen(&options);

    Put("key2", DummyString(1024));

    auto iter = OpenTransactionLogIter(0);
    ExpectRecords(2, iter);
  } while (ChangeCompactOptions());
}
|
2013-03-28 20:13:35 +00:00
|
|
|
|
2013-11-17 07:44:39 +00:00
|
|
|
// TODO(kailiu) disable the in non-linux platforms to temporarily solve
|
|
|
|
// // the unit test failure.
|
|
|
|
#ifdef OS_LINUX
|
2013-03-28 20:13:35 +00:00
|
|
|
// After consuming the last record the iterator becomes invalid but
// stays healthy; once a new write lands, Next() picks it up.
TEST(DBTest, TransactionLogIteratorStallAtLastRecord) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    Put("key1", DummyString(1024));

    auto iter = OpenTransactionLogIter(0);
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());

    // Step past the only record: invalid, but not an error.
    iter->Next();
    ASSERT_TRUE(!iter->Valid());
    ASSERT_OK(iter->status());

    // A fresh write un-stalls the iterator.
    Put("key2", DummyString(1024));
    iter->Next();
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
  } while (ChangeCompactOptions());
}
|
2013-11-17 07:44:39 +00:00
|
|
|
#endif
|
2013-03-28 20:13:35 +00:00
|
|
|
|
2013-03-28 20:19:07 +00:00
|
|
|
// GetUpdatesSince on a freshly-created, empty DB must succeed and hand
// back an iterator with no records.
// FIX: the original stored the Status into `status` and never checked
// it (leaving an unused variable); it is now asserted.
TEST(DBTest, TransactionLogIteratorJustEmptyFile) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    unique_ptr<TransactionLogIterator> iter;
    Status status = dbfull()->GetUpdatesSince(0, &iter);
    ASSERT_OK(status);
    // Check that an empty iterator is returned
    ASSERT_TRUE(!iter->Valid());
  } while (ChangeCompactOptions());
}
|
2013-04-03 00:18:27 +00:00
|
|
|
|
|
|
|
// Records written and flushed before a restart must still be
// replayable through GetUpdatesSince afterwards.
TEST(DBTest, TransactionLogIteratorCheckAfterRestart) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    Put("key1", DummyString(1024));
    Put("key2", DummyString(1023));
    dbfull()->Flush(FlushOptions());
    Reopen(&options);
    auto iter = OpenTransactionLogIter(0);
    ExpectRecords(2, iter);
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-10-25 02:09:02 +00:00
|
|
|
// A truncated (corrupted) WAL must make the iterator stop early rather
// than error out, and iteration restarted past the gap must find the
// records written to the subsequent log file.
TEST(DBTest, TransactionLogIteratorCorruptedLog) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    for (int i = 0; i < 1024; i++) {
      Put("key"+std::to_string(i), DummyString(10));
    }
    dbfull()->Flush(FlushOptions());

    // Corrupt this log to create a gap: chop the first WAL in half.
    rocksdb::VectorLogPtr wal_files;
    ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files));
    const auto logfilePath = dbname_ + "/" + wal_files.front()->PathName();
    ASSERT_EQ(
        0,
        truncate(logfilePath.c_str(), wal_files.front()->SizeFileBytes() / 2));

    // Insert a new entry to a new log file
    Put("key1025", DummyString(10));

    // Try to read from the beginning. Should stop before the gap and read less
    // than 1025 entries
    auto iter = OpenTransactionLogIter(0);
    int count;
    int last_sequence_read = ReadRecords(iter, count);
    ASSERT_LT(last_sequence_read, 1025);

    // Try to read past the gap, should be able to seek to key1025
    auto iter2 = OpenTransactionLogIter(last_sequence_read + 1);
    ExpectRecords(1, iter2);
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-04-08 23:28:09 +00:00
|
|
|
// A WriteBatch counts as a single record: starting iteration at
// sequence 3 must yield the tail of the batch's updates plus the
// standalone put — two records total.
TEST(DBTest, TransactionLogIteratorBatchOperations) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    WriteBatch batch;
    batch.Put("key1", DummyString(1024));
    batch.Put("key2", DummyString(1024));
    batch.Put("key3", DummyString(1024));
    batch.Delete("key2");
    dbfull()->Write(WriteOptions(), &batch);
    dbfull()->Flush(FlushOptions());
    Reopen(&options);
    Put("key4", DummyString(1024));
    auto iter = OpenTransactionLogIter(3);
    ExpectRecords(2, iter);
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-08-14 23:32:46 +00:00
|
|
|
// PutLogData blobs written into a batch must survive the WAL round
// trip and replay, interleaved in their original positions among the
// puts and deletes.
TEST(DBTest, TransactionLogIteratorBlobs) {
  Options options = OptionsForLogIterTest();
  DestroyAndReopen(&options);
  {
    WriteBatch batch;
    batch.Put("key1", DummyString(1024));
    batch.Put("key2", DummyString(1024));
    batch.PutLogData(Slice("blob1"));
    batch.Put("key3", DummyString(1024));
    batch.PutLogData(Slice("blob2"));
    batch.Delete("key2");
    dbfull()->Write(WriteOptions(), &batch);
    Reopen(&options);
  }

  auto res = OpenTransactionLogIter(0)->GetBatch();
  // Records each replayed operation, in order, as a text trace.
  struct Handler : public WriteBatch::Handler {
    std::string seen;
    virtual void Put(const Slice& key, const Slice& value) {
      seen += "Put(" + key.ToString() + ", " + std::to_string(value.size()) +
          ")";
    }
    virtual void Merge(const Slice& key, const Slice& value) {
      seen += "Merge(" + key.ToString() + ", " + std::to_string(value.size()) +
          ")";
    }
    virtual void LogData(const Slice& blob) {
      seen += "LogData(" + blob.ToString() + ")";
    }
    virtual void Delete(const Slice& key) {
      seen += "Delete(" + key.ToString() + ")";
    }
  } handler;
  res.writeBatchPtr->Iterate(&handler);
  ASSERT_EQ("Put(key1, 1024)"
            "Put(key2, 1024)"
            "LogData(blob1)"
            "Put(key3, 1024)"
            "LogData(blob2)"
            "Delete(key2)", handler.seen);
}
|
|
|
|
|
2012-09-27 08:05:38 +00:00
|
|
|
// Repeated Gets over a multi-level DB should trigger read-driven
// compaction, reducing the file count in at least one of levels 0-2.
// FIXES: `l3` originally sampled NumTableFilesAtLevel(3) although both
// the sanity assertion and the final comparison use level 2 — it now
// samples level 2; also removed the unused local `values`.
TEST(DBTest, ReadCompaction) {
  std::string value(4096, '4');  // a string of size 4K
  {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.max_open_files = 20;  // only 10 file in file-cache
    options.target_file_size_base = 512;
    options.write_buffer_size = 64 * 1024;
    options.filter_policy = nullptr;
    options.block_size = 4096;
    options.no_block_cache = true;

    Reopen(&options);

    // Write 8MB (2000 values, each 4K)
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    for (int i = 0; i < 2000; i++) {
      ASSERT_OK(Put(Key(i), value));
    }

    // clear level 0 and 1 if necessary.
    dbfull()->TEST_FlushMemTable();
    dbfull()->TEST_CompactRange(0, nullptr, nullptr);
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    ASSERT_EQ(NumTableFilesAtLevel(1), 0);

    // write some new keys into level 0
    for (int i = 0; i < 2000; i = i + 16) {
      ASSERT_OK(Put(Key(i), value));
    }
    dbfull()->Flush(FlushOptions());

    // Wait for any write compaction to finish
    dbfull()->TEST_WaitForCompact();

    // remember number of files in each level
    int l1 = NumTableFilesAtLevel(0);
    int l2 = NumTableFilesAtLevel(1);
    int l3 = NumTableFilesAtLevel(2);  // was level 3: mismatch with checks below
    ASSERT_NE(NumTableFilesAtLevel(0), 0);
    ASSERT_NE(NumTableFilesAtLevel(1), 0);
    ASSERT_NE(NumTableFilesAtLevel(2), 0);

    // read a bunch of times, trigger read compaction
    for (int j = 0; j < 100; j++) {
      for (int i = 0; i < 2000; i++) {
        Get(Key(i));
      }
    }
    // wait for read compaction to finish
    env_->SleepForMicroseconds(1000000);

    // verify that the number of files have decreased
    // in some level, indicating that there was a compaction
    ASSERT_TRUE(NumTableFilesAtLevel(0) < l1 ||
                NumTableFilesAtLevel(1) < l2 ||
                NumTableFilesAtLevel(2) < l3);
  }
}
|
|
|
|
|
2011-05-28 00:53:58 +00:00
|
|
|
// Multi-threaded test:
|
|
|
|
namespace {

// Tunables for the multi-threaded smoke test below.
static const int kNumThreads = 4;
static const int kTestSeconds = 10;
static const int kNumKeys = 1000;

// State shared between the driver (TEST MultiThreaded) and the worker
// threads. `stop` is the shutdown flag; `counter[i]` is thread i's
// published operation count; `thread_done[i]` is set non-null when
// thread i has exited its loop. All cross-thread communication uses
// AtomicPointer acquire/release pairs.
struct MTState {
  DBTest* test;
  port::AtomicPointer stop;
  port::AtomicPointer counter[kNumThreads];
  port::AtomicPointer thread_done[kNumThreads];
};

// Per-thread argument: shared state plus this thread's slot index.
struct MTThread {
  MTState* state;
  int id;
};

// Worker loop: until `stop` is set, alternate randomly between writing
// a "<key>.<writer id>.<counter>" value and reading a key back,
// checking that any value read was written with a counter no larger
// than the writer's currently published counter.
static void MTThreadBody(void* arg) {
  MTThread* t = reinterpret_cast<MTThread*>(arg);
  int id = t->id;
  DB* db = t->state->test->db_;
  uintptr_t counter = 0;
  fprintf(stderr, "... starting thread %d\n", id);
  Random rnd(1000 + id);
  std::string value;
  char valbuf[1500];
  while (t->state->stop.Acquire_Load() == nullptr) {
    // Publish our progress BEFORE doing the op, so readers' upper-bound
    // check (below) is conservative.
    t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter));

    int key = rnd.Uniform(kNumKeys);
    char keybuf[20];
    snprintf(keybuf, sizeof(keybuf), "%016d", key);

    if (rnd.OneIn(2)) {
      // Write values of the form <key, my id, counter>.
      // We add some padding for force compactions.
      snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d",
               key, id, static_cast<int>(counter));
      ASSERT_OK(t->state->test->Put(Slice(keybuf), Slice(valbuf)));
    } else {
      // Read a value and verify that it matches the pattern written above.
      Status s = db->Get(ReadOptions(), Slice(keybuf), &value);
      if (s.IsNotFound()) {
        // Key has not yet been written
      } else {
        // Check that the writer thread counter is >= the counter in the value
        ASSERT_OK(s);
        int k, w, c;
        ASSERT_EQ(3, sscanf(value.c_str(), "%d.%d.%d", &k, &w, &c)) << value;
        ASSERT_EQ(k, key);
        ASSERT_GE(w, 0);
        ASSERT_LT(w, kNumThreads);
        ASSERT_LE((unsigned int)c, reinterpret_cast<uintptr_t>(
            t->state->counter[w].Acquire_Load()));
      }
    }
    counter++;
  }
  // Signal completion last, after all DB activity has ceased.
  t->state->thread_done[id].Release_Store(t);
  fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
}

}  // namespace
|
2011-05-28 00:53:58 +00:00
|
|
|
|
|
|
|
// Runs kNumThreads concurrent reader/writer threads (MTThreadBody) for
// kTestSeconds against the shared DB, under every option configuration
// produced by ChangeOptions().
TEST(DBTest, MultiThreaded) {
  do {
    // Initialize the shared state: no stop request, all counters and
    // done-flags cleared.
    MTState mt;
    mt.test = this;
    mt.stop.Release_Store(0);
    for (int id = 0; id < kNumThreads; id++) {
      mt.counter[id].Release_Store(0);
      mt.thread_done[id].Release_Store(0);
    }

    // Launch one worker per slot.
    MTThread thread[kNumThreads];
    for (int id = 0; id < kNumThreads; id++) {
      thread[id].state = &mt;
      thread[id].id = id;
      env_->StartThread(MTThreadBody, &thread[id]);
    }

    // Let them run for a while.
    env_->SleepForMicroseconds(kTestSeconds * 1000000);

    // Request shutdown, then poll until every worker acknowledges.
    mt.stop.Release_Store(&mt);
    for (int id = 0; id < kNumThreads; id++) {
      while (mt.thread_done[id].Acquire_Load() == nullptr) {
        env_->SleepForMicroseconds(100000);
      }
    }
  } while (ChangeOptions());
}
|
|
|
|
|
2011-05-21 02:17:43 +00:00
|
|
|
namespace {
// In-memory model of a DB's contents: key -> value, ordered bytewise as
// std::string comparison matches the default bytewise comparator.  Used by
// ModelDB and the Randomized test below.
using KVMap = std::map<std::string, std::string>;
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// A trivially-correct, STL-map-backed implementation of the DB interface.
// The Randomized test applies the same operations to a real DB and a
// ModelDB and asserts (via CompareIterators) that their contents agree.
// Operations the model does not need are stubbed out with benign returns.
class ModelDB: public DB {
 public:
  // A snapshot is simply a full copy of the map taken at GetSnapshot() time.
  class ModelSnapshot : public Snapshot {
   public:
    KVMap map_;
  };

  explicit ModelDB(const Options& options): options_(options) { }
  virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
    // Route through the DB base-class helper, which wraps the op in a
    // WriteBatch and calls Write() below.
    return DB::Put(o, k, v);
  }
  virtual Status Merge(const WriteOptions& o, const Slice& k, const Slice& v) {
    return DB::Merge(o, k, v);
  }
  virtual Status Delete(const WriteOptions& o, const Slice& key) {
    return DB::Delete(o, key);
  }
  // Point lookups are not modeled; callers compare via iterators instead.
  virtual Status Get(const ReadOptions& options,
                     const Slice& key, std::string* value) {
    return Status::NotSupported(key);
  }

  virtual std::vector<Status> MultiGet(const ReadOptions& options,
                                       const std::vector<Slice>& keys,
                                       std::vector<std::string>* values) {
    // One NotSupported status per requested key.
    std::vector<Status> s(keys.size(),
                          Status::NotSupported("Not implemented."));
    return s;
  }
  virtual bool KeyMayExist(const ReadOptions& options,
                           const Slice& key,
                           std::string* value,
                           bool* value_found = nullptr) {
    if (value_found != nullptr) {
      *value_found = false;
    }
    // "true" is always a safe answer for a may-exist query.
    return true; // Not Supported directly
  }
  virtual Iterator* NewIterator(const ReadOptions& options) {
    if (options.snapshot == nullptr) {
      // No snapshot requested: hand the iterator its own copy of the
      // current state (owned, freed in ~ModelIter).
      KVMap* saved = new KVMap;
      *saved = map_;
      return new ModelIter(saved, true);
    } else {
      // Iterate over the snapshot's map; the snapshot keeps ownership.
      const KVMap* snapshot_state =
          &(reinterpret_cast<const ModelSnapshot*>(options.snapshot)->map_);
      return new ModelIter(snapshot_state, false);
    }
  }
  virtual const Snapshot* GetSnapshot() {
    ModelSnapshot* snapshot = new ModelSnapshot;
    snapshot->map_ = map_;
    return snapshot;
  }

  virtual void ReleaseSnapshot(const Snapshot* snapshot) {
    delete reinterpret_cast<const ModelSnapshot*>(snapshot);
  }
  virtual Status Write(const WriteOptions& options, WriteBatch* batch) {
    // Replay the batch against map_ via the WriteBatch visitor interface.
    class Handler : public WriteBatch::Handler {
     public:
      KVMap* map_;
      virtual void Put(const Slice& key, const Slice& value) {
        (*map_)[key.ToString()] = value.ToString();
      }
      virtual void Merge(const Slice& key, const Slice& value) {
        // ignore merge for now
        //(*map_)[key.ToString()] = value.ToString();
      }
      virtual void Delete(const Slice& key) {
        map_->erase(key.ToString());
      }
    };
    Handler handler;
    handler.map_ = &map_;
    return batch->Iterate(&handler);
  }

  virtual bool GetProperty(const Slice& property, std::string* value) {
    return false;
  }
  virtual void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) {
    // The model has no storage footprint; report zero for every range.
    for (int i = 0; i < n; i++) {
      sizes[i] = 0;
    }
  }
  virtual void CompactRange(const Slice* start, const Slice* end,
                            bool reduce_level, int target_level) {
  }

  virtual int NumberLevels()
  {
    return 1;
  }

  virtual int MaxMemCompactionLevel()
  {
    return 1;
  }

  virtual int Level0StopWriteTrigger()
  {
    return -1;
  }

  virtual Env* GetEnv() const {
    return nullptr;
  }

  virtual const Options& GetOptions() const {
    return options_;
  }

  virtual Status Flush(const rocksdb::FlushOptions& options) {
    // Nothing to flush; report success.
    Status ret;
    return ret;
  }

  virtual Status DisableFileDeletions() {
    return Status::OK();
  }
  virtual Status EnableFileDeletions() {
    return Status::OK();
  }
  virtual Status GetLiveFiles(std::vector<std::string>&, uint64_t* size,
                              bool flush_memtable = true) {
    return Status::OK();
  }

  virtual Status GetSortedWalFiles(VectorLogPtr& files) {
    return Status::OK();
  }

  virtual Status DeleteFile(std::string name) {
    return Status::OK();
  }

  virtual SequenceNumber GetLatestSequenceNumber() const {
    return 0;
  }
  virtual Status GetUpdatesSince(rocksdb::SequenceNumber,
                                 unique_ptr<rocksdb::TransactionLogIterator>*) {
    return Status::NotSupported("Not supported in Model DB");
  }

 private:
  // Iterator over a KVMap; optionally owns the map (used for the
  // no-snapshot case, where the map is a private copy).
  class ModelIter: public Iterator {
   public:
    ModelIter(const KVMap* map, bool owned)
        : map_(map), owned_(owned), iter_(map_->end()) {
    }
    ~ModelIter() {
      if (owned_) delete map_;
    }
    virtual bool Valid() const { return iter_ != map_->end(); }
    virtual void SeekToFirst() { iter_ = map_->begin(); }
    virtual void SeekToLast() {
      if (map_->empty()) {
        iter_ = map_->end();
      } else {
        // Re-find the last key to obtain a forward iterator positioned on it.
        iter_ = map_->find(map_->rbegin()->first);
      }
    }
    virtual void Seek(const Slice& k) {
      iter_ = map_->lower_bound(k.ToString());
    }
    virtual void Next() { ++iter_; }
    virtual void Prev() { --iter_; }
    virtual Slice key() const { return iter_->first; }
    virtual Slice value() const { return iter_->second; }
    virtual Status status() const { return Status::OK(); }
   private:
    const KVMap* const map_;
    const bool owned_;  // Do we own map_
    KVMap::const_iterator iter_;
  };
  const Options options_;
  KVMap map_;
};
|
|
|
|
|
2013-08-23 06:10:02 +00:00
|
|
|
// Draw a random key of at least |minimum| bytes.  Lengths are biased toward
// very short keys to encourage collisions between operations.
static std::string RandomKey(Random* rnd, int minimum = 0) {
  for (;;) {
    int len;
    if (rnd->OneIn(3)) {
      len = 1;  // Short sometimes to encourage collisions
    } else {
      len = rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10);
    }
    if (len >= minimum) {
      return test::RandomKey(rnd, len);
    }
  }
}
|
|
|
|
|
|
|
|
static bool CompareIterators(int step,
|
|
|
|
DB* model,
|
|
|
|
DB* db,
|
|
|
|
const Snapshot* model_snap,
|
|
|
|
const Snapshot* db_snap) {
|
|
|
|
ReadOptions options;
|
|
|
|
options.snapshot = model_snap;
|
|
|
|
Iterator* miter = model->NewIterator(options);
|
|
|
|
options.snapshot = db_snap;
|
|
|
|
Iterator* dbiter = db->NewIterator(options);
|
|
|
|
bool ok = true;
|
|
|
|
int count = 0;
|
|
|
|
for (miter->SeekToFirst(), dbiter->SeekToFirst();
|
|
|
|
ok && miter->Valid() && dbiter->Valid();
|
|
|
|
miter->Next(), dbiter->Next()) {
|
|
|
|
count++;
|
|
|
|
if (miter->key().compare(dbiter->key()) != 0) {
|
|
|
|
fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n",
|
|
|
|
step,
|
|
|
|
EscapeString(miter->key()).c_str(),
|
|
|
|
EscapeString(dbiter->key()).c_str());
|
|
|
|
ok = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (miter->value().compare(dbiter->value()) != 0) {
|
|
|
|
fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
|
|
|
|
step,
|
|
|
|
EscapeString(miter->key()).c_str(),
|
|
|
|
EscapeString(miter->value()).c_str(),
|
|
|
|
EscapeString(miter->value()).c_str());
|
|
|
|
ok = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ok) {
|
|
|
|
if (miter->Valid() != dbiter->Valid()) {
|
|
|
|
fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
|
|
|
|
step, miter->Valid(), dbiter->Valid());
|
|
|
|
ok = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
delete miter;
|
|
|
|
delete dbiter;
|
|
|
|
return ok;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fuzz test: apply an identical random stream of Puts, Deletes and batches
// to a real DB and to ModelDB, periodically checking (with and without
// snapshots, and across a Reopen) that their contents are identical.
TEST(DBTest, Randomized) {
  Random rnd(test::RandomSeed());
  do {
    ModelDB model(CurrentOptions());
    const int N = 10000;
    const Snapshot* model_snap = nullptr;
    const Snapshot* db_snap = nullptr;
    std::string k, v;
    for (int step = 0; step < N; step++) {
      // TODO(sanjay): Test Get() works
      int p = rnd.Uniform(100);
      // The hash-skiplist memtable rep cannot handle empty keys, so force
      // a minimum key length of 1 for that configuration.
      int minimum = 0;
      if (option_config_ == kHashSkipList) {
        minimum = 1;
      }
      if (p < 45) {                               // Put (45% of steps)
        k = RandomKey(&rnd, minimum);
        v = RandomString(&rnd,
                         rnd.OneIn(20)
                         ? 100 + rnd.Uniform(100)
                         : rnd.Uniform(8));
        ASSERT_OK(model.Put(WriteOptions(), k, v));
        ASSERT_OK(db_->Put(WriteOptions(), k, v));

      } else if (p < 90) {                        // Delete (45% of steps)
        k = RandomKey(&rnd, minimum);
        ASSERT_OK(model.Delete(WriteOptions(), k));
        ASSERT_OK(db_->Delete(WriteOptions(), k));


      } else {                                    // Multi-element batch (10%)
        WriteBatch b;
        const int num = rnd.Uniform(8);
        for (int i = 0; i < num; i++) {
          if (i == 0 || !rnd.OneIn(10)) {
            k = RandomKey(&rnd, minimum);
          } else {
            // Periodically re-use the same key from the previous iter, so
            // we have multiple entries in the write batch for the same key
          }
          if (rnd.OneIn(2)) {
            v = RandomString(&rnd, rnd.Uniform(10));
            b.Put(k, v);
          } else {
            b.Delete(k);
          }
        }
        ASSERT_OK(model.Write(WriteOptions(), &b));
        ASSERT_OK(db_->Write(WriteOptions(), &b));
      }

      // Every 100 steps: compare live state, then compare under the
      // snapshots taken 100 steps ago, then reopen and re-compare.
      if ((step % 100) == 0) {
        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
        ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
        // Save a snapshot from each DB this time that we'll use next
        // time we compare things, to make sure the current state is
        // preserved with the snapshot
        if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
        if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);

        Reopen();
        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));

        model_snap = model.GetSnapshot();
        db_snap = db_->GetSnapshot();
      }
    }
    // Release any snapshots still held before ChangeOptions() destroys
    // and recreates the DB.
    if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
    if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
  } while (ChangeOptions(kSkipDeletesFilterFirst));
}
|
|
|
|
|
2013-06-05 18:22:38 +00:00
|
|
|
TEST(DBTest, MultiGetSimple) {
  // Verify MultiGet over a mix of live, deleted, and never-written keys,
  // under every compaction-related option configuration.
  do {
    ASSERT_OK(db_->Put(WriteOptions(), "k1", "v1"));
    ASSERT_OK(db_->Put(WriteOptions(), "k2", "v2"));
    ASSERT_OK(db_->Put(WriteOptions(), "k3", "v3"));
    ASSERT_OK(db_->Put(WriteOptions(), "k4", "v4"));
    ASSERT_OK(db_->Delete(WriteOptions(), "k4"));
    ASSERT_OK(db_->Put(WriteOptions(), "k5", "v5"));
    ASSERT_OK(db_->Delete(WriteOptions(), "no_key"));

    // k4 was deleted and no_key was never written; the rest are live.
    std::vector<Slice> keys;
    keys.push_back("k1");
    keys.push_back("k2");
    keys.push_back("k3");
    keys.push_back("k4");
    keys.push_back("k5");
    keys.push_back("no_key");

    // Deliberately oversized and pre-filled: MultiGet must resize it to
    // match |keys| and overwrite the contents.
    std::vector<std::string> values(20, "Temporary data to be overwritten");

    std::vector<Status> s = db_->MultiGet(ReadOptions(), keys, &values);
    ASSERT_EQ(values.size(), keys.size());
    ASSERT_EQ(values[0], "v1");
    ASSERT_EQ(values[1], "v2");
    ASSERT_EQ(values[2], "v3");
    ASSERT_EQ(values[4], "v5");

    ASSERT_OK(s[0]);
    ASSERT_OK(s[1]);
    ASSERT_OK(s[2]);
    ASSERT_TRUE(s[3].IsNotFound());
    ASSERT_OK(s[4]);
    ASSERT_TRUE(s[5].IsNotFound());
  } while (ChangeCompactOptions());
}
|
|
|
|
|
|
|
|
TEST(DBTest, MultiGetEmpty) {
  // MultiGet edge cases: empty key sets and lookups in an empty database.
  do {
    // An empty key set yields an empty status vector.
    std::vector<Slice> keys;
    std::vector<std::string> values;
    std::vector<Status> s = db_->MultiGet(ReadOptions(), keys, &values);
    ASSERT_EQ((int)s.size(), 0);

    // Same with an empty database.
    DestroyAndReopen();
    s = db_->MultiGet(ReadOptions(), keys, &values);
    ASSERT_EQ((int)s.size(), 0);

    // Looking up keys in the empty database: every status is NotFound.
    keys.resize(2);
    keys[0] = "a";
    keys[1] = "b";
    s = db_->MultiGet(ReadOptions(), keys, &values);
    ASSERT_EQ((int)s.size(), 2);
    ASSERT_TRUE(s[0].IsNotFound() && s[1].IsNotFound());
  } while (ChangeCompactOptions());
}
|
|
|
|
|
2013-08-13 21:04:56 +00:00
|
|
|
void PrefixScanInit(DBTest *dbtest) {
  // Generate 11 sst files with the following prefix ranges.
  // GROUP 0: [0,10]                              (level 1)
  // GROUP 1: [1,2], [2,3], [3,4], [4,5], [5, 6]  (level 0)
  // GROUP 2: [0,6], [0,7], [0,8], [0,9], [0,10]  (level 0)
  //
  // A seek with the previous API would do 11 random I/Os (to all the
  // files). With the new API and a prefix filter enabled, we should
  // only do 2 random I/O, to the 2 files containing the key.
  char key_buf[100];
  std::string key;
  const int small_range_sstfiles = 5;
  const int big_range_sstfiles = 5;

  // GROUP 0: one wide file, compacted down to level 1.
  snprintf(key_buf, sizeof(key_buf), "%02d______:start", 0);
  key = std::string(key_buf);
  ASSERT_OK(dbtest->Put(key, key));
  snprintf(key_buf, sizeof(key_buf), "%02d______:end", 10);
  key = std::string(key_buf);
  ASSERT_OK(dbtest->Put(key, key));
  dbtest->dbfull()->TEST_FlushMemTable();
  dbtest->dbfull()->CompactRange(nullptr, nullptr); // move to level 1

  // GROUP 1: narrow files [i, i+1], each flushed to its own level-0 file.
  for (int i = 1; i <= small_range_sstfiles; i++) {
    snprintf(key_buf, sizeof(key_buf), "%02d______:start", i);
    key = std::string(key_buf);
    ASSERT_OK(dbtest->Put(key, key));
    snprintf(key_buf, sizeof(key_buf), "%02d______:end", i + 1);
    key = std::string(key_buf);
    ASSERT_OK(dbtest->Put(key, key));
    dbtest->dbfull()->TEST_FlushMemTable();
  }

  // GROUP 2: wide files all starting at prefix 00, also left at level 0.
  for (int i = 1; i <= big_range_sstfiles; i++) {
    snprintf(key_buf, sizeof(key_buf), "%02d______:start", 0);
    key = std::string(key_buf);
    ASSERT_OK(dbtest->Put(key, key));
    snprintf(key_buf, sizeof(key_buf), "%02d______:end",
             small_range_sstfiles + i + 1);
    key = std::string(key_buf);
    ASSERT_OK(dbtest->Put(key, key));
    dbtest->dbfull()->TEST_FlushMemTable();
  }
}
|
|
|
|
|
|
|
|
// Verify that prefix-scoped iteration with bloom filters limits the number
// of random reads: 2 I/Os with a prefix hint versus 11 without one, over
// the file layout built by PrefixScanInit().
TEST(DBTest, PrefixScan) {
  ReadOptions ro = ReadOptions();
  int count;
  Slice prefix;
  Slice key;
  char buf[100];
  Iterator* iter;
  snprintf(buf, sizeof(buf), "03______:");
  // prefix covers the first 8 bytes; key includes the ':' delimiter too.
  prefix = Slice(buf, 8);
  key = Slice(buf, 9);
  auto prefix_extractor = NewFixedPrefixTransform(8);
  // db configs
  env_->count_random_reads_ = true;
  Options options = CurrentOptions();
  options.env = env_;
  options.no_block_cache = true;
  options.filter_policy = NewBloomFilterPolicy(10);
  options.prefix_extractor = prefix_extractor;
  options.whole_key_filtering = false;
  // Keep the layout produced by PrefixScanInit() stable while we measure.
  options.disable_auto_compactions = true;
  options.max_background_compactions = 2;
  options.create_if_missing = true;
  options.disable_seek_compaction = true;
  // NOTE(review): prefix_extractor is never deleted here while
  // filter_policy is; presumably NewHashSkipListRepFactory takes
  // ownership of it -- confirm.
  options.memtable_factory.reset(NewHashSkipListRepFactory(prefix_extractor));

  // prefix specified, with blooms: 2 RAND I/Os
  // SeekToFirst
  DestroyAndReopen(&options);
  PrefixScanInit(this);
  count = 0;
  env_->random_read_counter_.Reset();
  ro.prefix = &prefix;
  iter = db_->NewIterator(ro);
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    assert(iter->key().starts_with(prefix));
    count++;
  }
  ASSERT_OK(iter->status());
  delete iter;
  ASSERT_EQ(count, 2);
  ASSERT_EQ(env_->random_read_counter_.Read(), 2);

  // prefix specified, with blooms: 2 RAND I/Os
  // Seek
  DestroyAndReopen(&options);
  PrefixScanInit(this);
  count = 0;
  env_->random_read_counter_.Reset();
  ro.prefix = &prefix;
  iter = db_->NewIterator(ro);
  for (iter->Seek(key); iter->Valid(); iter->Next()) {
    assert(iter->key().starts_with(prefix));
    count++;
  }
  ASSERT_OK(iter->status());
  delete iter;
  ASSERT_EQ(count, 2);
  ASSERT_EQ(env_->random_read_counter_.Read(), 2);

  // no prefix specified: 11 RAND I/Os
  DestroyAndReopen(&options);
  PrefixScanInit(this);
  count = 0;
  env_->random_read_counter_.Reset();
  iter = db_->NewIterator(ReadOptions());
  for (iter->Seek(prefix); iter->Valid(); iter->Next()) {
    if (! iter->key().starts_with(prefix)) {
      break;
    }
    count++;
  }
  ASSERT_OK(iter->status());
  delete iter;
  ASSERT_EQ(count, 2);
  ASSERT_EQ(env_->random_read_counter_.Read(), 11);
  Close();
  delete options.filter_policy;
}
|
|
|
|
|
2011-05-21 02:17:43 +00:00
|
|
|
// Format |num| as a fixed-width, zero-padded 16-digit decimal key so keys
// sort in numeric order.
std::string MakeKey(unsigned int num) {
  char key[30];
  snprintf(key, sizeof(key), "%016u", num);
  return std::string(key);
}
|
|
|
|
|
|
|
|
// Micro-benchmark: measure the cost of VersionSet::LogAndApply (a manifest
// write per edit) when the version already contains num_base_files files.
// Prints mean microseconds per iteration to stderr.
void BM_LogAndApply(int iters, int num_base_files) {
  std::string dbname = test::TmpDir() + "/rocksdb_test_benchmark";
  ASSERT_OK(DestroyDB(dbname, Options()));

  // Open (and immediately close) the DB once so the on-disk structures
  // that VersionSet::Recover expects exist.
  DB* db = nullptr;
  Options opts;
  opts.create_if_missing = true;
  Status s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);

  delete db;
  db = nullptr;

  Env* env = Env::Default();

  // LogAndApply requires its mutex to be held by the caller; hold it for
  // the whole benchmark.
  port::Mutex mu;
  MutexLock l(&mu);

  InternalKeyComparator cmp(BytewiseComparator());
  Options options;
  EnvOptions sopt;
  VersionSet vset(dbname, &options, sopt, nullptr, &cmp);
  ASSERT_OK(vset.Recover());
  // Seed level 2 with num_base_files non-overlapping dummy files.
  VersionEdit vbase(vset.NumberLevels());
  uint64_t fnum = 1;
  for (int i = 0; i < num_base_files; i++) {
    InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
    InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
    vbase.AddFile(2, fnum++, 1 /* file size */, start, limit, 1, 1);
  }
  ASSERT_OK(vset.LogAndApply(&vbase, &mu));

  uint64_t start_micros = env->NowMicros();

  // Timed region: each iteration deletes one file and adds one file,
  // forcing a manifest write.
  for (int i = 0; i < iters; i++) {
    VersionEdit vedit(vset.NumberLevels());
    vedit.DeleteFile(2, fnum);
    InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
    InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
    vedit.AddFile(2, fnum++, 1 /* file size */, start, limit, 1, 1);
    vset.LogAndApply(&vedit, &mu);
  }
  uint64_t stop_micros = env->NowMicros();
  // NOTE(review): elapsed time is narrowed from uint64_t to unsigned int;
  // fine for benchmark durations well under ~71 minutes.
  unsigned int us = stop_micros - start_micros;
  char buf[16];
  snprintf(buf, sizeof(buf), "%d", num_base_files);
  fprintf(stderr,
          "BM_LogAndApply/%-6s   %8d iters : %9u us (%7.0f us / iter)\n",
          buf, iters, us, ((float)us) / iters);
}
|
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
} // namespace rocksdb
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
int main(int argc, char** argv) {
|
2011-05-21 02:17:43 +00:00
|
|
|
if (argc > 1 && std::string(argv[1]) == "--benchmark") {
|
2013-10-04 04:49:15 +00:00
|
|
|
rocksdb::BM_LogAndApply(1000, 1);
|
|
|
|
rocksdb::BM_LogAndApply(1000, 100);
|
|
|
|
rocksdb::BM_LogAndApply(1000, 10000);
|
|
|
|
rocksdb::BM_LogAndApply(100, 100000);
|
2011-05-21 02:17:43 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-10-04 04:49:15 +00:00
|
|
|
return rocksdb::test::RunAllTests();
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|