// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// Endian-neutral encoding:
// * Fixed-length numbers are encoded with least-significant byte first
// * In addition we support variable length "varint" encoding
// * Strings are encoded prefixed by their length in varint format
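//
// For example, the varint encoding of 300 (binary 100101100) is the two
// bytes 0xAC 0x02: seven payload bits per byte, least-significant group
// first, with the high bit set on every byte except the last.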

#pragma once
#include <algorithm>
#include <stdint.h>
#include <string.h>
#include <string>

#include "rocksdb/write_batch.h"
#include "port/port.h"

// Some processors do not allow unaligned access to memory.
#if defined(__sparc)
#define PLATFORM_UNALIGNED_ACCESS_NOT_ALLOWED
#endif

namespace ROCKSDB_NAMESPACE {
// The maximum length of a varint in bytes: each byte carries 7 payload bits,
// so a 64-bit value needs at most ceil(64 / 7) = 10 bytes.
const unsigned int kMaxVarint64Length = 10;

// Standard Put... routines append to a string
extern void PutFixed16(std::string* dst, uint16_t value);
extern void PutFixed32(std::string* dst, uint32_t value);
extern void PutFixed64(std::string* dst, uint64_t value);
extern void PutVarint32(std::string* dst, uint32_t value);
extern void PutVarint32Varint32(std::string* dst, uint32_t value1,
                                uint32_t value2);
extern void PutVarint32Varint32Varint32(std::string* dst, uint32_t value1,
                                        uint32_t value2, uint32_t value3);
extern void PutVarint64(std::string* dst, uint64_t value);
extern void PutVarint64Varint64(std::string* dst, uint64_t value1,
                                uint64_t value2);
extern void PutVarint32Varint64(std::string* dst, uint32_t value1,
                                uint64_t value2);
extern void PutVarint32Varint32Varint64(std::string* dst, uint32_t value1,
                                        uint32_t value2, uint64_t value3);
extern void PutLengthPrefixedSlice(std::string* dst, const Slice& value);
extern void PutLengthPrefixedSliceParts(std::string* dst,
                                        const SliceParts& slice_parts);
extern void PutLengthPrefixedSlicePartsWithPadding(
    std::string* dst, const SliceParts& slice_parts, size_t pad_sz);
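
// Illustrative usage sketch (hypothetical helper, not part of this header's
// API): encodes a record as a fixed-width id followed by a length-prefixed
// payload, using the Put... routines declared above.
inline std::string ExampleEncodeRecord(uint32_t id, const Slice& payload) {
  std::string out;
  PutFixed32(&out, id);                   // 4 bytes, least-significant first
  PutLengthPrefixedSlice(&out, payload);  // varint length, then raw bytes
  return out;
}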
// Standard Get... routines parse a value from the beginning of a Slice
// and advance the slice past the parsed value.
extern bool GetFixed64(Slice* input, uint64_t* value);
extern bool GetFixed32(Slice* input, uint32_t* value);
extern bool GetFixed16(Slice* input, uint16_t* value);
extern bool GetVarint32(Slice* input, uint32_t* value);
extern bool GetVarint64(Slice* input, uint64_t* value);
extern bool GetVarsignedint64(Slice* input, int64_t* value);
extern bool GetLengthPrefixedSlice(Slice* input, Slice* result);
// This function assumes data is well-formed.
extern Slice GetLengthPrefixedSlice(const char* data);
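
// Illustrative sketch (hypothetical helper, mirrors ExampleEncodeRecord
// above): parses the fixed-width id and length-prefixed payload back out,
// returning false if the input is truncated.
inline bool ExampleDecodeRecord(Slice* input, uint32_t* id, Slice* payload) {
  return GetFixed32(input, id) && GetLengthPrefixedSlice(input, payload);
}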
extern Slice GetSliceUntil(Slice* slice, char delimiter);

// Borrowed from
// https://github.com/facebook/fbthrift/blob/449a5f77f9f9bae72c9eb5e78093247eef185c04/thrift/lib/cpp/util/VarintUtils-inl.h#L202-L208
constexpr inline uint64_t i64ToZigzag(const int64_t l) {
  return (static_cast<uint64_t>(l) << 1) ^ static_cast<uint64_t>(l >> 63);
}

inline int64_t zigzagToI64(uint64_t n) {
  return (n >> 1) ^ -static_cast<int64_t>(n & 1);
}
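
// The zigzag mapping interleaves signed values onto unsigned ones
// (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...), so small-magnitude negative
// numbers also get short varint encodings. An illustrative compile-time
// sanity check (added here as documentation):
static_assert(i64ToZigzag(0) == 0 && i64ToZigzag(-1) == 1 &&
                  i64ToZigzag(1) == 2 && i64ToZigzag(-2) == 3,
              "zigzag maps small-magnitude values to small unsigned values");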

// Pointer-based variants of GetVarint... These either store a value
// in *v and return a pointer just past the parsed value, or return
// nullptr on error. These routines only look at bytes in the range
// [p..limit-1]
extern const char* GetVarint32Ptr(const char* p, const char* limit,
                                  uint32_t* v);
extern const char* GetVarint64Ptr(const char* p, const char* limit,
                                  uint64_t* v);
inline const char* GetVarsignedint64Ptr(const char* p, const char* limit,
                                        int64_t* value) {
  uint64_t u = 0;
  const char* ret = GetVarint64Ptr(p, limit, &u);
  // Undo the zigzag mapping. On failure *value is still overwritten, so
  // callers must check the returned pointer for nullptr.
  *value = zigzagToI64(u);
  return ret;
}

// Returns the length of the varint32 or varint64 encoding of "v"
extern int VarintLength(uint64_t v);

// Lower-level versions of Put... that write directly into a character buffer
// REQUIRES: dst has enough space for the value being written
extern void EncodeFixed16(char* dst, uint16_t value);
extern void EncodeFixed32(char* dst, uint32_t value);
extern void EncodeFixed64(char* dst, uint64_t value);

// Lower-level versions of Put... that write directly into a character buffer
// and return a pointer just past the last byte written.
// REQUIRES: dst has enough space for the value being written
extern char* EncodeVarint32(char* dst, uint32_t value);
extern char* EncodeVarint64(char* dst, uint64_t value);

// Lower-level versions of Get... that read directly from a character buffer
// without any bounds checking.

inline uint16_t DecodeFixed16(const char* ptr) {
  if (port::kLittleEndian) {
    // Load the raw bytes
    uint16_t result;
    memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
    return result;
  } else {
    return ((static_cast<uint16_t>(static_cast<unsigned char>(ptr[0]))) |
            (static_cast<uint16_t>(static_cast<unsigned char>(ptr[1])) << 8));
  }
}

inline uint32_t DecodeFixed32(const char* ptr) {
  if (port::kLittleEndian) {
    // Load the raw bytes
    uint32_t result;
    memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
    return result;
  } else {
    return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0]))) |
            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8) |
            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16) |
            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
  }
}

inline uint64_t DecodeFixed64(const char* ptr) {
  if (port::kLittleEndian) {
    // Load the raw bytes
    uint64_t result;
    memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
    return result;
  } else {
    uint64_t lo = DecodeFixed32(ptr);
    uint64_t hi = DecodeFixed32(ptr + 4);
    return (hi << 32) | lo;
  }
}
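
// Illustrative check (hypothetical helper): EncodeFixed32/DecodeFixed32
// round-trip on any host, and the buffer's byte order is always
// little-endian (0x78 0x56 0x34 0x12 for the value 0x12345678).
inline bool FixedEncodingExampleHolds() {
  char buf[sizeof(uint32_t)];
  EncodeFixed32(buf, 0x12345678u);
  return DecodeFixed32(buf) == 0x12345678u &&
         static_cast<unsigned char>(buf[0]) == 0x78;
}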

// Internal routine for use by fallback path of GetVarint32Ptr
extern const char* GetVarint32PtrFallback(const char* p, const char* limit,
                                          uint32_t* value);
inline const char* GetVarint32Ptr(const char* p, const char* limit,
                                  uint32_t* value) {
  if (p < limit) {
    uint32_t result = *(reinterpret_cast<const unsigned char*>(p));
    if ((result & 128) == 0) {
      // Fast path: the common single-byte case (values < 128) avoids the
      // full decode loop.
      *value = result;
      return p + 1;
    }
  }
  return GetVarint32PtrFallback(p, limit, value);
}

// -- Implementation of the functions declared above
inline void EncodeFixed16(char* buf, uint16_t value) {
  if (port::kLittleEndian) {
    memcpy(buf, &value, sizeof(value));
  } else {
    buf[0] = value & 0xff;
    buf[1] = (value >> 8) & 0xff;
  }
}

inline void EncodeFixed32(char* buf, uint32_t value) {
  if (port::kLittleEndian) {
    memcpy(buf, &value, sizeof(value));
  } else {
    buf[0] = value & 0xff;
    buf[1] = (value >> 8) & 0xff;
    buf[2] = (value >> 16) & 0xff;
    buf[3] = (value >> 24) & 0xff;
  }
}

inline void EncodeFixed64(char* buf, uint64_t value) {
  if (port::kLittleEndian) {
    memcpy(buf, &value, sizeof(value));
  } else {
    buf[0] = value & 0xff;
    buf[1] = (value >> 8) & 0xff;
    buf[2] = (value >> 16) & 0xff;
    buf[3] = (value >> 24) & 0xff;
    buf[4] = (value >> 32) & 0xff;
    buf[5] = (value >> 40) & 0xff;
    buf[6] = (value >> 48) & 0xff;
    buf[7] = (value >> 56) & 0xff;
  }
}

// Pull the last 8 bits and cast it to a character
inline void PutFixed16(std::string* dst, uint16_t value) {
  if (port::kLittleEndian) {
    dst->append(const_cast<const char*>(reinterpret_cast<char*>(&value)),
                sizeof(value));
  } else {
    char buf[sizeof(value)];
    EncodeFixed16(buf, value);
    dst->append(buf, sizeof(buf));
  }
}

inline void PutFixed32(std::string* dst, uint32_t value) {
  if (port::kLittleEndian) {
    dst->append(const_cast<const char*>(reinterpret_cast<char*>(&value)),
                sizeof(value));
  } else {
    char buf[sizeof(value)];
    EncodeFixed32(buf, value);
    dst->append(buf, sizeof(buf));
  }
}

inline void PutFixed64(std::string* dst, uint64_t value) {
  if (port::kLittleEndian) {
    dst->append(const_cast<const char*>(reinterpret_cast<char*>(&value)),
                sizeof(value));
  } else {
    char buf[sizeof(value)];
    EncodeFixed64(buf, value);
    dst->append(buf, sizeof(buf));
  }
}

inline void PutVarint32(std::string* dst, uint32_t v) {
  char buf[5];  // A varint32 is at most 5 bytes.
  char* ptr = EncodeVarint32(buf, v);
  dst->append(buf, static_cast<size_t>(ptr - buf));
}

inline void PutVarint32Varint32(std::string* dst, uint32_t v1, uint32_t v2) {
  char buf[10];
  char* ptr = EncodeVarint32(buf, v1);
  ptr = EncodeVarint32(ptr, v2);
  dst->append(buf, static_cast<size_t>(ptr - buf));
}

inline void PutVarint32Varint32Varint32(std::string* dst, uint32_t v1,
                                        uint32_t v2, uint32_t v3) {
  char buf[15];
  char* ptr = EncodeVarint32(buf, v1);
  ptr = EncodeVarint32(ptr, v2);
  ptr = EncodeVarint32(ptr, v3);
  dst->append(buf, static_cast<size_t>(ptr - buf));
}

inline char* EncodeVarint64(char* dst, uint64_t v) {
  static const unsigned int B = 128;
  unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
  while (v >= B) {
    // Emit the low 7 bits with the continuation bit (0x80) set.
    *(ptr++) = (v & (B - 1)) | B;
    v >>= 7;
  }
  *(ptr++) = static_cast<unsigned char>(v);
  return reinterpret_cast<char*>(ptr);
}

inline void PutVarint64(std::string* dst, uint64_t v) {
  char buf[kMaxVarint64Length];
  char* ptr = EncodeVarint64(buf, v);
  dst->append(buf, static_cast<size_t>(ptr - buf));
}

inline void PutVarsignedint64(std::string* dst, int64_t v) {
  char buf[kMaxVarint64Length];
  // Using Zigzag format to convert signed to unsigned
  char* ptr = EncodeVarint64(buf, i64ToZigzag(v));
  dst->append(buf, static_cast<size_t>(ptr - buf));
}

inline void PutVarint64Varint64(std::string* dst, uint64_t v1, uint64_t v2) {
  char buf[20];
  char* ptr = EncodeVarint64(buf, v1);
  ptr = EncodeVarint64(ptr, v2);
  dst->append(buf, static_cast<size_t>(ptr - buf));
}

inline void PutVarint32Varint64(std::string* dst, uint32_t v1, uint64_t v2) {
  char buf[15];
  char* ptr = EncodeVarint32(buf, v1);
  ptr = EncodeVarint64(ptr, v2);
  dst->append(buf, static_cast<size_t>(ptr - buf));
}

inline void PutVarint32Varint32Varint64(std::string* dst, uint32_t v1,
                                        uint32_t v2, uint64_t v3) {
  char buf[20];
  char* ptr = EncodeVarint32(buf, v1);
  ptr = EncodeVarint32(ptr, v2);
  ptr = EncodeVarint64(ptr, v3);
  dst->append(buf, static_cast<size_t>(ptr - buf));
}

inline void PutLengthPrefixedSlice(std::string* dst, const Slice& value) {
  PutVarint32(dst, static_cast<uint32_t>(value.size()));
  dst->append(value.data(), value.size());
}

inline void PutLengthPrefixedSliceParts(std::string* dst, size_t total_bytes,
                                        const SliceParts& slice_parts) {
  for (int i = 0; i < slice_parts.num_parts; ++i) {
    total_bytes += slice_parts.parts[i].size();
  }
  PutVarint32(dst, static_cast<uint32_t>(total_bytes));
  for (int i = 0; i < slice_parts.num_parts; ++i) {
    dst->append(slice_parts.parts[i].data(), slice_parts.parts[i].size());
  }
}

inline void PutLengthPrefixedSliceParts(std::string* dst,
                                        const SliceParts& slice_parts) {
  PutLengthPrefixedSliceParts(dst, /*total_bytes=*/0, slice_parts);
}

inline void PutLengthPrefixedSlicePartsWithPadding(
    std::string* dst, const SliceParts& slice_parts, size_t pad_sz) {
  PutLengthPrefixedSliceParts(dst, /*total_bytes=*/pad_sz, slice_parts);
  dst->append(pad_sz, '\0');
}
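
// Note: the varint length prefix written by the padding variant covers the
// payload bytes plus the pad_sz trailing zero bytes, so a reader sees one
// slice whose length includes the padding.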

inline int VarintLength(uint64_t v) {
  int len = 1;
  while (v >= 128) {
    v >>= 7;
    len++;
  }
  return len;
}
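
// For example, VarintLength(127) == 1, VarintLength(128) == 2, and
// VarintLength(UINT64_MAX) == kMaxVarint64Length == 10.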

inline bool GetFixed64(Slice* input, uint64_t* value) {
  if (input->size() < sizeof(uint64_t)) {
    return false;
  }
  *value = DecodeFixed64(input->data());
  input->remove_prefix(sizeof(uint64_t));
  return true;
}

inline bool GetFixed32(Slice* input, uint32_t* value) {
  if (input->size() < sizeof(uint32_t)) {
    return false;
  }
  *value = DecodeFixed32(input->data());
  input->remove_prefix(sizeof(uint32_t));
  return true;
}

inline bool GetFixed16(Slice* input, uint16_t* value) {
  if (input->size() < sizeof(uint16_t)) {
    return false;
  }
  *value = DecodeFixed16(input->data());
  input->remove_prefix(sizeof(uint16_t));
  return true;
}

inline bool GetVarint32(Slice* input, uint32_t* value) {
  const char* p = input->data();
  const char* limit = p + input->size();
  const char* q = GetVarint32Ptr(p, limit, value);
  if (q == nullptr) {
    return false;
  } else {
    *input = Slice(q, static_cast<size_t>(limit - q));
    return true;
  }
}

inline bool GetVarint64(Slice* input, uint64_t* value) {
  const char* p = input->data();
  const char* limit = p + input->size();
  const char* q = GetVarint64Ptr(p, limit, value);
  if (q == nullptr) {
    return false;
  } else {
    *input = Slice(q, static_cast<size_t>(limit - q));
    return true;
  }
}

inline bool GetVarsignedint64(Slice* input, int64_t* value) {
  const char* p = input->data();
  const char* limit = p + input->size();
  const char* q = GetVarsignedint64Ptr(p, limit, value);
  if (q == nullptr) {
    return false;
  } else {
    *input = Slice(q, static_cast<size_t>(limit - q));
    return true;
  }
}

// Provide an interface for platform independent endianness transformation
inline uint64_t EndianTransform(uint64_t input, size_t size) {
  char* pos = reinterpret_cast<char*>(&input);
  uint64_t ret_val = 0;
  for (size_t i = 0; i < size; ++i) {
    ret_val |= (static_cast<uint64_t>(static_cast<unsigned char>(pos[i]))
                << ((size - i - 1) << 3));
  }
  return ret_val;
}
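
// For example, on a little-endian host, EndianTransform(0x0102, 2) returns
// 0x0201: it reverses the first `size` bytes of the value's in-memory
// representation, so the result depends on host byte order.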

inline bool GetLengthPrefixedSlice(Slice* input, Slice* result) {
  uint32_t len = 0;
  if (GetVarint32(input, &len) && input->size() >= len) {
    *result = Slice(input->data(), len);
    input->remove_prefix(len);
    return true;
  } else {
    return false;
  }
}

inline Slice GetLengthPrefixedSlice(const char* data) {
  uint32_t len = 0;
  // +5: we assume "data" is not corrupted
  // Each varint byte carries 7 payload bits, so a 32-bit length needs at
  // most 5 bytes.
  auto p = GetVarint32Ptr(data, data + 5 /* limit */, &len);
  return Slice(p, len);
}

inline Slice GetSliceUntil(Slice* slice, char delimiter) {
  uint32_t len = 0;
  for (len = 0; len < slice->size() && slice->data()[len] != delimiter; ++len) {
    // nothing
  }

  Slice ret(slice->data(), len);
  slice->remove_prefix(len + ((len < slice->size()) ? 1 : 0));
  return ret;
}
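
// For example, given a slice over "abc,def" and delimiter ',', GetSliceUntil
// returns "abc" and advances the slice to "def", consuming the delimiter.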

template<class T>
#ifdef ROCKSDB_UBSAN_RUN
#if defined(__clang__)
__attribute__((__no_sanitize__("alignment")))
#elif defined(__GNUC__)
__attribute__((__no_sanitize_undefined__))
#endif
#endif
inline void PutUnaligned(T *memory, const T &value) {
#if defined(PLATFORM_UNALIGNED_ACCESS_NOT_ALLOWED)
  char *nonAlignedMemory = reinterpret_cast<char*>(memory);
  memcpy(nonAlignedMemory, reinterpret_cast<const char*>(&value), sizeof(T));
#else
  *memory = value;
#endif
}

template<class T>
#ifdef ROCKSDB_UBSAN_RUN
#if defined(__clang__)
__attribute__((__no_sanitize__("alignment")))
#elif defined(__GNUC__)
__attribute__((__no_sanitize_undefined__))
#endif
#endif
inline void GetUnaligned(const T *memory, T *value) {
#if defined(PLATFORM_UNALIGNED_ACCESS_NOT_ALLOWED)
  char *nonAlignedMemory = reinterpret_cast<char*>(value);
  memcpy(nonAlignedMemory, reinterpret_cast<const char*>(memory), sizeof(T));
#else
  *value = *memory;
#endif
}
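
// Illustrative sketch (hypothetical helper): reads a uint32_t at an
// arbitrary byte offset via GetUnaligned, which degrades to memcpy on
// platforms where unaligned loads are not allowed.
inline uint32_t ExampleReadU32At(const char* buf, size_t offset) {
  uint32_t v;
  GetUnaligned(reinterpret_cast<const uint32_t*>(buf + offset), &v);
  return v;
}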

}  // namespace ROCKSDB_NAMESPACE