mirror of https://github.com/facebook/rocksdb.git
Support pragma once in all header files and cleanup some warnings (#4339)
Summary: Almost all compilers support the "#pragma once" directive in place of include guards, so all header files are edited to use it consistently. Besides this, some warnings about loss of data are fixed. Pull Request resolved: https://github.com/facebook/rocksdb/pull/4339 Differential Revision: D9654990 Pulled By: ajkr fbshipit-source-id: c2cf3d2d03a599847684bed81378c401920ca848
parent 90f5048207
commit 64324e329e

db/c.cc | 2
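For context, the commit applies two mechanical transformations throughout the tree, sketched below. The header name and helper function in this sketch are hypothetical, used only for illustration; they are not part of the commit.

// pragma_once_demo.h -- hypothetical header, for illustration only.
//
// Before: a classic include guard spans the whole file:
//   #ifndef PRAGMA_ONCE_DEMO_H
//   #define PRAGMA_ONCE_DEMO_H
//   ...declarations...
//   #endif  // PRAGMA_ONCE_DEMO_H
//
// After: a single directive provides the same protection.
#pragma once

#include <cstddef>  // size_t
#include <cstdint>  // uint64_t

// The second pattern: implicit uint64_t -> size_t conversions narrow on
// 32-bit targets, so compilers warn about possible loss of data. The commit
// makes each such conversion explicit with static_cast.
inline size_t ToBufferSize(uint64_t file_size) {
  return static_cast<size_t>(file_size);  // explicit, warning-free narrowing
}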
@@ -2402,7 +2402,7 @@ void rocksdb_options_set_bytes_per_sync(
 void rocksdb_options_set_writable_file_max_buffer_size(rocksdb_options_t* opt,
                                                        uint64_t v) {
-  opt->rep.writable_file_max_buffer_size = v;
+  opt->rep.writable_file_max_buffer_size = static_cast<size_t>(v);
 }
 
 void rocksdb_options_set_allow_concurrent_memtable_write(rocksdb_options_t* opt,
@@ -49,7 +49,7 @@ bool FindIntraL0Compaction(const std::vector<FileMetaData*>& level_files,
   // increasing.
   size_t new_compact_bytes_per_del_file = 0;
   for (span_len = 1; span_len < level_files.size(); ++span_len) {
-    compact_bytes += level_files[span_len]->fd.file_size;
+    compact_bytes += static_cast<size_t>(level_files[span_len]->fd.file_size);
     new_compact_bytes_per_del_file = compact_bytes / span_len;
     if (level_files[span_len]->being_compacted ||
         new_compact_bytes_per_del_file > compact_bytes_per_del_file) {
@@ -2413,7 +2413,7 @@ Status DBImpl::GetDbIdentity(std::string& identity) const {
   if (!s.ok()) {
     return s;
   }
-  char* buffer = reinterpret_cast<char*>(alloca(file_size));
+  char* buffer = reinterpret_cast<char*>(alloca(static_cast<size_t>(file_size)));
   Slice id;
   s = id_file_reader->Read(static_cast<size_t>(file_size), &id, buffer);
   if (!s.ok()) {
@@ -344,7 +344,7 @@ Status ExternalSstFileIngestionJob::GetIngestedFileInfo(
       file_to_ingest->global_seqno_offset = 0;
       return Status::Corruption("Was not able to find file global seqno field");
     }
-    file_to_ingest->global_seqno_offset = offsets_iter->second;
+    file_to_ingest->global_seqno_offset = static_cast<size_t>(offsets_iter->second);
   } else if (file_to_ingest->version == 1) {
     // SST file V1 should not have global seqno field
     assert(seqno_iter == uprops.end());
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 //
-#ifndef MERGE_HELPER_H
-#define MERGE_HELPER_H
+#pragma once
 
 #include <deque>
 #include <string>
@@ -193,5 +192,3 @@ class MergeOutputIterator {
 };
 
 } // namespace rocksdb
-
-#endif
@@ -104,7 +104,7 @@ void TransactionLogIteratorImpl::SeekToStartSequence(
   if (files_->size() <= startFileIndex) {
     return;
   }
-  Status s = OpenLogReader(files_->at(startFileIndex).get());
+  Status s = OpenLogReader(files_->at(static_cast<size_t>(startFileIndex)).get());
   if (!s.ok()) {
     currentStatus_ = s;
     reporter_.Info(currentStatus_.ToString().c_str());
@@ -895,13 +895,16 @@ void Version::GetColumnFamilyMetaData(ColumnFamilyMetaData* cf_meta) {
       assert(!ioptions->cf_paths.empty());
       file_path = ioptions->cf_paths.back().path;
     }
-    files.emplace_back(
-        MakeTableFileName("", file->fd.GetNumber()), file_path,
-        file->fd.GetFileSize(), file->fd.smallest_seqno,
-        file->fd.largest_seqno, file->smallest.user_key().ToString(),
+    files.emplace_back(SstFileMetaData{
+        MakeTableFileName("", file->fd.GetNumber()),
+        file_path,
+        static_cast<size_t>(file->fd.GetFileSize()),
+        file->fd.smallest_seqno,
+        file->fd.largest_seqno,
+        file->smallest.user_key().ToString(),
         file->largest.user_key().ToString(),
         file->stats.num_reads_sampled.load(std::memory_order_relaxed),
-        file->being_compacted);
+        file->being_compacted});
     level_size += file->fd.GetFileSize();
   }
   cf_meta->levels.emplace_back(
@@ -4338,7 +4341,7 @@ void VersionSet::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
     }
     filemetadata.name = MakeTableFileName("", file->fd.GetNumber());
     filemetadata.level = level;
-    filemetadata.size = file->fd.GetFileSize();
+    filemetadata.size = static_cast<size_t>(file->fd.GetFileSize());
     filemetadata.smallestkey = file->smallest.user_key().ToString();
     filemetadata.largestkey = file->largest.user_key().ToString();
     filemetadata.smallest_seqno = file->fd.smallest_seqno;
@@ -237,7 +237,7 @@ void WalManager::PurgeObsoleteWALFiles() {
   }
 
   size_t const files_keep_num =
-      db_options_.wal_size_limit_mb * 1024 * 1024 / log_file_size;
+      static_cast<size_t>(db_options_.wal_size_limit_mb * 1024 * 1024 / log_file_size);
   if (log_files_num <= files_keep_num) {
     return;
   }
@@ -352,7 +352,7 @@ Status WalManager::RetainProbableWalFiles(VectorLogPtr& all_logs,
   // Binary Search. avoid opening all files.
   while (end >= start) {
     int64_t mid = start + (end - start) / 2;  // Avoid overflow.
-    SequenceNumber current_seq_num = all_logs.at(mid)->StartSequence();
+    SequenceNumber current_seq_num = all_logs.at(static_cast<size_t>(mid))->StartSequence();
     if (current_seq_num == target) {
       end = mid;
       break;
@@ -363,7 +363,7 @@ Status WalManager::RetainProbableWalFiles(VectorLogPtr& all_logs,
     }
   }
   // end could be -ve.
-  size_t start_index = std::max(static_cast<int64_t>(0), end);
+  size_t start_index = static_cast<size_t>(std::max(static_cast<int64_t>(0), end));
   // The last wal file is always included
   all_logs.erase(all_logs.begin(), all_logs.begin() + start_index);
   return Status::OK();
@@ -201,7 +201,7 @@ class MockSequentialFile : public SequentialFile {
     if (n > available) {
       n = available;
     }
-    pos_ += n;
+    pos_ += static_cast<size_t>(n);
     return Status::OK();
   }
 
@@ -42,9 +42,6 @@
 (5) All of the pointer arguments must be non-NULL.
 */
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_C_H_
-#define STORAGE_ROCKSDB_INCLUDE_C_H_
-
 #pragma once
 
 #ifdef _WIN32
@@ -1678,5 +1675,3 @@ extern ROCKSDB_LIBRARY_API const char* rocksdb_pinnableslice_value(
 #ifdef __cplusplus
 } /* end extern "C" */
 #endif
-
-#endif /* STORAGE_ROCKSDB_INCLUDE_C_H_ */
@@ -16,8 +16,7 @@
 // non-const method, all threads accessing the same Iterator must use
 // external synchronization.
 
-#ifndef INCLUDE_ROCKSDB_CLEANABLE_H_
-#define INCLUDE_ROCKSDB_CLEANABLE_H_
+#pragma once
 
 namespace rocksdb {
 
@@ -78,5 +77,3 @@ class Cleanable {
 };
 
 } // namespace rocksdb
-
-#endif // INCLUDE_ROCKSDB_CLEANABLE_H_
@@ -6,8 +6,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_COMPACTION_FILTER_H_
-#define STORAGE_ROCKSDB_INCLUDE_COMPACTION_FILTER_H_
+#pragma once
 
 #include <cassert>
 #include <memory>
@@ -206,5 +205,3 @@ class CompactionFilterFactory {
 };
 
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_INCLUDE_COMPACTION_FILTER_H_
@@ -6,8 +6,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_COMPARATOR_H_
-#define STORAGE_ROCKSDB_INCLUDE_COMPARATOR_H_
+#pragma once
 
 #include <string>
 
@@ -92,5 +91,3 @@ extern const Comparator* BytewiseComparator();
 extern const Comparator* ReverseBytewiseComparator();
 
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_INCLUDE_COMPARATOR_H_
@@ -6,8 +6,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_DB_H_
-#define STORAGE_ROCKSDB_INCLUDE_DB_H_
+#pragma once
 
 #include <stdint.h>
 #include <stdio.h>
@@ -1221,5 +1220,3 @@ Status RepairDB(const std::string& dbname, const Options& options);
 #endif
 
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_INCLUDE_DB_H_
@@ -14,8 +14,7 @@
 // All Env implementations are safe for concurrent access from
 // multiple threads without any external synchronization.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_ENV_H_
-#define STORAGE_ROCKSDB_INCLUDE_ENV_H_
+#pragma once
 
 #include <stdint.h>
 #include <cstdarg>
@@ -1267,5 +1266,3 @@ Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname);
 Env* NewTimedEnv(Env* base_env);
 
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_INCLUDE_ENV_H_
@@ -17,8 +17,7 @@
 // Most people will want to use the builtin bloom filter support (see
 // NewBloomFilterPolicy() below).
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_FILTER_POLICY_H_
-#define STORAGE_ROCKSDB_INCLUDE_FILTER_POLICY_H_
+#pragma once
 
 #include <memory>
 #include <stdexcept>
@@ -149,5 +148,3 @@ class FilterPolicy {
 extern const FilterPolicy* NewBloomFilterPolicy(int bits_per_key,
                                                 bool use_block_based_builder = true);
 }
-
-#endif // STORAGE_ROCKSDB_INCLUDE_FILTER_POLICY_H_
@@ -16,8 +16,7 @@
 // non-const method, all threads accessing the same Iterator must use
 // external synchronization.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_ITERATOR_H_
-#define STORAGE_ROCKSDB_INCLUDE_ITERATOR_H_
+#pragma once
 
 #include <string>
 #include "rocksdb/cleanable.h"
@@ -119,5 +118,3 @@ extern Iterator* NewEmptyIterator();
 extern Iterator* NewErrorIterator(const Status& status);
 
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_INCLUDE_ITERATOR_H_
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_
-#define STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_
+#pragma once
 
 #include <deque>
 #include <memory>
@@ -241,5 +240,3 @@ class AssociativeMergeOperator : public MergeOperator {
 };
 
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_
@@ -65,7 +65,7 @@ struct SstFileMetaData {
         num_reads_sampled(0),
         being_compacted(false) {}
   SstFileMetaData(const std::string& _file_name, const std::string& _path,
-                  uint64_t _size, SequenceNumber _smallest_seqno,
+                  size_t _size, SequenceNumber _smallest_seqno,
                   SequenceNumber _largest_seqno,
                   const std::string& _smallestkey,
                   const std::string& _largestkey, uint64_t _num_reads_sampled,
@@ -81,7 +81,7 @@ struct SstFileMetaData {
         being_compacted(_being_compacted) {}
 
   // File size in bytes.
-  uint64_t size;
+  size_t size;
   // The name of the file.
   std::string name;
   // The full path where the file locates.
@@ -6,8 +6,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_OPTIONS_H_
-#define STORAGE_ROCKSDB_INCLUDE_OPTIONS_H_
+#pragma once
 
 #include <stddef.h>
 #include <stdint.h>
@@ -1289,5 +1288,3 @@ struct IngestExternalFileOptions {
 struct TraceOptions {};
 
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_INCLUDE_OPTIONS_H_
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H
-#define STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H
+#pragma once
 
 #include <stdint.h>
 #include <string>
@@ -176,5 +175,3 @@ struct PerfContext {
 PerfContext* get_perf_context();
 
 }
-
-#endif
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef INCLUDE_ROCKSDB_PERF_LEVEL_H_
-#define INCLUDE_ROCKSDB_PERF_LEVEL_H_
+#pragma once
 
 #include <stdint.h>
 #include <string>
@@ -29,5 +28,3 @@ void SetPerfLevel(PerfLevel level);
 PerfLevel GetPerfLevel();
 
 } // namespace rocksdb
-
-#endif // INCLUDE_ROCKSDB_PERF_LEVEL_H_
@@ -16,8 +16,7 @@
 // non-const method, all threads accessing the same Slice must use
 // external synchronization.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_SLICE_H_
-#define STORAGE_ROCKSDB_INCLUDE_SLICE_H_
+#pragma once
 
 #include <assert.h>
 #include <cstdio>
@@ -256,6 +255,4 @@ inline size_t Slice::difference_offset(const Slice& b) const {
   return off;
 }
 
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_INCLUDE_SLICE_H_
@@ -12,8 +12,7 @@
 // define InDomain and InRange to determine which slices are in either
 // of these sets respectively.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_SLICE_TRANSFORM_H_
-#define STORAGE_ROCKSDB_INCLUDE_SLICE_TRANSFORM_H_
+#pragma once
 
 #include <string>
 
@@ -100,5 +99,3 @@ extern const SliceTransform* NewCappedPrefixTransform(size_t cap_len);
 extern const SliceTransform* NewNoopTransform();
 
 }
-
-#endif // STORAGE_ROCKSDB_INCLUDE_SLICE_TRANSFORM_H_
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
-#define STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
+#pragma once
 
 #include <atomic>
 #include <cstddef>
@@ -673,5 +672,3 @@ class Statistics {
 std::shared_ptr<Statistics> CreateDBStatistics();
 
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
@@ -14,8 +14,7 @@
 // non-const method, all threads accessing the same Status must use
 // external synchronization.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_STATUS_H_
-#define STORAGE_ROCKSDB_INCLUDE_STATUS_H_
+#pragma once
 
 #include <string>
 #include "rocksdb/slice.h"
@@ -348,5 +347,3 @@ inline bool Status::operator!=(const Status& rhs) const {
 }
 
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_INCLUDE_STATUS_H_
@@ -16,6 +16,7 @@
 // https://github.com/facebook/rocksdb/wiki/A-Tutorial-of-RocksDB-SST-formats#wiki-examples
 
 #pragma once
 
 #include <memory>
 #include <string>
 #include <unordered_map>
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_
-#define STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_
+#pragma once
 
 #include "rocksdb/status.h"
 #include "rocksdb/types.h"
@@ -121,5 +120,3 @@ class TransactionLogIterator {
 };
 };
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_TYPES_H_
-#define STORAGE_ROCKSDB_INCLUDE_TYPES_H_
+#pragma once
 
 #include <stdint.h>
 #include "rocksdb/slice.h"
@@ -53,5 +52,3 @@ struct FullKey {
 bool ParseFullKey(const Slice& internal_key, FullKey* result);
 
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_INCLUDE_TYPES_H_
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H
-#define STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H
+#pragma once
 
 #include <stdint.h>
 #include <climits>
@@ -86,5 +85,3 @@ class CompactionOptionsUniversal {
 };
 
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H
@@ -2,8 +2,8 @@
 // This source code is licensed under both the GPLv2 (found in the
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_UTILITIES_ENV_LIBRADOS_H
-#define ROCKSDB_UTILITIES_ENV_LIBRADOS_H
+
+#pragma once
 
 #include <memory>
 #include <string>
@@ -4,6 +4,7 @@
 // (found in the LICENSE.Apache file in the root directory).
 
 #pragma once
 
 #include <string>
 #include <map>
 
@@ -22,8 +22,7 @@
 // non-const method, all threads accessing the same WriteBatch must use
 // external synchronization.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_
-#define STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_
+#pragma once
 
 #include <atomic>
 #include <stack>
@@ -367,5 +366,3 @@ class WriteBatch : public WriteBatchBase {
 };
 
 } // namespace rocksdb
-
-#endif // STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_
@@ -17,7 +17,7 @@ namespace rocksdb {
 
 HistogramWindowingImpl::HistogramWindowingImpl() {
   env_ = Env::Default();
-  window_stats_.reset(new HistogramStat[num_windows_]);
+  window_stats_.reset(new HistogramStat[static_cast<size_t>(num_windows_)]);
   Clear();
 }
 
@@ -29,7 +29,7 @@ HistogramWindowingImpl::HistogramWindowingImpl(
     micros_per_window_(micros_per_window),
     min_num_per_window_(min_num_per_window) {
   env_ = Env::Default();
-  window_stats_.reset(new HistogramStat[num_windows_]);
+  window_stats_.reset(new HistogramStat[static_cast<size_t>(num_windows_)]);
   Clear();
 }
 
@@ -9,8 +9,7 @@
 //
 // See port_example.h for documentation for the following types/functions.
 
-#ifndef STORAGE_LEVELDB_PORT_DIRENT_H_
-#define STORAGE_LEVELDB_PORT_DIRENT_H_
+#pragma once
 
 #ifdef ROCKSDB_PLATFORM_POSIX
 #include <dirent.h>
@@ -43,5 +42,3 @@ using port::closedir;
 } // namespace rocksdb
 
 #endif // OS_WIN
-
-#endif // STORAGE_LEVELDB_PORT_DIRENT_H_
@@ -7,8 +7,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#ifndef PORT_LIKELY_H_
-#define PORT_LIKELY_H_
+#pragma once
 
 #if defined(__GNUC__) && __GNUC__ >= 4
 #define LIKELY(x) (__builtin_expect((x), 1))
@@ -17,5 +16,3 @@
 #define LIKELY(x) (x)
 #define UNLIKELY(x) (x)
 #endif
-
-#endif // PORT_LIKELY_H_
@@ -12,8 +12,7 @@
 // specific port_<platform>.h file. Use this file as a reference for
 // how to port this package to a new platform.
 
-#ifndef STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
-#define STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
+#pragma once
 
 namespace rocksdb {
 namespace port {
@@ -100,5 +99,3 @@ extern bool Snappy_Uncompress(const char* input_data, size_t input_length,
 
 } // namespace port
 } // namespace rocksdb
-
-#endif // STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
@@ -10,8 +10,7 @@
 // This file is a portable substitute for sys/time.h which does not exist on
 // Windows
 
-#ifndef STORAGE_LEVELDB_PORT_SYS_TIME_H_
-#define STORAGE_LEVELDB_PORT_SYS_TIME_H_
+#pragma once
 
 #if defined(OS_WIN) && defined(_MSC_VER)
 
@@ -44,5 +43,3 @@ using port::localtime_r;
 #include <time.h>
 #include <sys/time.h>
 #endif
-
-#endif // STORAGE_LEVELDB_PORT_SYS_TIME_H_
@@ -7,8 +7,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#ifndef STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_
-#define STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_
+#pragma once
 
 // Include the appropriate platform specific file below. If you are
 // porting to a new platform, see "port_example.h" for documentation
@@ -19,5 +18,3 @@
 #elif defined(OS_WIN)
 #include "port/win/win_logger.h"
 #endif
-
-#endif // STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_
@@ -235,7 +235,7 @@ Status WinEnvIO::NewRandomAccessFile(const std::string& fname,
       MapViewOfFileEx(hMap, FILE_MAP_READ,
                       0,  // High DWORD of access start
                       0,  // Low DWORD
-                      fileSize,
+                      static_cast<SIZE_T>(fileSize),
                       NULL);  // Let the OS choose the mapping
 
     if (!mapped_region) {
@@ -246,7 +246,7 @@ Status WinEnvIO::NewRandomAccessFile(const std::string& fname,
     }
 
     result->reset(new WinMmapReadableFile(fname, hFile, hMap, mapped_region,
-                                          fileSize));
+                                          static_cast<size_t>(fileSize)));
 
     mapGuard.release();
     fileGuard.release();
@@ -448,7 +448,7 @@ Status WinEnvIO::NewMemoryMappedFileBuffer(const std::string & fname,
   void* base = MapViewOfFileEx(hMap, FILE_MAP_WRITE,
                                0,  // High DWORD of access start
                                0,  // Low DWORD
-                               fileSize,
+                               static_cast<SIZE_T>(fileSize),
                                NULL);  // Let the OS choose the mapping
 
   if (!base) {
|
@ -260,7 +260,7 @@ Status WinMmapReadableFile::Read(uint64_t offset, size_t n, Slice* result,
|
||||||
*result = Slice();
|
*result = Slice();
|
||||||
return IOError(filename_, EINVAL);
|
return IOError(filename_, EINVAL);
|
||||||
} else if (offset + n > length_) {
|
} else if (offset + n > length_) {
|
||||||
n = length_ - offset;
|
n = length_ - static_cast<size_t>(offset);
|
||||||
}
|
}
|
||||||
*result =
|
*result =
|
||||||
Slice(reinterpret_cast<const char*>(mapped_region_)+offset, n);
|
Slice(reinterpret_cast<const char*>(mapped_region_)+offset, n);
|
||||||
|
@ -317,7 +317,7 @@ Status WinMmapFile::MapNewRegion() {
|
||||||
|
|
||||||
assert(mapped_begin_ == nullptr);
|
assert(mapped_begin_ == nullptr);
|
||||||
|
|
||||||
size_t minDiskSize = file_offset_ + view_size_;
|
size_t minDiskSize = static_cast<size_t>(file_offset_) + view_size_;
|
||||||
|
|
||||||
if (minDiskSize > reserved_size_) {
|
if (minDiskSize > reserved_size_) {
|
||||||
status = Allocate(file_offset_, view_size_);
|
status = Allocate(file_offset_, view_size_);
|
||||||
|
@ -579,7 +579,7 @@ Status WinMmapFile::Allocate(uint64_t offset, uint64_t len) {
|
||||||
// Make sure that we reserve an aligned amount of space
|
// Make sure that we reserve an aligned amount of space
|
||||||
// since the reservation block size is driven outside so we want
|
// since the reservation block size is driven outside so we want
|
||||||
// to check if we are ok with reservation here
|
// to check if we are ok with reservation here
|
||||||
size_t spaceToReserve = Roundup(offset + len, view_size_);
|
size_t spaceToReserve = Roundup(static_cast<size_t>(offset + len), view_size_);
|
||||||
// Nothing to do
|
// Nothing to do
|
||||||
if (spaceToReserve <= reserved_size_) {
|
if (spaceToReserve <= reserved_size_) {
|
||||||
return status;
|
return status;
|
||||||
|
@ -656,14 +656,14 @@ Status WinSequentialFile::PositionedRead(uint64_t offset, size_t n, Slice* resul
|
||||||
return Status::NotSupported("This function is only used for direct_io");
|
return Status::NotSupported("This function is only used for direct_io");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!IsSectorAligned(offset) ||
|
if (!IsSectorAligned(static_cast<size_t>(offset)) ||
|
||||||
!IsSectorAligned(n)) {
|
!IsSectorAligned(n)) {
|
||||||
return Status::InvalidArgument(
|
return Status::InvalidArgument(
|
||||||
"WinSequentialFile::PositionedRead: offset is not properly aligned");
|
"WinSequentialFile::PositionedRead: offset is not properly aligned");
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t bytes_read = 0; // out param
|
size_t bytes_read = 0; // out param
|
||||||
s = PositionedReadInternal(scratch, n, offset, bytes_read);
|
s = PositionedReadInternal(scratch, static_cast<size_t>(n), offset, bytes_read);
|
||||||
*result = Slice(scratch, bytes_read);
|
*result = Slice(scratch, bytes_read);
|
||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
|
@ -721,7 +721,7 @@ Status WinRandomAccessImpl::ReadImpl(uint64_t offset, size_t n, Slice* result,
|
||||||
|
|
||||||
// Check buffer alignment
|
// Check buffer alignment
|
||||||
if (file_base_->use_direct_io()) {
|
if (file_base_->use_direct_io()) {
|
||||||
if (!IsSectorAligned(offset) ||
|
if (!IsSectorAligned(static_cast<size_t>(offset)) ||
|
||||||
!IsAligned(alignment_, scratch)) {
|
!IsAligned(alignment_, scratch)) {
|
||||||
return Status::InvalidArgument(
|
return Status::InvalidArgument(
|
||||||
"WinRandomAccessImpl::ReadImpl: offset or scratch is not properly aligned");
|
"WinRandomAccessImpl::ReadImpl: offset or scratch is not properly aligned");
|
||||||
|
@ -818,7 +818,7 @@ Status WinWritableImpl::AppendImpl(const Slice& data) {
|
||||||
// to the end of the file
|
// to the end of the file
|
||||||
assert(IsSectorAligned(next_write_offset_));
|
assert(IsSectorAligned(next_write_offset_));
|
||||||
if (!IsSectorAligned(data.size()) ||
|
if (!IsSectorAligned(data.size()) ||
|
||||||
!IsAligned(GetAlignement(), data.data())) {
|
!IsAligned(static_cast<size_t>(GetAlignement()), data.data())) {
|
||||||
s = Status::InvalidArgument(
|
s = Status::InvalidArgument(
|
||||||
"WriteData must be page aligned, size must be sector aligned");
|
"WriteData must be page aligned, size must be sector aligned");
|
||||||
} else {
|
} else {
|
||||||
|
@ -857,9 +857,9 @@ inline
|
||||||
Status WinWritableImpl::PositionedAppendImpl(const Slice& data, uint64_t offset) {
|
Status WinWritableImpl::PositionedAppendImpl(const Slice& data, uint64_t offset) {
|
||||||
|
|
||||||
if(file_data_->use_direct_io()) {
|
if(file_data_->use_direct_io()) {
|
||||||
if (!IsSectorAligned(offset) ||
|
if (!IsSectorAligned(static_cast<size_t>(offset)) ||
|
||||||
!IsSectorAligned(data.size()) ||
|
!IsSectorAligned(data.size()) ||
|
||||||
!IsAligned(GetAlignement(), data.data())) {
|
!IsAligned(static_cast<size_t>(GetAlignement()), data.data())) {
|
||||||
return Status::InvalidArgument(
|
return Status::InvalidArgument(
|
||||||
"Data and offset must be page aligned, size must be sector aligned");
|
"Data and offset must be page aligned, size must be sector aligned");
|
||||||
}
|
}
|
||||||
|
@ -944,7 +944,7 @@ Status WinWritableImpl::AllocateImpl(uint64_t offset, uint64_t len) {
|
||||||
// Make sure that we reserve an aligned amount of space
|
// Make sure that we reserve an aligned amount of space
|
||||||
// since the reservation block size is driven outside so we want
|
// since the reservation block size is driven outside so we want
|
||||||
// to check if we are ok with reservation here
|
// to check if we are ok with reservation here
|
||||||
size_t spaceToReserve = Roundup(offset + len, alignment_);
|
size_t spaceToReserve = Roundup(static_cast<size_t>(offset + len), static_cast<size_t>(alignment_));
|
||||||
// Nothing to do
|
// Nothing to do
|
||||||
if (spaceToReserve <= reservedsize_) {
|
if (spaceToReserve <= reservedsize_) {
|
||||||
return status;
|
return status;
|
||||||
|
@ -977,7 +977,7 @@ WinWritableFile::~WinWritableFile() {
|
||||||
bool WinWritableFile::use_direct_io() const { return WinFileData::use_direct_io(); }
|
bool WinWritableFile::use_direct_io() const { return WinFileData::use_direct_io(); }
|
||||||
|
|
||||||
size_t WinWritableFile::GetRequiredBufferAlignment() const {
|
size_t WinWritableFile::GetRequiredBufferAlignment() const {
|
||||||
return GetAlignement();
|
return static_cast<size_t>(GetAlignement());
|
||||||
}
|
}
|
||||||
|
|
||||||
Status WinWritableFile::Append(const Slice& data) {
|
Status WinWritableFile::Append(const Slice& data) {
|
||||||
|
@ -1037,7 +1037,7 @@ WinRandomRWFile::WinRandomRWFile(const std::string& fname, HANDLE hFile,
|
||||||
bool WinRandomRWFile::use_direct_io() const { return WinFileData::use_direct_io(); }
|
bool WinRandomRWFile::use_direct_io() const { return WinFileData::use_direct_io(); }
|
||||||
|
|
||||||
size_t WinRandomRWFile::GetRequiredBufferAlignment() const {
|
size_t WinRandomRWFile::GetRequiredBufferAlignment() const {
|
||||||
return GetAlignement();
|
return static_cast<size_t>(GetAlignement());
|
||||||
}
|
}
|
||||||
|
|
||||||
Status WinRandomRWFile::Write(uint64_t offset, const Slice & data) {
|
Status WinRandomRWFile::Write(uint64_t offset, const Slice & data) {
|
||||||
|
|
|
@@ -9,8 +9,7 @@
 //
 // See port_example.h for documentation for the following types/functions.
 
-#ifndef STORAGE_LEVELDB_PORT_PORT_WIN_H_
-#define STORAGE_LEVELDB_PORT_PORT_WIN_H_
+#pragma once
 
 // Always want minimum headers
 #ifndef WIN32_LEAN_AND_MEAN
@@ -341,5 +340,3 @@ using port::pthread_getspecific;
 using port::truncate;
 
 } // namespace rocksdb
-
-#endif // STORAGE_LEVELDB_PORT_PORT_WIN_H_
@@ -1094,7 +1094,7 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
     if (tail_prefetch_stats != nullptr) {
       assert(prefetch_buffer->min_offset_read() < file_size);
       tail_prefetch_stats->RecordEffectiveSize(
-          file_size - prefetch_buffer->min_offset_read());
+          static_cast<size_t>(file_size) - prefetch_buffer->min_offset_read());
     }
     *table_reader = std::move(new_table);
   }
@@ -164,9 +164,9 @@ bool CuckooTableBuilder::IsDeletedKey(uint64_t idx) const {
 Slice CuckooTableBuilder::GetKey(uint64_t idx) const {
   assert(closed_);
   if (IsDeletedKey(idx)) {
-    return Slice(&deleted_keys_[(idx - num_values_) * key_size_], key_size_);
+    return Slice(&deleted_keys_[static_cast<size_t>((idx - num_values_) * key_size_)], static_cast<size_t>(key_size_));
   }
-  return Slice(&kvs_[idx * (key_size_ + value_size_)], key_size_);
+  return Slice(&kvs_[static_cast<size_t>(idx * (key_size_ + value_size_))], static_cast<size_t>(key_size_));
 }
 
 Slice CuckooTableBuilder::GetUserKey(uint64_t idx) const {
@@ -177,14 +177,14 @@ Slice CuckooTableBuilder::GetUserKey(uint64_t idx) const {
 Slice CuckooTableBuilder::GetValue(uint64_t idx) const {
   assert(closed_);
   if (IsDeletedKey(idx)) {
-    static std::string empty_value(value_size_, 'a');
+    static std::string empty_value(static_cast<unsigned int>(value_size_), 'a');
     return Slice(empty_value);
   }
-  return Slice(&kvs_[idx * (key_size_ + value_size_) + key_size_], value_size_);
+  return Slice(&kvs_[static_cast<size_t>(idx * (key_size_ + value_size_) + key_size_)], static_cast<size_t>(value_size_));
 }
 
 Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
-  buckets->resize(hash_table_size_ + cuckoo_block_size_ - 1);
+  buckets->resize(static_cast<size_t>(hash_table_size_ + cuckoo_block_size_ - 1));
   uint32_t make_space_for_key_call_id = 0;
   for (uint32_t vector_idx = 0; vector_idx < num_entries_; vector_idx++) {
     uint64_t bucket_id = 0;
@@ -200,13 +200,13 @@ Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
       // stop searching and proceed for next hash function.
       for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
            ++block_idx, ++hash_val) {
-        if ((*buckets)[hash_val].vector_idx == kMaxVectorIdx) {
+        if ((*buckets)[static_cast<size_t>(hash_val)].vector_idx == kMaxVectorIdx) {
           bucket_id = hash_val;
           bucket_found = true;
           break;
         } else {
           if (ucomp_->Compare(user_key,
-                GetUserKey((*buckets)[hash_val].vector_idx)) == 0) {
+                GetUserKey((*buckets)[static_cast<size_t>(hash_val)].vector_idx)) == 0) {
             return Status::NotSupported("Same key is being inserted again.");
           }
           hash_vals.push_back(hash_val);
@@ -226,7 +226,7 @@ Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
       ++num_hash_func_;
       for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
            ++block_idx, ++hash_val) {
-        if ((*buckets)[hash_val].vector_idx == kMaxVectorIdx) {
+        if ((*buckets)[static_cast<size_t>(hash_val)].vector_idx == kMaxVectorIdx) {
           bucket_found = true;
          bucket_id = hash_val;
          break;
@@ -235,7 +235,7 @@ Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
         }
       }
     }
-    (*buckets)[bucket_id].vector_idx = vector_idx;
+    (*buckets)[static_cast<size_t>(bucket_id)].vector_idx = vector_idx;
   }
   return Status::OK();
 }
@@ -295,7 +295,7 @@ Status CuckooTableBuilder::Finish() {
       reinterpret_cast<const char*>(&value_size_), sizeof(value_size_));
 
   uint64_t bucket_size = key_size_ + value_size_;
-  unused_bucket.resize(bucket_size, 'a');
+  unused_bucket.resize(static_cast<size_t>(bucket_size), 'a');
   // Write the table.
   uint32_t num_added = 0;
   for (auto& bucket : buckets) {
@@ -320,7 +320,7 @@ Status CuckooTableBuilder::Finish() {
 
   uint64_t offset = buckets.size() * bucket_size;
   properties_.data_size = offset;
-  unused_bucket.resize(properties_.fixed_key_len);
+  unused_bucket.resize(static_cast<size_t>(properties_.fixed_key_len));
   properties_.user_collected_properties[
       CuckooTablePropertyNames::kEmptyKey] = unused_bucket;
   properties_.user_collected_properties[
@@ -456,7 +456,7 @@ bool CuckooTableBuilder::MakeSpaceForKey(
   // no. of times this will be called is <= max_num_hash_func_ + num_entries_.
   for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) {
     uint64_t bid = hash_vals[hash_cnt];
-    (*buckets)[bid].make_space_for_key_call_id = make_space_for_key_call_id;
+    (*buckets)[static_cast<size_t>(bid)].make_space_for_key_call_id = make_space_for_key_call_id;
     tree.push_back(CuckooNode(bid, 0, 0));
   }
   bool null_found = false;
@@ -467,7 +467,7 @@ bool CuckooTableBuilder::MakeSpaceForKey(
     if (curr_depth >= max_search_depth_) {
       break;
     }
-    CuckooBucket& curr_bucket = (*buckets)[curr_node.bucket_id];
+    CuckooBucket& curr_bucket = (*buckets)[static_cast<size_t>(curr_node.bucket_id)];
     for (uint32_t hash_cnt = 0;
          hash_cnt < num_hash_func_ && !null_found; ++hash_cnt) {
       uint64_t child_bucket_id = CuckooHash(GetUserKey(curr_bucket.vector_idx),
@@ -476,15 +476,15 @@ bool CuckooTableBuilder::MakeSpaceForKey(
       // Iterate inside Cuckoo Block.
       for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
            ++block_idx, ++child_bucket_id) {
-        if ((*buckets)[child_bucket_id].make_space_for_key_call_id ==
+        if ((*buckets)[static_cast<size_t>(child_bucket_id)].make_space_for_key_call_id ==
             make_space_for_key_call_id) {
           continue;
         }
-        (*buckets)[child_bucket_id].make_space_for_key_call_id =
+        (*buckets)[static_cast<size_t>(child_bucket_id)].make_space_for_key_call_id =
            make_space_for_key_call_id;
         tree.push_back(CuckooNode(child_bucket_id, curr_depth + 1,
                                   curr_pos));
-        if ((*buckets)[child_bucket_id].vector_idx == kMaxVectorIdx) {
+        if ((*buckets)[static_cast<size_t>(child_bucket_id)].vector_idx == kMaxVectorIdx) {
          null_found = true;
          break;
        }
@@ -502,8 +502,8 @@ bool CuckooTableBuilder::MakeSpaceForKey(
   uint32_t bucket_to_replace_pos = static_cast<uint32_t>(tree.size()) - 1;
   while (bucket_to_replace_pos >= num_hash_func_) {
     CuckooNode& curr_node = tree[bucket_to_replace_pos];
-    (*buckets)[curr_node.bucket_id] =
-        (*buckets)[tree[curr_node.parent_pos].bucket_id];
+    (*buckets)[static_cast<size_t>(curr_node.bucket_id)] =
+        (*buckets)[static_cast<size_t>(tree[curr_node.parent_pos].bucket_id)];
     bucket_to_replace_pos = curr_node.parent_pos;
   }
   *bucket_id = tree[bucket_to_replace_pos].bucket_id;
@@ -136,7 +136,7 @@ CuckooTableReader::CuckooTableReader(
   cuckoo_block_size_ = *reinterpret_cast<const uint32_t*>(
       cuckoo_block_size->second.data());
   cuckoo_block_bytes_minus_one_ = cuckoo_block_size_ * bucket_length_ - 1;
-  status_ = file_->Read(0, file_size, &file_data_, nullptr);
+  status_ = file_->Read(0, static_cast<size_t>(file_size), &file_data_, nullptr);
 }
 
 Status CuckooTableReader::Get(const ReadOptions& /*readOptions*/,
@@ -268,7 +268,7 @@ void CuckooTableIterator::InitIfNeeded() {
   if (initialized_) {
     return;
   }
-  sorted_bucket_ids_.reserve(reader_->GetTableProperties()->num_entries);
+  sorted_bucket_ids_.reserve(static_cast<size_t>(reader_->GetTableProperties()->num_entries));
   uint64_t num_buckets = reader_->table_size_ + reader_->cuckoo_block_size_ - 1;
   assert(num_buckets < kInvalidIndex);
   const char* bucket = reader_->file_data_.data();
@@ -277,7 +277,7 @@ void PlainTableReader::FillBloom(vector<uint32_t>* prefix_hashes) {
 Status PlainTableReader::MmapDataIfNeeded() {
   if (file_info_.is_mmap_mode) {
     // Get mmapped memory.
-    return file_info_.file->Read(0, file_size_, &file_info_.file_data, nullptr);
+    return file_info_.file->Read(0, static_cast<size_t>(file_size_), &file_info_.file_data, nullptr);
   }
   return Status::OK();
 }
@@ -55,8 +55,7 @@
  * @author Tian Xia <tianx@fb.com>
  */
 
-#ifndef FBSON_FBSONDOCUMENT_H
-#define FBSON_FBSONDOCUMENT_H
+#pragma once
 
 #include <stdlib.h>
 #include <string.h>
@@ -889,5 +888,3 @@ inline FbsonValue* FbsonValue::findPath(const char* key_path,
 #pragma pack(pop)
 
 } // namespace fbson
-
-#endif // FBSON_FBSONDOCUMENT_H
@@ -47,8 +47,7 @@
  * @author Tian Xia <tianx@fb.com>
  */
 
-#ifndef FBSON_FBSONPARSER_H
-#define FBSON_FBSONPARSER_H
+#pragma once
 
 #include <cmath>
 #include <limits>
@@ -741,5 +740,3 @@ class FbsonJsonParserT {
 typedef FbsonJsonParserT<FbsonOutStream> FbsonJsonParser;
 
 } // namespace fbson
-
-#endif // FBSON_FBSONPARSER_H
@@ -18,8 +18,7 @@
  * @author Tian Xia <tianx@fb.com>
  */
 
-#ifndef FBSON_FBSONSTREAM_H
-#define FBSON_FBSONSTREAM_H
+#pragma once
 
 #ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
@@ -178,5 +177,3 @@ class FbsonOutStream : public std::ostream {
 };
 
 } // namespace fbson
-
-#endif // FBSON_FBSONSTREAM_H
@@ -9,8 +9,7 @@
  * @author Tian Xia <tianx@fb.com>
  */
 
-#ifndef FBSON_FBSONUTIL_H
-#define FBSON_FBSONUTIL_H
+#pragma once
 
 #include <sstream>
 #include "FbsonDocument.h"
@@ -159,5 +158,3 @@ class FbsonToJson {
 };
 
 } // namespace fbson
-
-#endif // FBSON_FBSONUTIL_H
@@ -25,8 +25,7 @@
  * @author Tian Xia <tianx@fb.com>
  */
 
-#ifndef FBSON_FBSONWRITER_H
-#define FBSON_FBSONWRITER_H
+#pragma once
 
 #include <stack>
 #include "FbsonDocument.h"
@@ -433,5 +432,3 @@ class FbsonWriterT {
 typedef FbsonWriterT<FbsonOutStream> FbsonWriter;
 
 } // namespace fbson
-
-#endif // FBSON_FBSONWRITER_H
@@ -3,13 +3,13 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).

+#pragma once
+
 #include <condition_variable>
 #include <mutex>
 #include <queue>
 #include <utility>

-#pragma once
-
 namespace rocksdb {

 template <class T>

@@ -6,8 +6,7 @@
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.

-#ifndef CRC32C_PPC_H
-#define CRC32C_PPC_H
+#pragma once

 #ifdef __cplusplus
 extern "C" {

@@ -19,5 +18,3 @@ extern uint32_t crc32c_ppc(uint32_t crc, unsigned char const *buffer,
 #ifdef __cplusplus
 }
 #endif
-
-#endif

@@ -5,8 +5,9 @@
 // of patent rights can be found in the PATENTS file in the same directory.
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
-#ifndef CRC32C_PPC_CONST_H
-#define CRC32C_PPC_CONST_H
+#pragma once
+
 #define CRC 0x1edc6f41
 #define REFLECT
 #define CRC_XOR

@@ -898,5 +899,3 @@ static const unsigned int crc_table[] = {
 /* 33 bit reflected Barrett constant n */
 .octa 0x00000000000000000000000105ec76f1
 #endif
-
-#endif

@@ -11,8 +11,7 @@
 // the last "sync". It then checks for data loss errors by purposely dropping
 // file data (or entire files) not protected by a "sync".

-#ifndef UTIL_FAULT_INJECTION_TEST_ENV_H_
-#define UTIL_FAULT_INJECTION_TEST_ENV_H_
+#pragma once

 #include <map>
 #include <set>

@@ -171,5 +170,3 @@ class FaultInjectionTestEnv : public EnvWrapper {
 };

 } // namespace rocksdb
-
-#endif // UTIL_FAULT_INJECTION_TEST_ENV_H_

@@ -62,7 +62,7 @@ Status SequentialFileReader::Read(size_t n, Slice* result, char* scratch) {
 Status SequentialFileReader::Skip(uint64_t n) {
 #ifndef ROCKSDB_LITE
   if (use_direct_io()) {
-    offset_ += n;
+    offset_ += static_cast<size_t>(n);
     return Status::OK();
   }
 #endif // !ROCKSDB_LITE

@@ -81,9 +81,9 @@ Status RandomAccessFileReader::Read(uint64_t offset, size_t n, Slice* result,
   if (use_direct_io()) {
 #ifndef ROCKSDB_LITE
     size_t alignment = file_->GetRequiredBufferAlignment();
-    size_t aligned_offset = TruncateToPageBoundary(alignment, offset);
-    size_t offset_advance = offset - aligned_offset;
-    size_t read_size = Roundup(offset + n, alignment) - aligned_offset;
+    size_t aligned_offset = TruncateToPageBoundary(alignment, static_cast<size_t>(offset));
+    size_t offset_advance = static_cast<size_t>(offset) - aligned_offset;
+    size_t read_size = Roundup(static_cast<size_t>(offset + n), alignment) - aligned_offset;
     AlignedBuffer buf;
     buf.Alignment(alignment);
     buf.AllocateNewBuffer(read_size);

@@ -673,7 +673,7 @@ Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader,
     // Only a few requested bytes are in the buffer. memmove those chunk of
     // bytes to the beginning, and memcpy them back into the new buffer if a
     // new buffer is created.
-    chunk_offset_in_buffer = Rounddown(offset - buffer_offset_, alignment);
+    chunk_offset_in_buffer = Rounddown(static_cast<size_t>(offset - buffer_offset_), alignment);
     chunk_len = buffer_.CurrentSize() - chunk_offset_in_buffer;
     assert(chunk_offset_in_buffer % alignment == 0);
     assert(chunk_len % alignment == 0);

@@ -694,11 +694,11 @@ Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader,
     buffer_.Alignment(alignment);
     buffer_.AllocateNewBuffer(static_cast<size_t>(roundup_len),
                               copy_data_to_new_buffer, chunk_offset_in_buffer,
-                              chunk_len);
+                              static_cast<size_t>(chunk_len));
   } else if (chunk_len > 0) {
     // New buffer not needed. But memmove bytes from tail to the beginning since
     // chunk_len is greater than 0.
-    buffer_.RefitTail(chunk_offset_in_buffer, chunk_len);
+    buffer_.RefitTail(static_cast<size_t>(chunk_offset_in_buffer), static_cast<size_t>(chunk_len));
   }

   Slice result;

@@ -707,7 +707,7 @@ Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader,
                             buffer_.BufferStart() + chunk_len);
   if (s.ok()) {
     buffer_offset_ = rounddown_offset;
-    buffer_.Size(chunk_len + result.size());
+    buffer_.Size(static_cast<size_t>(chunk_len) + result.size());
   }
   return s;
 }

@@ -715,7 +715,7 @@ Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader,
 bool FilePrefetchBuffer::TryReadFromCache(uint64_t offset, size_t n,
                                           Slice* result) {
   if (track_min_offset_ && offset < min_offset_read_) {
-    min_offset_read_ = offset;
+    min_offset_read_ = static_cast<size_t>(offset);
   }
   if (!enable_ || offset < buffer_offset_) {
     return false;

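The direct-I/O path above widens a request to page boundaries before narrowing to size_t. A self-contained sketch of that arithmetic, with simplified stand-ins for RocksDB's TruncateToPageBoundary/Roundup helpers and assuming a power-of-two alignment:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Simplified stand-ins for the helpers used above.
    static size_t TruncateToPageBoundary(size_t alignment, size_t s) {
      return s - (s & (alignment - 1));  // assumes power-of-two alignment
    }
    static size_t Roundup(size_t x, size_t y) { return ((x + y - 1) / y) * y; }

    int main() {
      const size_t alignment = 4096;  // typical direct-I/O requirement
      const uint64_t offset = 10000;  // requested file offset
      const size_t n = 100;           // requested length

      size_t aligned_offset =
          TruncateToPageBoundary(alignment, static_cast<size_t>(offset));
      size_t offset_advance = static_cast<size_t>(offset) - aligned_offset;
      size_t read_size =
          Roundup(static_cast<size_t>(offset + n), alignment) - aligned_offset;

      assert(aligned_offset % alignment == 0);
      assert(read_size % alignment == 0);
      // Prints: aligned_offset=8192 offset_advance=1808 read_size=4096
      printf("aligned_offset=%zu offset_advance=%zu read_size=%zu\n",
             aligned_offset, offset_advance, read_size);
      return 0;
    }
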
@@ -6,8 +6,7 @@
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.

-#ifndef __OPCODES_H
-#define __OPCODES_H
+#pragma once

 #define __PPC_RA(a) (((a)&0x1f) << 16)
 #define __PPC_RB(b) (((b)&0x1f) << 11)

@@ -27,5 +26,3 @@
 #define VPMSUMD(t, a, b) .long PPC_INST_VPMSUMD | VSX_XX3((t), a, b)
 #define MFVRD(a, t) .long PPC_INST_MFVSRD | VSX_XX1((t) + 32, a, 0)
 #define MTVRD(t, a) .long PPC_INST_MTVSRD | VSX_XX1((t) + 32, a, 0)
-
-#endif

@@ -250,7 +250,7 @@ class RandomRWStringSink : public RandomRWFile {

   Status Write(uint64_t offset, const Slice& data) override {
     if (offset + data.size() > ss_->contents_.size()) {
-      ss_->contents_.resize(offset + data.size(), '\0');
+      ss_->contents_.resize(static_cast<size_t>(offset) + data.size(), '\0');
     }

     char* pos = const_cast<char*>(ss_->contents_.data() + offset);

@@ -518,7 +518,7 @@ class StringEnv : public EnvWrapper {
           "Attemp to read when it already reached eof.");
     }
     // TODO(yhchiang): Currently doesn't handle the overflow case.
-    offset_ += n;
+    offset_ += static_cast<size_t>(n);
     return Status::OK();
   }

@@ -532,7 +532,7 @@ class StringEnv : public EnvWrapper {
   explicit StringSink(std::string* contents)
       : WritableFile(), contents_(contents) {}
   virtual Status Truncate(uint64_t size) override {
-    contents_->resize(size);
+    contents_->resize(static_cast<size_t>(size));
     return Status::OK();
   }
   virtual Status Close() override { return Status::OK(); }

@@ -137,6 +137,7 @@ bool RandomTransactionInserter::DoInsert(DB* db, Transaction* txn,
   std::iota(set_vec.begin(), set_vec.end(), static_cast<uint16_t>(0));
   std::random_shuffle(set_vec.begin(), set_vec.end(),
                       [&](uint64_t r) { return rand_->Uniform(r); });

   // For each set, pick a key at random and increment it
   for (uint16_t set_i : set_vec) {
     uint64_t int_value = 0;

@@ -781,7 +781,7 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(

   RateLimiter* rate_limiter = options_.backup_rate_limiter.get();
   if (rate_limiter) {
-    copy_file_buffer_size_ = rate_limiter->GetSingleBurstBytes();
+    copy_file_buffer_size_ = static_cast<size_t>(rate_limiter->GetSingleBurstBytes());
   }

   // A set into which we will insert the dst_paths that are calculated for live

@@ -1078,7 +1078,7 @@ Status BackupEngineImpl::RestoreDBFromBackup(

   RateLimiter* rate_limiter = options_.restore_rate_limiter.get();
   if (rate_limiter) {
-    copy_file_buffer_size_ = rate_limiter->GetSingleBurstBytes();
+    copy_file_buffer_size_ = static_cast<size_t>(rate_limiter->GetSingleBurstBytes());
   }
   Status s;
   std::vector<RestoreAfterCopyOrCreateWorkItem> restore_items_to_finish;

@@ -1231,7 +1231,7 @@ Status BackupEngineImpl::CopyOrCreateFile(
   if (!src.empty()) {
     size_t buffer_to_read = (copy_file_buffer_size_ < size_limit)
                                 ? copy_file_buffer_size_
-                                : size_limit;
+                                : static_cast<size_t>(size_limit);
     s = src_reader->Read(buffer_to_read, &data, buf.get());
     processed_buffer_size += buffer_to_read;
   } else {

@@ -1426,7 +1426,7 @@ Status BackupEngineImpl::CalculateChecksum(const std::string& src, Env* src_env,
       return Status::Incomplete("Backup stopped");
     }
     size_t buffer_to_read = (copy_file_buffer_size_ < size_limit) ?
-      copy_file_buffer_size_ : size_limit;
+      copy_file_buffer_size_ : static_cast<size_t>(size_limit);
     s = src_reader->Read(buffer_to_read, &data, buf.get());

     if (!s.ok()) {

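The ternary hunks here show why the cast lands on only one arm: the conditional operator converts both arms to a common type, so with a size_t arm and a uint64_t arm the whole expression is uint64_t even when the size_t arm is selected at runtime. A minimal sketch of the type rule at play:

    #include <cstddef>
    #include <cstdint>

    size_t PickBufferSize(size_t copy_file_buffer_size, uint64_t size_limit) {
      // Before: the ?: result is uint64_t, so returning it narrows and warns
      // on 32-bit targets even though copy_file_buffer_size already fits:
      //   return (copy_file_buffer_size < size_limit)
      //              ? copy_file_buffer_size : size_limit;
      // After: casting the uint64_t arm makes both arms size_t.
      return (copy_file_buffer_size < size_limit)
                 ? copy_file_buffer_size
                 : static_cast<size_t>(size_limit);
    }
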
@@ -1008,14 +1008,14 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry,
   uint64_t record_size = sizeof(uint32_t) + key.size() + blob_index.size();

   // Allocate the buffer. This is safe in C++11
-  std::string buffer_str(record_size, static_cast<char>(0));
+  std::string buffer_str(static_cast<size_t>(record_size), static_cast<char>(0));
   char* buffer = &buffer_str[0];

   // A partial blob record contain checksum, key and value.
   Slice blob_record;
   {
     StopWatch read_sw(env_, statistics_, BLOB_DB_BLOB_FILE_READ_MICROS);
-    s = reader->Read(record_offset, record_size, &blob_record, buffer);
+    s = reader->Read(record_offset, static_cast<size_t>(record_size), &blob_record, buffer);
     RecordTick(statistics_, BLOB_DB_BLOB_FILE_BYTES_READ, blob_record.size());
   }
   if (!s.ok()) {

@@ -1041,7 +1041,7 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry,
   }
   Slice crc_slice(blob_record.data(), sizeof(uint32_t));
   Slice blob_value(blob_record.data() + sizeof(uint32_t) + key.size(),
-                   blob_index.size());
+                   static_cast<size_t>(blob_index.size()));
   uint32_t crc_exp;
   if (!GetFixed32(&crc_slice, &crc_exp)) {
     ROCKS_LOG_DEBUG(db_options_.info_log,

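The record parsed above is laid out as a fixed 32-bit checksum, then the key bytes, then the value bytes. A simplified, self-contained sketch of that parse; the struct and function names are illustrative, and note that RocksDB's GetFixed32 is explicitly little-endian where this memcpy is host-endian:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <string>

    struct BlobRecordView {
      uint32_t crc;
      const char* key;
      size_t key_size;
      const char* value;
      size_t value_size;
    };

    // Layout: fixed32 checksum | key bytes | value bytes.
    bool ParseBlobRecord(const std::string& rec, size_t key_size,
                         BlobRecordView* out) {
      if (rec.size() < sizeof(uint32_t) + key_size) return false;
      std::memcpy(&out->crc, rec.data(), sizeof(uint32_t));
      out->key = rec.data() + sizeof(uint32_t);
      out->key_size = key_size;
      out->value = out->key + key_size;
      out->value_size = rec.size() - sizeof(uint32_t) - key_size;
      return true;
    }
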
@@ -93,7 +93,7 @@ void BlobDBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
   for (auto bfile_pair : blob_files_) {
     auto blob_file = bfile_pair.second;
     LiveFileMetaData filemetadata;
-    filemetadata.size = blob_file->GetFileSize();
+    filemetadata.size = static_cast<size_t>(blob_file->GetFileSize());
     // Path should be relative to db_name, but begin with slash.
     filemetadata.name =
         BlobFileName("", bdb_options_.blob_dir, blob_file->BlobFileNumber());

@@ -199,7 +199,7 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob,
     fprintf(stdout, " expiration : %" PRIu64 "\n", record.expiration);
   }
   *offset += BlobLogRecord::kHeaderSize;
-  s = Read(*offset, key_size + value_size, &slice);
+  s = Read(*offset, static_cast<size_t>(key_size + value_size), &slice);
   if (!s.ok()) {
     return s;
   }

@@ -212,7 +212,7 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob,
     UncompressionInfo info(context, CompressionDict::GetEmptyDict(),
                            compression);
     s = UncompressBlockContentsForCompressionType(
-        info, slice.data() + key_size, value_size, &contents,
+        info, slice.data() + key_size, static_cast<size_t>(value_size), &contents,
         2 /*compress_format_version*/, ImmutableCFOptions(Options()));
     if (!s.ok()) {
       return s;

@@ -221,10 +221,10 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob,
   }
   if (show_key != DisplayType::kNone) {
     fprintf(stdout, " key : ");
-    DumpSlice(Slice(slice.data(), key_size), show_key);
+    DumpSlice(Slice(slice.data(), static_cast<size_t>(key_size)), show_key);
     if (show_blob != DisplayType::kNone) {
       fprintf(stdout, " blob : ");
-      DumpSlice(Slice(slice.data() + key_size, value_size), show_blob);
+      DumpSlice(Slice(slice.data() + static_cast<size_t>(key_size), static_cast<size_t>(value_size)), show_blob);
     }
     if (show_uncompressed_blob != DisplayType::kNone) {
       fprintf(stdout, " raw blob : ");

@@ -26,8 +26,8 @@ Reader::Reader(unique_ptr<RandomAccessFileReader>&& file_reader, Env* env,

 Status Reader::ReadSlice(uint64_t size, Slice* slice, std::string* buf) {
   StopWatch read_sw(env_, statistics_, BLOB_DB_BLOB_FILE_READ_MICROS);
-  buf->reserve(size);
-  Status s = file_->Read(next_byte_, size, slice, &(*buf)[0]);
+  buf->reserve(static_cast<size_t>(size));
+  Status s = file_->Read(next_byte_, static_cast<size_t>(size), slice, &(*buf)[0]);
   next_byte_ += size;
   if (!s.ok()) {
     return s;

@@ -147,7 +147,7 @@ size_t FixedLengthColBufDecoder::Decode(const char* src, char** dest) {
       col_compression_type_ == kColDict) {
     uint64_t dict_val = read_val;
     assert(dict_val < dict_vec_.size());
-    write_val = dict_vec_[dict_val];
+    write_val = dict_vec_[static_cast<size_t>(dict_val)];
   }

   // dest->append(reinterpret_cast<char*>(&write_val), size_);

@@ -222,7 +222,7 @@ size_t VariableChunkColBufDecoder::Decode(const char* src, char** dest) {
     uint64_t dict_val;
     ReadVarint64(&src, &dict_val);
     assert(dict_val < dict_vec_.size());
-    chunk_buf = dict_vec_[dict_val];
+    chunk_buf = dict_vec_[static_cast<size_t>(dict_val)];
   } else {
     memcpy(&chunk_buf, src, chunk_size);
     src += chunk_size;

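std::vector::operator[] takes the vector's size_type (size_t), so indexing with a uint64_t is another of the same narrowings; the existing assert bounds-checks the value before the cast. The pattern in isolation:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    uint64_t LookupDict(const std::vector<uint64_t>& dict_vec,
                        uint64_t dict_val) {
      // The assert guarantees dict_val is a valid index, and vector sizes are
      // bounded by size_t, so the narrowing below cannot lose information.
      assert(dict_val < dict_vec.size());
      return dict_vec[static_cast<size_t>(dict_val)];
    }
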
@@ -101,7 +101,7 @@ void ColumnAwareEncodingReader::DecodeBlocks(

   size_t num_kv_pairs;
   const char* header_content_ptr = content_ptr;
-  num_kv_pairs = DecodeFixed64(header_content_ptr);
+  num_kv_pairs = static_cast<size_t>(DecodeFixed64(header_content_ptr));

   header_content_ptr += sizeof(size_t);
   size_t num_key_columns = key_col_bufs.size();

@@ -119,7 +119,7 @@ void ColumnAwareEncodingReader::DecodeBlocks(
     key_content_ptr[i] = col_content_ptr;
     key_content_ptr[i] += key_col_bufs[i]->Init(key_content_ptr[i]);
     size_t offset;
-    offset = DecodeFixed64(header_content_ptr);
+    offset = static_cast<size_t>(DecodeFixed64(header_content_ptr));
     header_content_ptr += sizeof(size_t);
     col_content_ptr += offset;
   }

@@ -127,7 +127,7 @@ void ColumnAwareEncodingReader::DecodeBlocks(
     value_content_ptr[i] = col_content_ptr;
     value_content_ptr[i] += value_col_bufs[i]->Init(value_content_ptr[i]);
     size_t offset;
-    offset = DecodeFixed64(header_content_ptr);
+    offset = static_cast<size_t>(DecodeFixed64(header_content_ptr));
     header_content_ptr += sizeof(size_t);
     col_content_ptr += offset;
   }

@@ -1155,10 +1155,10 @@ Options GetRocksDBOptionsFromOptions(const DocumentDBOptions& options) {
   Options rocksdb_options;
   rocksdb_options.max_background_compactions = options.background_threads - 1;
   rocksdb_options.max_background_flushes = 1;
-  rocksdb_options.write_buffer_size = options.memtable_size;
+  rocksdb_options.write_buffer_size = static_cast<size_t>(options.memtable_size);
   rocksdb_options.max_write_buffer_number = 6;
   BlockBasedTableOptions table_options;
-  table_options.block_cache = NewLRUCache(options.cache_size);
+  table_options.block_cache = NewLRUCache(static_cast<size_t>(options.cache_size));
   rocksdb_options.table_factory.reset(NewBlockBasedTableFactory(table_options));
   return rocksdb_options;
 }

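DocumentDBOptions keeps its sizes as uint64_t for a platform-independent public API, while Options::write_buffer_size and NewLRUCache() take size_t, so the translation function is where the narrowing belongs. The same boundary sketched with illustrative types:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical user-facing options, sizes kept 64-bit as in
    // DocumentDBOptions above.
    struct MyDBOptions {
      uint64_t memtable_size = 64ull << 20;  // 64 MiB
      uint64_t cache_size = 256ull << 20;    // 256 MiB
    };

    // Hypothetical internal options, sized to the process like rocksdb::Options.
    struct MyInternalOptions {
      size_t write_buffer_size;
      size_t block_cache_capacity;
    };

    MyInternalOptions Translate(const MyDBOptions& options) {
      MyInternalOptions internal;
      // Explicit narrowing at the API boundary. On a 32-bit build a >4 GiB
      // setting would still truncate; a hardened version would range-check
      // before casting.
      internal.write_buffer_size = static_cast<size_t>(options.memtable_size);
      internal.block_cache_capacity = static_cast<size_t>(options.cache_size);
      return internal;
    }
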
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).

-#ifndef UTILITIES_MERGE_OPERATORS_BYTESXOR_H_
-#define UTILITIES_MERGE_OPERATORS_BYTESXOR_H_
+#pragma once

 #include <algorithm>
 #include <memory>

@@ -38,5 +37,3 @@ class BytesXOROperator : public AssociativeMergeOperator {
 };

 } // namespace rocksdb
-
-#endif // UTILITIES_MERGE_OPERATORS_BYTESXOR_H_

@@ -44,9 +44,9 @@ class BlockCacheTier : public PersistentCacheTier {
  public:
   explicit BlockCacheTier(const PersistentCacheConfig& opt)
       : opt_(opt),
-        insert_ops_(opt_.max_write_pipeline_backlog_size),
+        insert_ops_(static_cast<size_t>(opt_.max_write_pipeline_backlog_size)),
         buffer_allocator_(opt.write_buffer_size, opt.write_buffer_count()),
-        writer_(this, opt_.writer_qdepth, opt_.writer_dispatch_size) {
+        writer_(this, opt_.writer_qdepth, static_cast<size_t>(opt_.writer_dispatch_size)) {
     Info(opt_.log, "Initializing allocator. size=%d B count=%d",
          opt_.write_buffer_size, opt_.write_buffer_count());
   }

@@ -354,8 +354,8 @@ class SpatialIndexCursor : public Cursor {
       : value_getter_(value_getter), valid_(true) {
     // calculate quad keys we'll need to query
     std::vector<uint64_t> quad_keys;
-    quad_keys.reserve((tile_bbox.max_x - tile_bbox.min_x + 1) *
-                      (tile_bbox.max_y - tile_bbox.min_y + 1));
+    quad_keys.reserve(static_cast<size_t>((tile_bbox.max_x - tile_bbox.min_x + 1) *
+                                          (tile_bbox.max_y - tile_bbox.min_y + 1)));
     for (uint64_t x = tile_bbox.min_x; x <= tile_bbox.max_x; ++x) {
       for (uint64_t y = tile_bbox.min_y; y <= tile_bbox.max_y; ++y) {
         quad_keys.push_back(GetQuadKeyFromTile(x, y, tile_bits));

@@ -791,7 +791,7 @@ Status SpatialDB::Create(
   db_options.create_missing_column_families = true;
   db_options.error_if_exists = true;

-  auto block_cache = NewLRUCache(options.cache_size);
+  auto block_cache = NewLRUCache(static_cast<size_t>(options.cache_size));
   ColumnFamilyOptions column_family_options =
       GetColumnFamilyOptions(options, block_cache);

@@ -832,7 +832,7 @@ Status SpatialDB::Create(
 Status SpatialDB::Open(const SpatialDBOptions& options, const std::string& name,
                        SpatialDB** db, bool read_only) {
   DBOptions db_options = GetDBOptionsFromSpatialDBOptions(options);
-  auto block_cache = NewLRUCache(options.cache_size);
+  auto block_cache = NewLRUCache(static_cast<size_t>(options.cache_size));
   ColumnFamilyOptions column_family_options =
       GetColumnFamilyOptions(options, block_cache);

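One detail worth noting in the reserve() hunk: the tile count is computed entirely in uint64_t and narrowed once at the call, so the multiplication itself cannot wrap in a 32-bit size_t. In isolation:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Illustrative bounding box; field names mirror the tile_bbox usage above.
    struct BBox {
      uint64_t min_x, max_x, min_y, max_y;
    };

    void ReserveQuadKeys(std::vector<uint64_t>* quad_keys, const BBox& tile_bbox) {
      // 64-bit arithmetic first, one explicit narrowing at the boundary.
      uint64_t count = (tile_bbox.max_x - tile_bbox.min_x + 1) *
                       (tile_bbox.max_y - tile_bbox.min_y + 1);
      quad_keys->reserve(static_cast<size_t>(count));
    }
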
@@ -446,8 +446,8 @@ bool TransactionLockMgr::IncrementWaiters(
     const autovector<TransactionID>& wait_ids, const std::string& key,
     const uint32_t& cf_id, const bool& exclusive, Env* const env) {
   auto id = txn->GetID();
-  std::vector<int> queue_parents(txn->GetDeadlockDetectDepth());
-  std::vector<TransactionID> queue_values(txn->GetDeadlockDetectDepth());
+  std::vector<int> queue_parents(static_cast<size_t>(txn->GetDeadlockDetectDepth()));
+  std::vector<TransactionID> queue_values(static_cast<size_t>(txn->GetDeadlockDetectDepth()));
   std::lock_guard<std::mutex> lock(wait_txn_map_mutex_);
   assert(!wait_txn_map_.Contains(id));

@@ -460,7 +460,7 @@ void WritePreparedTxnDB::RemovePrepared(const uint64_t prepare_seq,
 bool WritePreparedTxnDB::GetCommitEntry(const uint64_t indexed_seq,
                                         CommitEntry64b* entry_64b,
                                         CommitEntry* entry) const {
-  *entry_64b = commit_cache_[indexed_seq].load(std::memory_order_acquire);
+  *entry_64b = commit_cache_[static_cast<size_t>(indexed_seq)].load(std::memory_order_acquire);
   bool valid = entry_64b->Parse(indexed_seq, entry, FORMAT);
   return valid;
 }

@@ -469,7 +469,7 @@ bool WritePreparedTxnDB::AddCommitEntry(const uint64_t indexed_seq,
                                         const CommitEntry& new_entry,
                                         CommitEntry* evicted_entry) {
   CommitEntry64b new_entry_64b(new_entry, FORMAT);
-  CommitEntry64b evicted_entry_64b = commit_cache_[indexed_seq].exchange(
+  CommitEntry64b evicted_entry_64b = commit_cache_[static_cast<size_t>(indexed_seq)].exchange(
       new_entry_64b, std::memory_order_acq_rel);
   bool valid = evicted_entry_64b.Parse(indexed_seq, evicted_entry, FORMAT);
   return valid;

@@ -478,7 +478,7 @@ bool WritePreparedTxnDB::AddCommitEntry(const uint64_t indexed_seq,
 bool WritePreparedTxnDB::ExchangeCommitEntry(const uint64_t indexed_seq,
                                              CommitEntry64b& expected_entry_64b,
                                              const CommitEntry& new_entry) {
-  auto& atomic_entry = commit_cache_[indexed_seq];
+  auto& atomic_entry = commit_cache_[static_cast<size_t>(indexed_seq)];
   CommitEntry64b new_entry_64b(new_entry, FORMAT);
   bool succ = atomic_entry.compare_exchange_strong(
       expected_entry_64b, new_entry_64b, std::memory_order_acq_rel,

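commit_cache_ is a fixed, power-of-two-sized array of atomics, and indexed_seq arrives already reduced modulo that size, so the casts added here cannot drop meaningful bits. A stripped-down sketch of the access pattern, with an illustrative entry type and capacity:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <memory>

    class CommitCacheSketch {
     public:
      static const size_t kSize = size_t{1} << 23;  // illustrative capacity

      CommitCacheSketch() : cache_(new std::atomic<uint64_t>[kSize]()) {}

      uint64_t Get(uint64_t indexed_seq) const {
        // Caller has already taken indexed_seq % kSize, so narrowing is safe.
        return cache_[static_cast<size_t>(indexed_seq)].load(
            std::memory_order_acquire);
      }

      uint64_t Exchange(uint64_t indexed_seq, uint64_t entry) {
        return cache_[static_cast<size_t>(indexed_seq)].exchange(
            entry, std::memory_order_acq_rel);
      }

     private:
      std::unique_ptr<std::atomic<uint64_t>[]> cache_;
    };
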