Mirror of https://github.com/facebook/rocksdb.git (synced 2024-11-27 02:44:18 +00:00)
Commit 151242ce46
Summary:

**Context:** The existing stat `rocksdb.sst.read.micros` aggregates reads from compaction and flush rather than reporting them separately, which makes it hard to understand the IO read behavior of each.

**Summary**
- Update `StopWatch` and `RandomAccessFileReader` to record `rocksdb.sst.read.micros` and `rocksdb.file.{flush/compaction}.read.micros`
- Fix the default histogram in `RandomAccessFileReader`
- Add a new field `ReadOptions/IOOptions::io_activity`; pass `ReadOptions` through the db open, flush and compaction paths to where `IOOptions` is prepared and handed to `RandomAccessFileReader`
- Use `thread_status_util` for an assertion in `DbStressFSWrapper`, so continuous testing verifies that the correct `io_activity` is passed under db open, flush and compaction

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11288

Test Plan:

- **Stress test**
- **Db bench 1: `rocksdb.sst.read.micros` COUNT ≈ sum of `rocksdb.file.read.flush.micros` and `rocksdb.file.read.compaction.micros`** (without blob). The counts may not match exactly because `HistogramStat::Add` only guarantees atomicity, not accuracy, across threads.
```
./db_bench -db=/dev/shm/testdb/ -statistics=true -benchmarks="fillseq" -key_size=32 -value_size=512 -num=50000 -write_buffer_size=655 -target_file_size_base=655 -disable_auto_compactions=false -compression_type=none -bloom_bits=3 (-use_plain_table=1 -prefix_size=10)
```
```
// BlockBasedTable
rocksdb.sst.read.micros P50 : 2.009374 P95 : 4.968548 P99 : 8.110362 P100 : 43.000000 COUNT : 40456 SUM : 114805
rocksdb.file.read.flush.micros P50 : 1.871841 P95 : 3.872407 P99 : 5.540541 P100 : 43.000000 COUNT : 2250 SUM : 6116
rocksdb.file.read.compaction.micros P50 : 2.023109 P95 : 5.029149 P99 : 8.196910 P100 : 26.000000 COUNT : 38206 SUM : 108689

// PlainTable
Does not apply
```
- **Db bench 2: performance**

**Read**

SETUP: db with 900 files
```
./db_bench -db=/dev/shm/testdb/ -benchmarks="fillseq" -key_size=32 -value_size=512 -num=50000 -write_buffer_size=655 -disable_auto_compactions=true -target_file_size_base=655 -compression_type=none
```
Run till convergence:
```
./db_bench -seed=1678564177044286 -use_existing_db=true -db=/dev/shm/testdb -benchmarks=readrandom[-X60] -statistics=true -num=1000000 -disable_auto_compactions=true -compression_type=none -bloom_bits=3
```
Pre-change: `readrandom [AVG 60 runs] : 21568 (± 248) ops/sec`
Post-change (no regression, -0.3%): `readrandom [AVG 60 runs] : 21486 (± 236) ops/sec`

**Compaction/Flush**

Run till convergence:
```
./db_bench -db=/dev/shm/testdb2/ -seed=1678564177044286 -benchmarks="fillseq[-X60]" -key_size=32 -value_size=512 -num=50000 -write_buffer_size=655 -disable_auto_compactions=false -target_file_size_base=655 -compression_type=none

rocksdb.sst.read.micros COUNT : 33820
rocksdb.sst.read.flush.micros COUNT : 1800
rocksdb.sst.read.compaction.micros COUNT : 32020
```
Pre-change: `fillseq [AVG 46 runs] : 1391 (± 214) ops/sec; 0.7 (± 0.1) MB/sec`
Post-change (no regression, ~-0.4%): `fillseq [AVG 46 runs] : 1385 (± 216) ops/sec; 0.7 (± 0.1) MB/sec`

Reviewed By: ajkr

Differential Revision: D44007011

Pulled By: hx235

fbshipit-source-id: a54c89e4846dfc9a135389edf3f3eedfea257132
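For reference, a minimal C++ sketch (not from the PR) of how a client could compare the aggregate histogram with the new per-activity ones through the public `Statistics` API. The enum identifiers `FILE_READ_FLUSH_MICROS` and `FILE_READ_COMPACTION_MICROS` and the database path are assumptions for illustration; check `include/rocksdb/statistics.h` at this commit for the exact names.
```
// Illustrative sketch: compare the aggregate SST read histogram with the
// per-activity ones after some flush/compaction activity.
#include <iostream>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();

  rocksdb::DB* db = nullptr;
  // "/tmp/read_micros_demo" is a placeholder path for this sketch.
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/read_micros_demo", &db);
  if (!s.ok()) {
    std::cerr << "open failed: " << s.ToString() << std::endl;
    return 1;
  }

  // ... issue enough writes/reads here to trigger flushes and compactions ...

  rocksdb::HistogramData all_reads, flush_reads, compaction_reads;
  options.statistics->histogramData(rocksdb::SST_READ_MICROS, &all_reads);
  // FILE_READ_FLUSH_MICROS / FILE_READ_COMPACTION_MICROS are assumed to be the
  // enum values backing the new rocksdb.file.read.{flush,compaction}.micros
  // stats; see include/rocksdb/statistics.h at this commit for exact names.
  options.statistics->histogramData(rocksdb::FILE_READ_FLUSH_MICROS,
                                    &flush_reads);
  options.statistics->histogramData(rocksdb::FILE_READ_COMPACTION_MICROS,
                                    &compaction_reads);

  // Per the test plan above, all_reads.count should be approximately
  // flush_reads.count + compaction_reads.count (not exact across threads).
  std::cout << "sst.read COUNT: " << all_reads.count
            << " flush.read COUNT: " << flush_reads.count
            << " compaction.read COUNT: " << compaction_reads.count
            << std::endl;

  delete db;
  return 0;
}
```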
278 lines
9.4 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
#include "file/file_util.h"

#include <algorithm>
#include <string>

#include "file/random_access_file_reader.h"
#include "file/sequence_file_reader.h"
#include "file/sst_file_manager_impl.h"
#include "file/writable_file_writer.h"
#include "rocksdb/env.h"

namespace ROCKSDB_NAMESPACE {

// Utility function to copy a file up to a specified length
IOStatus CopyFile(FileSystem* fs, const std::string& source,
                  std::unique_ptr<WritableFileWriter>& dest_writer,
                  uint64_t size, bool use_fsync,
                  const std::shared_ptr<IOTracer>& io_tracer,
                  const Temperature temperature) {
  FileOptions soptions;
  IOStatus io_s;
  std::unique_ptr<SequentialFileReader> src_reader;

  {
    soptions.temperature = temperature;
    std::unique_ptr<FSSequentialFile> srcfile;
    io_s = fs->NewSequentialFile(source, soptions, &srcfile, nullptr);
    if (!io_s.ok()) {
      return io_s;
    }

    if (size == 0) {
      // default argument means copy everything
      io_s = fs->GetFileSize(source, IOOptions(), &size, nullptr);
      if (!io_s.ok()) {
        return io_s;
      }
    }
    src_reader.reset(
        new SequentialFileReader(std::move(srcfile), source, io_tracer));
  }

  char buffer[4096];
  Slice slice;
  while (size > 0) {
    size_t bytes_to_read = std::min(sizeof(buffer), static_cast<size_t>(size));
    // TODO: rate limit copy file
    io_s = status_to_io_status(
        src_reader->Read(bytes_to_read, &slice, buffer,
                         Env::IO_TOTAL /* rate_limiter_priority */));
    if (!io_s.ok()) {
      return io_s;
    }
    if (slice.size() == 0) {
      return IOStatus::Corruption("file too small");
    }
    io_s = dest_writer->Append(slice);
    if (!io_s.ok()) {
      return io_s;
    }
    size -= slice.size();
  }
  return dest_writer->Sync(use_fsync);
}

IOStatus CopyFile(FileSystem* fs, const std::string& source,
                  const std::string& destination, uint64_t size, bool use_fsync,
                  const std::shared_ptr<IOTracer>& io_tracer,
                  const Temperature temperature) {
  FileOptions options;
  IOStatus io_s;
  std::unique_ptr<WritableFileWriter> dest_writer;

  {
    options.temperature = temperature;
    std::unique_ptr<FSWritableFile> destfile;
    io_s = fs->NewWritableFile(destination, options, &destfile, nullptr);
    if (!io_s.ok()) {
      return io_s;
    }

    dest_writer.reset(
        new WritableFileWriter(std::move(destfile), destination, options));
  }

  return CopyFile(fs, source, dest_writer, size, use_fsync, io_tracer,
                  temperature);
}

// Utility function to create a file with the provided contents
IOStatus CreateFile(FileSystem* fs, const std::string& destination,
                    const std::string& contents, bool use_fsync) {
  const EnvOptions soptions;
  IOStatus io_s;
  std::unique_ptr<WritableFileWriter> dest_writer;

  std::unique_ptr<FSWritableFile> destfile;
  io_s = fs->NewWritableFile(destination, soptions, &destfile, nullptr);
  if (!io_s.ok()) {
    return io_s;
  }
  dest_writer.reset(
      new WritableFileWriter(std::move(destfile), destination, soptions));
  io_s = dest_writer->Append(Slice(contents));
  if (!io_s.ok()) {
    return io_s;
  }
  return dest_writer->Sync(use_fsync);
}

Status DeleteDBFile(const ImmutableDBOptions* db_options,
                    const std::string& fname, const std::string& dir_to_sync,
                    const bool force_bg, const bool force_fg) {
  SstFileManagerImpl* sfm =
      static_cast<SstFileManagerImpl*>(db_options->sst_file_manager.get());
  if (sfm && !force_fg) {
    return sfm->ScheduleFileDeletion(fname, dir_to_sync, force_bg);
  } else {
    return db_options->env->DeleteFile(fname);
  }
}

// requested_checksum_func_name brings the function name of the checksum
// generator in checksum_factory. Empty string is permitted, in which case the
// name of the generator created by the factory is unchecked. When
// `requested_checksum_func_name` is non-empty, however, the created generator's
// name must match it, otherwise an `InvalidArgument` error is returned.
IOStatus GenerateOneFileChecksum(
    FileSystem* fs, const std::string& file_path,
    FileChecksumGenFactory* checksum_factory,
    const std::string& requested_checksum_func_name, std::string* file_checksum,
    std::string* file_checksum_func_name,
    size_t verify_checksums_readahead_size, bool /*allow_mmap_reads*/,
    std::shared_ptr<IOTracer>& io_tracer, RateLimiter* rate_limiter,
    Env::IOPriority rate_limiter_priority) {
  if (checksum_factory == nullptr) {
    return IOStatus::InvalidArgument("Checksum factory is invalid");
  }
  assert(file_checksum != nullptr);
  assert(file_checksum_func_name != nullptr);

  FileChecksumGenContext gen_context;
  gen_context.requested_checksum_func_name = requested_checksum_func_name;
  gen_context.file_name = file_path;
  std::unique_ptr<FileChecksumGenerator> checksum_generator =
      checksum_factory->CreateFileChecksumGenerator(gen_context);
  if (checksum_generator == nullptr) {
    std::string msg =
        "Cannot get the file checksum generator based on the requested "
        "checksum function name: " +
        requested_checksum_func_name +
        " from checksum factory: " + checksum_factory->Name();
    return IOStatus::InvalidArgument(msg);
  } else {
    // For backward compatibility and use in file ingestion clients where there
    // is no stored checksum function name, `requested_checksum_func_name` can
    // be empty. If we give the requested checksum function name, we expect it
    // is the same name of the checksum generator.
    if (!requested_checksum_func_name.empty() &&
        checksum_generator->Name() != requested_checksum_func_name) {
      std::string msg = "Expected file checksum generator named '" +
                        requested_checksum_func_name +
                        "', while the factory created one "
                        "named '" +
                        checksum_generator->Name() + "'";
      return IOStatus::InvalidArgument(msg);
    }
  }

  uint64_t size;
  IOStatus io_s;
  std::unique_ptr<RandomAccessFileReader> reader;
  {
    std::unique_ptr<FSRandomAccessFile> r_file;
    io_s = fs->NewRandomAccessFile(file_path, FileOptions(), &r_file, nullptr);
    if (!io_s.ok()) {
      return io_s;
    }
    io_s = fs->GetFileSize(file_path, IOOptions(), &size, nullptr);
    if (!io_s.ok()) {
      return io_s;
    }
    reader.reset(new RandomAccessFileReader(
        std::move(r_file), file_path, nullptr /*Env*/, io_tracer, nullptr,
        Histograms::HISTOGRAM_ENUM_MAX, nullptr, rate_limiter));
  }

  // Found that 256 KB readahead size provides the best performance, based on
  // experiments, for auto readahead. Experiment data is in PR #3282.
  size_t default_max_read_ahead_size = 256 * 1024;
  size_t readahead_size = (verify_checksums_readahead_size != 0)
                              ? verify_checksums_readahead_size
                              : default_max_read_ahead_size;
  std::unique_ptr<char[]> buf;
  if (reader->use_direct_io()) {
    size_t alignment = reader->file()->GetRequiredBufferAlignment();
    readahead_size = (readahead_size + alignment - 1) & ~(alignment - 1);
  }
  buf.reset(new char[readahead_size]);

  Slice slice;
  uint64_t offset = 0;
  IOOptions opts;
  while (size > 0) {
    size_t bytes_to_read =
        static_cast<size_t>(std::min(uint64_t{readahead_size}, size));
    io_s = reader->Read(opts, offset, bytes_to_read, &slice, buf.get(), nullptr,
                        rate_limiter_priority);
    if (!io_s.ok()) {
      return IOStatus::Corruption("file read failed with error: " +
                                  io_s.ToString());
    }
    if (slice.size() == 0) {
      return IOStatus::Corruption("file too small");
    }
    checksum_generator->Update(slice.data(), slice.size());
    size -= slice.size();
    offset += slice.size();

    TEST_SYNC_POINT("GenerateOneFileChecksum::Chunk:0");
  }
  checksum_generator->Finalize();
  *file_checksum = checksum_generator->GetChecksum();
  *file_checksum_func_name = checksum_generator->Name();
  return IOStatus::OK();
}

Status DestroyDir(Env* env, const std::string& dir) {
  Status s;
  if (env->FileExists(dir).IsNotFound()) {
    return s;
  }
  std::vector<std::string> files_in_dir;
  s = env->GetChildren(dir, &files_in_dir);
  if (s.ok()) {
    for (auto& file_in_dir : files_in_dir) {
      std::string path = dir + "/" + file_in_dir;
      bool is_dir = false;
      s = env->IsDirectory(path, &is_dir);
      if (s.ok()) {
        if (is_dir) {
          s = DestroyDir(env, path);
        } else {
          s = env->DeleteFile(path);
        }
      } else if (s.IsNotSupported()) {
        s = Status::OK();
      }
      if (!s.ok()) {
        // IsDirectory, etc. might not report NotFound
        if (s.IsNotFound() || env->FileExists(path).IsNotFound()) {
          // Allow files to be deleted externally
          s = Status::OK();
        } else {
          break;
        }
      }
    }
  }

  if (s.ok()) {
    s = env->DeleteDir(dir);
    // DeleteDir might or might not report NotFound
    if (!s.ok() && (s.IsNotFound() || env->FileExists(dir).IsNotFound())) {
      // Allow to be deleted externally
      s = Status::OK();
    }
  }
  return s;
}

}  // namespace ROCKSDB_NAMESPACE
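Below is an illustrative usage sketch, not part of the mirrored file: how an internal caller (test or tooling code inside the RocksDB tree) might invoke `GenerateOneFileChecksum()` from the listing above with the built-in CRC32c factory. The file path is a placeholder and error handling is kept minimal.
```
// Sketch only: compute a CRC32c checksum for one file via the internal
// GenerateOneFileChecksum() helper defined in file/file_util.cc.
#include <iostream>
#include <memory>
#include <string>

#include "file/file_util.h"
#include "rocksdb/env.h"
#include "rocksdb/file_checksum.h"
#include "rocksdb/file_system.h"

using namespace ROCKSDB_NAMESPACE;

int main() {
  std::shared_ptr<FileSystem> fs = FileSystem::Default();
  std::shared_ptr<FileChecksumGenFactory> factory =
      GetFileChecksumGenCrc32cFactory();

  std::string checksum;
  std::string checksum_func_name;
  std::shared_ptr<IOTracer> io_tracer;  // no IO tracing in this sketch

  // An empty requested name accepts whatever generator the factory creates.
  IOStatus io_s = GenerateOneFileChecksum(
      fs.get(), "/tmp/example.sst" /* placeholder path */, factory.get(),
      /*requested_checksum_func_name=*/"", &checksum, &checksum_func_name,
      /*verify_checksums_readahead_size=*/0, /*allow_mmap_reads=*/false,
      io_tracer, /*rate_limiter=*/nullptr, Env::IO_TOTAL);
  if (!io_s.ok()) {
    std::cerr << "checksum failed: " << io_s.ToString() << std::endl;
    return 1;
  }
  std::cout << "generator: " << checksum_func_name
            << ", checksum bytes: " << checksum.size() << std::endl;
  return 0;
}
```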