2016-05-19 23:40:54 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-15 23:03:42 +00:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2016-05-19 23:40:54 +00:00
|
|
|
//
|
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
|
|
|
#include "port/win/io_win.h"
|
|
|
|
|
2017-04-06 02:02:00 +00:00
|
|
|
#include "monitoring/iostats_context_imp.h"
|
2019-05-31 00:39:43 +00:00
|
|
|
#include "test_util/sync_point.h"
|
2016-05-19 23:40:54 +00:00
|
|
|
#include "util/aligned_buffer.h"
|
2017-04-06 02:02:00 +00:00
|
|
|
#include "util/coding.h"
|
2016-05-19 23:40:54 +00:00
|
|
|
|
|
|
|
namespace rocksdb {
|
|
|
|
namespace port {
|
|
|
|
|
2017-01-09 23:37:57 +00:00
|
|
|
/*
 * DirectIOHelper
 *
 * Alignment helpers for unbuffered (direct) I/O.
 */
namespace {

// Direct I/O on Windows requires offsets and sizes aligned to the volume
// sector size; 512 is the traditional logical sector size.
const size_t kSectorSize = 512;

// True iff alignment is a non-zero power of two.
// The original returned true for 0, which is not a power of two and would
// make an (x & (alignment - 1)) mask meaningless.
inline
bool IsPowerOfTwo(const size_t alignment) {
  return alignment > 0 && ((alignment) & (alignment - 1)) == 0;
}

// True iff off is a multiple of the sector size.
inline
bool IsSectorAligned(const size_t off) {
  return (off & (kSectorSize - 1)) == 0;
}

// True iff ptr is aligned to the given power-of-two alignment.
inline
bool IsAligned(size_t alignment, const void* ptr) {
  return ((uintptr_t(ptr)) & (alignment - 1)) == 0;
}
}  // namespace
|
|
|
|
|
|
|
|
|
2016-05-19 23:40:54 +00:00
|
|
|
// Convert a Windows error code into a human-readable message.
// BUG FIX: the original left lpMsgBuf uninitialized and unconditionally read
// it; if FormatMessageA fails it does not allocate the buffer, so the
// std::string construction dereferenced an indeterminate pointer (UB).
// We now initialize the pointer, check the call's result, and fall back to a
// numeric message on failure.
std::string GetWindowsErrSz(DWORD err) {
  LPSTR lpMsgBuf = nullptr;
  DWORD chars = FormatMessageA(
      FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
          FORMAT_MESSAGE_IGNORE_INSERTS,
      NULL, err,
      0,  // Default language
      reinterpret_cast<LPSTR>(&lpMsgBuf), 0, NULL);

  if (chars == 0 || lpMsgBuf == nullptr) {
    // FormatMessageA could not translate the code; report it numerically.
    return "Unknown Windows error: " + std::to_string(err);
  }

  std::string Err = lpMsgBuf;
  LocalFree(lpMsgBuf);  // Buffer was allocated by FormatMessageA.
  return Err;
}
|
|
|
|
|
|
|
|
// We preserve the original name of this interface to denote the original idea
// behind it.
// All reads and writes here are positional: they operate at an explicitly
// specified offset. On POSIX, pread()/pwrite() additionally guarantee that the
// file pointer is left unchanged. The Windows ReadFile()/WriteFile() calls
// used below do not have that capability: even with an OVERLAPPED offset they
// advance the file pointer past the transferred bytes. That is acceptable for
// writes because they are (should be) sequential.
// Because every read/write supplies its own offset, callers must not rely on
// the current file pointer position.
|
2018-05-01 20:38:36 +00:00
|
|
|
// Positional write: writes `data` at `offset` with a single WriteFile call
// using an OVERLAPPED offset. The byte count actually written is returned via
// bytes_written (0 on failure). Note this advances the OS file pointer; see
// the commentary above.
Status pwrite(const WinFileData* file_data, const Slice& data,
  uint64_t offset, size_t& bytes_written) {

  Status s;
  bytes_written = 0;

  size_t num_bytes = data.size();
  if (num_bytes > std::numeric_limits<DWORD>::max()) {
    // May happen in 64-bit builds where size_t is 64-bits but
    // long is still 32-bit, but that's the API here at the moment
    return Status::InvalidArgument("num_bytes is too large for a single write: " +
      file_data->GetName());
  }

  // Split the 64-bit offset into the two 32-bit OVERLAPPED fields.
  OVERLAPPED overlapped = { 0 };
  ULARGE_INTEGER offsetUnion;
  offsetUnion.QuadPart = offset;

  overlapped.Offset = offsetUnion.LowPart;
  overlapped.OffsetHigh = offsetUnion.HighPart;

  DWORD bytesWritten = 0;

  if (FALSE == WriteFile(file_data->GetFileHandle(), data.data(), static_cast<DWORD>(num_bytes),
    &bytesWritten, &overlapped)) {
    auto lastError = GetLastError();
    s = IOErrorFromWindowsError("WriteFile failed: " + file_data->GetName(),
      lastError);
  } else {
    bytes_written = bytesWritten;
  }

  return s;
}
|
|
|
|
|
|
|
|
// See comments for pwrite above.
// Positional read: reads up to num_bytes at `offset` into `src` with a single
// ReadFile call. bytes_read receives the actual count; hitting EOF is not an
// error (bytes_read stays 0 and an OK status is returned).
Status pread(const WinFileData* file_data, char* src, size_t num_bytes,
  uint64_t offset, size_t& bytes_read) {

  Status s;
  bytes_read = 0;

  if (num_bytes > std::numeric_limits<DWORD>::max()) {
    return Status::InvalidArgument("num_bytes is too large for a single read: " +
      file_data->GetName());
  }

  // Split the 64-bit offset into the two 32-bit OVERLAPPED fields.
  OVERLAPPED overlapped = { 0 };
  ULARGE_INTEGER offsetUnion;
  offsetUnion.QuadPart = offset;

  overlapped.Offset = offsetUnion.LowPart;
  overlapped.OffsetHigh = offsetUnion.HighPart;

  DWORD bytesRead = 0;

  if (FALSE == ReadFile(file_data->GetFileHandle(), src, static_cast<DWORD>(num_bytes),
    &bytesRead, &overlapped)) {
    auto lastError = GetLastError();
    // EOF is OK with zero bytes read
    if (lastError != ERROR_HANDLE_EOF) {
      s = IOErrorFromWindowsError("ReadFile failed: " + file_data->GetName(),
        lastError);
    }
  } else {
    bytes_read = bytesRead;
  }

  return s;
}
|
|
|
|
|
|
|
|
// SetFileInformationByHandle() is capable of fast pre-allocates.
|
|
|
|
// However, this does not change the file end position unless the file is
|
|
|
|
// truncated and the pre-allocated space is not considered filled with zeros.
|
|
|
|
Status fallocate(const std::string& filename, HANDLE hFile,
|
|
|
|
uint64_t to_size) {
|
|
|
|
Status status;
|
|
|
|
|
|
|
|
FILE_ALLOCATION_INFO alloc_info;
|
|
|
|
alloc_info.AllocationSize.QuadPart = to_size;
|
|
|
|
|
|
|
|
if (!SetFileInformationByHandle(hFile, FileAllocationInfo, &alloc_info,
|
|
|
|
sizeof(FILE_ALLOCATION_INFO))) {
|
|
|
|
auto lastError = GetLastError();
|
|
|
|
status = IOErrorFromWindowsError(
|
|
|
|
"Failed to pre-allocate space: " + filename, lastError);
|
|
|
|
}
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set the file's end-of-file marker to toSize via SetFileInformationByHandle.
// Unlike the POSIX namesake, this does not zero-fill when extending.
Status ftruncate(const std::string& filename, HANDLE hFile,
                 uint64_t toSize) {
  FILE_END_OF_FILE_INFO end_of_file;
  end_of_file.EndOfFile.QuadPart = toSize;

  BOOL ok = SetFileInformationByHandle(hFile, FileEndOfFileInfo, &end_of_file,
                                       sizeof(FILE_END_OF_FILE_INFO));
  if (ok) {
    return Status::OK();
  }

  auto last_error = GetLastError();
  return IOErrorFromWindowsError("Failed to Set end of file: " + filename,
                                 last_error);
}
|
|
|
|
|
2019-10-12 01:18:14 +00:00
|
|
|
// Returning 0 is safe: it makes the table reader generate its own unique ID.
// This is suboptimal for performance because multiple table readers for the
// same file cannot then share cached blocks (e.g. with a low `max_open_files`
// many readers may be opened for one file).
//
// TODO: temporary solution — safe but not optimal. See the discussion in
// https://github.com/facebook/rocksdb/pull/5844.
size_t GetUniqueIdFromFile(HANDLE /*hFile*/, char* /*id*/,
                           size_t /*max_size*/) {
  return 0;
}
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
// WinMmapReadableFile
|
|
|
|
|
2016-12-22 20:51:29 +00:00
|
|
|
// Takes ownership of the already-created mapping: hFile/hMap handles and the
// mapped view of `length` bytes. Direct I/O is irrelevant for a mapped read
// path, hence false.
WinMmapReadableFile::WinMmapReadableFile(const std::string& fileName,
                                         HANDLE hFile, HANDLE hMap,
                                         const void* mapped_region,
                                         size_t length)
    : WinFileData(fileName, hFile, false /* use_direct_io */),
      hMap_(hMap),
      mapped_region_(mapped_region),
      length_(length) {}
|
2016-05-19 23:40:54 +00:00
|
|
|
|
|
|
|
// Release the mapped view and the mapping handle. `ret` is only consumed by
// the asserts, so it is marked unused for release (NDEBUG) builds.
WinMmapReadableFile::~WinMmapReadableFile() {
  BOOL ret __attribute__((__unused__));
  ret = ::UnmapViewOfFile(mapped_region_);
  assert(ret);

  ret = ::CloseHandle(hMap_);
  assert(ret);
}
|
|
|
|
|
|
|
|
// Serve the read directly out of the memory-mapped region.
// `scratch` is unused on this path; the resulting Slice points into the map.
Status WinMmapReadableFile::Read(uint64_t offset, size_t n, Slice* result,
                                 char* scratch) const {
  if (offset > length_) {
    // Starting past the end of the mapping is an error.
    *result = Slice();
    return IOError(filename_, EINVAL);
  }
  if (offset + n > length_) {
    // Clamp the request to the bytes that are actually mapped.
    n = length_ - static_cast<size_t>(offset);
  }

  const char* base = reinterpret_cast<const char*>(mapped_region_);
  *result = Slice(base + offset, n);
  return Status::OK();
}
|
|
|
|
|
|
|
|
// No-op: cache residency of mapped pages is managed by the OS.
Status WinMmapReadableFile::InvalidateCache(size_t offset, size_t length) {
  return Status::OK();
}
|
|
|
|
|
|
|
|
// Delegates to the file-level helper (which currently always returns 0).
size_t WinMmapReadableFile::GetUniqueId(char* id, size_t max_size) const {
  return GetUniqueIdFromFile(hFile_, id, max_size);
}
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
/// WinMmapFile
|
|
|
|
|
|
|
|
|
2016-05-19 23:40:54 +00:00
|
|
|
// Can only truncate or reserve to a sector size aligned if
// used on files that are opened with Unbuffered I/O
// Thin wrapper over the file-local ftruncate() helper.
Status WinMmapFile::TruncateFile(uint64_t toSize) {
  return ftruncate(filename_, hFile_, toSize);
}
|
|
|
|
|
|
|
|
// Unmap the current view (flushing its dirty pages to disk as a side effect
// of UnmapViewOfFile) and advance file_offset_ to the next view-sized window.
// Safe to call when nothing is mapped (returns OK).
Status WinMmapFile::UnmapCurrentRegion() {
  Status status;

  if (mapped_begin_ != nullptr) {
    if (!::UnmapViewOfFile(mapped_begin_)) {
      status = IOErrorFromWindowsError(
        "Failed to unmap file view: " + filename_, GetLastError());
    }

    // Move on to the next portion of the file
    file_offset_ += view_size_;

    // UnmapView automatically sends data to disk but not the metadata
    // which is good and provides some equivalent of fdatasync() on Linux
    // therefore, we do not need a separate flag for metadata
    mapped_begin_ = nullptr;
    mapped_end_ = nullptr;
    dst_ = nullptr;

    last_sync_ = nullptr;
    pending_sync_ = false;
  }

  return status;
}
|
|
|
|
|
|
|
|
// Map the next view_size_ window of the file at file_offset_.
// Steps: (1) grow the on-disk reservation if the window would exceed it,
// (2) recreate the file-mapping object if none exists or the reservation has
// grown past the current mapping, (3) map the new view and reset the write
// cursors. Precondition: no view is currently mapped.
Status WinMmapFile::MapNewRegion() {

  Status status;

  assert(mapped_begin_ == nullptr);

  // Disk space needed to back the next view in full.
  size_t minDiskSize = static_cast<size_t>(file_offset_) + view_size_;

  if (minDiskSize > reserved_size_) {
    status = Allocate(file_offset_, view_size_);
    if (!status.ok()) {
      return status;
    }
  }

  // Need to remap
  if (hMap_ == NULL || reserved_size_ > mapping_size_) {

    if (hMap_ != NULL) {
      // Unmap the previous one
      BOOL ret __attribute__((__unused__));
      ret = ::CloseHandle(hMap_);
      assert(ret);
      hMap_ = NULL;
    }

    ULARGE_INTEGER mappingSize;
    mappingSize.QuadPart = reserved_size_;

    hMap_ = CreateFileMappingA(
      hFile_,
      NULL,                  // Security attributes
      PAGE_READWRITE,        // There is not a write only mode for mapping
      mappingSize.HighPart,  // Enable mapping the whole file but the actual
                             // amount mapped is determined by MapViewOfFile
      mappingSize.LowPart,
      NULL);                 // Mapping name

    if (NULL == hMap_) {
      return IOErrorFromWindowsError(
        "WindowsMmapFile failed to create file mapping for: " + filename_,
        GetLastError());
    }

    mapping_size_ = reserved_size_;
  }

  ULARGE_INTEGER offset;
  offset.QuadPart = file_offset_;

  // View must begin at the granularity aligned offset
  mapped_begin_ = reinterpret_cast<char*>(
    MapViewOfFileEx(hMap_, FILE_MAP_WRITE, offset.HighPart, offset.LowPart,
    view_size_, NULL));

  if (!mapped_begin_) {
    status = IOErrorFromWindowsError(
      "WindowsMmapFile failed to map file view: " + filename_,
      GetLastError());
  } else {
    // Fresh view: reset the append cursor and sync bookkeeping.
    mapped_end_ = mapped_begin_ + view_size_;
    dst_ = mapped_begin_;
    last_sync_ = mapped_begin_;
    pending_sync_ = false;
  }
  return status;
}
|
|
|
|
|
|
|
|
// Thin wrapper over the file-local fallocate() helper.
Status WinMmapFile::PreallocateInternal(uint64_t spaceToReserve) {
  return fallocate(filename_, hFile_, spaceToReserve);
}
|
|
|
|
|
Optionally wait on bytes_per_sync to smooth I/O (#5183)
Summary:
The existing implementation does not guarantee bytes reach disk every `bytes_per_sync` when writing SST files, or every `wal_bytes_per_sync` when writing WALs. This can cause confusing behavior for users who enable this feature to avoid large syncs during flush and compaction, but then end up hitting them anyways.
My understanding of the existing behavior is we used `sync_file_range` with `SYNC_FILE_RANGE_WRITE` to submit ranges for async writeback, such that we could continue processing the next range of bytes while that I/O is happening. I believe we can preserve that benefit while also limiting how far the processing can get ahead of the I/O, which prevents huge syncs from happening when the file finishes.
Consider this `sync_file_range` usage: `sync_file_range(fd_, 0, static_cast<off_t>(offset + nbytes), SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE)`. Expanding the range to start at 0 and adding the `SYNC_FILE_RANGE_WAIT_BEFORE` flag causes any pending writeback (like from a previous call to `sync_file_range`) to finish before it proceeds to submit the latest `nbytes` for writeback. The latest `nbytes` are still written back asynchronously, unless processing exceeds I/O speed, in which case the following `sync_file_range` will need to wait on it.
There is a second change in this PR to use `fdatasync` when `sync_file_range` is unavailable (determined statically) or has some known problem with the underlying filesystem (determined dynamically).
The above two changes only apply when the user enables a new option, `strict_bytes_per_sync`.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5183
Differential Revision: D14953553
Pulled By: siying
fbshipit-source-id: 445c3862e019fb7b470f9c7f314fc231b62706e9
2019-04-22 18:48:45 +00:00
|
|
|
// Memory-mapped writable file. page_size and allocation_granularity must be
// powers of two; the view size is a granularity-aligned multiple of ~32KB.
WinMmapFile::WinMmapFile(const std::string& fname, HANDLE hFile,
                         size_t page_size, size_t allocation_granularity,
                         const EnvOptions& options)
    : WinFileData(fname, hFile, false),
      WritableFile(options),
      hMap_(NULL),
      page_size_(page_size),
      allocation_granularity_(allocation_granularity),
      reserved_size_(0),
      mapping_size_(0),
      view_size_(0),
      mapped_begin_(nullptr),
      mapped_end_(nullptr),
      dst_(nullptr),
      last_sync_(nullptr),
      file_offset_(0),
      pending_sync_(false) {
  // Allocation granularity must be obtained from GetSystemInfo() and must be
  // a power of two.
  assert(allocation_granularity > 0);
  assert((allocation_granularity & (allocation_granularity - 1)) == 0);

  assert(page_size > 0);
  assert((page_size & (page_size - 1)) == 0);

  // Only for memory mapped writes
  assert(options.use_mmap_writes);

  // View size must be both the multiple of allocation_granularity AND the
  // page size and the granularity is usually a multiple of a page size.
  const size_t viewSize = 32 * 1024; // 32Kb similar to the Windows File Cache in buffered mode
  view_size_ = Roundup(viewSize, allocation_granularity_);
}
|
|
|
|
|
|
|
|
// If the file was never explicitly closed, Close() syncs, unmaps, truncates
// to the exact written size and releases both handles.
WinMmapFile::~WinMmapFile() {
  if (hFile_ != NULL) {
    Close();
  }
}
|
|
|
|
|
|
|
|
// Copy `data` into the mapped view, remapping to the next window whenever the
// current view fills up, then zero-pad the tail of the last partial page.
Status WinMmapFile::Append(const Slice& data) {
  const char* src = data.data();
  size_t left = data.size();

  while (left > 0) {
    assert(mapped_begin_ <= dst_);
    size_t avail = mapped_end_ - dst_;

    if (avail == 0) {
      // Current view exhausted: flush it out and map the next window.
      Status s = UnmapCurrentRegion();
      if (s.ok()) {
        s = MapNewRegion();
      }

      if (!s.ok()) {
        return s;
      }
    } else {
      size_t n = std::min(left, avail);
      memcpy(dst_, src, n);
      dst_ += n;
      src += n;
      left -= n;
      pending_sync_ = true;
    }
  }

  // Now make sure that the last partial page is padded with zeros if needed
  size_t bytesToPad = Roundup(size_t(dst_), page_size_) - size_t(dst_);
  if (bytesToPad > 0) {
    memset(dst_, 0, bytesToPad);
  }

  return Status::OK();
}
|
|
|
|
|
|
|
|
// Means Close() will properly take care of truncate
|
|
|
|
// and it does not need any additional information
|
|
|
|
Status WinMmapFile::Truncate(uint64_t size) {
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finalize the file: sync and unmap the active view, close the mapping
// handle, truncate the file to the exact written size, and close the file
// handle. Returns the first error encountered; later failures do not mask it.
Status WinMmapFile::Close() {
  Status s;

  assert(NULL != hFile_);

  // We truncate to the precise size so no
  // uninitialized data at the end. SetEndOfFile
  // which we use does not write zeros and it is good.
  uint64_t targetSize = GetFileSize();

  if (mapped_begin_ != nullptr) {
    // Sync before unmapping to make sure everything
    // is on disk and there is not a lazy writing
    // so we are deterministic with the tests
    Sync();
    s = UnmapCurrentRegion();
  }

  if (NULL != hMap_) {
    BOOL ret = ::CloseHandle(hMap_);
    if (!ret && s.ok()) {
      auto lastError = GetLastError();
      s = IOErrorFromWindowsError(
        "Failed to Close mapping for file: " + filename_, lastError);
    }

    hMap_ = NULL;
  }

  if (hFile_ != NULL) {

    TruncateFile(targetSize);

    BOOL ret = ::CloseHandle(hFile_);
    hFile_ = NULL;

    if (!ret && s.ok()) {
      auto lastError = GetLastError();
      s = IOErrorFromWindowsError(
        "Failed to close file map handle: " + filename_, lastError);
    }
  }

  return s;
}
|
|
|
|
|
|
|
|
// No-op: appends go straight into the mapped view; Sync()/Fsync() persist.
Status WinMmapFile::Flush() { return Status::OK(); }
|
|
|
|
|
|
|
|
// Flush only data
// Flushes the page-aligned span of the view dirtied since the last sync via
// FlushViewOfFile; metadata is left to Fsync().
Status WinMmapFile::Sync() {
  Status s;

  // Some writes occurred since last sync
  if (dst_ > last_sync_) {
    assert(mapped_begin_);
    assert(dst_);
    assert(dst_ > mapped_begin_);
    assert(dst_ < mapped_end_);

    // Round the dirty range [last_sync_, dst_) out to page boundaries.
    size_t page_begin =
      TruncateToPageBoundary(page_size_, last_sync_ - mapped_begin_);
    size_t page_end =
      TruncateToPageBoundary(page_size_, dst_ - mapped_begin_ - 1);

    // Flush only the amount of that is a multiple of pages
    if (!::FlushViewOfFile(mapped_begin_ + page_begin,
      (page_end - page_begin) + page_size_)) {
      s = IOErrorFromWindowsError("Failed to FlushViewOfFile: " + filename_,
        GetLastError());
    } else {
      last_sync_ = dst_;
    }
  }

  return s;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Flush data as well as metadata to stable storage.
|
|
|
|
*/
|
|
|
|
Status WinMmapFile::Fsync() {
|
|
|
|
Status s = Sync();
|
|
|
|
|
|
|
|
// Flush metadata
|
|
|
|
if (s.ok() && pending_sync_) {
|
|
|
|
if (!::FlushFileBuffers(hFile_)) {
|
|
|
|
s = IOErrorFromWindowsError("Failed to FlushFileBuffers: " + filename_,
|
|
|
|
GetLastError());
|
|
|
|
}
|
|
|
|
pending_sync_ = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Get the size of valid data in the file. This will not match the
|
|
|
|
* size that is returned from the filesystem because we use mmap
|
|
|
|
* to extend file by map_size every time.
|
|
|
|
*/
|
|
|
|
uint64_t WinMmapFile::GetFileSize() {
|
|
|
|
size_t used = dst_ - mapped_begin_;
|
|
|
|
return file_offset_ + used;
|
|
|
|
}
|
|
|
|
|
|
|
|
// No-op: cache residency of mapped pages is managed by the OS.
Status WinMmapFile::InvalidateCache(size_t offset, size_t length) {
  return Status::OK();
}
|
|
|
|
|
|
|
|
// Grow the on-disk reservation so that [0, offset + len) is backed, rounded
// up to a multiple of the view size. Does nothing if already reserved.
Status WinMmapFile::Allocate(uint64_t offset, uint64_t len) {
  Status status;
  TEST_KILL_RANDOM("WinMmapFile::Allocate", rocksdb_kill_odds);

  // Make sure that we reserve an aligned amount of space
  // since the reservation block size is driven outside so we want
  // to check if we are ok with reservation here
  size_t spaceToReserve = Roundup(static_cast<size_t>(offset + len), view_size_);
  // Nothing to do
  if (spaceToReserve <= reserved_size_) {
    return status;
  }

  IOSTATS_TIMER_GUARD(allocate_nanos);
  status = PreallocateInternal(spaceToReserve);
  if (status.ok()) {
    reserved_size_ = spaceToReserve;
  }
  return status;
}
|
|
|
|
|
|
|
|
// Delegates to the file-level helper (which currently always returns 0).
size_t WinMmapFile::GetUniqueId(char* id, size_t max_size) const {
  return GetUniqueIdFromFile(hFile_, id, max_size);
}
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
//////////////////////////////////////////////////////////////////////////////////
|
|
|
|
// WinSequentialFile
|
|
|
|
|
2016-05-19 23:40:54 +00:00
|
|
|
// Sequential reader over an open handle; honors the direct-read setting from
// the environment options.
WinSequentialFile::WinSequentialFile(const std::string& fname, HANDLE f,
                                     const EnvOptions& options)
    : WinFileData(fname, f, options.use_direct_reads) {}
|
2016-05-19 23:40:54 +00:00
|
|
|
|
|
|
|
// Sanity-check only; the handle itself is presumably closed by the
// WinFileData base class — NOTE(review): confirm against the header.
WinSequentialFile::~WinSequentialFile() {
  assert(hFile_ != INVALID_HANDLE_VALUE);
}
|
|
|
|
|
|
|
|
// Buffered sequential read of up to n bytes into scratch; the resulting Slice
// points at scratch with the actual byte count. EOF yields an empty slice and
// OK. Not supported when the file was opened for direct I/O.
Status WinSequentialFile::Read(size_t n, Slice* result, char* scratch) {
  Status s;
  size_t r = 0;

  assert(result != nullptr);
  if (WinFileData::use_direct_io()) {
    return Status::NotSupported("Read() does not support direct_io");
  }

  // Windows ReadFile API accepts a DWORD.
  // While it is possible to read in a loop if n is too big
  // it is an unlikely case.
  if (n > std::numeric_limits<DWORD>::max()) {
    return Status::InvalidArgument("n is too big for a single ReadFile: "
    + filename_);
  }

  DWORD bytesToRead = static_cast<DWORD>(n); //cast is safe due to the check above
  DWORD bytesRead = 0;
  BOOL ret = ReadFile(hFile_, scratch, bytesToRead, &bytesRead, NULL);
  if (ret != FALSE) {
    r = bytesRead;
  } else {
    auto lastError = GetLastError();
    // EOF is not an error; it simply reads zero bytes.
    if (lastError != ERROR_HANDLE_EOF) {
      s = IOErrorFromWindowsError("ReadFile failed: " + filename_,
        lastError);
    }
  }

  *result = Slice(scratch, r);
  return s;
}
|
|
|
|
|
2018-05-01 20:38:36 +00:00
|
|
|
// Thin wrapper over the file-local pread() helper.
Status WinSequentialFile::PositionedReadInternal(char* src, size_t numBytes,
  uint64_t offset, size_t& bytes_read) const {
  return pread(this, src, numBytes, offset, bytes_read);
}
|
|
|
|
|
|
|
|
// Positional read for direct-I/O files only: offset and length must both be
// sector-aligned. The result Slice points at scratch.
Status WinSequentialFile::PositionedRead(uint64_t offset, size_t n, Slice* result,
  char* scratch) {

  Status s;

  if (!WinFileData::use_direct_io()) {
    return Status::NotSupported("This function is only used for direct_io");
  }

  if (!IsSectorAligned(static_cast<size_t>(offset)) ||
      !IsSectorAligned(n)) {
    return Status::InvalidArgument(
      "WinSequentialFile::PositionedRead: offset is not properly aligned");
  }

  size_t bytes_read = 0; // out param
  s = PositionedReadInternal(scratch, static_cast<size_t>(n), offset, bytes_read);
  *result = Slice(scratch, bytes_read);
  return s;
}
|
|
|
|
|
|
|
|
|
2016-05-19 23:40:54 +00:00
|
|
|
// Advance the file pointer by n bytes relative to the current position.
Status WinSequentialFile::Skip(uint64_t n) {
  // Can't handle more than signed max as SetFilePointerEx accepts a signed 64-bit
  // integer. As such it is a highly unlikely case to have n so large.
  if (n > static_cast<uint64_t>(std::numeric_limits<LONGLONG>::max())) {
    return Status::InvalidArgument("n is too large for a single SetFilePointerEx() call" +
      filename_);
  }

  LARGE_INTEGER li;
  li.QuadPart = static_cast<LONGLONG>(n); //cast is safe due to the check above
  BOOL ret = SetFilePointerEx(hFile_, li, NULL, FILE_CURRENT);
  if (ret == FALSE) {
    auto lastError = GetLastError();
    return IOErrorFromWindowsError("Skip SetFilePointerEx():" + filename_,
      lastError);
  }
  return Status::OK();
}
|
|
|
|
|
|
|
|
// No-op: no explicit page-cache invalidation is attempted on Windows.
Status WinSequentialFile::InvalidateCache(size_t offset, size_t length) {
  return Status::OK();
}
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
//////////////////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
/// WinRandomAccessBase
|
|
|
|
|
2017-06-20 17:16:24 +00:00
|
|
|
// Thin wrapper over the file-local pread() helper, reading through the
// underlying WinFileData.
inline
Status WinRandomAccessImpl::PositionedReadInternal(char* src,
                                                   size_t numBytes,
                                                   uint64_t offset,
                                                   size_t& bytes_read) const {
  return pread(file_base_, src, numBytes, offset, bytes_read);
}
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
// Shared random-access implementation; `alignment` is the buffer alignment
// required for direct reads. mmap reads use a different class entirely.
inline
WinRandomAccessImpl::WinRandomAccessImpl(WinFileData* file_base,
                                         size_t alignment,
                                         const EnvOptions& options) :
    file_base_(file_base),
    alignment_(alignment) {

  assert(!options.use_mmap_reads);
}
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
// Read n bytes at `offset` into scratch and expose them via `result`.
// Under direct I/O the offset must be sector-aligned and scratch must meet
// the required buffer alignment.
inline
Status WinRandomAccessImpl::ReadImpl(uint64_t offset, size_t n, Slice* result,
                                     char* scratch) const {

  Status s;

  // Check buffer alignment
  if (file_base_->use_direct_io()) {
    if (!IsSectorAligned(static_cast<size_t>(offset)) ||
        !IsAligned(alignment_, scratch)) {
      return Status::InvalidArgument(
        "WinRandomAccessImpl::ReadImpl: offset or scratch is not properly aligned");
    }
  }

  // Zero-length reads short-circuit without touching the file.
  if (n == 0) {
    *result = Slice(scratch, 0);
    return s;
  }

  size_t bytes_read = 0;
  s = PositionedReadInternal(scratch, n, offset, bytes_read);
  *result = Slice(scratch, bytes_read);
  return s;
}
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
///////////////////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
/// WinRandomAccessFile
|
|
|
|
|
2016-12-22 20:51:29 +00:00
|
|
|
// Random-access reader: wires the shared WinRandomAccessImpl to this file's
// data, honoring the direct-read setting from the environment options.
WinRandomAccessFile::WinRandomAccessFile(const std::string& fname, HANDLE hFile,
                                         size_t alignment,
                                         const EnvOptions& options)
    : WinFileData(fname, hFile, options.use_direct_reads),
      WinRandomAccessImpl(this, alignment, options) {}
|
2016-05-19 23:40:54 +00:00
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
// Nothing to release here; base classes own the handle and the impl state.
WinRandomAccessFile::~WinRandomAccessFile() = default;
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
// Delegates to the shared implementation in WinRandomAccessImpl.
Status WinRandomAccessFile::Read(uint64_t offset, size_t n, Slice* result,
                                 char* scratch) const {
  return ReadImpl(offset, n, result, scratch);
}
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
// No-op: no explicit page-cache invalidation is attempted on Windows.
Status WinRandomAccessFile::InvalidateCache(size_t offset, size_t length) {
  return Status::OK();
}
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
// Delegates to the file-level helper (which currently always returns 0).
size_t WinRandomAccessFile::GetUniqueId(char* id, size_t max_size) const {
  return GetUniqueIdFromFile(GetFileHandle(), id, max_size);
}
|
|
|
|
|
2017-01-15 21:11:04 +00:00
|
|
|
// Buffer alignment required for direct reads, as recorded by the impl.
size_t WinRandomAccessFile::GetRequiredBufferAlignment() const {
  return GetAlignment();
}
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
/////////////////////////////////////////////////////////////////////////////
|
|
|
|
// WinWritableImpl
|
|
|
|
//
|
|
|
|
|
|
|
|
// Thin wrapper over the file-local fallocate() helper.
inline
Status WinWritableImpl::PreallocateInternal(uint64_t spaceToReserve) {
  return fallocate(file_data_->GetName(), file_data_->GetFileHandle(), spaceToReserve);
}
|
|
|
|
|
2017-06-20 17:16:24 +00:00
|
|
|
// Shared writable implementation. On construction we capture the handle's
// current position so that appends continue correctly after
// ReopenWritableFile.
inline
WinWritableImpl::WinWritableImpl(WinFileData* file_data, size_t alignment)
    : file_data_(file_data),
      alignment_(alignment),
      next_write_offset_(0),
      reservedsize_(0) {

  // Query current position in case ReopenWritableFile is called.
  // This position is only important for buffered writes;
  // for unbuffered writes we explicitly specify the position.
  LARGE_INTEGER zero_move;
  zero_move.QuadPart = 0; // Do not move
  LARGE_INTEGER pos;
  pos.QuadPart = 0;
  BOOL ret = SetFilePointerEx(file_data_->GetFileHandle(), zero_move, &pos,
      FILE_CURRENT);
  // Querying is not supposed to fail.
  if (ret != 0) {
    next_write_offset_ = pos.QuadPart;
  } else {
    assert(false);
  }
}
|
|
|
|
|
2017-06-20 17:16:24 +00:00
|
|
|
// Append `data` at the current logical end of the file. Direct-I/O appends go
// through positional pwrite() at next_write_offset_ (which must stay
// sector-aligned); buffered appends use WriteFile with the OS file pointer.
// A short write is reported as an IOError.
inline
Status WinWritableImpl::AppendImpl(const Slice& data) {

  Status s;

  if (data.size() > std::numeric_limits<DWORD>::max()) {
    return Status::InvalidArgument("data is too long for a single write" +
      file_data_->GetName());
  }

  size_t bytes_written = 0; // out param

  if (file_data_->use_direct_io()) {
    // With no offset specified we are appending
    // to the end of the file
    assert(IsSectorAligned(next_write_offset_));
    if (!IsSectorAligned(data.size()) ||
        !IsAligned(static_cast<size_t>(GetAlignement()), data.data())) {
      s = Status::InvalidArgument(
        "WriteData must be page aligned, size must be sector aligned");
    } else {
      s = pwrite(file_data_, data, next_write_offset_, bytes_written);
    }
  } else {

    DWORD bytesWritten = 0;
    if (!WriteFile(file_data_->GetFileHandle(), data.data(),
      static_cast<DWORD>(data.size()), &bytesWritten, NULL)) {
      auto lastError = GetLastError();
      s = IOErrorFromWindowsError(
        "Failed to WriteFile: " + file_data_->GetName(),
        lastError);
    } else {
      bytes_written = bytesWritten;
    }
  }

  if(s.ok()) {
    if (bytes_written == data.size()) {
      // This matters for direct_io cases where
      // we rely on the fact that next_write_offset_
      // is sector aligned
      next_write_offset_ += bytes_written;
    } else {
      s = Status::IOError("Failed to write all bytes: " +
        file_data_->GetName());
    }
  }

  return s;
}
|
|
|
|
|
2017-06-20 17:16:24 +00:00
|
|
|
inline
// Write data at an explicit offset via pwrite(). For direct I/O the offset,
// size, and buffer address must all satisfy the alignment constraints.
// On success, next_write_offset_ is extended if the write grew the logical
// end of the file.
Status WinWritableImpl::PositionedAppendImpl(const Slice& data, uint64_t offset) {

  if(file_data_->use_direct_io()) {
    if (!IsSectorAligned(static_cast<size_t>(offset)) ||
        !IsSectorAligned(data.size()) ||
        !IsAligned(static_cast<size_t>(GetAlignement()), data.data())) {
      return Status::InvalidArgument(
        "Data and offset must be page aligned, size must be sector aligned");
    }
  }

  size_t bytes_written = 0;
  Status s = pwrite(file_data_, data, offset, bytes_written);

  if(s.ok()) {
    if (bytes_written == data.size()) {
      // For sequential write this would be simple
      // size extension by data.size()
      uint64_t write_end = offset + bytes_written;
      // Only advance the logical end; writes inside the existing extent
      // must not shrink it.
      if (write_end >= next_write_offset_) {
        next_write_offset_ = write_end;
      }
    } else {
      s = Status::IOError("Failed to write all of the requested data: " +
        file_data_->GetName());
    }
  }
  return s;
}
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
inline
// Truncate the file to exactly `size` bytes and reset the append position.
Status WinWritableImpl::TruncateImpl(uint64_t size) {

  // It is tempting to check for the size for sector alignment
  // but truncation may come at the end and there is not a requirement
  // for this to be sector aligned so long as we do not attempt to write
  // after that. The interface docs state that the behavior is undefined
  // in that case.
  Status s = ftruncate(file_data_->GetName(), file_data_->GetFileHandle(),
    size);

  if (s.ok()) {
    // Subsequent appends continue from the new end of file.
    next_write_offset_ = size;
  }
  return s;
}
|
|
|
|
|
2017-06-20 17:16:24 +00:00
|
|
|
inline
// Flush OS buffers to disk, then close the handle. The first error
// encountered wins; a later CloseFile failure does not overwrite an
// earlier flush error.
Status WinWritableImpl::CloseImpl() {

  Status s;

  auto hFile = file_data_->GetFileHandle();
  assert(INVALID_HANDLE_VALUE != hFile);

  // Durability: push any kernel-buffered data to the device before close.
  if (!::FlushFileBuffers(hFile)) {
    auto lastError = GetLastError();
    s = IOErrorFromWindowsError("FlushFileBuffers failed at Close() for: " +
      file_data_->GetName(),
      lastError);
  }

  if(!file_data_->CloseFile() && s.ok()) {
    auto lastError = GetLastError();
    s = IOErrorFromWindowsError("CloseHandle failed for: " + file_data_->GetName(),
      lastError);
  }
  return s;
}
|
|
|
|
|
2017-06-20 17:16:24 +00:00
|
|
|
inline
// Force OS-buffered data for this file to stable storage.
Status WinWritableImpl::SyncImpl() {
  Status s;
  if (!::FlushFileBuffers (file_data_->GetFileHandle())) {
    auto lastError = GetLastError();
    s = IOErrorFromWindowsError(
      "FlushFileBuffers failed at Sync() for: " + file_data_->GetName(), lastError);
  }
  return s;
}
|
|
|
|
|
|
|
|
|
2017-06-20 17:16:24 +00:00
|
|
|
inline
// Ensure at least offset + len bytes are pre-reserved for the file,
// rounded up to alignment_. A no-op when the requested space is already
// within the current reservation.
Status WinWritableImpl::AllocateImpl(uint64_t offset, uint64_t len) {
  Status status;
  TEST_KILL_RANDOM("WinWritableFile::Allocate", rocksdb_kill_odds);

  // Make sure that we reserve an aligned amount of space
  // since the reservation block size is driven outside so we want
  // to check if we are ok with reservation here
  size_t spaceToReserve = Roundup(static_cast<size_t>(offset + len), static_cast<size_t>(alignment_));
  // Nothing to do
  if (spaceToReserve <= reservedsize_) {
    return status;
  }

  // Account the reservation time in IO stats.
  IOSTATS_TIMER_GUARD(allocate_nanos);
  status = PreallocateInternal(spaceToReserve);
  if (status.ok()) {
    reservedsize_ = spaceToReserve;
  }
  return status;
}
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
|
|
/// WinWritableFile
|
|
|
|
|
2016-12-22 20:51:29 +00:00
|
|
|
// Construct a writable file over an already-opened handle. `capacity` is
// unused; direct-write mode is taken from the EnvOptions. mmap writes are
// not supported by this class.
WinWritableFile::WinWritableFile(const std::string& fname, HANDLE hFile,
                                 size_t alignment, size_t /* capacity */,
                                 const EnvOptions& options)
    : WinFileData(fname, hFile, options.use_direct_writes),
      WinWritableImpl(this, alignment),
      WritableFile(options) {
  assert(!options.use_mmap_writes);
}
|
|
|
|
|
|
|
|
// The handle is owned and closed by the WinFileData base; nothing to do here.
WinWritableFile::~WinWritableFile() {
}
|
|
|
|
|
2016-12-22 20:51:29 +00:00
|
|
|
// Indicates if the class makes use of direct I/O
bool WinWritableFile::use_direct_io() const { return WinFileData::use_direct_io(); }
|
2016-10-13 23:36:34 +00:00
|
|
|
|
|
|
|
// Buffer alignment callers must honor for direct I/O writes.
size_t WinWritableFile::GetRequiredBufferAlignment() const {
  return static_cast<size_t>(GetAlignement());
}
|
|
|
|
|
|
|
|
// Delegate sequential appends to the shared WinWritableImpl.
Status WinWritableFile::Append(const Slice& data) {
  return AppendImpl(data);
}
|
|
|
|
|
|
|
|
// Delegate positioned (offset-addressed) writes to the shared impl.
Status WinWritableFile::PositionedAppend(const Slice& data, uint64_t offset) {
  return PositionedAppendImpl(data, offset);
}
|
|
|
|
|
|
|
|
// Need to implement this so the file is truncated correctly
// when buffered and unbuffered mode
Status WinWritableFile::Truncate(uint64_t size) {
  return TruncateImpl(size);
}
|
|
|
|
|
|
|
|
// Flush and close the underlying handle via the shared impl.
Status WinWritableFile::Close() {
  return CloseImpl();
}
|
|
|
|
|
|
|
|
// write out the cached data to the OS cache
// This is now taken care of the WritableFileWriter
Status WinWritableFile::Flush() {
  return Status::OK();
}
|
|
|
|
|
|
|
|
// Durably flush file data via FlushFileBuffers (see SyncImpl).
Status WinWritableFile::Sync() {
  return SyncImpl();
}
|
|
|
|
|
// On Windows, Fsync is equivalent to Sync for this class.
Status WinWritableFile::Fsync() { return SyncImpl(); }
|
2017-12-22 02:37:27 +00:00
|
|
|
|
|
|
|
// Sync() only calls FlushFileBuffers on the handle, so it is safe to
// invoke concurrently with appends.
bool WinWritableFile::IsSyncThreadSafe() const { return true; }
|
2016-10-13 23:36:34 +00:00
|
|
|
|
|
|
|
// Report the logical size as the next write offset, which tracks the
// end of written data even before metadata is flushed.
uint64_t WinWritableFile::GetFileSize() {
  return GetFileNextWriteOffset();
}
|
|
|
|
|
|
|
|
// Pre-reserve space for the range [offset, offset + len).
Status WinWritableFile::Allocate(uint64_t offset, uint64_t len) {
  return AllocateImpl(offset, len);
}
|
|
|
|
|
2016-05-19 23:40:54 +00:00
|
|
|
// Produce a file-system level unique id for caching purposes.
size_t WinWritableFile::GetUniqueId(char* id, size_t max_size) const {
  return GetUniqueIdFromFile(GetFileHandle(), id, max_size);
}
|
|
|
|
|
|
|
|
/////////////////////////////////////////////////////////////////////////
|
|
|
|
/// WinRandomRWFile
|
|
|
|
|
2016-12-22 20:51:29 +00:00
|
|
|
// Read/write file over one handle. Direct I/O is enabled only when both
// direct reads and direct writes are requested, since the same handle
// serves both paths.
WinRandomRWFile::WinRandomRWFile(const std::string& fname, HANDLE hFile,
                                 size_t alignment, const EnvOptions& options)
    : WinFileData(fname, hFile,
                  options.use_direct_reads && options.use_direct_writes),
      WinRandomAccessImpl(this, alignment, options),
      WinWritableImpl(this, alignment) {}
|
2016-10-13 23:36:34 +00:00
|
|
|
|
2017-01-13 20:01:08 +00:00
|
|
|
// Indicates if the class makes use of direct I/O
bool WinRandomRWFile::use_direct_io() const { return WinFileData::use_direct_io(); }
|
2016-05-19 23:40:54 +00:00
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
// Buffer alignment callers must honor for direct I/O.
size_t WinRandomRWFile::GetRequiredBufferAlignment() const {
  return static_cast<size_t>(GetAlignement());
}
|
|
|
|
|
|
|
|
// Positioned write; delegates to the shared writable implementation.
Status WinRandomRWFile::Write(uint64_t offset, const Slice & data) {
  return PositionedAppendImpl(data, offset);
}
|
|
|
|
|
2016-12-22 20:51:29 +00:00
|
|
|
// Positioned read of up to n bytes into scratch; result points at the data.
Status WinRandomRWFile::Read(uint64_t offset, size_t n, Slice* result,
                             char* scratch) const {
  return ReadImpl(offset, n, result, scratch);
}
|
|
|
|
|
|
|
|
// No user-space buffering is done here, so Flush is a no-op.
Status WinRandomRWFile::Flush() {
  return Status::OK();
}
|
|
|
|
|
|
|
|
// Durably flush file data via FlushFileBuffers (see SyncImpl).
Status WinRandomRWFile::Sync() {
  return SyncImpl();
}
|
|
|
|
|
|
|
|
// Flush and close the underlying handle via the shared impl.
Status WinRandomRWFile::Close() {
  return CloseImpl();
}
|
|
|
|
|
2018-05-24 22:05:00 +00:00
|
|
|
//////////////////////////////////////////////////////////////////////////
|
|
|
|
/// WinMemoryMappedBufer
|
|
|
|
// Release resources in reverse order of acquisition: first the mapped
// view, then the file-mapping handle, and finally the file handle.
// Failures are programming errors and are only asserted in debug builds.
WinMemoryMappedBuffer::~WinMemoryMappedBuffer() {
  BOOL ok = FALSE;
  if (base_ != nullptr) {
    ok = ::UnmapViewOfFile(base_);
    assert(ok);
    base_ = nullptr;
  }
  const bool map_handle_valid =
      (map_handle_ != NULL && map_handle_ != INVALID_HANDLE_VALUE);
  if (map_handle_valid) {
    ok = ::CloseHandle(map_handle_);
    assert(ok);
    map_handle_ = NULL;
  }
  const bool file_handle_valid =
      (file_handle_ != NULL && file_handle_ != INVALID_HANDLE_VALUE);
  if (file_handle_valid) {
    ok = ::CloseHandle(file_handle_);
    assert(ok);
    file_handle_ = NULL;
  }
}
|
|
|
|
|
2016-10-13 23:36:34 +00:00
|
|
|
//////////////////////////////////////////////////////////////////////////
|
|
|
|
/// WinDirectory
|
|
|
|
|
2016-05-19 23:40:54 +00:00
|
|
|
// No-op: directory metadata durability is handled by the OS on Windows
// (no directory fsync needed as on POSIX).
Status WinDirectory::Fsync() { return Status::OK(); }
|
|
|
|
|
2018-03-06 19:47:42 +00:00
|
|
|
// Produce a file-system level unique id for the directory handle.
size_t WinDirectory::GetUniqueId(char* id, size_t max_size) const {
  return GetUniqueIdFromFile(handle_, id, max_size);
}
|
2016-10-13 23:36:34 +00:00
|
|
|
//////////////////////////////////////////////////////////////////////////
|
|
|
|
/// WinFileLock
|
|
|
|
|
2016-05-19 23:40:54 +00:00
|
|
|
// Closing the handle releases the OS-level file lock taken at creation.
WinFileLock::~WinFileLock() {
  BOOL ret __attribute__((__unused__));
  ret = ::CloseHandle(hFile_);
  assert(ret);
}
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|