mirror of https://github.com/facebook/rocksdb.git
In logging format, use PRIu64 instead of casting

Summary: Code cleanup. Since __STDC_FORMAT_MACROS is already used when printing uint64_t, switch the remaining places to PRIu64 as well. Only logging is changed.

Test Plan: make all check

Reviewers: ljin

Reviewed By: ljin

Subscribers: dhruba, yhchiang, haobo, leveldb

Differential Revision: https://reviews.facebook.net/D19113
This commit is contained in:
parent a3594867ba
commit dd337bc0b2
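The pattern the commit standardizes on is illustrated by the minimal, self-contained sketch below. This is not RocksDB code: the variadic Log() helper and the sample value are stand-ins so the example compiles on its own (RocksDB's Log() takes an info_log argument, as the hunks show). Defining __STDC_FORMAT_MACROS before <inttypes.h> exposes PRIu64, which expands to the printf conversion specifier that matches uint64_t on every platform, so no (unsigned long) or (unsigned long long) cast is needed.

// Minimal sketch: print uint64_t with PRIu64 instead of casting.
#define __STDC_FORMAT_MACROS  // needed before <inttypes.h> on older toolchains
#include <inttypes.h>

#include <cstdarg>
#include <cstdint>
#include <cstdio>

// Stand-in for RocksDB's logger, only so this example is runnable by itself.
static void Log(const char* format, ...) {
  va_list ap;
  va_start(ap, format);
  std::vfprintf(stderr, format, ap);
  va_end(ap);
}

int main() {
  uint64_t log_number = 123456789012345ULL;  // sample value

  // Old style: cast every uint64_t argument to match a fixed %lu format.
  // On LLP64 platforms (e.g. Windows), unsigned long is 32 bits, so the
  // cast can silently truncate the value.
  Log("Recovering log #%lu\n", (unsigned long)log_number);

  // New style: PRIu64 always matches uint64_t, so the value is passed as-is.
  Log("Recovering log #%" PRIu64 "\n", log_number);
  return 0;
}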
@@ -278,14 +278,12 @@ void CompactionPicker::SetupOtherInputs(Compaction* c) {
       if (expanded1.size() == c->inputs_[1].size() &&
           !FilesInCompaction(expanded1)) {
         Log(options_->info_log,
-            "[%s] Expanding@%lu %lu+%lu (%lu+%lu bytes) to %lu+%lu (%lu+%lu "
-            "bytes)\n",
-            c->column_family_data()->GetName().c_str(), (unsigned long)level,
-            (unsigned long)(c->inputs_[0].size()),
-            (unsigned long)(c->inputs_[1].size()), (unsigned long)inputs0_size,
-            (unsigned long)inputs1_size, (unsigned long)(expanded0.size()),
-            (unsigned long)(expanded1.size()), (unsigned long)expanded0_size,
-            (unsigned long)inputs1_size);
+            "[%s] Expanding@%d %zu+%zu (%" PRIu64 "+%" PRIu64
+            " bytes) to %zu+%zu (%" PRIu64 "+%" PRIu64 "bytes)\n",
+            c->column_family_data()->GetName().c_str(), level,
+            c->inputs_[0].size(), c->inputs_[1].size(), inputs0_size,
+            inputs1_size, expanded0.size(), expanded1.size(), expanded0_size,
+            inputs1_size);
         smallest = new_start;
         largest = new_limit;
         c->inputs_[0] = expanded0;
@@ -656,10 +654,9 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
           candidate_count = 1;
           break;
         }
-        LogToBuffer(log_buffer,
-                    "[%s] Universal: file %lu[%d] being compacted, skipping",
-                    version->cfd_->GetName().c_str(),
-                    (unsigned long)f->fd.GetNumber(), loop);
+        LogToBuffer(log_buffer, "[%s] Universal: file %" PRIu64
+                    "[%d] being compacted, skipping",
+                    version->cfd_->GetName().c_str(), f->fd.GetNumber(), loop);
         f = nullptr;
       }
 
@@ -668,9 +665,8 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
     uint64_t candidate_size = f != nullptr? f->compensated_file_size : 0;
     if (f != nullptr) {
       LogToBuffer(log_buffer,
-                  "[%s] Universal: Possible candidate file %lu[%d].",
-                  version->cfd_->GetName().c_str(),
-                  (unsigned long)f->fd.GetNumber(), loop);
+                  "[%s] Universal: Possible candidate file %" PRIu64 "[%d].",
+                  version->cfd_->GetName().c_str(), f->fd.GetNumber(), loop);
     }
 
     // Check if the suceeding files need compaction.
@@ -800,19 +796,19 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
       start_index = loop; // Consider this as the first candidate.
       break;
     }
-    LogToBuffer(
-        log_buffer, "[%s] Universal: skipping file %lu[%d] compacted %s",
-        version->cfd_->GetName().c_str(), (unsigned long)f->fd.GetNumber(),
-        loop, " cannot be a candidate to reduce size amp.\n");
+    LogToBuffer(log_buffer,
+                "[%s] Universal: skipping file %" PRIu64 "[%d] compacted %s",
+                version->cfd_->GetName().c_str(), f->fd.GetNumber(), loop,
+                " cannot be a candidate to reduce size amp.\n");
     f = nullptr;
   }
   if (f == nullptr) {
     return nullptr; // no candidate files
   }
 
-  LogToBuffer(log_buffer, "[%s] Universal: First candidate file %lu[%d] %s",
-              version->cfd_->GetName().c_str(),
-              (unsigned long)f->fd.GetNumber(), start_index,
+  LogToBuffer(log_buffer,
+              "[%s] Universal: First candidate file %" PRIu64 "[%d] %s",
+              version->cfd_->GetName().c_str(), f->fd.GetNumber(), start_index,
               " to reduce size amp.\n");
 
   // keep adding up all the remaining files
@@ -822,9 +818,9 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
     f = version->files_[level][index];
     if (f->being_compacted) {
       LogToBuffer(
-          log_buffer, "[%s] Universal: Possible candidate file %lu[%d] %s.",
-          version->cfd_->GetName().c_str(), (unsigned long)f->fd.GetNumber(),
-          loop,
+          log_buffer,
+          "[%s] Universal: Possible candidate file %" PRIu64 "[%d] %s.",
+          version->cfd_->GetName().c_str(), f->fd.GetNumber(), loop,
           " is already being compacted. No size amp reduction possible.\n");
       return nullptr;
     }
@@ -843,17 +839,16 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
   if (candidate_size * 100 < ratio * earliest_file_size) {
     LogToBuffer(
         log_buffer,
-        "[%s] Universal: size amp not needed. newer-files-total-size %lu "
-        "earliest-file-size %lu",
-        version->cfd_->GetName().c_str(), (unsigned long)candidate_size,
-        (unsigned long)earliest_file_size);
+        "[%s] Universal: size amp not needed. newer-files-total-size %" PRIu64
+        "earliest-file-size %" PRIu64,
+        version->cfd_->GetName().c_str(), candidate_size, earliest_file_size);
     return nullptr;
   } else {
-    LogToBuffer(log_buffer,
-        "[%s] Universal: size amp needed. newer-files-total-size %lu "
-        "earliest-file-size %lu",
-        version->cfd_->GetName().c_str(), (unsigned long)candidate_size,
-        (unsigned long)earliest_file_size);
+    LogToBuffer(
+        log_buffer,
+        "[%s] Universal: size amp needed. newer-files-total-size %" PRIu64
+        "earliest-file-size %" PRIu64,
+        version->cfd_->GetName().c_str(), candidate_size, earliest_file_size);
   }
   assert(start_index >= 0 && start_index < file_by_time.size() - 1);
 
@@ -731,9 +731,8 @@ void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
             fname.c_str(), archived_log_name.c_str(), s.ToString().c_str());
       } else {
         Status s = env_->DeleteFile(fname);
-        Log(options_.info_log, "Delete %s type=%d #%lu -- %s\n",
-            fname.c_str(), type, (unsigned long)number,
-            s.ToString().c_str());
+        Log(options_.info_log, "Delete %s type=%d #%" PRIu64 " -- %s\n",
+            fname.c_str(), type, number, s.ToString().c_str());
       }
     }
 
@@ -1257,8 +1256,7 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, SequenceNumber* max_sequence,
   // large sequence numbers).
   log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                      0/*initial_offset*/);
-  Log(options_.info_log, "Recovering log #%lu",
-      (unsigned long) log_number);
+  Log(options_.info_log, "Recovering log #%" PRIu64 "", log_number);
 
   // Read all the records and add to a memtable
   std::string scratch;
@@ -1375,8 +1373,8 @@ Status DBImpl::WriteLevel0TableForRecovery(ColumnFamilyData* cfd, MemTable* mem,
   const SequenceNumber newest_snapshot = snapshots_.GetNewest();
   const SequenceNumber earliest_seqno_in_memtable =
     mem->GetFirstSequenceNumber();
-  Log(options_.info_log, "[%s] Level-0 table #%lu: started",
-      cfd->GetName().c_str(), (unsigned long)meta.fd.GetNumber());
+  Log(options_.info_log, "[%s] Level-0 table #%" PRIu64 ": started",
+      cfd->GetName().c_str(), meta.fd.GetNumber());
 
   Status s;
   {
@@ -1389,9 +1387,10 @@ Status DBImpl::WriteLevel0TableForRecovery(ColumnFamilyData* cfd, MemTable* mem,
     mutex_.Lock();
   }
 
-  Log(options_.info_log, "[%s] Level-0 table #%lu: %lu bytes %s",
-      cfd->GetName().c_str(), (unsigned long)meta.fd.GetNumber(),
-      (unsigned long)meta.fd.GetFileSize(), s.ToString().c_str());
+  Log(options_.info_log,
+      "[%s] Level-0 table #%" PRIu64 ": %" PRIu64 " bytes %s",
+      cfd->GetName().c_str(), meta.fd.GetNumber(), meta.fd.GetFileSize(),
+      s.ToString().c_str());
   delete iter;
 
   pending_outputs_.erase(meta.fd.GetNumber());
@@ -1436,14 +1435,15 @@ Status DBImpl::WriteLevel0Table(ColumnFamilyData* cfd,
   log_buffer->FlushBufferToLog();
   std::vector<Iterator*> memtables;
   for (MemTable* m : mems) {
-    Log(options_.info_log, "[%s] Flushing memtable with next log file: %lu\n",
-        cfd->GetName().c_str(), (unsigned long)m->GetNextLogNumber());
+    Log(options_.info_log,
+        "[%s] Flushing memtable with next log file: %" PRIu64 "\n",
+        cfd->GetName().c_str(), m->GetNextLogNumber());
     memtables.push_back(m->NewIterator(ReadOptions(), true));
   }
   Iterator* iter = NewMergingIterator(&cfd->internal_comparator(),
                                       &memtables[0], memtables.size());
-  Log(options_.info_log, "[%s] Level-0 flush table #%lu: started",
-      cfd->GetName().c_str(), (unsigned long)meta.fd.GetNumber());
+  Log(options_.info_log, "[%s] Level-0 flush table #%" PRIu64 ": started",
+      cfd->GetName().c_str(), meta.fd.GetNumber());
 
   s = BuildTable(dbname_, env_, *cfd->options(), storage_options_,
                  cfd->table_cache(), iter, &meta, cfd->internal_comparator(),
@@ -1451,9 +1451,10 @@ Status DBImpl::WriteLevel0Table(ColumnFamilyData* cfd,
                  GetCompressionFlush(*cfd->options()));
   LogFlush(options_.info_log);
   delete iter;
-  Log(options_.info_log, "[%s] Level-0 flush table #%lu: %lu bytes %s",
-      cfd->GetName().c_str(), (unsigned long)meta.fd.GetFileSize(),
-      (unsigned long)meta.fd.GetFileSize(), s.ToString().c_str());
+  Log(options_.info_log,
+      "[%s] Level-0 flush table #%" PRIu64 ": %" PRIu64 " bytes %s",
+      cfd->GetName().c_str(), meta.fd.GetFileSize(), meta.fd.GetFileSize(),
+      s.ToString().c_str());
 
   if (!options_.disableDataSync) {
     db_directory_->Fsync();
@@ -2402,9 +2403,10 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
     s = iter->status();
     delete iter;
     if (s.ok()) {
-      Log(options_.info_log, "[%s] Generated table #%lu: %lu keys, %lu bytes",
-          cfd->GetName().c_str(), (unsigned long)output_number,
-          (unsigned long)current_entries, (unsigned long)current_bytes);
+      Log(options_.info_log, "[%s] Generated table #%" PRIu64 ": %" PRIu64
+          " keys, %" PRIu64 " bytes",
+          cfd->GetName().c_str(), output_number, current_entries,
+          current_bytes);
     }
   }
   return s;
@@ -2469,9 +2471,8 @@ inline SequenceNumber DBImpl::findEarliestVisibleSnapshot(
     assert(prev);
   }
   Log(options_.info_log,
-      "Looking for seqid %lu but maxseqid is %lu",
-      (unsigned long)in,
-      (unsigned long)snapshots[snapshots.size()-1]);
+      "Looking for seqid %" PRIu64 " but maxseqid is %" PRIu64 "", in,
+      snapshots[snapshots.size() - 1]);
   assert(0);
   return 0;
 }
@@ -4169,8 +4170,9 @@ Status DBImpl::MakeRoomForWrite(
       }
       new_mem->Ref();
       cfd->SetMemtable(new_mem);
-      Log(options_.info_log, "[%s] New memtable created with log file: #%lu\n",
-          cfd->GetName().c_str(), (unsigned long)logfile_number_);
+      Log(options_.info_log,
+          "[%s] New memtable created with log file: #%" PRIu64 "\n",
+          cfd->GetName().c_str(), logfile_number_);
       force = false; // Do not force another compaction if have room
       MaybeScheduleFlushOrCompaction();
       superversions_to_free->push_back(
db/repair.cc (45 changed lines)
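A brief aside on why the first hunk below places the __STDC_FORMAT_MACROS define ahead of <inttypes.h>: with some older C library headers, PRIu64 and the other format macros are only made visible to C++ translation units when that macro is defined before the header is first included. A hypothetical standalone file (not part of the commit) showing the required ordering:

// Illustrative only: the define must precede the include on older toolchains.
#define __STDC_FORMAT_MACROS
#include <inttypes.h>

#include <cstdio>

int main() {
  uint64_t table_number = 7;  // sample value
  std::printf("Table #%" PRIu64 "\n", table_number);  // prints "Table #7"
  return 0;
}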
@@ -31,6 +31,8 @@
 
 #ifndef ROCKSDB_LITE
 
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
 #include "db/builder.h"
 #include "db/db_impl.h"
 #include "db/dbformat.h"
@@ -82,18 +84,17 @@ class Repairer {
       status = WriteDescriptor();
     }
     if (status.ok()) {
-      unsigned long long bytes = 0;
+      uint64_t bytes = 0;
       for (size_t i = 0; i < tables_.size(); i++) {
         bytes += tables_[i].meta.fd.GetFileSize();
       }
       Log(options_.info_log,
           "**** Repaired rocksdb %s; "
-          "recovered %d files; %llu bytes. "
+          "recovered %zu files; %" PRIu64
+          "bytes. "
           "Some data may have been lost. "
           "****",
-          dbname_.c_str(),
-          static_cast<int>(tables_.size()),
-          bytes);
+          dbname_.c_str(), tables_.size(), bytes);
     }
     return status;
   }
@@ -159,8 +160,8 @@ class Repairer {
       std::string logname = LogFileName(dbname_, logs_[i]);
       Status status = ConvertLogToTable(logs_[i]);
       if (!status.ok()) {
-        Log(options_.info_log, "Log #%llu: ignoring conversion error: %s",
-            (unsigned long long) logs_[i],
+        Log(options_.info_log,
+            "Log #%" PRIu64 ": ignoring conversion error: %s", logs_[i],
             status.ToString().c_str());
       }
       ArchiveFile(logname);
@@ -174,10 +175,8 @@ class Repairer {
     uint64_t lognum;
     virtual void Corruption(size_t bytes, const Status& s) {
       // We print error messages for corruption, but continue repairing.
-      Log(info_log, "Log #%llu: dropping %d bytes; %s",
-          (unsigned long long) lognum,
-          static_cast<int>(bytes),
-          s.ToString().c_str());
+      Log(info_log, "Log #%" PRIu64 ": dropping %d bytes; %s", lognum,
+          static_cast<int>(bytes), s.ToString().c_str());
     }
   };
 
@@ -220,8 +219,7 @@ class Repairer {
       if (status.ok()) {
         counter += WriteBatchInternal::Count(&batch);
       } else {
-        Log(options_.info_log, "Log #%llu: ignoring %s",
-            (unsigned long long) log,
+        Log(options_.info_log, "Log #%" PRIu64 ": ignoring %s", log,
             status.ToString().c_str());
         status = Status::OK(); // Keep going with rest of file
       }
@@ -244,9 +242,9 @@ class Repairer {
         table_numbers_.push_back(meta.fd.GetNumber());
       }
     }
-    Log(options_.info_log, "Log #%llu: %d ops saved to Table #%llu %s",
-        (unsigned long long)log, counter,
-        (unsigned long long)meta.fd.GetNumber(), status.ToString().c_str());
+    Log(options_.info_log,
+        "Log #%" PRIu64 ": %d ops saved to Table #%" PRIu64 " %s", log, counter,
+        meta.fd.GetNumber(), status.ToString().c_str());
     return status;
   }
 
@@ -257,9 +255,8 @@ class Repairer {
       Status status = ScanTable(&t);
       if (!status.ok()) {
         std::string fname = TableFileName(dbname_, table_numbers_[i]);
-        Log(options_.info_log, "Table #%llu: ignoring %s",
-            (unsigned long long) table_numbers_[i],
-            status.ToString().c_str());
+        Log(options_.info_log, "Table #%" PRIu64 ": ignoring %s",
+            table_numbers_[i], status.ToString().c_str());
         ArchiveFile(fname);
       } else {
         tables_.push_back(t);
@@ -281,9 +278,8 @@ class Repairer {
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       Slice key = iter->key();
       if (!ParseInternalKey(key, &parsed)) {
-        Log(options_.info_log, "Table #%llu: unparsable key %s",
-            (unsigned long long)t->meta.fd.GetNumber(),
-            EscapeString(key).c_str());
+        Log(options_.info_log, "Table #%" PRIu64 ": unparsable key %s",
+            t->meta.fd.GetNumber(), EscapeString(key).c_str());
         continue;
       }
 
@@ -305,9 +301,8 @@ class Repairer {
       }
       delete iter;
     }
-    Log(options_.info_log, "Table #%llu: %d entries %s",
-        (unsigned long long)t->meta.fd.GetNumber(), counter,
-        status.ToString().c_str());
+    Log(options_.info_log, "Table #%" PRIu64 ": %d entries %s",
+        t->meta.fd.GetNumber(), counter, status.ToString().c_str());
     return status;
   }
 