Remove "using namespace std;" from zippy-stubs-internal.h.

This makes it easier to build zippy, as some compilers require a warning
suppression to accept "using namespace std".
costan 2017-03-13 12:46:43 -07:00 committed by Victor Costan
parent 7d7a8ec805
commit 8b60aac4fd
5 changed files with 24 additions and 27 deletions
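For context, the pattern the diff below applies can be sketched as follows. This is a minimal, hypothetical example (ClampToBlock and kBlockSize are made-up names, not code from the zippy/snappy sources): instead of pulling the whole std namespace into an internal header, each standard-library name is qualified at its point of use.

#include <algorithm>  // std::min
#include <cstddef>    // std::size_t

// Illustrative constant; not a value taken from the diff.
constexpr std::size_t kBlockSize = 1 << 16;

// Before this commit, an internal header with "using namespace std;" could
// call min() unqualified:
//
//   using namespace std;   // some compilers warn on this directive
//   inline size_t ClampToBlock(size_t n) { return min(n, kBlockSize); }
//
// After: no using-directive; every standard-library name is qualified.
inline std::size_t ClampToBlock(std::size_t n) {
  return std::min(n, kBlockSize);  // explicit std:: qualification
}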

View File

@@ -59,10 +59,6 @@
#define MAP_ANONYMOUS MAP_ANON
#endif
-// Pull in std::min, std::ostream, and the likes. This is safe because this
-// header file is never used from any public header files.
-using namespace std;
-
// The size of an array, if known at compile-time.
// Will give unexpected results if used on a pointer.
// We undefine it first, since some compilers already have a definition.

View File

@@ -201,7 +201,7 @@ void Benchmark::Run() {
if (benchmark_real_time_us > 0) {
num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
}
-num_iterations = max(num_iterations, kCalibrateIterations);
+num_iterations = std::max(num_iterations, kCalibrateIterations);
BenchmarkRun benchmark_runs[kNumRuns];
for (int run = 0; run < kNumRuns; ++run) {
@@ -217,10 +217,10 @@ void Benchmark::Run() {
string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
string human_readable_speed;
-nth_element(benchmark_runs,
-            benchmark_runs + kMedianPos,
-            benchmark_runs + kNumRuns,
-            BenchmarkCompareCPUTime());
+std::nth_element(benchmark_runs,
+                 benchmark_runs + kMedianPos,
+                 benchmark_runs + kNumRuns,
+                 BenchmarkCompareCPUTime());
int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
if (cpu_time_us <= 0) {
@@ -523,8 +523,8 @@ int ZLib::UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
LOG(WARNING)
<< "UncompressChunkOrAll: Received some extra data, bytes total: "
<< uncomp_stream_.avail_in << " bytes: "
-<< string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
-          min(int(uncomp_stream_.avail_in), 20));
+<< std::string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
+               std::min(int(uncomp_stream_.avail_in), 20));
UncompressErrorInit();
return Z_DATA_ERROR; // what's the extra data for?
} else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {

View File

@@ -138,7 +138,8 @@ namespace file {
void CheckSuccess() { }
};
-DummyStatus GetContents(const string& filename, string* data, int unused) {
+DummyStatus GetContents(
+    const std::string& filename, std::string* data, int unused) {
FILE* fp = fopen(filename.c_str(), "rb");
if (fp == NULL) {
perror(filename.c_str());
@@ -153,7 +154,7 @@ namespace file {
perror("fread");
exit(1);
}
-data->append(string(buf, ret));
+data->append(std::string(buf, ret));
}
fclose(fp);
@@ -161,8 +162,8 @@ namespace file {
return DummyStatus();
}
-DummyStatus SetContents(const string& filename,
-                        const string& str,
+DummyStatus SetContents(const std::string& filename,
+                        const std::string& str,
int unused) {
FILE* fp = fopen(filename.c_str(), "wb");
if (fp == NULL) {
@@ -529,15 +530,15 @@ class LogMessage {
public:
LogMessage() { }
~LogMessage() {
-cerr << endl;
+std::cerr << std::endl;
}
LogMessage& operator<<(const std::string& msg) {
-cerr << msg;
+std::cerr << msg;
return *this;
}
LogMessage& operator<<(int x) {
-cerr << x;
+std::cerr << x;
return *this;
}
};
@@ -560,7 +561,7 @@ class LogMessageCrash : public LogMessage {
public:
LogMessageCrash() { }
~LogMessageCrash() {
-cerr << endl;
+std::cerr << std::endl;
abort();
}
};

View File

@@ -765,7 +765,7 @@ bool SnappyDecompressor::RefillTag() {
size_t length;
const char* src = reader_->Peek(&length);
if (length == 0) return false;
-uint32 to_add = min<uint32>(needed - nbuf, length);
+uint32 to_add = std::min<uint32>(needed - nbuf, length);
memcpy(scratch_ + nbuf, src, to_add);
nbuf += to_add;
reader_->Skip(to_add);
@@ -837,7 +837,7 @@ size_t Compress(Source* reader, Sink* writer) {
size_t fragment_size;
const char* fragment = reader->Peek(&fragment_size);
assert(fragment_size != 0); // premature end of input
-const size_t num_to_read = min(N, kBlockSize);
+const size_t num_to_read = std::min(N, kBlockSize);
size_t bytes_read = fragment_size;
size_t pending_advance = 0;
@@ -858,7 +858,7 @@ size_t Compress(Source* reader, Sink* writer) {
while (bytes_read < num_to_read) {
fragment = reader->Peek(&fragment_size);
-size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
+size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read);
memcpy(scratch + bytes_read, fragment, n);
bytes_read += n;
reader->Skip(n);
@@ -1360,7 +1360,7 @@ bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
}
// Make new block
-size_t bsize = min<size_t>(kBlockSize, expected_ - full_size_);
+size_t bsize = std::min<size_t>(kBlockSize, expected_ - full_size_);
op_base_ = allocator_.Allocate(bsize);
op_ptr_ = op_base_;
op_limit_ = op_base_ + bsize;
@@ -1417,7 +1417,7 @@ class SnappySinkAllocator {
size_t size_written = 0;
size_t block_size;
for (int i = 0; i < blocks_.size(); ++i) {
-block_size = min<size_t>(blocks_[i].size, size - size_written);
+block_size = std::min<size_t>(blocks_[i].size, size - size_written);
dest_->AppendAndTakeOwnership(blocks_[i].data, block_size,
&SnappySinkAllocator::Deleter, NULL);
size_written += block_size;

View File

@@ -399,7 +399,7 @@ static void Measure(const char* data,
std::vector<string> output(num_blocks);
for (int b = 0; b < num_blocks; b++) {
int input_start = b * block_size;
-int input_limit = min<int>((b+1)*block_size, length);
+int input_limit = std::min<int>((b+1)*block_size, length);
input[b] = data+input_start;
input_length[b] = input_limit-input_start;
@@ -470,7 +470,7 @@ static void Measure(const char* data,
x.c_str(),
block_size/(1<<20),
static_cast<int>(length), static_cast<uint32>(compressed_size),
-(compressed_size * 100.0) / max<int>(1, length),
+(compressed_size * 100.0) / std::max<int>(1, length),
comp_rate,
urate.c_str());
}
@@ -1307,7 +1307,7 @@ static void MeasureFile(const char* fname) {
int start_len = (FLAGS_start_len < 0) ? fullinput.size() : FLAGS_start_len;
int end_len = fullinput.size();
if (FLAGS_end_len >= 0) {
-end_len = min<int>(fullinput.size(), FLAGS_end_len);
+end_len = std::min<int>(fullinput.size(), FLAGS_end_len);
}
for (int len = start_len; len <= end_len; len++) {
const char* const input = fullinput.data();