Fix cycleclock::Now for RISC-V and PPC (#955)
Fixes the following issues with the implementation of `cycleclock::Now`:

- The RISC-V implementation wouldn't compile due to a typo.
- Both the PPC and RISC-V implementations' asm statements lacked the volatile keyword. As a result, the repeated read of the counter's high part could be optimized away, so overflow wasn't handled at all; multiple counter reads could also be misoptimized, especially in LTO scenarios.
- Both relied on the zero/sign-extension of inline asm operands, which isn't guaranteed to occur and differs between compilers, namely GCC and Clang.

The PowerPC64 implementation was improved to do a single 64-bit read of the time-base counter. The RISC-V implementation was improved to do the overflow handling in assembly, since Clang would generate a branch, defeating the purpose of the non-branching counter-reading approach.
This commit is contained in:
parent 0ab2c2906b
commit a77d5f70ef
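Both broken implementations used the classic read-high, read-low, read-high-again pattern to detect a carry from the low half into the high half between the reads. As background (not part of the commit), here is a minimal C++ sketch of that pattern, with hypothetical read_hi()/read_lo() helpers backed by a simulated counter standing in for mftbu/mftbl (PPC) or rdcycleh/rdcycle (riscv32):

#include <atomic>
#include <cstdint>

// Hypothetical stand-ins for the hardware reads (mftbu/mftbl on PPC,
// rdcycleh/rdcycle on riscv32), simulated here with a shared 64-bit counter
// initialized near a low-half wrap.
std::atomic<uint64_t> fake_counter{0xFFFFFFF0ull};
uint32_t read_hi() { return static_cast<uint32_t>(fake_counter.load() >> 32); }
uint32_t read_lo() { return static_cast<uint32_t>(fake_counter.load()); }

uint64_t read_counter64() {
  uint32_t hi0 = read_hi();  // first read of the high half
  uint32_t lo = read_lo();   // low half
  uint32_t hi1 = read_hi();  // second read of the high half
  // -(hi0 == hi1) is all-ones when the high half was stable and zero when
  // the low half wrapped between the two high reads; masking the low half
  // keeps the result consistent with hi1 without branching.
  lo &= -static_cast<uint32_t>(hi0 == hi1);
  return (static_cast<uint64_t>(hi1) << 32) | lo;
}

The masking line is the branch-free overflow check: when the two high reads disagree, the low half is zeroed rather than retried. Without asm volatile on the real hardware reads, the compiler is free to assume two identical asm statements produce the same value and fold them into one, which is exactly the bug described above.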
@@ -84,13 +84,21 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
   return (high << 32) | low;
 #elif defined(__powerpc__) || defined(__ppc__)
   // This returns a time-base, which is not always precisely a cycle-count.
-  int64_t tbl, tbu0, tbu1;
-  asm("mftbu %0" : "=r"(tbu0));
-  asm("mftb %0" : "=r"(tbl));
-  asm("mftbu %0" : "=r"(tbu1));
-  tbl &= -static_cast<int64_t>(tbu0 == tbu1);
-  // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is garbage)
-  return (tbu1 << 32) | tbl;
+#if defined(__powerpc64__) || defined(__ppc64__)
+  int64_t tb;
+  asm volatile("mfspr %0, 268" : "=r"(tb));
+  return tb;
+#else
+  uint32_t tbl, tbu0, tbu1;
+  asm volatile(
+      "mftbu %0\n"
+      "mftbl %1\n"
+      "mftbu %2"
+      : "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
+  tbl &= -static_cast<int32_t>(tbu0 == tbu1);
+  // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed)
+  return (static_cast<uint64_t>(tbu1) << 32) | tbl;
+#endif
 #elif defined(__sparc__)
   int64_t tick;
   asm(".byte 0x83, 0x41, 0x00, 0x00");
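On PowerPC64 the fix collapses the read to one instruction: SPR 268 is the time-base register, so a single mfspr yields all 64 bits with no overflow window at all. As an aside (an assumption about compiler support, not something the commit uses), GCC and recent Clang expose a builtin that lowers to the same read:

#include <cstdint>

// Sketch only: assumes __builtin_ppc_get_timebase(), available in GCC 4.8+
// and recent Clang when targeting PowerPC. On 64-bit targets it lowers to a
// single mfspr/mftb, so there is no high/low tearing to handle; on 32-bit
// targets the compiler emits the mftbu/mftbl carry check itself.
inline int64_t ppc_timebase() {
  return static_cast<int64_t>(__builtin_ppc_get_timebase());
}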
@@ -167,16 +175,22 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
 #elif defined(__riscv) // RISC-V
   // Use RDCYCLE (and RDCYCLEH on riscv32)
 #if __riscv_xlen == 32
-  uint64_t cycles_low, cycles_hi0, cycles_hi1;
-  asm("rdcycleh %0" : "=r"(cycles_hi0));
-  asm("rdcycle %0" : "=r"(cycles_lo));
-  asm("rdcycleh %0" : "=r"(cycles_hi1));
-  // This matches the PowerPC overflow detection, above
-  cycles_lo &= -static_cast<int64_t>(cycles_hi0 == cycles_hi1);
-  return (cycles_hi1 << 32) | cycles_lo;
+  uint32_t cycles_lo, cycles_hi0, cycles_hi1;
+  // This asm also includes the PowerPC overflow handling strategy, as above.
+  // Implemented in assembly because Clang insisted on branching.
+  asm volatile(
+      "rdcycleh %0\n"
+      "rdcycle %1\n"
+      "rdcycleh %2\n"
+      "sub %0, %0, %2\n"
+      "seqz %0, %0\n"
+      "sub %0, zero, %0\n"
+      "and %1, %1, %0\n"
+      : "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1));
+  return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
 #else
   uint64_t cycles;
-  asm("rdcycle %0" : "=r"(cycles));
+  asm volatile("rdcycle %0" : "=r"(cycles));
   return cycles;
 #endif
 #else
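The sub/seqz/sub/and tail of the riscv32 asm block is the same masking trick, kept inside the asm because, per the commit message, Clang compiled the C-level mask into a branch. A commented C++ sketch (a hypothetical helper, not part of the commit) of what those four instructions compute:

#include <cstdint>

// What the riscv32 asm tail computes, step by step; the real code keeps
// this in assembly so Clang cannot turn the mask back into a branch.
uint64_t combine_riscv32(uint32_t hi0, uint32_t lo, uint32_t hi1) {
  uint32_t t = hi0 - hi1;  // sub  %0, %0, %2
  t = (t == 0) ? 1u : 0u;  // seqz %0, %0
  t = 0u - t;              // sub  %0, zero, %0  (all-ones iff hi0 == hi1)
  lo &= t;                 // and  %1, %1, %0    (clear lo on observed wrap)
  return (static_cast<uint64_t>(hi1) << 32) | lo;
}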