Austin Schuh | 36244a1 | 2019-09-21 17:52:38 -0700 | [diff] [blame] | 1 | // Copyright 2017 Google Inc. All Rights Reserved. |
| 2 | // |
| 3 | // Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | // you may not use this file except in compliance with the License. |
| 5 | // You may obtain a copy of the License at |
| 6 | // |
| 7 | // https://www.apache.org/licenses/LICENSE-2.0 |
| 8 | // |
| 9 | // Unless required by applicable law or agreed to in writing, software |
| 10 | // distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | // See the License for the specific language governing permissions and |
| 13 | // limitations under the License. |
| 14 | |
| 15 | #include "absl/random/internal/nanobenchmark.h" |
| 16 | |
| 17 | #include <sys/types.h> |
| 18 | |
| 19 | #include <algorithm> // sort |
| 20 | #include <atomic> |
| 21 | #include <cstddef> |
| 22 | #include <cstdint> |
| 23 | #include <cstdlib> |
| 24 | #include <cstring> // memcpy |
| 25 | #include <limits> |
| 26 | #include <string> |
| 27 | #include <utility> |
| 28 | #include <vector> |
| 29 | |
Austin Schuh | b4691e9 | 2020-12-31 12:37:18 -0800 | [diff] [blame^] | 30 | #include "absl/base/attributes.h" |
Austin Schuh | 36244a1 | 2019-09-21 17:52:38 -0700 | [diff] [blame] | 31 | #include "absl/base/internal/raw_logging.h" |
| 32 | #include "absl/random/internal/platform.h" |
| 33 | #include "absl/random/internal/randen_engine.h" |
| 34 | |
| 35 | // OS |
| 36 | #if defined(_WIN32) || defined(_WIN64) |
| 37 | #define ABSL_OS_WIN |
| 38 | #include <windows.h> // NOLINT |
| 39 | |
| 40 | #elif defined(__ANDROID__) |
| 41 | #define ABSL_OS_ANDROID |
| 42 | |
| 43 | #elif defined(__linux__) |
| 44 | #define ABSL_OS_LINUX |
| 45 | #include <sched.h> // NOLINT |
| 46 | #include <sys/syscall.h> // NOLINT |
| 47 | #endif |
| 48 | |
| 49 | #if defined(ABSL_ARCH_X86_64) && !defined(ABSL_OS_WIN) |
| 50 | #include <cpuid.h> // NOLINT |
| 51 | #endif |
| 52 | |
| 53 | // __ppc_get_timebase_freq |
| 54 | #if defined(ABSL_ARCH_PPC) |
| 55 | #include <sys/platform/ppc.h> // NOLINT |
| 56 | #endif |
| 57 | |
| 58 | // clock_gettime |
| 59 | #if defined(ABSL_ARCH_ARM) || defined(ABSL_ARCH_AARCH64) |
| 60 | #include <time.h> // NOLINT |
| 61 | #endif |
| 62 | |
Austin Schuh | 36244a1 | 2019-09-21 17:52:38 -0700 | [diff] [blame] | 63 | // ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE prevents inlining of the method. |
| 64 | #if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__)) |
| 65 | #define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE __attribute__((noinline)) |
| 66 | #elif defined(_MSC_VER) |
| 67 | #define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE __declspec(noinline) |
| 68 | #else |
| 69 | #define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE |
| 70 | #endif |
| 71 | |
| 72 | namespace absl { |
Austin Schuh | b4691e9 | 2020-12-31 12:37:18 -0800 | [diff] [blame^] | 73 | ABSL_NAMESPACE_BEGIN |
Austin Schuh | 36244a1 | 2019-09-21 17:52:38 -0700 | [diff] [blame] | 74 | namespace random_internal_nanobenchmark { |
| 75 | namespace { |
| 76 | |
| 77 | // For code folding. |
| 78 | namespace platform { |
| 79 | #if defined(ABSL_ARCH_X86_64) |
| 80 | |
| 81 | // TODO(janwas): Merge with the one in randen_hwaes.cc? |
// TODO(janwas): Merge with the one in randen_hwaes.cc?
// Executes CPUID for leaf "level" / subleaf "count" and stores the resulting
// EAX, EBX, ECX, EDX registers into abcd[0..3].
void Cpuid(const uint32_t level, const uint32_t count,
           uint32_t* ABSL_RANDOM_INTERNAL_RESTRICT abcd) {
#if defined(ABSL_OS_WIN)
  // The MSVC intrinsic writes into an int[4]; copy into the caller's array.
  int regs[4];
  __cpuidex(regs, level, count);
  for (int i = 0; i < 4; ++i) {
    abcd[i] = regs[i];
  }
#else
  // GCC/Clang <cpuid.h> helper outputs the four registers separately.
  uint32_t a, b, c, d;
  __cpuid_count(level, count, a, b, c, d);
  abcd[0] = a;
  abcd[1] = b;
  abcd[2] = c;
  abcd[3] = d;
#endif
}
| 99 | |
// Returns the 48-character CPU brand string (e.g. "Intel(R) Core(TM) ..."),
// or an empty string if the extended CPUID leaves are unsupported.
std::string BrandString() {
  char brand_string[49];  // 3 leaves x 16 bytes, plus terminating NUL.
  uint32_t abcd[4];

  // Check if brand string is supported (it is on all reasonable Intel/AMD).
  // Leaf 0x80000000 reports the highest supported extended leaf in EAX.
  Cpuid(0x80000000U, 0, abcd);
  if (abcd[0] < 0x80000004U) {
    return std::string();
  }

  // Leaves 0x80000002..4 each yield 16 bytes of the brand string.
  for (int i = 0; i < 3; ++i) {
    Cpuid(0x80000002U + i, 0, abcd);
    memcpy(brand_string + i * 16, &abcd, sizeof(abcd));
  }
  brand_string[48] = 0;  // Ensure NUL-termination regardless of CPU output.
  return brand_string;
}
| 117 | |
| 118 | // Returns the frequency quoted inside the brand string. This does not |
| 119 | // account for throttling nor Turbo Boost. |
| 120 | double NominalClockRate() { |
| 121 | const std::string& brand_string = BrandString(); |
| 122 | // Brand strings include the maximum configured frequency. These prefixes are |
| 123 | // defined by Intel CPUID documentation. |
| 124 | const char* prefixes[3] = {"MHz", "GHz", "THz"}; |
| 125 | const double multipliers[3] = {1E6, 1E9, 1E12}; |
| 126 | for (size_t i = 0; i < 3; ++i) { |
| 127 | const size_t pos_prefix = brand_string.find(prefixes[i]); |
| 128 | if (pos_prefix != std::string::npos) { |
| 129 | const size_t pos_space = brand_string.rfind(' ', pos_prefix - 1); |
| 130 | if (pos_space != std::string::npos) { |
| 131 | const std::string digits = |
| 132 | brand_string.substr(pos_space + 1, pos_prefix - pos_space - 1); |
| 133 | return std::stod(digits) * multipliers[i]; |
| 134 | } |
| 135 | } |
| 136 | } |
| 137 | |
| 138 | return 0.0; |
| 139 | } |
| 140 | |
| 141 | #endif // ABSL_ARCH_X86_64 |
| 142 | } // namespace platform |
| 143 | |
// Prevents the compiler from eliding the computations that led to "output".
template <class T>
inline void PreventElision(T&& output) {
#ifndef ABSL_OS_WIN
  // Works by indicating to the compiler that "output" is being read and
  // modified. The +r constraint avoids unnecessary writes to memory, but only
  // works for built-in types (typically FuncOutput).
  asm volatile("" : "+r"(output) : : "memory");
#else
  // MSVC does not support inline assembly anymore (and never supported GCC's
  // RTL constraints). Self-assignment with #pragma optimize("off") might be
  // expected to prevent elision, but it does not with MSVC 2015. Type-punning
  // with volatile pointers generates inefficient code on MSVC 2017.
  // A relaxed atomic store forces the value to be materialized.
  static std::atomic<T> dummy(T{});
  dummy.store(output, std::memory_order_relaxed);
#endif
}
| 161 | |
| 162 | namespace timer { |
| 163 | |
| 164 | // Start/Stop return absolute timestamps and must be placed immediately before |
| 165 | // and after the region to measure. We provide separate Start/Stop functions |
| 166 | // because they use different fences. |
| 167 | // |
| 168 | // Background: RDTSC is not 'serializing'; earlier instructions may complete |
| 169 | // after it, and/or later instructions may complete before it. 'Fences' ensure |
| 170 | // regions' elapsed times are independent of such reordering. The only |
| 171 | // documented unprivileged serializing instruction is CPUID, which acts as a |
| 172 | // full fence (no reordering across it in either direction). Unfortunately |
| 173 | // the latency of CPUID varies wildly (perhaps made worse by not initializing |
| 174 | // its EAX input). Because it cannot reliably be deducted from the region's |
| 175 | // elapsed time, it must not be included in the region to measure (i.e. |
| 176 | // between the two RDTSC). |
| 177 | // |
| 178 | // The newer RDTSCP is sometimes described as serializing, but it actually |
| 179 | // only serves as a half-fence with release semantics. Although all |
| 180 | // instructions in the region will complete before the final timestamp is |
| 181 | // captured, subsequent instructions may leak into the region and increase the |
| 182 | // elapsed time. Inserting another fence after the final RDTSCP would prevent |
| 183 | // such reordering without affecting the measured region. |
| 184 | // |
| 185 | // Fortunately, such a fence exists. The LFENCE instruction is only documented |
| 186 | // to delay later loads until earlier loads are visible. However, Intel's |
| 187 | // reference manual says it acts as a full fence (waiting until all earlier |
| 188 | // instructions have completed, and delaying later instructions until it |
| 189 | // completes). AMD assigns the same behavior to MFENCE. |
| 190 | // |
| 191 | // We need a fence before the initial RDTSC to prevent earlier instructions |
| 192 | // from leaking into the region, and arguably another after RDTSC to avoid |
| 193 | // region instructions from completing before the timestamp is recorded. |
| 194 | // When surrounded by fences, the additional RDTSCP half-fence provides no |
| 195 | // benefit, so the initial timestamp can be recorded via RDTSC, which has |
| 196 | // lower overhead than RDTSCP because it does not read TSC_AUX. In summary, |
| 197 | // we define Start = LFENCE/RDTSC/LFENCE; Stop = RDTSCP/LFENCE. |
| 198 | // |
| 199 | // Using Start+Start leads to higher variance and overhead than Stop+Stop. |
| 200 | // However, Stop+Stop includes an LFENCE in the region measurements, which |
| 201 | // adds a delay dependent on earlier loads. The combination of Start+Stop |
| 202 | // is faster than Start+Start and more consistent than Stop+Stop because |
| 203 | // the first LFENCE already delayed subsequent loads before the measured |
| 204 | // region. This combination seems not to have been considered in prior work: |
| 205 | // http://akaros.cs.berkeley.edu/lxr/akaros/kern/arch/x86/rdtsc_test.c |
| 206 | // |
| 207 | // Note: performance counters can measure 'exact' instructions-retired or |
| 208 | // (unhalted) cycle counts. The RDPMC instruction is not serializing and also |
| 209 | // requires fences. Unfortunately, it is not accessible on all OSes and we |
| 210 | // prefer to avoid kernel-mode drivers. Performance counters are also affected |
| 211 | // by several under/over-count errata, so we use the TSC instead. |
| 212 | |
// Returns a 64-bit timestamp in unit of 'ticks'; to convert to seconds,
// divide by InvariantTicksPerSecond. The LFENCE before and after RDTSC
// implements the Start fence strategy described above.
inline uint64_t Start64() {
  uint64_t t;
#if defined(ABSL_ARCH_PPC)
  // Read the PPC time base register (SPR 268).
  asm volatile("mfspr %0, %1" : "=r"(t) : "i"(268));
#elif defined(ABSL_ARCH_X86_64)
#if defined(ABSL_OS_WIN)
  // _ReadWriteBarrier stops compiler reordering; _mm_lfence stops CPU
  // reordering (see the fence discussion above).
  _ReadWriteBarrier();
  _mm_lfence();
  _ReadWriteBarrier();
  t = __rdtsc();
  _ReadWriteBarrier();
  _mm_lfence();
  _ReadWriteBarrier();
#else
  asm volatile(
      "lfence\n\t"
      "rdtsc\n\t"
      "shl $32, %%rdx\n\t"
      "or %%rdx, %0\n\t"
      "lfence"
      : "=a"(t)
      :
      // "memory" avoids reordering. rdx = TSC >> 32.
      // "cc" = flags modified by SHL.
      : "rdx", "memory", "cc");
#endif
#else
  // Fall back to OS - unsure how to reliably query cntvct_el0 frequency.
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  t = ts.tv_sec * 1000000000LL + ts.tv_nsec;
#endif
  return t;
}
| 249 | |
// Returns the end-of-region timestamp (RDTSCP + trailing LFENCE, the Stop
// fence strategy described above). Non-x86 targets reuse Start64.
inline uint64_t Stop64() {
  uint64_t t;
#if defined(ABSL_ARCH_X86_64)
#if defined(ABSL_OS_WIN)
  _ReadWriteBarrier();
  unsigned aux;
  t = __rdtscp(&aux);
  _ReadWriteBarrier();
  _mm_lfence();
  _ReadWriteBarrier();
#else
  // Use inline asm because __rdtscp generates code to store TSC_AUX (ecx).
  asm volatile(
      "rdtscp\n\t"
      "shl $32, %%rdx\n\t"
      "or %%rdx, %0\n\t"
      "lfence"
      : "=a"(t)
      :
      // "memory" avoids reordering. rcx = TSC_AUX. rdx = TSC >> 32.
      // "cc" = flags modified by SHL.
      : "rcx", "rdx", "memory", "cc");
#endif
#else
  t = Start64();
#endif
  return t;
}
| 278 | |
// Returns a 32-bit timestamp with about 4 cycles less overhead than
// Start64. Only suitable for measuring very short regions because the
// timestamp overflows about once a second.
inline uint32_t Start32() {
  uint32_t t;
#if defined(ABSL_ARCH_X86_64)
#if defined(ABSL_OS_WIN)
  _ReadWriteBarrier();
  _mm_lfence();
  _ReadWriteBarrier();
  t = static_cast<uint32_t>(__rdtsc());
  _ReadWriteBarrier();
  _mm_lfence();
  _ReadWriteBarrier();
#else
  // Same fences as Start64, but only EAX (low 32 bits of the TSC) is kept,
  // so no SHL/OR is needed.
  asm volatile(
      "lfence\n\t"
      "rdtsc\n\t"
      "lfence"
      : "=a"(t)
      :
      // "memory" avoids reordering. rdx = TSC >> 32.
      : "rdx", "memory");
#endif
#else
  t = static_cast<uint32_t>(Start64());
#endif
  return t;
}
| 308 | |
// 32-bit counterpart of Stop64; same fence strategy, low 32 TSC bits only.
inline uint32_t Stop32() {
  uint32_t t;
#if defined(ABSL_ARCH_X86_64)
#if defined(ABSL_OS_WIN)
  _ReadWriteBarrier();
  unsigned aux;
  t = static_cast<uint32_t>(__rdtscp(&aux));
  _ReadWriteBarrier();
  _mm_lfence();
  _ReadWriteBarrier();
#else
  // Use inline asm because __rdtscp generates code to store TSC_AUX (ecx).
  asm volatile(
      "rdtscp\n\t"
      "lfence"
      : "=a"(t)
      :
      // "memory" avoids reordering. rcx = TSC_AUX. rdx = TSC >> 32.
      : "rcx", "rdx", "memory");
#endif
#else
  t = static_cast<uint32_t>(Stop64());
#endif
  return t;
}
| 334 | |
| 335 | } // namespace timer |
| 336 | |
| 337 | namespace robust_statistics { |
| 338 | |
| 339 | // Sorts integral values in ascending order (e.g. for Mode). About 3x faster |
| 340 | // than std::sort for input distributions with very few unique values. |
| 341 | template <class T> |
| 342 | void CountingSort(T* values, size_t num_values) { |
| 343 | // Unique values and their frequency (similar to flat_map). |
| 344 | using Unique = std::pair<T, int>; |
| 345 | std::vector<Unique> unique; |
| 346 | for (size_t i = 0; i < num_values; ++i) { |
| 347 | const T value = values[i]; |
| 348 | const auto pos = |
| 349 | std::find_if(unique.begin(), unique.end(), |
| 350 | [value](const Unique u) { return u.first == value; }); |
| 351 | if (pos == unique.end()) { |
| 352 | unique.push_back(std::make_pair(value, 1)); |
| 353 | } else { |
| 354 | ++pos->second; |
| 355 | } |
| 356 | } |
| 357 | |
| 358 | // Sort in ascending order of value (pair.first). |
| 359 | std::sort(unique.begin(), unique.end()); |
| 360 | |
| 361 | // Write that many copies of each unique value to the array. |
| 362 | T* ABSL_RANDOM_INTERNAL_RESTRICT p = values; |
| 363 | for (const auto& value_count : unique) { |
| 364 | std::fill(p, p + value_count.second, value_count.first); |
| 365 | p += value_count.second; |
| 366 | } |
| 367 | ABSL_RAW_CHECK(p == values + num_values, "Did not produce enough output"); |
| 368 | } |
| 369 | |
| 370 | // @return i in [idx_begin, idx_begin + half_count) that minimizes |
| 371 | // sorted[i + half_count] - sorted[i]. |
| 372 | template <typename T> |
| 373 | size_t MinRange(const T* const ABSL_RANDOM_INTERNAL_RESTRICT sorted, |
| 374 | const size_t idx_begin, const size_t half_count) { |
| 375 | T min_range = (std::numeric_limits<T>::max)(); |
| 376 | size_t min_idx = 0; |
| 377 | |
| 378 | for (size_t idx = idx_begin; idx < idx_begin + half_count; ++idx) { |
| 379 | ABSL_RAW_CHECK(sorted[idx] <= sorted[idx + half_count], "Not sorted"); |
| 380 | const T range = sorted[idx + half_count] - sorted[idx]; |
| 381 | if (range < min_range) { |
| 382 | min_range = range; |
| 383 | min_idx = idx; |
| 384 | } |
| 385 | } |
| 386 | |
| 387 | return min_idx; |
| 388 | } |
| 389 | |
| 390 | // Returns an estimate of the mode by calling MinRange on successively |
| 391 | // halved intervals. "sorted" must be in ascending order. This is the |
| 392 | // Half Sample Mode estimator proposed by Bickel in "On a fast, robust |
| 393 | // estimator of the mode", with complexity O(N log N). The mode is less |
| 394 | // affected by outliers in highly-skewed distributions than the median. |
| 395 | // The averaging operation below assumes "T" is an unsigned integer type. |
| 396 | template <typename T> |
| 397 | T ModeOfSorted(const T* const ABSL_RANDOM_INTERNAL_RESTRICT sorted, |
| 398 | const size_t num_values) { |
| 399 | size_t idx_begin = 0; |
| 400 | size_t half_count = num_values / 2; |
| 401 | while (half_count > 1) { |
| 402 | idx_begin = MinRange(sorted, idx_begin, half_count); |
| 403 | half_count >>= 1; |
| 404 | } |
| 405 | |
| 406 | const T x = sorted[idx_begin + 0]; |
| 407 | if (half_count == 0) { |
| 408 | return x; |
| 409 | } |
| 410 | ABSL_RAW_CHECK(half_count == 1, "Should stop at half_count=1"); |
| 411 | const T average = (x + sorted[idx_begin + 1] + 1) / 2; |
| 412 | return average; |
| 413 | } |
| 414 | |
// Returns the mode. Side effect: sorts "values".
template <typename T>
T Mode(T* values, const size_t num_values) {
  CountingSort(values, num_values);
  return ModeOfSorted(values, num_values);
}
| 421 | |
// Convenience overload for fixed-size arrays; N is deduced.
template <typename T, size_t N>
T Mode(T (&values)[N]) {
  return Mode(&values[0], N);
}
| 426 | |
| 427 | // Returns the median value. Side effect: sorts "values". |
| 428 | template <typename T> |
| 429 | T Median(T* values, const size_t num_values) { |
| 430 | ABSL_RAW_CHECK(num_values != 0, "Empty input"); |
| 431 | std::sort(values, values + num_values); |
| 432 | const size_t half = num_values / 2; |
| 433 | // Odd count: return middle |
| 434 | if (num_values % 2) { |
| 435 | return values[half]; |
| 436 | } |
| 437 | // Even count: return average of middle two. |
| 438 | return (values[half] + values[half - 1] + 1) / 2; |
| 439 | } |
| 440 | |
| 441 | // Returns a robust measure of variability. |
| 442 | template <typename T> |
| 443 | T MedianAbsoluteDeviation(const T* values, const size_t num_values, |
| 444 | const T median) { |
| 445 | ABSL_RAW_CHECK(num_values != 0, "Empty input"); |
| 446 | std::vector<T> abs_deviations; |
| 447 | abs_deviations.reserve(num_values); |
| 448 | for (size_t i = 0; i < num_values; ++i) { |
| 449 | const int64_t abs = std::abs(int64_t(values[i]) - int64_t(median)); |
| 450 | abs_deviations.push_back(static_cast<T>(abs)); |
| 451 | } |
| 452 | return Median(abs_deviations.data(), num_values); |
| 453 | } |
| 454 | |
| 455 | } // namespace robust_statistics |
| 456 | |
// Ticks := platform-specific timer values (CPU cycles on x86). Must be
// unsigned to guarantee wraparound on overflow. 32 bit timers are faster to
// read than 64 bit.
using Ticks = uint32_t;

// Returns timer overhead / minimum measurable difference, estimated as the
// mode of back-to-back Start32/Stop32 deltas.
Ticks TimerResolution() {
  // Nested loop avoids exceeding stack/L1 capacity.
  Ticks repetitions[Params::kTimerSamples];
  for (size_t rep = 0; rep < Params::kTimerSamples; ++rep) {
    Ticks samples[Params::kTimerSamples];
    for (size_t i = 0; i < Params::kTimerSamples; ++i) {
      const Ticks t0 = timer::Start32();
      const Ticks t1 = timer::Stop32();
      samples[i] = t1 - t0;
    }
    repetitions[rep] = robust_statistics::Mode(samples);
  }
  // Mode of modes: robust against occasional interrupts/migrations.
  return robust_statistics::Mode(repetitions);
}

// Cached at static-initialization time; TimerResolution is expensive.
static const Ticks timer_resolution = TimerResolution();
| 479 | |
// Estimates the expected value of "lambda" values with a variable number of
// samples until the variability "rel_mad" is less than "max_rel_mad".
// Returns the estimate; *rel_mad receives the final relative median absolute
// deviation (0.0 if no evaluation round completed).
template <class Lambda>
Ticks SampleUntilStable(const double max_rel_mad, double* rel_mad,
                        const Params& p, const Lambda& lambda) {
  // One timed invocation of lambda, in ticks (32-bit: region must be short).
  auto measure_duration = [&lambda]() -> Ticks {
    const Ticks t0 = timer::Start32();
    lambda();
    const Ticks t1 = timer::Stop32();
    return t1 - t0;
  };

  // Choose initial samples_per_eval based on a single estimated duration.
  // NOTE(review): assumes the first measurement is nonzero; est == 0 would
  // divide by zero below — presumably ruled out by timer overhead.
  Ticks est = measure_duration();
  static const double ticks_per_second = InvariantTicksPerSecond();
  const size_t ticks_per_eval = ticks_per_second * p.seconds_per_eval;
  size_t samples_per_eval = ticks_per_eval / est;
  samples_per_eval = (std::max)(samples_per_eval, p.min_samples_per_eval);

  std::vector<Ticks> samples;
  samples.reserve(1 + samples_per_eval);
  samples.push_back(est);

  // Percentage is too strict for tiny differences, so also allow a small
  // absolute "median absolute deviation" (about 1% of timer resolution).
  const Ticks max_abs_mad = (timer_resolution + 99) / 100;
  *rel_mad = 0.0;  // ensure initialized

  // Each round doubles samples_per_eval until the estimate stabilizes or
  // p.max_evals rounds have been spent.
  for (size_t eval = 0; eval < p.max_evals; ++eval, samples_per_eval *= 2) {
    samples.reserve(samples.size() + samples_per_eval);
    for (size_t i = 0; i < samples_per_eval; ++i) {
      const Ticks r = measure_duration();
      samples.push_back(r);
    }

    if (samples.size() >= p.min_mode_samples) {
      est = robust_statistics::Mode(samples.data(), samples.size());
    } else {
      // For "few" (depends also on the variance) samples, Median is safer.
      est = robust_statistics::Median(samples.data(), samples.size());
    }
    ABSL_RAW_CHECK(est != 0, "Estimator returned zero duration");

    // Median absolute deviation (mad) is a robust measure of 'variability'.
    const Ticks abs_mad = robust_statistics::MedianAbsoluteDeviation(
        samples.data(), samples.size(), est);
    *rel_mad = static_cast<double>(static_cast<int>(abs_mad)) / est;

    // Stable enough (relative OR absolute tolerance met): done.
    if (*rel_mad <= max_rel_mad || abs_mad <= max_abs_mad) {
      if (p.verbose) {
        ABSL_RAW_LOG(INFO,
                     "%6zu samples => %5u (abs_mad=%4u, rel_mad=%4.2f%%)\n",
                     samples.size(), est, abs_mad, *rel_mad * 100.0);
      }
      return est;
    }
  }

  // Did not converge; return the last (best-effort) estimate.
  if (p.verbose) {
    ABSL_RAW_LOG(WARNING,
                 "rel_mad=%4.2f%% still exceeds %4.2f%% after %6zu samples.\n",
                 *rel_mad * 100.0, max_rel_mad * 100.0, samples.size());
  }
  return est;
}
| 545 | |
| 546 | using InputVec = std::vector<FuncInput>; |
| 547 | |
| 548 | // Returns vector of unique input values. |
| 549 | InputVec UniqueInputs(const FuncInput* inputs, const size_t num_inputs) { |
| 550 | InputVec unique(inputs, inputs + num_inputs); |
| 551 | std::sort(unique.begin(), unique.end()); |
| 552 | unique.erase(std::unique(unique.begin(), unique.end()), unique.end()); |
| 553 | return unique; |
| 554 | } |
| 555 | |
// Returns how often we need to call func for sufficient precision, or zero
// on failure (e.g. the elapsed time is too long for a 32-bit tick count).
size_t NumSkip(const Func func, const void* arg, const InputVec& unique,
               const Params& p) {
  // Min elapsed ticks for any input.
  Ticks min_duration = ~0u;

  for (const FuncInput input : unique) {
    // Make sure a 32-bit timer is sufficient.
    const uint64_t t0 = timer::Start64();
    PreventElision(func(arg, input));
    const uint64_t t1 = timer::Stop64();
    const uint64_t elapsed = t1 - t0;
    if (elapsed >= (1ULL << 30)) {
      ABSL_RAW_LOG(WARNING,
                   "Measurement failed: need 64-bit timer for input=%zu\n",
                   static_cast<size_t>(input));
      return 0;
    }

    double rel_mad;
    const Ticks total = SampleUntilStable(
        p.target_rel_mad, &rel_mad, p,
        [func, arg, input]() { PreventElision(func(arg, input)); });
    // NOTE(review): total < timer_resolution would wrap this unsigned
    // subtraction — presumably impossible since the timed region includes at
    // least the timer overhead; confirm.
    min_duration = (std::min)(min_duration, total - timer_resolution);
  }

  // Number of repetitions required to reach the target resolution.
  const size_t max_skip = p.precision_divisor;
  // Number of repetitions given the estimated duration (rounded up).
  const size_t num_skip =
      min_duration == 0 ? 0 : (max_skip + min_duration - 1) / min_duration;
  if (p.verbose) {
    ABSL_RAW_LOG(INFO, "res=%u max_skip=%zu min_dur=%u num_skip=%zu\n",
                 timer_resolution, max_skip, min_duration, num_skip);
  }
  return num_skip;
}
| 594 | |
// Replicates inputs until we can omit "num_skip" occurrences of an input.
// The result contains p.subset_ratio * num_skip copies of each input, in
// shuffled order (so the CPU cannot learn a fixed input pattern).
InputVec ReplicateInputs(const FuncInput* inputs, const size_t num_inputs,
                         const size_t num_unique, const size_t num_skip,
                         const Params& p) {
  InputVec full;
  if (num_unique == 1) {
    // Single unique input: order is irrelevant, no shuffle needed.
    full.assign(p.subset_ratio * num_skip, inputs[0]);
    return full;
  }

  full.reserve(p.subset_ratio * num_skip * num_inputs);
  for (size_t i = 0; i < p.subset_ratio * num_skip; ++i) {
    full.insert(full.end(), inputs, inputs + num_inputs);
  }
  // Default-seeded engine: deterministic shuffle, reproducible measurements.
  absl::random_internal::randen_engine<uint32_t> rng;
  std::shuffle(full.begin(), full.end(), rng);
  return full;
}
| 613 | |
// Copies the "full" to "subset" in the same order, but with "num_skip"
// randomly selected occurrences of "input_to_skip" removed.
void FillSubset(const InputVec& full, const FuncInput input_to_skip,
                const size_t num_skip, InputVec* subset) {
  const size_t count = std::count(full.begin(), full.end(), input_to_skip);
  // Generate num_skip random indices: which occurrence to skip.
  std::vector<uint32_t> omit;
  // Replacement for std::iota, not yet available in MSVC builds.
  omit.reserve(count);
  for (size_t i = 0; i < count; ++i) {
    omit.push_back(i);
  }
  // omit[] is the same on every call, but that's OK because they identify the
  // Nth instance of input_to_skip, so the position within full[] differs.
  absl::random_internal::randen_engine<uint32_t> rng;
  std::shuffle(omit.begin(), omit.end(), rng);
  omit.resize(num_skip);
  std::sort(omit.begin(), omit.end());  // sorted so one forward pass suffices

  uint32_t occurrence = ~0u;  // 0 after preincrement
  size_t idx_omit = 0;        // cursor within omit[]
  size_t idx_subset = 0;      // cursor within *subset
  for (const FuncInput next : full) {
    if (next == input_to_skip) {
      ++occurrence;
      // Haven't removed enough already
      if (idx_omit < num_skip) {
        // This one is up for removal
        if (occurrence == omit[idx_omit]) {
          ++idx_omit;
          continue;  // skip: do not copy into subset
        }
      }
    }
    if (idx_subset < subset->size()) {
      (*subset)[idx_subset++] = next;
    }
  }
  // Postconditions: subset filled exactly, all omissions used, every
  // occurrence of input_to_skip visited.
  ABSL_RAW_CHECK(idx_subset == subset->size(), "idx_subset not at end");
  ABSL_RAW_CHECK(idx_omit == omit.size(), "idx_omit not at end");
  ABSL_RAW_CHECK(occurrence == count - 1, "occurrence not at end");
}
| 656 | |
// Returns total ticks elapsed for all inputs. Updates *max_rel_mad with the
// worst-case variability seen so far.
Ticks TotalDuration(const Func func, const void* arg, const InputVec* inputs,
                    const Params& p, double* max_rel_mad) {
  double rel_mad;
  const Ticks duration =
      SampleUntilStable(p.target_rel_mad, &rel_mad, p, [func, arg, inputs]() {
        for (const FuncInput input : *inputs) {
          PreventElision(func(arg, input));
        }
      });
  *max_rel_mad = (std::max)(*max_rel_mad, rel_mad);
  return duration;
}
| 670 | |
// (Nearly) empty Func for measuring timer overhead/resolution.
// Kept out-of-line so the call itself is part of the measured overhead.
ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE FuncOutput
EmptyFunc(const void* arg, const FuncInput input) {
  return input;
}
| 676 | |
// Returns overhead of accessing inputs[] and calling a function; this will
// be deducted from future TotalDuration return values.
Ticks Overhead(const void* arg, const InputVec* inputs, const Params& p) {
  double rel_mad;
  // Zero tolerance because repeatability is crucial and EmptyFunc is fast.
  return SampleUntilStable(0.0, &rel_mad, p, [arg, inputs]() {
    for (const FuncInput input : *inputs) {
      PreventElision(EmptyFunc(arg, input));
    }
  });
}
| 688 | |
| 689 | } // namespace |
| 690 | |
// Pins the calling thread to the given CPU (or, if cpu < 0, to whichever CPU
// it is currently running on). No-op on platforms without affinity support.
void PinThreadToCPU(int cpu) {
  // We might migrate to another CPU before pinning below, but at least cpu
  // will be one of the CPUs on which this thread ran.
#if defined(ABSL_OS_WIN)
  if (cpu < 0) {
    cpu = static_cast<int>(GetCurrentProcessorNumber());
    ABSL_RAW_CHECK(cpu >= 0, "PinThreadToCPU detect failed");
    if (cpu >= 64) {
      // NOTE: On wine, at least, GetCurrentProcessorNumber() sometimes returns
      // a value > 64, which is out of range. When this happens, log a message
      // and don't set a cpu affinity.
      ABSL_RAW_LOG(ERROR, "Invalid CPU number: %d", cpu);
      return;
    }
  } else if (cpu >= 64) {
    // User specified an explicit CPU affinity > the valid range.
    // (SetThreadAffinityMask is a 64-bit mask, so >= 64 cannot be expressed.)
    ABSL_RAW_LOG(FATAL, "Invalid CPU number: %d", cpu);
  }
  const DWORD_PTR prev = SetThreadAffinityMask(GetCurrentThread(), 1ULL << cpu);
  ABSL_RAW_CHECK(prev != 0, "SetAffinity failed");
#elif defined(ABSL_OS_LINUX) && !defined(ABSL_OS_ANDROID)
  if (cpu < 0) {
    cpu = sched_getcpu();
    ABSL_RAW_CHECK(cpu >= 0, "PinThreadToCPU detect failed");
  }
  const pid_t pid = 0;  // current thread
  cpu_set_t set;
  CPU_ZERO(&set);
  CPU_SET(cpu, &set);
  const int err = sched_setaffinity(pid, sizeof(set), &set);
  ABSL_RAW_CHECK(err == 0, "SetAffinity failed");
#endif
}
| 724 | |
// Returns tick rate. Invariant means the tick counter frequency is independent
// of CPU throttling or sleep. May be expensive, caller should cache the result.
double InvariantTicksPerSecond() {
#if defined(ABSL_ARCH_PPC)
  return __ppc_get_timebase_freq();
#elif defined(ABSL_ARCH_X86_64)
  // We assume the TSC is invariant; it is on all recent Intel/AMD CPUs.
  return platform::NominalClockRate();
#else
  // Fall back to clock_gettime nanoseconds (matches Start64's fallback unit).
  return 1E9;
#endif
}
| 738 | |
// One measurement attempt: estimates per-input durations by subtracting the
// runtime of "full" minus a subset lacking num_skip occurrences of each
// input. Writes unique.size() entries to results[]; returns that count, or 0
// on failure (caller may retry). Precondition: num_skip != 0 (caller checks).
size_t MeasureImpl(const Func func, const void* arg, const size_t num_skip,
                   const InputVec& unique, const InputVec& full,
                   const Params& p, Result* results) {
  // Scale factor converting "num_skip omitted calls" into a per-call tick
  // count. num_skip != 0 per the precondition above.
  const float mul = 1.0f / static_cast<int>(num_skip);

  InputVec subset(full.size() - num_skip);
  const Ticks overhead = Overhead(arg, &full, p);
  const Ticks overhead_skip = Overhead(arg, &subset, p);
  if (overhead < overhead_skip) {
    // The smaller subset must not cost more than the full set; noise.
    ABSL_RAW_LOG(WARNING, "Measurement failed: overhead %u < %u\n", overhead,
                 overhead_skip);
    return 0;
  }

  if (p.verbose) {
    ABSL_RAW_LOG(INFO, "#inputs=%5zu,%5zu overhead=%5u,%5u\n", full.size(),
                 subset.size(), overhead, overhead_skip);
  }

  double max_rel_mad = 0.0;
  const Ticks total = TotalDuration(func, arg, &full, p, &max_rel_mad);

  for (size_t i = 0; i < unique.size(); ++i) {
    // subset := full with num_skip occurrences of unique[i] removed.
    FillSubset(full, unique[i], num_skip, &subset);
    const Ticks total_skip = TotalDuration(func, arg, &subset, p, &max_rel_mad);

    if (total < total_skip) {
      ABSL_RAW_LOG(WARNING, "Measurement failed: total %u < %u\n", total,
                   total_skip);
      return 0;
    }

    // Duration attributable to the num_skip omitted calls, overhead-adjusted.
    const Ticks duration = (total - overhead) - (total_skip - overhead_skip);
    results[i].input = unique[i];
    results[i].ticks = duration * mul;
    results[i].variability = max_rel_mad;
  }

  return unique.size();
}
| 779 | |
// Public entry point: measures per-input durations of func over inputs[],
// writing one Result per unique input. Returns the number of results, or 0
// if measurement failed after all retries.
size_t Measure(const Func func, const void* arg, const FuncInput* inputs,
               const size_t num_inputs, Result* results, const Params& p) {
  ABSL_RAW_CHECK(num_inputs != 0, "No inputs");

  const InputVec unique = UniqueInputs(inputs, num_inputs);
  const size_t num_skip = NumSkip(func, arg, unique, p);  // 0 on failure
  if (num_skip == 0) return 0;  // NumSkip already printed error message

  const InputVec full =
      ReplicateInputs(inputs, num_inputs, unique.size(), num_skip, p);

  // MeasureImpl may fail up to p.max_measure_retries times.
  for (size_t i = 0; i < p.max_measure_retries; i++) {
    auto result = MeasureImpl(func, arg, num_skip, unique, full, p, results);
    if (result != 0) {
      return result;
    }
  }
  // All retries failed. (Unusual)
  return 0;
}
| 801 | |
| 802 | } // namespace random_internal_nanobenchmark |
Austin Schuh | b4691e9 | 2020-12-31 12:37:18 -0800 | [diff] [blame^] | 803 | ABSL_NAMESPACE_END |
Austin Schuh | 36244a1 | 2019-09-21 17:52:38 -0700 | [diff] [blame] | 804 | } // namespace absl |