Austin Schuh | 745610d | 2015-09-06 18:19:50 -0700 | [diff] [blame] | 1 | // -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- |
| 2 | /* Copyright (c) 2006, Google Inc. |
| 3 | * All rights reserved. |
Brian Silverman | 20350ac | 2021-11-17 18:19:55 -0800 | [diff] [blame] | 4 | * |
Austin Schuh | 745610d | 2015-09-06 18:19:50 -0700 | [diff] [blame] | 5 | * Redistribution and use in source and binary forms, with or without |
| 6 | * modification, are permitted provided that the following conditions are |
| 7 | * met: |
Brian Silverman | 20350ac | 2021-11-17 18:19:55 -0800 | [diff] [blame] | 8 | * |
Austin Schuh | 745610d | 2015-09-06 18:19:50 -0700 | [diff] [blame] | 9 | * * Redistributions of source code must retain the above copyright |
| 10 | * notice, this list of conditions and the following disclaimer. |
| 11 | * * Redistributions in binary form must reproduce the above |
| 12 | * copyright notice, this list of conditions and the following disclaimer |
| 13 | * in the documentation and/or other materials provided with the |
| 14 | * distribution. |
| 15 | * * Neither the name of Google Inc. nor the names of its |
| 16 | * contributors may be used to endorse or promote products derived from |
| 17 | * this software without specific prior written permission. |
Brian Silverman | 20350ac | 2021-11-17 18:19:55 -0800 | [diff] [blame] | 18 | * |
Austin Schuh | 745610d | 2015-09-06 18:19:50 -0700 | [diff] [blame] | 19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 22 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 23 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 24 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 25 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 26 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 27 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 28 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 29 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 30 | * |
| 31 | * --- |
| 32 | * Author: Sanjay Ghemawat |
| 33 | */ |
| 34 | |
| 35 | #include <config.h> |
| 36 | #include "base/spinlock.h" |
Austin Schuh | 745610d | 2015-09-06 18:19:50 -0700 | [diff] [blame] | 37 | #include "base/spinlock_internal.h" |
Brian Silverman | 20350ac | 2021-11-17 18:19:55 -0800 | [diff] [blame] | 38 | #include "base/sysinfo.h" /* for GetSystemCPUsCount() */ |
Austin Schuh | 745610d | 2015-09-06 18:19:50 -0700 | [diff] [blame] | 39 | |
// NOTE on the Lock-state values:
//
//   kSpinLockFree represents the unlocked state
//   kSpinLockHeld represents the locked state with no waiters
//   kSpinLockSleeper represents the locked state with waiters

// Number of iterations SpinLoop() busy-waits before giving up and calling
// into the OS-specific delay path. Starts at 0 (no spinning); bumped to a
// nonzero value by SpinLock_InitHelper below once global constructors run
// and we know the machine has more than one CPU.
static int adaptive_spin_count = 0;

// Token used to select the "linker initialized" SpinLock constructor;
// its value is irrelevant, only its type matters.
const base::LinkerInitialized SpinLock::LINKER_INITIALIZED =
    base::LINKER_INITIALIZED;
| 51 | namespace { |
| 52 | struct SpinLock_InitHelper { |
| 53 | SpinLock_InitHelper() { |
| 54 | // On multi-cpu machines, spin for longer before yielding |
| 55 | // the processor or sleeping. Reduces idle time significantly. |
Brian Silverman | 20350ac | 2021-11-17 18:19:55 -0800 | [diff] [blame] | 56 | if (GetSystemCPUsCount() > 1) { |
Austin Schuh | 745610d | 2015-09-06 18:19:50 -0700 | [diff] [blame] | 57 | adaptive_spin_count = 1000; |
| 58 | } |
| 59 | } |
| 60 | }; |
| 61 | |
| 62 | // Hook into global constructor execution: |
| 63 | // We do not do adaptive spinning before that, |
| 64 | // but nothing lock-intensive should be going on at that time. |
| 65 | static SpinLock_InitHelper init_helper; |
| 66 | |
Brian Silverman | 20350ac | 2021-11-17 18:19:55 -0800 | [diff] [blame] | 67 | inline void SpinlockPause(void) { |
| 68 | #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) |
| 69 | __asm__ __volatile__("rep; nop" : : ); |
| 70 | #endif |
| 71 | } |
| 72 | |
Austin Schuh | 745610d | 2015-09-06 18:19:50 -0700 | [diff] [blame] | 73 | } // unnamed namespace |
| 74 | |
Brian Silverman | 20350ac | 2021-11-17 18:19:55 -0800 | [diff] [blame] | 75 | // Monitor the lock to see if its value changes within some time |
| 76 | // period (adaptive_spin_count loop iterations). The last value read |
Austin Schuh | 745610d | 2015-09-06 18:19:50 -0700 | [diff] [blame] | 77 | // from the lock is returned from the method. |
Brian Silverman | 20350ac | 2021-11-17 18:19:55 -0800 | [diff] [blame] | 78 | Atomic32 SpinLock::SpinLoop() { |
Austin Schuh | 745610d | 2015-09-06 18:19:50 -0700 | [diff] [blame] | 79 | int c = adaptive_spin_count; |
| 80 | while (base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree && --c > 0) { |
Brian Silverman | 20350ac | 2021-11-17 18:19:55 -0800 | [diff] [blame] | 81 | SpinlockPause(); |
Austin Schuh | 745610d | 2015-09-06 18:19:50 -0700 | [diff] [blame] | 82 | } |
Brian Silverman | 20350ac | 2021-11-17 18:19:55 -0800 | [diff] [blame] | 83 | return base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree, |
| 84 | kSpinLockSleeper); |
Austin Schuh | 745610d | 2015-09-06 18:19:50 -0700 | [diff] [blame] | 85 | } |
| 86 | |
// Slow path of Lock(): the fast-path CAS failed, so spin, mark the lock
// as contended (kSpinLockSleeper), and block in the OS-specific delay
// routine until the lock can be acquired. Loops until this thread's CAS
// observes kSpinLockFree, i.e. until it owns the lock.
void SpinLock::SlowLock() {
  Atomic32 lock_value = SpinLoop();

  // Counts calls into SpinLockDelay; passed through so the delay
  // implementation can escalate its waiting strategy over time.
  int lock_wait_call_count = 0;
  while (lock_value != kSpinLockFree) {
    // If the lock is currently held, but not marked as having a sleeper, mark
    // it as having a sleeper.
    if (lock_value == kSpinLockHeld) {
      // Here, just "mark" that the thread is going to sleep. Don't store the
      // lock wait time in the lock as that will cause the current lock
      // owner to think it experienced contention.
      lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
                                                        kSpinLockHeld,
                                                        kSpinLockSleeper);
      if (lock_value == kSpinLockHeld) {
        // Successfully transitioned to kSpinLockSleeper. Pass
        // kSpinLockSleeper to the SpinLockDelay routine to properly indicate
        // the last lock_value observed.
        lock_value = kSpinLockSleeper;
      } else if (lock_value == kSpinLockFree) {
        // Lock is free again, so try and acquire it before sleeping;
        // on success the CAS installs kSpinLockSleeper (conservatively
        // assuming other waiters may exist) and the loop condition below
        // terminates the wait.
        lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
                                                          kSpinLockFree,
                                                          kSpinLockSleeper);
        continue;  // skip the delay at the end of the loop
      }
      // Otherwise the CAS saw kSpinLockSleeper: some other thread already
      // marked the lock contended; fall through and wait with that value.
    }

    // Wait for an OS specific delay.
    base::internal::SpinLockDelay(&lockword_, lock_value,
                                  ++lock_wait_call_count);
    // Spin again after returning from the wait routine to give this thread
    // some chance of obtaining the lock.
    lock_value = SpinLoop();
  }
}
| 125 | |
Brian Silverman | 20350ac | 2021-11-17 18:19:55 -0800 | [diff] [blame] | 126 | void SpinLock::SlowUnlock() { |
| 127 | // wake waiter if necessary |
| 128 | base::internal::SpinLockWake(&lockword_, false); |
Austin Schuh | 745610d | 2015-09-06 18:19:50 -0700 | [diff] [blame] | 129 | } |