// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

// Implementation of atomic operations for Mac OS X.  This file should not
// be included directly.  Clients should instead include
// "base/atomicops.h".

#ifndef BASE_ATOMICOPS_INTERNALS_MACOSX_H_
#define BASE_ATOMICOPS_INTERNALS_MACOSX_H_

typedef int32_t Atomic32;

// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are distinct
// types on the Mac even when they have the same size.  Similarly, on
// __ppc64__, AtomicWord and Atomic64 are distinct types.  Thus, we need
// explicit casting.
#ifdef __LP64__
#define AtomicWordCastType base::subtle::Atomic64
#else
#define AtomicWordCastType Atomic32
#endif
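
// Usage sketch (illustrative, not part of the original header): the
// generic base/atomicops.h wrapper is expected to route AtomicWord
// operations through this cast, roughly like:
//
//   inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
//     return NoBarrier_Load(
//         reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
//   }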

#if defined(__LP64__) || defined(__i386__)
#define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*
#endif
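
// Explanatory note (added): BASE_HAS_ATOMIC64 marks configurations where
// the Atomic64 operations below are actually usable: every 64-bit build,
// plus 32-bit x86, where aligned 64-bit loads and stores are implemented
// with MMX moves further down.  32-bit PowerPC gets only compile-time
// error stubs, so the macro is deliberately left undefined there.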

#include <libkern/OSAtomic.h>

namespace base {
namespace subtle {

#if !defined(__LP64__) && defined(__ppc__)

// The Mac 64-bit OSAtomic implementations are not available for 32-bit
// PowerPC, and the underlying assembly instructions are available on only
// some implementations of PowerPC.

// The following inline functions will fail with an error message at
// compile time ONLY IF they are called.  So it is safe to use this header
// if user code only calls AtomicWord and Atomic32 operations.
//
// NOTE(vchen): Implementation notes to implement the atomic ops below may
// be found in "PowerPC Virtual Environment Architecture, Book II,
// Version 2.02", January 28, 2005, Appendix B, page 46.  Unfortunately,
// extra care must be taken to ensure data are properly 8-byte aligned, and
// that data are returned correctly according to Mac OS X ABI specs.

inline int64_t OSAtomicCompareAndSwap64(
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
  __asm__ __volatile__(
      "_OSAtomicCompareAndSwap64_not_supported_for_32_bit_ppc\n\t");
  return 0;
}
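
// How the stub works (explanatory note, added): the string above is not a
// valid PowerPC mnemonic, so the assembler rejects it -- but only when the
// function body is actually emitted.  An inline function that is never
// called produces no code, so merely including this header stays safe.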

inline int64_t OSAtomicAdd64(int64_t theAmount, int64_t *theValue) {
  __asm__ __volatile__(
      "_OSAtomicAdd64_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

inline int64_t OSAtomicCompareAndSwap64Barrier(
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
  int64_t prev = OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
  OSMemoryBarrier();
  return prev;
}

inline int64_t OSAtomicAdd64Barrier(
    int64_t theAmount, int64_t *theValue) {
  int64_t new_val = OSAtomicAdd64(theAmount, theValue);
  OSMemoryBarrier();
  return new_val;
}
#endif

typedef int64_t Atomic64;

inline void MemoryBarrier() {
  OSMemoryBarrier();
}
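
// Note (added): OSMemoryBarrier() is documented by Apple as a full
// load/store barrier on both PowerPC and x86, which is why a single
// primitive can back every barrier variant below.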

// 32-bit Versions.

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}
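
// Why the loop (explanatory note, added): OSAtomicCompareAndSwap32 only
// reports success or failure, while this interface must return the value
// previously at *ptr.  On failure we re-read *ptr; if another thread has
// raced the value back to old_value, we retry rather than report a failed
// CAS whose "previous value" would misleadingly equal old_value.
//
// Usage sketch (illustrative, not part of the original header):
//   Atomic32 flag = 0;
//   if (NoBarrier_CompareAndSwap(&flag, 0, 1) == 0) {
//     // This thread won the race: flag was 0 and is now 1.
//   }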

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}
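
// Note (added): exchange is built as a compare-and-swap retry loop because
// the OSAtomic interface used here exposes no plain atomic swap; the loop
// exits once the CAS installs new_value against an unchanged old_value,
// which is then returned.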

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                            const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return Acquire_AtomicExchange(ptr, new_value);
}
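
// Note (added): as with Release_CompareAndSwap below, the libkern
// interface does not distinguish acquire from release barriers --
// OSAtomicCompareAndSwap32Barrier issues a full barrier -- so the release
// exchange simply reuses the acquire version.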

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}
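
// Note (added): under the classic base/atomicops.h contract, Release_Store
// guarantees all prior loads and stores complete before the store, while
// Acquire_Store guarantees the store completes before any subsequent
// operations; both are realized here with a full barrier on the
// corresponding side.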

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  MemoryBarrier();
  return *ptr;
}
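
// Pairing sketch (illustrative, not part of the original header): publish
// data with Release_Store and consume it with Acquire_Load:
//
//   // Producer:                    // Consumer:
//   data = 42;                      while (Acquire_Load(&ready) == 0) {}
//   Release_Store(&ready, 1);       assert(data == 42);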

// 64-bit Versions.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                            const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return Acquire_AtomicExchange(ptr, new_value);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The libkern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

#ifdef __LP64__

// 64-bit implementation on 64-bit platform

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return *ptr;
}

#else

// 64-bit implementation on 32-bit platform

#if defined(__ppc__)

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__(
      "_NoBarrier_Store_not_supported_for_32_bit_ppc\n\t");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  __asm__ __volatile__(
      "_NoBarrier_Load_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

#elif defined(__i386__)

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
                       "emms\n\t"            // Empty MMX state / reset FP tag
                       : "=m" (*ptr)
                       : "m" (value)
                       : // mark the FP stack and mmx registers as clobbered
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  Atomic64 value;
  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
                       "emms\n\t"            // Empty MMX state / reset FP tag
                       : "=m" (value)
                       : "m" (*ptr)
                       : // mark the FP stack and mmx registers as clobbered
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
  return value;
}
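
// Why this is atomic (explanatory note, added): on x86, an aligned 8-byte
// access performed by a single instruction -- here a movq through an MMX
// register -- is guaranteed atomic (Intel SDM, Vol. 3A, sec. 8.1.1), even
// in 32-bit mode where a plain C assignment would compile to two 4-byte
// moves.  The trailing "emms" empties the MMX state so that subsequent
// x87 floating-point code still works.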
#endif

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  NoBarrier_Store(ptr, value);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  NoBarrier_Store(ptr, value);
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = NoBarrier_Load(ptr);
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return NoBarrier_Load(ptr);
}
#endif  // __LP64__

}  // namespace base::subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_MACOSX_H_