// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

// Implementation of atomic operations for Mac OS X. This file should not
// be included directly. Clients should instead include
// "base/atomicops.h".

#ifndef BASE_ATOMICOPS_INTERNALS_MACOSX_H_
#define BASE_ATOMICOPS_INTERNALS_MACOSX_H_

typedef int32_t Atomic32;

// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
// distinct types on the Mac, even when they are the same size. Similarly,
// on __ppc64__, AtomicWord and Atomic64 are always distinct. Thus, we need
// explicit casting.
#ifdef __LP64__
#define AtomicWordCastType base::subtle::Atomic64
#else
#define AtomicWordCastType Atomic32
#endif
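
// Illustrative sketch of how the macro above is meant to be used (an
// assumption about the AtomicWord wrappers in "base/atomicops.h", not a copy
// of them): because AtomicWord is a distinct type from Atomic32/Atomic64
// here, an AtomicWord operation has to cast its pointer explicitly before
// forwarding to the sized implementation below, e.g.
//
//   inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
//                                              AtomicWord old_value,
//                                              AtomicWord new_value) {
//     return NoBarrier_CompareAndSwap(
//         reinterpret_cast<volatile AtomicWordCastType*>(ptr),
//         old_value, new_value);
//   }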

#if defined(__LP64__) || defined(__i386__)
#define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*
#endif

#include <libkern/OSAtomic.h>

namespace base {
namespace subtle {

#if !defined(__LP64__) && defined(__ppc__)

// The Mac 64-bit OSAtomic implementations are not available for 32-bit
// PowerPC, and the underlying assembly instructions are available on only
// some implementations of PowerPC.

// The following inline functions will fail with an error message at compile
// time ONLY IF they are called. So it is safe to use this header if user
// code only calls AtomicWord and Atomic32 operations.
//
// NOTE(vchen): Implementation notes to implement the atomic ops below may
// be found in "PowerPC Virtual Environment Architecture, Book II,
// Version 2.02", January 28, 2005, Appendix B, page 46. Unfortunately,
// extra care must be taken to ensure data are properly 8-byte aligned, and
// that data are returned correctly according to Mac OS X ABI specs.

inline int64_t OSAtomicCompareAndSwap64(
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
  __asm__ __volatile__(
      "_OSAtomicCompareAndSwap64_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

inline int64_t OSAtomicAdd64(int64_t theAmount, int64_t *theValue) {
  __asm__ __volatile__(
      "_OSAtomicAdd64_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

inline int64_t OSAtomicCompareAndSwap64Barrier(
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
  int64_t prev = OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
  OSMemoryBarrier();
  return prev;
}

inline int64_t OSAtomicAdd64Barrier(
    int64_t theAmount, int64_t *theValue) {
  int64_t new_val = OSAtomicAdd64(theAmount, theValue);
  OSMemoryBarrier();
  return new_val;
}
#endif

typedef int64_t Atomic64;

inline void MemoryBarrier() {
  OSMemoryBarrier();
}

// 32-bit Versions.

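// Note on the compare-and-swap loops below: the OSAtomic primitives report
// only success or failure, while these wrappers must return the value *ptr
// held before the operation (returning old_value means "the swap happened").
// After a failed swap we therefore re-read *ptr; if it has returned to
// old_value we cannot report it (the caller would read that as success), so
// we retry the swap instead.  The AtomicExchange wrappers likewise re-read
// and retry until the swap succeeds.
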
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                            const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return Acquire_AtomicExchange(ptr, new_value);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

// 64-bit version

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                            const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return Acquire_AtomicExchange(ptr, new_value);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The libkern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

#ifdef __LP64__

// 64-bit implementation on 64-bit platform

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

#else

// 64-bit implementation on 32-bit platform

#if defined(__ppc__)

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__(
      "_NoBarrier_Store_not_supported_for_32_bit_ppc\n\t");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  __asm__ __volatile__(
      "_NoBarrier_Load_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

#elif defined(__i386__)
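
// Note on the MMX trick below: IA-32 has no general-purpose 64-bit move, so
// a plain assignment of an Atomic64 would compile to two 32-bit moves and
// could be observed half-written.  A single movq through an MMX register is
// one 8-byte access, which the architecture guarantees to be atomic for
// naturally aligned data; "emms" afterwards resets the x87 tag word because
// the MMX registers alias the floating-point stack (hence the st/mm clobber
// lists).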

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
                       "emms\n\t"            // Reset FP registers
                       : "=m" (*ptr)
                       : "m" (value)
                       : // mark the FP stack and mmx registers as clobbered
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  Atomic64 value;
  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
                       "emms\n\t"            // Reset FP registers
                       : "=m" (value)
                       : "m" (*ptr)
                       : // mark the FP stack and mmx registers as clobbered
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
  return value;
}
#endif

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  NoBarrier_Store(ptr, value);
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = NoBarrier_Load(ptr);
  MemoryBarrier();
  return value;
}

#endif // __LP64__

} // namespace base::subtle
} // namespace base

#endif // BASE_ATOMICOPS_INTERNALS_MACOSX_H_