// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ---
 * Author: Sanjay Ghemawat
 */

// For atomic operations on statistics counters, see atomic_stats_counter.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.
// For atomic operations on reference counts, see atomic_refcount.h.

// Some fast atomic operations -- typically with machine-dependent
// implementations.  This file may need editing as Google code is
// ported to different architectures.

// The routines exported by this module are subtle.  If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain.  If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative.  You should assume only properties explicitly guaranteed by the
// specifications in this file.  You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break.  If
// you do not know what you are doing, avoid these routines, and use a Mutex.
//
// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables.  They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions.  "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation.  "Barrier" operations have both "Acquire" and "Release"
// semantics.  A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines.  The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.  Moreover, if you choose to use the base::subtle::Atomic64
// type, you MUST use one of the Load or Store routines to get correct
// behavior on 32-bit platforms.
//
// The intent is eventually to put all of these routines in namespace
// base::subtle

77#ifndef THREAD_ATOMICOPS_H_
78#define THREAD_ATOMICOPS_H_
79
80#include <config.h>
81#ifdef HAVE_STDINT_H
82#include <stdint.h>
83#endif
84
85// ------------------------------------------------------------------------
86// Include the platform specific implementations of the types
87// and operations listed below. Implementations are to provide Atomic32
88// and Atomic64 operations. If there is a mismatch between intptr_t and
89// the Atomic32 or Atomic64 types for a platform, the platform-specific header
90// should define the macro, AtomicWordCastType in a clause similar to the
91// following:
92// #if ...pointers are 64 bits...
93// # define AtomicWordCastType base::subtle::Atomic64
94// #else
95// # define AtomicWordCastType Atomic32
96// #endif
97// TODO(csilvers): figure out ARCH_PIII/ARCH_K8 (perhaps via ./configure?)
98// ------------------------------------------------------------------------
99
100#include "base/arm_instruction_set_select.h"
101#define GCC_VERSION (__GNUC__ * 10000 \
102 + __GNUC_MINOR__ * 100 \
103 + __GNUC_PATCHLEVEL__)
104
105#if defined(TCMALLOC_PREFER_GCC_ATOMICS) && defined(__GNUC__) && GCC_VERSION >= 40700
106#include "base/atomicops-internals-gcc.h"
107#elif defined(__MACH__) && defined(__APPLE__)
108#include "base/atomicops-internals-macosx.h"
109#elif defined(__GNUC__) && defined(ARMV6)
110#include "base/atomicops-internals-arm-v6plus.h"
111#elif defined(ARMV3)
112#include "base/atomicops-internals-arm-generic.h"
113#elif defined(__GNUC__) && (defined(__i386) || defined(__x86_64__))
114#include "base/atomicops-internals-x86.h"
115#elif defined(_WIN32)
116#include "base/atomicops-internals-windows.h"
117#elif defined(__linux__) && defined(__PPC__)
118#include "base/atomicops-internals-linuxppc.h"
119#elif defined(__GNUC__) && defined(__mips__)
120#include "base/atomicops-internals-mips.h"
121#elif defined(__GNUC__) && GCC_VERSION >= 40700
122#include "base/atomicops-internals-gcc.h"
123#else
124#error You need to implement atomic operations for this architecture
125#endif
126
127// Signed type that can hold a pointer and supports the atomic ops below, as
128// well as atomic loads and stores. Instances must be naturally-aligned.
129typedef intptr_t AtomicWord;
130
#ifdef AtomicWordCastType
// ------------------------------------------------------------------------
// This section is needed only when explicit type casting is required to
// cast AtomicWord to one of the basic atomic types (Atomic64 or Atomic32).
// It also serves to document the AtomicWord interface.
// ------------------------------------------------------------------------

namespace base {
namespace subtle {

// Atomically execute:
//      result = *ptr;
//      if (*ptr == old_value)
//        *ptr = new_value;
//      return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
                                           AtomicWord old_value,
                                           AtomicWord new_value) {
  return NoBarrier_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr.  This routine implies no memory barriers.
inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
                                           AtomicWord new_value) {
  return NoBarrier_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

// Atomic exchange with "Acquire" ordering: no later memory access can be
// reordered ahead of this operation (see the file comment above).
inline AtomicWord Acquire_AtomicExchange(volatile AtomicWord* ptr,
                                         AtomicWord new_value) {
  return Acquire_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

// Atomic exchange with "Release" ordering: no previous memory access can be
// reordered after this operation.
inline AtomicWord Release_AtomicExchange(volatile AtomicWord* ptr,
                                         AtomicWord new_value) {
  return Release_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

// Compare-and-swap (as NoBarrier_CompareAndSwap above) with "Acquire"
// memory ordering.
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Acquire_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

// Compare-and-swap (as NoBarrier_CompareAndSwap above) with "Release"
// memory ordering.
inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Release_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

// Atomic store; implies no memory barriers.
inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
  NoBarrier_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

// Atomic store with "Acquire" memory ordering.
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  base::subtle::Acquire_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

// Atomic store with "Release" memory ordering.
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  base::subtle::Release_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

// Atomic load; implies no memory barriers.
inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
  return NoBarrier_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

// Atomic load with "Acquire" memory ordering.
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Acquire_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

// Atomic load with "Release" memory ordering.
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Release_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

}  // namespace subtle
}  // namespace base
#endif  // AtomicWordCastType

// ------------------------------------------------------------------------
// Commented out type definitions and method declarations for documentation
// of the interface provided by this module.
// ------------------------------------------------------------------------

#if 0

// Signed 32-bit type that supports the atomic ops below, as well as atomic
// loads and stores.  Instances must be naturally aligned.  This type differs
// from AtomicWord in 64-bit binaries where AtomicWord is 64-bits.
typedef int32_t Atomic32;

// Corresponding operations on Atomic32
namespace base {
namespace subtle {

// Signed 64-bit type that supports the atomic ops below, as well as atomic
// loads and stores.  Instances must be naturally aligned.  This type differs
// from AtomicWord in 32-bit binaries where AtomicWord is 32-bits.
typedef int64_t Atomic64;

Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);

// Corresponding operations on Atomic64
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
}  // namespace subtle
}  // namespace base

// Full memory fence: both "Acquire" and "Release" semantics, without
// performing any memory access itself.
void MemoryBarrier();

#endif  // 0


// ------------------------------------------------------------------------
// The following are to be deprecated when all uses have been changed to
// use the base::subtle namespace.
// ------------------------------------------------------------------------

#ifdef AtomicWordCastType
// AtomicWord versions to be deprecated: global-namespace aliases that
// simply forward to the base::subtle implementations above.
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  base::subtle::Acquire_Store(ptr, value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  base::subtle::Release_Store(ptr, value);
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Acquire_Load(ptr);
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Release_Load(ptr);
}
#endif  // AtomicWordCastType

333// 32-bit Acquire/Release operations to be deprecated.
334
335inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
336 Atomic32 old_value,
337 Atomic32 new_value) {
338 return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
339}
340inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
341 Atomic32 old_value,
342 Atomic32 new_value) {
343 return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
344}
345inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
346 base::subtle::Acquire_Store(ptr, value);
347}
348inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
349 return base::subtle::Release_Store(ptr, value);
350}
351inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
352 return base::subtle::Acquire_Load(ptr);
353}
354inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
355 return base::subtle::Release_Load(ptr);
356}
357
358#ifdef BASE_HAS_ATOMIC64
359
360// 64-bit Acquire/Release operations to be deprecated.
361
362inline base::subtle::Atomic64 Acquire_CompareAndSwap(
363 volatile base::subtle::Atomic64* ptr,
364 base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
365 return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
366}
367inline base::subtle::Atomic64 Release_CompareAndSwap(
368 volatile base::subtle::Atomic64* ptr,
369 base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
370 return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
371}
372inline void Acquire_Store(
373 volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
374 base::subtle::Acquire_Store(ptr, value);
375}
376inline void Release_Store(
377 volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
378 return base::subtle::Release_Store(ptr, value);
379}
380inline base::subtle::Atomic64 Acquire_Load(
381 volatile const base::subtle::Atomic64* ptr) {
382 return base::subtle::Acquire_Load(ptr);
383}
384inline base::subtle::Atomic64 Release_Load(
385 volatile const base::subtle::Atomic64* ptr) {
386 return base::subtle::Release_Load(ptr);
387}
388
389#endif // BASE_HAS_ATOMIC64
390
391#endif // THREAD_ATOMICOPS_H_