// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// Author: Sasha Levitskiy
// based on atomicops-internals by Sanjay Ghemawat
//
// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// This code implements ARM atomics for architectures V6 and newer.

#ifndef BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
#define BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_

#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h"  // For COMPILE_ASSERT
// The LDREXD and STREXD instructions are available in all ARMv7 variants
// and above.  In v6, only some variants support them.  For simplicity, we
// only use exclusive 64-bit load/store on V7 or above.
#if defined(ARMV7)
# define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
#endif
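// How ARMV7 gets defined is build-system dependent; this header only tests
// the macro.  For example (illustrative only, not a documented interface),
// a manual build targeting ARMv7 might pass it on the command line:
//
//   g++ -DARMV7 -march=armv7-a -c some_file.cc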

typedef int32_t Atomic32;

namespace base {
namespace subtle {

typedef int64_t Atomic64;

// 32-bit low-level ops

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 oldval, res;
  do {
    __asm__ __volatile__(
    "ldrex   %1, [%3]\n"
    "mov     %0, #0\n"
    "teq     %1, %4\n"
    // The following IT (if-then) instruction is needed for the subsequent
    // conditional instruction STREXEQ when compiling in THUMB mode.
    // In ARM mode, the compiler/assembler will not generate any code for it.
    "it      eq\n"
    "strexeq %0, %5, [%3]\n"
        : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
        : "r" (ptr), "Ir" (old_value), "r" (new_value)
        : "cc");
  } while (res);
  return oldval;
}
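
// Illustrative usage (not part of this header): NoBarrier_CompareAndSwap
// returns the value observed at *ptr, so callers can build other
// read-modify-write operations out of it with the usual retry loop.
// A minimal sketch; "AtomicAddExample" is a hypothetical helper name:
//
//   Atomic32 AtomicAddExample(volatile Atomic32* ptr, Atomic32 delta) {
//     for (;;) {
//       Atomic32 old_val = *ptr;
//       if (NoBarrier_CompareAndSwap(ptr, old_val, old_val + delta) ==
//           old_val) {
//         return old_val + delta;  // CAS succeeded: observed == expected.
//       }
//       // Another thread raced us; reload and retry.
//     }
//   }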

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 tmp, old;
  __asm__ __volatile__(
      "1:\n"
      "ldrex %1, [%2]\n"
      "strex %0, %3, [%2]\n"
      "teq %0, #0\n"
      "bne 1b"
      : "=&r" (tmp), "=&r" (old)
      : "r" (ptr), "r" (new_value)
      : "cc", "memory");
  return old;
}

inline void MemoryBarrier() {
#if !defined(ARMV7)
  // ARMv6 has no DMB instruction; the data memory barrier is issued
  // through CP15: MCR p15, 0, <reg>, c7, c10, 5.
  uint32_t dest = 0;
  __asm__ __volatile__("mcr p15,0,%0,c7,c10,5" :"=&r"(dest) : : "memory");
#else
  __asm__ __volatile__("dmb" : : : "memory");
#endif
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value);
  MemoryBarrier();
  return old_value;
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  MemoryBarrier();
  return NoBarrier_AtomicExchange(ptr, new_value);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
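
// Illustrative usage (not part of this header): the acquire variant is the
// natural fit for taking a lock, and the release side for dropping it.
// A minimal spinlock sketch; "TryLockExample"/"UnlockExample" are
// hypothetical names (Release_Store is defined further below):
//
//   bool TryLockExample(volatile Atomic32* lock_word) {
//     // 0 = free, 1 = held.  Acquire semantics keep the critical section's
//     // memory accesses from moving above the lock acquisition.
//     return Acquire_CompareAndSwap(lock_word, 0, 1) == 0;
//   }
//
//   void UnlockExample(volatile Atomic32* lock_word) {
//     Release_Store(lock_word, 0);  // Prior writes become visible first.
//   }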

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}
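
// Illustrative usage (not part of this header): Release_Store and
// Acquire_Load pair up for the classic producer/consumer hand-off.
// A minimal sketch with hypothetical names ("payload", "flag"):
//
//   // Producer thread:
//   payload = 42;                    // plain write
//   Release_Store(&flag, 1);         // barrier orders payload before flag
//
//   // Consumer thread:
//   if (Acquire_Load(&flag) == 1) {  // barrier orders flag before payload
//     use(payload);                  // guaranteed to observe 42
//   }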

// 64-bit versions are only available if LDREXD and STREXD instructions
// are available.
#ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD

#define BASE_HAS_ATOMIC64 1

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 oldval, res;
  do {
    __asm__ __volatile__(
    // %Q and %R select the low and high 32-bit halves, respectively, of a
    // 64-bit operand held in a register pair.
    "ldrexd   %1, [%3]\n"
    "mov      %0, #0\n"
    "teq      %Q1, %Q4\n"
    // The following IT (if-then) instructions are needed for the subsequent
    // conditional instructions when compiling in THUMB mode.
    // In ARM mode, the compiler/assembler will not generate any code for them.
    "it       eq\n"
    "teqeq    %R1, %R4\n"
    "it       eq\n"
    "strexdeq %0, %5, [%3]\n"
        : "=&r" (res), "=&r" (oldval), "+Q" (*ptr)
        : "r" (ptr), "Ir" (old_value), "r" (new_value)
        : "cc");
  } while (res);
  return oldval;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  int store_failed;
  Atomic64 old;
  __asm__ __volatile__(
      "1:\n"
      "ldrexd %1, [%2]\n"
      "strexd %0, %3, [%2]\n"
      "teq %0, #0\n"
      "bne 1b"
      : "=&r" (store_failed), "=&r" (old)
      : "r" (ptr), "r" (new_value)
      : "cc", "memory");
  return old;
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value);
  MemoryBarrier();
  return old_value;
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  MemoryBarrier();
  return NoBarrier_AtomicExchange(ptr, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  int store_failed;
  Atomic64 dummy;
  __asm__ __volatile__(
      "1:\n"
      // Dummy load to set the exclusive monitor; STREXD only succeeds after
      // a matching LDREXD, and a plain 64-bit store would not be atomic.
      "ldrexd %1, [%3]\n"
      "strexd %0, %2, [%3]\n"
      "teq %0, #0\n"
      "bne 1b"
      : "=&r" (store_failed), "=&r"(dummy)
      : "r"(value), "r" (ptr)
      : "cc", "memory");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  Atomic64 res;
  __asm__ __volatile__(
      // LDREXD yields a single-copy-atomic 64-bit load; CLREX then clears
      // the exclusive monitor, since no store will follow.
      "ldrexd   %0, [%1]\n"
      "clrex\n"
      : "=r" (res)
      : "r"(ptr), "Q"(*ptr));
  return res;
}

#else // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD

inline void NotImplementedFatalError(const char *function_name) {
  fprintf(stderr, "64-bit %s() not implemented on this platform\n",
          function_name);
  abort();
}

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  NotImplementedFatalError("NoBarrier_CompareAndSwap");
  return 0;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  NotImplementedFatalError("NoBarrier_AtomicExchange");
  return 0;
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  NotImplementedFatalError("Acquire_AtomicExchange");
  return 0;
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  NotImplementedFatalError("Release_AtomicExchange");
  return 0;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  NotImplementedFatalError("NoBarrier_Store");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  NotImplementedFatalError("NoBarrier_Load");
  return 0;
}

#endif // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  NoBarrier_Store(ptr, value);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = NoBarrier_Load(ptr);
  MemoryBarrier();
  return value;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

}  // namespace subtle ends
}  // namespace base ends

#endif  // BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_