// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// Author: Sasha Levitskiy
// based on atomicops-internals by Sanjay Ghemawat
//
// This file is an internal atomic implementation; use base/atomicops.h instead.
//
// This code implements ARM atomics for architectures V6 and newer.

#ifndef BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
#define BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_

#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h"  // For COMPILE_ASSERT

// The LDREXD and STREXD instructions are available in all ARM v7 variants and
// above.  In v6, only some variants support them.  For simplicity, we only use
// the exclusive 64-bit load/store in V7 or above.
#if defined(ARMV7)
# define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
#endif

typedef int32_t Atomic32;

namespace base {
namespace subtle {

typedef int64_t Atomic64;

// 32-bit low-level ops

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 oldval, res;
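  // LDREX/STREX retry loop: load-exclusive the current value, compare it
  // against old_value, and only if they match attempt a store-exclusive of
  // new_value.  res is 0 on a successful store and 1 if the exclusive
  // reservation was lost, in which case the sequence is retried.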
  do {
    __asm__ __volatile__(
    "ldrex   %1, [%3]\n"
    "mov     %0, #0\n"
    "teq     %1, %4\n"
    // The following IT (if-then) instruction is needed for the subsequent
    // conditional instruction STREXEQ when compiling in THUMB mode.
    // In ARM mode, the compiler/assembler will not generate any code for it.
    "it      eq\n"
    "strexeq %0, %5, [%3]\n"
        : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
        : "r" (ptr), "Ir" (old_value), "r" (new_value)
        : "cc");
  } while (res);
  return oldval;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 tmp, old;
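  // Unconditional exchange: load-exclusive the old value, then store-exclusive
  // new_value.  tmp holds the STREX status flag, so the loop repeats until the
  // store succeeds (tmp == 0).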
  __asm__ __volatile__(
      "1:\n"
      "ldrex %1, [%2]\n"
      "strex %0, %3, [%2]\n"
      "teq %0, #0\n"
      "bne 1b"
      : "=&r" (tmp), "=&r" (old)
      : "r" (ptr), "r" (new_value)
      : "cc", "memory");
  return old;
}

inline void MemoryBarrier() {
#if !defined(ARMV7)
  uint32_t dest = 0;
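  // ARMv6 has no DMB instruction; the CP15 "c7, c10, 5" operation below is the
  // architected data memory barrier on that architecture.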
  __asm__ __volatile__("mcr p15,0,%0,c7,c10,5" :"=&r"(dest) : : "memory");
#else
  __asm__ __volatile__("dmb" : : : "memory");
#endif
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value);
  MemoryBarrier();
  return old_value;
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  MemoryBarrier();
  return NoBarrier_AtomicExchange(ptr, new_value);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

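// Illustrative only: a minimal sketch (not part of this header's API) of how
// callers typically pair the acquire/release operations above.  The names
// g_payload, g_ready, Produce, and Consume are hypothetical.
//
//   Atomic32 g_payload = 0;
//   Atomic32 g_ready = 0;
//
//   void Produce() {
//     NoBarrier_Store(&g_payload, 42);   // write the data first
//     Release_Store(&g_ready, 1);        // barrier, then publish the flag
//   }
//
//   Atomic32 Consume() {
//     while (Acquire_Load(&g_ready) == 0) {}  // load the flag, then barrier
//     return NoBarrier_Load(&g_payload);      // observes the published 42
//   }
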
// 64-bit versions are only available if LDREXD and STREXD instructions
// are available.
#ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD

#define BASE_HAS_ATOMIC64 1

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 oldval, res;
  do {
    __asm__ __volatile__(
    "ldrexd  %1, [%3]\n"
    "mov     %0, #0\n"
178 "teq %Q1, %Q4\n"
179 // The following IT (if-then) instructions are needed for the subsequent
180 // conditional instructions when compiling in THUMB mode.
181 // In ARM mode, the compiler/assembler will not generate any code for it.
182 "it eq\n"
183 "teqeq %R1, %R4\n"
184 "it eq\n"
185 "strexdeq %0, %5, [%3]\n"
186 : "=&r" (res), "=&r" (oldval), "+Q" (*ptr)
187 : "r" (ptr), "Ir" (old_value), "r" (new_value)
188 : "cc");
189 } while (res);
190 return oldval;
191}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  int store_failed;
  Atomic64 old;
  __asm__ __volatile__(
      "1:\n"
      "ldrexd %1, [%2]\n"
      "strexd %0, %3, [%2]\n"
      "teq %0, #0\n"
      "bne 1b"
      : "=&r" (store_failed), "=&r" (old)
      : "r" (ptr), "r" (new_value)
      : "cc", "memory");
  return old;
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value);
  MemoryBarrier();
  return old_value;
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  MemoryBarrier();
  return NoBarrier_AtomicExchange(ptr, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  int store_failed;
  Atomic64 dummy;
  __asm__ __volatile__(
      "1:\n"
      // Dummy load to lock cache line.
      "ldrexd %1, [%3]\n"
      "strexd %0, %2, [%3]\n"
      "teq %0, #0\n"
      "bne 1b"
      : "=&r" (store_failed), "=&r"(dummy)
      : "r"(value), "r" (ptr)
      : "cc", "memory");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  Atomic64 res;
  __asm__ __volatile__(
      "ldrexd %0, [%1]\n"
241 "clrex\n"
242 : "=r" (res)
243 : "r"(ptr), "Q"(*ptr));
244 return res;
245}
246
247#else // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
248
249inline void NotImplementedFatalError(const char *function_name) {
250 fprintf(stderr, "64-bit %s() not implemented on this platform\n",
251 function_name);
252 abort();
253}
254
255inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
256 Atomic64 old_value,
257 Atomic64 new_value) {
258 NotImplementedFatalError("NoBarrier_CompareAndSwap");
259 return 0;
260}
261
262inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
263 Atomic64 new_value) {
264 NotImplementedFatalError("NoBarrier_AtomicExchange");
265 return 0;
266}
267
268inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
269 Atomic64 new_value) {
270 NotImplementedFatalError("Acquire_AtomicExchange");
271 return 0;
272}
273
274inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
275 Atomic64 new_value) {
276 NotImplementedFatalError("Release_AtomicExchange");
277 return 0;
278}
279
280inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
281 NotImplementedFatalError("NoBarrier_Store");
282}
283
284inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
285 NotImplementedFatalError("NoBarrier_Load");
286 return 0;
287}
288
289#endif // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_Store(ptr, value);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  NoBarrier_Store(ptr, value);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = NoBarrier_Load(ptr);
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return NoBarrier_Load(ptr);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

}  // namespace subtle ends
}  // namespace base ends

#endif  // BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_