// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2013, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

// Author: Jovan Zelincevic <jovan.zelincevic@imgtec.com>
// based on atomicops-internals by Sanjay Ghemawat

// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// This code implements MIPS atomics.

#ifndef BASE_ATOMICOPS_INTERNALS_MIPS_H_
#define BASE_ATOMICOPS_INTERNALS_MIPS_H_

#if (_MIPS_ISA == _MIPS_ISA_MIPS64)
#define BASE_HAS_ATOMIC64 1
#endif

typedef int32_t Atomic32;

namespace base {
namespace subtle {

// Atomically execute:
//     result = *ptr;
//     if (*ptr == old_value)
//         *ptr = new_value;
//     return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value)
{
  Atomic32 prev, tmp;
  __asm__ volatile(
      ".set   push                \n"
      ".set   noreorder           \n"

      "1:                         \n"
      "ll     %0,     %5          \n"  // prev = *ptr
      "bne    %0,     %3,     2f  \n"  // if (prev != old_value) goto 2
      " move  %2,     %4          \n"  // tmp = new_value
      "sc     %2,     %1          \n"  // *ptr = tmp (with atomic check)
      "beqz   %2,     1b          \n"  // start again on atomic error
      " nop                       \n"  // delay slot nop
      "2:                         \n"

      ".set   pop                 \n"
      : "=&r" (prev), "=m" (*ptr),
        "=&r" (tmp)
      : "Ir" (old_value), "r" (new_value),
        "m" (*ptr)
      : "memory"
  );
  return prev;
}
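
// Illustrative sketch only, not part of this header's interface: the
// compare-and-swap contract documented above is typically used in a retry
// loop.  The names AtomicAddViaCas, counter and delta below are hypothetical.
//
//   Atomic32 AtomicAddViaCas(volatile Atomic32* counter, Atomic32 delta) {
//     Atomic32 old_val, new_val;
//     do {
//       old_val = NoBarrier_Load(counter);   // snapshot the current value
//       new_val = old_val + delta;
//       // CAS returns the value it observed; retry if another thread
//       // changed *counter between the load and the CAS.
//     } while (NoBarrier_CompareAndSwap(counter, old_val, new_val) != old_val);
//     return new_val;
//   }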

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr.  This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value)
{
  Atomic32 temp, old;
  __asm__ volatile(
      ".set   push                \n"
      ".set   noreorder           \n"

      "1:                         \n"
      "ll     %1,     %2          \n"  // old = *ptr
      "move   %0,     %3          \n"  // temp = new_value
      "sc     %0,     %2          \n"  // *ptr = temp (with atomic check)
      "beqz   %0,     1b          \n"  // start again on atomic error
      " nop                       \n"  // delay slot nop

      ".set   pop                 \n"
      : "=&r" (temp), "=&r" (old),
        "=m" (*ptr)
      : "r" (new_value), "m" (*ptr)
      : "memory"
  );
  return old;
}
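
// Illustrative sketch only, not part of this header's interface: the
// exchange primitive above can back a simple test-and-set spin lock.  The
// names SpinAcquire, SpinRelease and lock are hypothetical; the Acquire/
// Release variants used here are defined further down in this header.
//
//   void SpinAcquire(volatile Atomic32* lock) {
//     // Swap in 1; a previous value of 1 means another thread holds the
//     // lock, so spin.  Acquire semantics keep the critical section's
//     // accesses from being reordered ahead of the acquisition.
//     while (Acquire_AtomicExchange(lock, 1) != 0) { }
//   }
//
//   void SpinRelease(volatile Atomic32* lock) {
//     // Release semantics keep the critical section's accesses from being
//     // reordered after the unlock.
//     Release_Store(lock, 0);
//   }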

inline void MemoryBarrier()
{
  __asm__ volatile("sync" : : : "memory");
}

// "Acquire" operations ensure that no later memory access can be reordered
// ahead of the operation.  "Release" operations ensure that no previous
// memory access can be reordered after the operation.  "Barrier" operations
// have both "Acquire" and "Release" semantics.  A MemoryBarrier() has
// "Barrier" semantics, but does no memory access.
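
// Illustrative sketch only, not part of this header's interface: the
// release/acquire pairing described above is what makes the classic
// message-passing pattern work.  The names Produce, Consume, payload and
// ready are hypothetical; Release_Store and Acquire_Load are defined below.
//
//   Atomic32 payload = 0;  // data written before the flag is published
//   Atomic32 ready   = 0;  // flag published with release semantics
//
//   void Produce() {
//     NoBarrier_Store(&payload, 42);  // write the data first
//     Release_Store(&ready, 1);       // publish: the data write cannot be
//                                     // reordered after this store
//   }
//
//   void Consume() {
//     if (Acquire_Load(&ready) == 1) {  // reads that follow cannot be
//                                       // reordered ahead of this load
//       Atomic32 v = NoBarrier_Load(&payload);  // observes 42
//       (void)v;
//     }
//   }
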
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value)
{
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value)
{
  MemoryBarrier();
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  return res;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value)
{
  *ptr = value;
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value)
{
  Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value);
  MemoryBarrier();
  return old_value;
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value)
{
  MemoryBarrier();
  return NoBarrier_AtomicExchange(ptr, new_value);
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value)
{
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value)
{
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr)
{
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr)
{
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr)
{
  MemoryBarrier();
  return *ptr;
}

#if (_MIPS_ISA == _MIPS_ISA_MIPS64) || (_MIPS_SIM == _MIPS_SIM_ABI64)

typedef int64_t Atomic64;

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value)
{
  Atomic64 prev, tmp;
  __asm__ volatile(
      ".set   push                \n"
      ".set   noreorder           \n"

      "1:                         \n"
      "lld    %0,     %5          \n"  // prev = *ptr
      "bne    %0,     %3,     2f  \n"  // if (prev != old_value) goto 2
      " move  %2,     %4          \n"  // tmp = new_value
      "scd    %2,     %1          \n"  // *ptr = tmp (with atomic check)
      "beqz   %2,     1b          \n"  // start again on atomic error
      " nop                       \n"  // delay slot nop
      "2:                         \n"

      ".set   pop                 \n"
      : "=&r" (prev), "=m" (*ptr),
        "=&r" (tmp)
      : "Ir" (old_value), "r" (new_value),
        "m" (*ptr)
      : "memory"
  );
  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value)
{
  Atomic64 temp, old;
  __asm__ volatile(
      ".set   push                \n"
      ".set   noreorder           \n"

      "1:                         \n"
      "lld    %1,     %2          \n"  // old = *ptr
      "move   %0,     %3          \n"  // temp = new_value
      "scd    %0,     %2          \n"  // *ptr = temp (with atomic check)
      "beqz   %0,     1b          \n"  // start again on atomic error
      " nop                       \n"  // delay slot nop

      ".set   pop                 \n"
      : "=&r" (temp), "=&r" (old),
        "=m" (*ptr)
      : "r" (new_value), "m" (*ptr)
      : "memory"
  );
  return old;
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value)
{
  Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value);
  MemoryBarrier();
  return old_value;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value)
{
  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value)
{
  MemoryBarrier();
  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  return res;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value)
{
  *ptr = value;
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value)
{
  MemoryBarrier();
  return NoBarrier_AtomicExchange(ptr, new_value);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value)
{
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value)
{
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr)
{
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr)
{
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr)
{
  MemoryBarrier();
  return *ptr;
}

#endif

}   // namespace base::subtle
}   // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_MIPS_H_