// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2013, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

// Author: Jovan Zelincevic <jovan.zelincevic@imgtec.com>
// based on atomicops-internals by Sanjay Ghemawat

// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// This code implements MIPS atomics.

#ifndef BASE_ATOMICOPS_INTERNALS_MIPS_H_
#define BASE_ATOMICOPS_INTERNALS_MIPS_H_

#if (_MIPS_ISA == _MIPS_ISA_MIPS64)
#define BASE_HAS_ATOMIC64 1
#endif

typedef int32_t Atomic32;

namespace base {
namespace subtle {
// Atomically execute:
//      result = *ptr;
//      if (*ptr == old_value)
//        *ptr = new_value;
//      return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value)
{
  Atomic32 prev, tmp;
  __asm__ volatile(
      ".set   push                \n"
      ".set   noreorder           \n"

      "1:                         \n"
      "ll     %0,     %5          \n"  // prev = *ptr
      "bne    %0,     %3,     2f  \n"  // if (prev != old_value) goto 2
      " move  %2,     %4          \n"  // tmp = new_value
      "sc     %2,     %1          \n"  // *ptr = tmp (with atomic check)
      "beqz   %2,     1b          \n"  // start again on atomic error
      " nop                       \n"  // delay slot nop
      "2:                         \n"

      ".set   pop                 \n"
      : "=&r" (prev), "=m" (*ptr),
        "=&r" (tmp)
      : "Ir" (old_value), "r" (new_value),
        "m" (*ptr)
      : "memory"
  );
  return prev;
}
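
// Illustrative sketch, not part of the atomicops API: the usual retry loop
// built on the compare-and-swap above, shown here as a lock-free
// fetch-and-add.  The name ExampleFetchAndAdd is hypothetical and exists
// only for this example.
inline Atomic32 ExampleFetchAndAdd(volatile Atomic32* ptr, Atomic32 delta)
{
  Atomic32 old_val = *ptr;  // unsynchronized snapshot to seed the loop
  for (;;) {
    // Try to install old_val + delta; CAS returns the value actually seen.
    Atomic32 seen = NoBarrier_CompareAndSwap(ptr, old_val, old_val + delta);
    if (seen == old_val)
      return old_val;       // our update landed; return the previous value
    old_val = seen;         // another thread raced us; retry with its value
  }
}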

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value)
{
  Atomic32 temp, old;
  __asm__ volatile(
      ".set   push                \n"
      ".set   noreorder           \n"

      "1:                         \n"
      "ll     %1,     %2          \n"  // old = *ptr
      "move   %0,     %3          \n"  // temp = new_value
      "sc     %0,     %2          \n"  // *ptr = temp (with atomic check)
      "beqz   %0,     1b          \n"  // start again on atomic error
      " nop                       \n"  // delay slot nop

      ".set   pop                 \n"
      : "=&r" (temp), "=&r" (old),
        "=m" (*ptr)
      : "r" (new_value), "m" (*ptr)
      : "memory"
  );
  return old;
}
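
// Illustrative sketch, not part of the atomicops API: atomic exchange can
// drain an accumulator in one step by swapping in zero and keeping whatever
// total was there.  The name ExampleDrainCounter is hypothetical.
inline Atomic32 ExampleDrainCounter(volatile Atomic32* counter)
{
  // Returns the previous contents of *counter; increments that land after
  // the exchange accumulate toward the next drain.
  return NoBarrier_AtomicExchange(counter, 0);
}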

inline void MemoryBarrier()
{
  __asm__ volatile("sync" : : : "memory");
}

// "Acquire" operations ensure that no later memory access can be reordered
// ahead of the operation.  "Release" operations ensure that no previous
// memory access can be reordered after the operation.  "Barrier" operations
// have both "Acquire" and "Release" semantics.  A MemoryBarrier() has
// "Barrier" semantics, but does no memory access.
// (A usage sketch of the release/acquire pairing follows Acquire_Load below.)
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value)
{
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value)
{
  MemoryBarrier();
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  return res;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value)
{
  *ptr = value;
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value)
{
  Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value);
  MemoryBarrier();
  return old_value;
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value)
{
  MemoryBarrier();
  return NoBarrier_AtomicExchange(ptr, new_value);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value)
{
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr)
{
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr)
{
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}
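
// Illustrative sketch, not part of the atomicops API, of the release/acquire
// pairing described above: the producer writes its payload and then raises a
// flag with release semantics; a consumer that observes the flag with acquire
// semantics is guaranteed to also observe the payload.  The names
// ExamplePublish and ExampleConsume are hypothetical.
inline void ExamplePublish(volatile Atomic32* payload,
                           volatile Atomic32* flag,
                           Atomic32 value)
{
  NoBarrier_Store(payload, value);  // plain store of the data
  Release_Store(flag, 1);           // barrier, then raise the flag
}

inline Atomic32 ExampleConsume(volatile Atomic32* payload,
                               volatile Atomic32* flag)
{
  while (Acquire_Load(flag) == 0) {
    // spin until the producer raises the flag (load, then barrier)
  }
  return NoBarrier_Load(payload);   // payload writes are now visible
}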

#if (_MIPS_ISA == _MIPS_ISA_MIPS64) || (_MIPS_SIM == _MIPS_SIM_ABI64)

typedef int64_t Atomic64;

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value)
{
  Atomic64 prev, tmp;
  __asm__ volatile(
      ".set   push                \n"
      ".set   noreorder           \n"

      "1:                         \n"
      "lld    %0,     %5          \n"  // prev = *ptr
      "bne    %0,     %3,     2f  \n"  // if (prev != old_value) goto 2
      " move  %2,     %4          \n"  // tmp = new_value
      "scd    %2,     %1          \n"  // *ptr = tmp (with atomic check)
      "beqz   %2,     1b          \n"  // start again on atomic error
      " nop                       \n"  // delay slot nop
      "2:                         \n"

      ".set   pop                 \n"
      : "=&r" (prev), "=m" (*ptr),
        "=&r" (tmp)
      : "Ir" (old_value), "r" (new_value),
        "m" (*ptr)
      : "memory"
  );
  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value)
{
  Atomic64 temp, old;
  __asm__ volatile(
      ".set   push                \n"
      ".set   noreorder           \n"

      "1:                         \n"
      "lld    %1,     %2          \n"  // old = *ptr
      "move   %0,     %3          \n"  // temp = new_value
      "scd    %0,     %2          \n"  // *ptr = temp (with atomic check)
      "beqz   %0,     1b          \n"  // start again on atomic error
      " nop                       \n"  // delay slot nop

      ".set   pop                 \n"
      : "=&r" (temp), "=&r" (old),
        "=m" (*ptr)
      : "r" (new_value), "m" (*ptr)
      : "memory"
  );
  return old;
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value)
{
  Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value);
  MemoryBarrier();
  return old_value;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value)
{
  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value)
{
  MemoryBarrier();
  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  return res;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value)
{
  *ptr = value;
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value)
{
  MemoryBarrier();
  return NoBarrier_AtomicExchange(ptr, new_value);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value)
{
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr)
{
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr)
{
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

#endif

}   // namespace base::subtle
}   // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_MIPS_H_