#if !AOS_DEBUG
#undef NDEBUG
#define NDEBUG
#endif

#include "aos/ipc_lib/aos_sync.h"

#include <linux/futex.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <errno.h>
#include <stdint.h>
#include <limits.h>
#include <string.h>
#include <inttypes.h>
#include <sys/types.h>
#include <stddef.h>
#include <assert.h>
#include <pthread.h>
#include <sched.h>

#ifdef AOS_SANITIZER_thread
#include <sanitizer/tsan_interface_atomic.h>
#endif

#include <algorithm>
#include <type_traits>

#include "absl/base/call_once.h"
#include "glog/logging.h"

#include "aos/macros.h"
#include "aos/thread_local.h"
#include "aos/util/compiler_memory_barrier.h"

using ::aos::linux_code::ipc_lib::FutexAccessorObserver;

// This code was originally based on <https://www.akkadia.org/drepper/futex.pdf>,
// but it has since evolved a lot. However, that still has useful information.
//
// Finding information about actually using futexes is really REALLY hard, so
// here's a list of the stuff that I've used:
// futex(7) has a really high-level overview.
// <http://locklessinc.com/articles/futex_cheat_sheet/> describes some of the
// operations in a bit more detail than most places.
// <http://locklessinc.com/articles/mutex_cv_futex/> is the basis of our
// implementations (before PI).
// <http://lwn.net/Articles/360699/> has a nice overview of futexes in late 2009
// (fairly recent compared to everything else...).
// <https://www.kernel.org/doc/Documentation/pi-futex.txt>,
// <https://www.kernel.org/doc/Documentation/futex-requeue-pi.txt>,
// <https://www.kernel.org/doc/Documentation/robust-futexes.txt>,
// and <https://www.kernel.org/doc/Documentation/robust-futex-ABI.txt> are all
// useful references.
// The kernel source (kernel/futex.c) has some useful comments about what the
// various operations do (except figuring out which argument goes where in the
// syscall is still confusing).
// futex(2) is basically useless except for describing the order of the
// arguments (it only has high-level descriptions of what some of the
// operations do, and some of them are wrong in Wheezy).
// glibc's nptl pthreads implementation is the intended user of most of these
// things, so it is also a good place to look for examples. However, it is all
// very hard to read because it supports ~20 different kinds of mutexes and
// several variations of condition variables, and some of the pieces of code
// are only written in assembly.
// set_robust_list(2) is wrong in Wheezy (it doesn't actually take a TID
// argument).
//
// Can't use PRIVATE futex operations because they use the pid (or something) as
// part of the hash.
//
// ThreadSanitizer understands how these mutexes etc work. It appears to be able
// to figure out the happens-before relationship from the __ATOMIC_SEQ_CST
// atomic primitives.
//
// Remember that EAGAIN and EWOULDBLOCK are the same! (ie if you get EAGAIN from
// FUTEX_WAIT, the docs call it EWOULDBLOCK...)

// Values for an aos_mutex.futex (kernel-mandated):
// 0 = unlocked
// TID = locked, not contended
// |FUTEX_WAITERS = there are waiters (aka contended)
// |FUTEX_OWNER_DIED = old owner died
//
// Values for an aos_futex being used directly:
// 0 = unset
// 1 = set
//
// The value of an aos_condition is just a generation counter.

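// For illustration, a rough sketch of the value sequence for one contended
// lock/unlock cycle under this protocol (the TIDs 42 and 43 are hypothetical):
//
//   0                    // unlocked
//   42                   // thread A (TID 42) takes the fast path
//   42 | FUTEX_WAITERS   // thread B (TID 43) enters FUTEX_LOCK_PI and sleeps
//   43                   // A's FUTEX_UNLOCK_PI wakes B and writes B's TID;
//                        // the kernel re-ORs in FUTEX_WAITERS iff more
//                        // waiters remain
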
#ifdef AOS_SANITIZER_thread
extern "C" void AnnotateHappensBefore(const char *file, int line,
                                      uintptr_t addr);
extern "C" void AnnotateHappensAfter(const char *file, int line,
                                     uintptr_t addr);
#define ANNOTATE_HAPPENS_BEFORE(address)    \
  AnnotateHappensBefore(__FILE__, __LINE__, \
                        reinterpret_cast<uintptr_t>(address))
#define ANNOTATE_HAPPENS_AFTER(address) \
  AnnotateHappensAfter(__FILE__, __LINE__, reinterpret_cast<uintptr_t>(address))
#else
#define ANNOTATE_HAPPENS_BEFORE(address)
#define ANNOTATE_HAPPENS_AFTER(address)
#endif

namespace {

const bool kRobustListDebug = false;
const bool kLockDebug = false;
const bool kPrintOperations = false;

// These sys_futex_* functions are wrappers around syscall(SYS_futex). They each
// take a specific set of arguments for a given futex operation. They return the
// result or a negated errno value; the kernel guarantees that return values in
// -1..-4095 are always errors, never successful results.
//
// They each have optimized versions for ARM EABI (the syscall interface is
// different for non-EABI ARM, so that is the right thing to test for) that
// don't go through syscall(2) or errno.
// These use register variables to get the values in the right registers to
// actually make the syscall.

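// A rough caller-side sketch of that negated-errno convention (using the
// sys_futex_wait wrapper defined below; the futex f is hypothetical):
//
//   aos_futex f = 0;
//   const int ret = sys_futex_wait(FUTEX_WAIT, &f, 0, nullptr);
//   if (ret == -EWOULDBLOCK) {
//     // The value at &f was no longer 0 when the kernel checked it.
//   } else if (ret == -EINTR) {
//     // Interrupted by a signal; callers here typically retry.
//   }
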
// The actual macro that we key off of to use the inline versions or not.
#if defined(__ARM_EABI__)
#define ARM_EABI_INLINE_SYSCALL 1
#else
#define ARM_EABI_INLINE_SYSCALL 0
#endif

// Used for FUTEX_WAIT, FUTEX_LOCK_PI, and FUTEX_TRYLOCK_PI.
inline int sys_futex_wait(int op, aos_futex *addr1, int val1,
                          const struct timespec *timeout) {
#if ARM_EABI_INLINE_SYSCALL
  register aos_futex *addr1_reg __asm__("r0") = addr1;
  register int op_reg __asm__("r1") = op;
  register int val1_reg __asm__("r2") = val1;
  register const struct timespec *timeout_reg __asm__("r3") = timeout;
  register int syscall_number __asm__("r7") = SYS_futex;
  register int result __asm__("r0");
  __asm__ volatile("swi #0"
                   : "=r"(result)
                   : "r"(addr1_reg), "r"(op_reg), "r"(val1_reg),
                     "r"(timeout_reg), "r"(syscall_number)
                   : "memory");
  return result;
#else
  const int r = syscall(SYS_futex, addr1, op, val1, timeout);
  if (r == -1) return -errno;
  return r;
#endif
}

inline int sys_futex_wake(aos_futex *addr1, int val1) {
#if ARM_EABI_INLINE_SYSCALL
  register aos_futex *addr1_reg __asm__("r0") = addr1;
  register int op_reg __asm__("r1") = FUTEX_WAKE;
  register int val1_reg __asm__("r2") = val1;
  register int syscall_number __asm__("r7") = SYS_futex;
  register int result __asm__("r0");
  __asm__ volatile("swi #0"
                   : "=r"(result)
                   : "r"(addr1_reg), "r"(op_reg), "r"(val1_reg),
                     "r"(syscall_number)
                   : "memory");
  return result;
#else
  const int r = syscall(SYS_futex, addr1, FUTEX_WAKE, val1);
  if (r == -1) return -errno;
  return r;
#endif
}

inline int sys_futex_cmp_requeue_pi(aos_futex *addr1, int num_wake,
                                    int num_requeue, aos_futex *m,
                                    uint32_t val) {
#if ARM_EABI_INLINE_SYSCALL
  register aos_futex *addr1_reg __asm__("r0") = addr1;
  register int op_reg __asm__("r1") = FUTEX_CMP_REQUEUE_PI;
  register int num_wake_reg __asm__("r2") = num_wake;
  register int num_requeue_reg __asm__("r3") = num_requeue;
  register aos_futex *m_reg __asm__("r4") = m;
  register uint32_t val_reg __asm__("r5") = val;
  register int syscall_number __asm__("r7") = SYS_futex;
  register int result __asm__("r0");
  __asm__ volatile("swi #0"
                   : "=r"(result)
                   : "r"(addr1_reg), "r"(op_reg), "r"(num_wake_reg),
                     "r"(num_requeue_reg), "r"(m_reg), "r"(val_reg),
                     "r"(syscall_number)
                   : "memory");
  return result;
#else
  const int r = syscall(SYS_futex, addr1, FUTEX_CMP_REQUEUE_PI, num_wake,
                        num_requeue, m, val);
  if (r == -1) return -errno;
  return r;
#endif
}

inline int sys_futex_wait_requeue_pi(aos_condition *addr1,
                                     uint32_t start_val,
                                     const struct timespec *timeout,
                                     aos_futex *m) {
#if ARM_EABI_INLINE_SYSCALL
  register aos_condition *addr1_reg __asm__("r0") = addr1;
  register int op_reg __asm__("r1") = FUTEX_WAIT_REQUEUE_PI;
  register uint32_t start_val_reg __asm__("r2") = start_val;
  register const struct timespec *timeout_reg __asm__("r3") = timeout;
  register aos_futex *m_reg __asm__("r4") = m;
  register int syscall_number __asm__("r7") = SYS_futex;
  register int result __asm__("r0");
  __asm__ volatile("swi #0"
                   : "=r"(result)
                   : "r"(addr1_reg), "r"(op_reg), "r"(start_val_reg),
                     "r"(timeout_reg), "r"(m_reg), "r"(syscall_number)
                   : "memory");
  return result;
#else
  const int r =
      syscall(SYS_futex, addr1, FUTEX_WAIT_REQUEUE_PI, start_val, timeout, m);
  if (r == -1) return -errno;
  return r;
#endif
}

inline int sys_futex_unlock_pi(aos_futex *addr1) {
#if ARM_EABI_INLINE_SYSCALL
  register aos_futex *addr1_reg __asm__("r0") = addr1;
  register int op_reg __asm__("r1") = FUTEX_UNLOCK_PI;
  register int syscall_number __asm__("r7") = SYS_futex;
  register int result __asm__("r0");
  __asm__ volatile("swi #0"
                   : "=r"(result)
                   : "r"(addr1_reg), "r"(op_reg), "r"(syscall_number)
                   : "memory");
  return result;
#else
  const int r = syscall(SYS_futex, addr1, FUTEX_UNLOCK_PI);
  if (r == -1) return -errno;
  return r;
#endif
}

// Returns the previous value of f.
inline uint32_t compare_and_swap_val(aos_futex *f, uint32_t before,
                                     uint32_t after) {
#ifdef AOS_SANITIZER_thread
  // This is a workaround for <https://llvm.org/bugs/show_bug.cgi?id=23176>.
  // Basically, most of the atomic operations are broken under tsan, but this
  // particular one isn't.
  // TODO(Brian): Remove this #ifdef (and the one in compare_and_swap) once we
  // don't have to worry about tsan with this bug any more.
  uint32_t before_value = before;
  __tsan_atomic32_compare_exchange_strong(
      reinterpret_cast<int32_t *>(f),
      reinterpret_cast<int32_t *>(&before_value), after,
      __tsan_memory_order_seq_cst, __tsan_memory_order_seq_cst);
  return before_value;
#else
  return __sync_val_compare_and_swap(f, before, after);
#endif
}

// Returns true if it succeeds and false if it fails.
inline bool compare_and_swap(aos_futex *f, uint32_t before, uint32_t after) {
#ifdef AOS_SANITIZER_thread
  return compare_and_swap_val(f, before, after) == before;
#else
  return __sync_bool_compare_and_swap(f, before, after);
#endif
}

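// A rough sketch of how these helpers are used for the mutex fast path further
// down (tid as returned by get_tid(); illustration only):
//
//   if (compare_and_swap(&m->futex, 0, tid)) {
//     // The 0->TID transition succeeded: we own the uncontended lock.
//   } else {
//     // Contended: fall back to FUTEX_LOCK_PI in the kernel.
//   }
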
#ifdef AOS_SANITIZER_thread

// Simple macro for checking something which should always be true.
// Using the standard CHECK macro isn't safe because failures often result in
// reentering the mutex locking code, which doesn't work.
#define SIMPLE_CHECK(expr)                                                   \
  do {                                                                       \
    if (!(expr)) {                                                           \
      fprintf(stderr, "%s: %d: SIMPLE_CHECK(" #expr ") failed!\n", __FILE__, \
              __LINE__);                                                     \
      abort();                                                               \
    }                                                                        \
  } while (false)

// Forcibly initializes the pthread mutex for *m.
// This sequence of operations is only safe for the simpler kinds of mutexes in
// glibc's pthreads implementation on Linux.
void init_pthread_mutex(aos_mutex *m) {
  // Re-initialize the mutex so the destroy won't fail if it's locked.
  // tsan ignores this.
  SIMPLE_CHECK(0 == pthread_mutex_init(&m->pthread_mutex, nullptr));
  // Destroy the mutex so tsan will forget about it if some now-dead thread
  // locked it.
  SIMPLE_CHECK(0 == pthread_mutex_destroy(&m->pthread_mutex));

  // Now actually initialize it, making sure it's process-shareable so it works
  // correctly across shared memory.
  pthread_mutexattr_t attr;
  SIMPLE_CHECK(0 == pthread_mutexattr_init(&attr));
  SIMPLE_CHECK(0 == pthread_mutexattr_setpshared(&attr, true));
  SIMPLE_CHECK(0 == pthread_mutex_init(&m->pthread_mutex, &attr));
  SIMPLE_CHECK(0 == pthread_mutexattr_destroy(&attr));
}

// Locks the pthread mutex for *m.
// If a stack trace ever reveals the pthread_mutex_lock call in here blocking,
// there is a bug in our mutex code or the way somebody is calling it.
void lock_pthread_mutex(aos_mutex *m) {
  if (!m->pthread_mutex_init) {
    init_pthread_mutex(m);
    m->pthread_mutex_init = true;
  }
  SIMPLE_CHECK(0 == pthread_mutex_lock(&m->pthread_mutex));
}

// Forcibly locks the pthread mutex for *m.
// This will (somewhat hackily) rip the lock out from underneath somebody else
// who is already holding it.
void force_lock_pthread_mutex(aos_mutex *m) {
  if (!m->pthread_mutex_init) {
    init_pthread_mutex(m);
    m->pthread_mutex_init = true;
  }
  const int trylock_result = pthread_mutex_trylock(&m->pthread_mutex);
  SIMPLE_CHECK(trylock_result == 0 || trylock_result == EBUSY);
  if (trylock_result == 0) {
    // We're good, so unlock it and then go for a real lock down below.
    SIMPLE_CHECK(0 == pthread_mutex_unlock(&m->pthread_mutex));
  } else {
    // Somebody (should always be somebody else who died with it held) already
    // has it, so make tsan forget about that.
    init_pthread_mutex(m);
  }
  lock_pthread_mutex(m);
}

// Unlocks the pthread mutex for *m.
void unlock_pthread_mutex(aos_mutex *m) {
  assert(m->pthread_mutex_init);
  SIMPLE_CHECK(0 == pthread_mutex_unlock(&m->pthread_mutex));
}

#else

// Empty implementations of all these so the code below doesn't need #ifdefs.
static inline void lock_pthread_mutex(aos_mutex *) {}
static inline void force_lock_pthread_mutex(aos_mutex *) {}
static inline void unlock_pthread_mutex(aos_mutex *) {}

#endif

pid_t do_get_tid() {
  pid_t r = syscall(SYS_gettid);
  assert(r > 0);
  return r;
}

// This gets called by functions before LOG(FATAL)ing with error messages
// that would be incorrect if the error was caused by a process forking without
// initialize_in_new_thread getting called in the fork.
void check_cached_tid(pid_t tid) {
  pid_t actual = do_get_tid();
  if (tid != actual) {
    LOG(FATAL) << "task " << static_cast<intmax_t>(tid) << " forked into "
               << static_cast<intmax_t>(actual)
               << " without letting aos_sync know so we're not really sure "
                  "what's going on";
  }
}

// Starts off at 0 in each new thread (because that's what it gets initialized
// to in most of them or it gets reset to 0 after a fork by atfork_child()).
AOS_THREAD_LOCAL pid_t my_tid = 0;

// Gets called before the fork(2) wrapper function returns in the child.
void atfork_child() {
  // The next time get_tid() is called, it will set everything up again.
  my_tid = 0;
}

void InstallAtforkHook() {
  PCHECK(pthread_atfork(NULL, NULL, &atfork_child) == 0)
      << ": pthread_atfork(NULL, NULL, "
      << reinterpret_cast<void *>(&atfork_child) << ") failed";
}

// This gets called to set everything up in a new thread by get_tid().
void initialize_in_new_thread();

// Gets the current thread's TID and does all of the 1-time initialization the
// first time it's called in a given thread.
inline uint32_t get_tid() {
  if (__builtin_expect(my_tid == 0, false)) {
    initialize_in_new_thread();
  }
  static_assert(sizeof(my_tid) <= sizeof(uint32_t), "pid_t is too big");
  return static_cast<uint32_t>(my_tid);
}

// Contains all of the stuff for dealing with the robust list. Nothing outside
// this namespace should touch anything inside it except Init, Adder, and
// Remover.
namespace my_robust_list {

static_assert(offsetof(aos_mutex, next) == 0,
              "Our math all assumes that the beginning of a mutex and its next "
              "pointer are at the same place in memory.");

// Our version of robust_list_head.
// This is copied from the kernel header because that's a pretty stable ABI (and
// any changes will be backwards compatible anyways) and we want ours to have
// different types.
// The uintptr_ts are &next of the elements in the list (with stuff |ed in).
struct aos_robust_list_head {
  uintptr_t next;
  long futex_offset;
  uintptr_t pending_next;
};

static_assert(offsetof(aos_robust_list_head, next) ==
                  offsetof(robust_list_head, list),
              "Our aos_robust_list_head doesn't match the kernel's");
static_assert(offsetof(aos_robust_list_head, futex_offset) ==
                  offsetof(robust_list_head, futex_offset),
              "Our aos_robust_list_head doesn't match the kernel's");
static_assert(offsetof(aos_robust_list_head, pending_next) ==
                  offsetof(robust_list_head, list_op_pending),
              "Our aos_robust_list_head doesn't match the kernel's");
static_assert(sizeof(aos_robust_list_head) == sizeof(robust_list_head),
              "Our aos_robust_list_head doesn't match the kernel's");

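// For orientation, a rough sketch of the walk the kernel does over this list
// when a thread dies (illustration only; the real code lives in
// kernel/futex.c):
//
//   uintptr_t entry = robust_head.next;
//   while (entry != reinterpret_cast<uintptr_t>(&robust_head.next)) {
//     // Mask off the low "PI" bit, then apply futex_offset to find the futex.
//     aos_futex *f = reinterpret_cast<aos_futex *>(
//         (entry & ~static_cast<uintptr_t>(1)) + robust_head.futex_offset);
//     // The kernel ORs FUTEX_OWNER_DIED into *f and wakes a waiter.
//     entry = *reinterpret_cast<uintptr_t *>(entry & ~static_cast<uintptr_t>(1));
//   }
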
AOS_THREAD_LOCAL aos_robust_list_head robust_head;

// Extra offset between mutex values and where we point to for their robust list
// entries (from SetRobustListOffset).
uintptr_t robust_list_offset = 0;

// The value to OR each pointer's value with whenever putting it into the robust
// list (technically only if it's PI, but all of ours are, so...).
static const uintptr_t kRobustListOr = 1;

// Returns the value which goes into a next variable to represent the head.
inline uintptr_t robust_head_next_value() {
  return reinterpret_cast<uintptr_t>(&robust_head.next);
}
// Returns true iff next represents the head.
inline bool next_is_head(uintptr_t next) {
  return next == robust_head_next_value();
}
// Returns the (pseudo-)mutex corresponding to the head.
// This does NOT have a previous pointer, so be careful with the return value.
inline aos_mutex *robust_head_mutex() {
  return reinterpret_cast<aos_mutex *>(robust_head_next_value());
}

inline uintptr_t mutex_to_next(aos_mutex *m) {
  return (reinterpret_cast<uintptr_t>(&m->next) + robust_list_offset) |
         kRobustListOr;
}
inline aos_mutex *next_to_mutex(uintptr_t next) {
  if (__builtin_expect(robust_list_offset != 0, false) && next_is_head(next)) {
    // We don't offset the head pointer, so be careful.
    return reinterpret_cast<aos_mutex *>(next);
  }
  return reinterpret_cast<aos_mutex *>(
      (next & ~kRobustListOr) - robust_list_offset);
}

// Sets up the robust list for each thread.
void Init() {
  // It starts out just pointing back to itself.
  robust_head.next = robust_head_next_value();
  robust_head.futex_offset = static_cast<ssize_t>(offsetof(aos_mutex, futex)) -
                             static_cast<ssize_t>(offsetof(aos_mutex, next));
  robust_head.pending_next = 0;
  PCHECK(syscall(SYS_set_robust_list, robust_head_next_value(),
                 sizeof(robust_head)) == 0)
      << ": set_robust_list(" << reinterpret_cast<void *>(robust_head.next)
      << ", " << sizeof(robust_head) << ") failed";
  if (kRobustListDebug) {
    printf("%" PRId32 ": init done\n", get_tid());
  }
}

// Updating the offset with locked mutexes is important during robustness
// testing, because there are mutexes which are locked before this is set to a
// non-0 value and then unlocked after it is changed back. However, to make sure
// the code works correctly when manipulating the next pointer of the last of
// those mutexes, all of their next values have to be adjusted appropriately.
void SetRobustListOffset(uintptr_t offset) {
  const uintptr_t offset_change = offset - robust_list_offset;
  robust_list_offset = offset;
  aos_mutex *m = robust_head_mutex();
  // Update the offset contained in each of the mutexes which is already locked.
  while (!next_is_head(m->next)) {
    m->next += offset_change;
    m = next_to_mutex(m->next);
  }
}

bool HaveLockedMutexes() {
  return robust_head.next != robust_head_next_value();
}

// Handles adding a mutex to the robust list.
// The idea is to create one of these at the beginning of a function that needs
// to do this and then call Add() iff it should actually be added.
class Adder {
 public:
  Adder(aos_mutex *m) : m_(m) {
    assert(robust_head.pending_next == 0);
    if (kRobustListDebug) {
      printf("%" PRId32 ": maybe add %p\n", get_tid(), m_);
    }
    robust_head.pending_next = mutex_to_next(m);
    aos_compiler_memory_barrier();
  }
  ~Adder() {
    assert(robust_head.pending_next == mutex_to_next(m_));
    if (kRobustListDebug) {
      printf("%" PRId32 ": done maybe add %p, n=%p p=%p\n", get_tid(), m_,
             next_to_mutex(m_->next), m_->previous);
    }
    aos_compiler_memory_barrier();
    robust_head.pending_next = 0;
  }

  void Add() {
    assert(robust_head.pending_next == mutex_to_next(m_));
    if (kRobustListDebug) {
      printf("%" PRId32 ": adding %p\n", get_tid(), m_);
    }
    const uintptr_t old_head_next_value = robust_head.next;

    m_->next = old_head_next_value;
    aos_compiler_memory_barrier();
    robust_head.next = mutex_to_next(m_);

    m_->previous = robust_head_mutex();
    if (!next_is_head(old_head_next_value)) {
      // robust_head's pseudo-mutex doesn't have a previous pointer to update.
      next_to_mutex(old_head_next_value)->previous = m_;
    }
    aos_compiler_memory_barrier();
    if (kRobustListDebug) {
      printf("%" PRId32 ": done adding %p\n", get_tid(), m_);
    }
  }

 private:
  aos_mutex *const m_;

  DISALLOW_COPY_AND_ASSIGN(Adder);
};

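// A rough usage sketch (this mirrors mutex_get() below; try_lock stands in for
// whatever acquisition logic the caller actually runs):
//
//   my_robust_list::Adder adder(m);     // publishes m via pending_next
//   const int r = try_lock(m);
//   if (r == 0 || r == 1) adder.Add();  // only link it in once we hold it
//   // ~Adder clears pending_next either way.
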
// Handles removing a mutex from the robust list.
// The idea is to create one of these at the beginning of a function that needs
// to do this.
class Remover {
 public:
  Remover(aos_mutex *m) {
    assert(robust_head.pending_next == 0);
    if (kRobustListDebug) {
      printf("%" PRId32 ": beginning to remove %p, n=%p p=%p\n", get_tid(), m,
             next_to_mutex(m->next), m->previous);
    }
    robust_head.pending_next = mutex_to_next(m);
    aos_compiler_memory_barrier();

    aos_mutex *const previous = m->previous;
    const uintptr_t next_value = m->next;

    previous->next = m->next;
    if (!next_is_head(next_value)) {
      // robust_head's pseudo-mutex doesn't have a previous pointer to update.
      next_to_mutex(next_value)->previous = previous;
    }

    if (kRobustListDebug) {
      printf("%" PRId32 ": done removing %p\n", get_tid(), m);
    }
  }
  ~Remover() {
    assert(robust_head.pending_next != 0);
    aos_compiler_memory_barrier();
    robust_head.pending_next = 0;
    if (kRobustListDebug) {
      printf("%" PRId32 ": done with removal\n", get_tid());
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(Remover);
};

}  // namespace my_robust_list

void initialize_in_new_thread() {
  // No synchronization necessary in most of this because it's all thread-local!

  my_tid = do_get_tid();

  static absl::once_flag once;
  absl::call_once(once, InstallAtforkHook);

  my_robust_list::Init();
}

FutexAccessorObserver before_observer = nullptr, after_observer = nullptr;

// RAII class which runs before_observer during construction and after_observer
// during destruction.
class RunObservers {
 public:
  template <class T>
  RunObservers(T *address, bool write)
      : address_(static_cast<void *>(
            const_cast<typename ::std::remove_cv<T>::type *>(address))),
        write_(write) {
    if (__builtin_expect(before_observer != nullptr, false)) {
      before_observer(address_, write_);
    }
  }
  ~RunObservers() {
    if (__builtin_expect(after_observer != nullptr, false)) {
      after_observer(address_, write_);
    }
  }

 private:
  void *const address_;
  const bool write_;

  DISALLOW_COPY_AND_ASSIGN(RunObservers);
};

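// A rough sketch of how a robustness test might hook these observers
// (MakeWritable and RestoreProtection are hypothetical test helpers):
//
//   void Before(void *addr, bool write) { MakeWritable(addr); }
//   void After(void *addr, bool write) { RestoreProtection(addr); }
//   ...
//   aos::linux_code::ipc_lib::SetFutexAccessorObservers(Before, After);
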
// Finishes the locking of a mutex by potentially clearing FUTEX_OWNER_DIED in
// the futex and returning the correct value.
inline int mutex_finish_lock(aos_mutex *m) {
  const uint32_t value = __atomic_load_n(&m->futex, __ATOMIC_ACQUIRE);
  if (__builtin_expect((value & FUTEX_OWNER_DIED) != 0, false)) {
    __atomic_and_fetch(&m->futex, ~FUTEX_OWNER_DIED, __ATOMIC_RELAXED);
    force_lock_pthread_mutex(m);
    return 1;
  } else {
    lock_pthread_mutex(m);
    return 0;
  }
}

// Split out separately from mutex_get so condition_wait can call it and use its
// own my_robust_list::Adder.
inline int mutex_do_get(aos_mutex *m, bool signals_fail,
                        const struct timespec *timeout, uint32_t tid) {
  RunObservers run_observers(m, true);
  if (kPrintOperations) {
    printf("%" PRId32 ": %p do_get\n", tid, m);
  }

  while (true) {
    // If the atomic 0->TID transition fails.
    if (!compare_and_swap(&m->futex, 0, tid)) {
      // Wait in the kernel, which handles atomically ORing in FUTEX_WAITERS
      // before actually sleeping.
      const int ret = sys_futex_wait(FUTEX_LOCK_PI, &m->futex, 1, timeout);
      if (ret != 0) {
        if (timeout != NULL && ret == -ETIMEDOUT) {
          return 3;
        }
        if (__builtin_expect(ret == -EINTR, true)) {
          if (signals_fail) {
            return 2;
          } else {
            continue;
          }
        }
        my_robust_list::robust_head.pending_next = 0;
        CHECK_NE(ret, -EDEADLK) << ": multiple lock of " << m << " by " << tid;

        errno = -ret;
        PLOG(FATAL) << "FUTEX_LOCK_PI(" << &m->futex
                    << "(=" << __atomic_load_n(&m->futex, __ATOMIC_SEQ_CST)
                    << "), 1, " << timeout << ") failed";
      } else {
        if (kLockDebug) {
          printf("%" PRId32 ": %p kernel lock done\n", tid, m);
        }
        // The kernel already handled setting the value to our TID (ish).
        break;
      }
    } else {
      if (kLockDebug) {
        printf("%" PRId32 ": %p fast lock done\n", tid, m);
      }
      lock_pthread_mutex(m);
      // Fastpath succeeded, so no need to call into the kernel.
      // Because this is the fastpath, it's a good idea to avoid even having to
      // load the value again down below.
      return 0;
    }
  }

  return mutex_finish_lock(m);
}

// The common implementation for everything that wants to lock a mutex.
// If signals_fail is false, the function will try again if the wait syscall is
// interrupted by a signal.
// timeout can be NULL for no timeout.
inline int mutex_get(aos_mutex *m, bool signals_fail,
                     const struct timespec *timeout) {
  const uint32_t tid = get_tid();
  my_robust_list::Adder adder(m);
  const int r = mutex_do_get(m, signals_fail, timeout, tid);
  if (r == 0 || r == 1) adder.Add();
  return r;
}

721// number_requeue is the number of waiters to requeue (probably INT_MAX or 0). 1
722// will always be woken.
Brian Silverman71c55c52014-08-19 14:31:59 -0400723void condition_wake(aos_condition *c, aos_mutex *m, int number_requeue) {
724 RunObservers run_observers(c, true);
Brian Silvermandc1eb272014-08-19 14:25:59 -0400725 // Make it so that anybody just going to sleep won't.
726 // This is where we might accidentally wake more than just 1 waiter with 1
727 // signal():
728 // 1 already sleeping will be woken but n might never actually make it to
729 // sleep in the kernel because of this.
Brian Silverman71c55c52014-08-19 14:31:59 -0400730 uint32_t new_value = __atomic_add_fetch(c, 1, __ATOMIC_SEQ_CST);
Brian Silvermandc1eb272014-08-19 14:25:59 -0400731
Brian2a4294f2019-06-12 20:23:32 -0700732 while (true) {
733 // This really wants to be FUTEX_REQUEUE_PI, but the kernel doesn't have
734 // that... However, the code to support that is in the kernel, so it might
735 // be a good idea to patch it to support that and use it iff it's there.
736 const int ret =
737 sys_futex_cmp_requeue_pi(c, 1, number_requeue, &m->futex, new_value);
738 if (ret < 0) {
739 // If the value got changed out from under us (aka somebody else did a
740 // condition_wake).
741 if (__builtin_expect(ret == -EAGAIN, true)) {
742 // If we're doing a broadcast, the other guy might have done a signal
743 // instead, so we have to try again.
744 // If we're doing a signal, we have to go again to make sure that 2
745 // signals wake 2 processes.
746 new_value = __atomic_load_n(c, __ATOMIC_RELAXED);
747 continue;
Brian Silverman71c55c52014-08-19 14:31:59 -0400748 }
Brian Silverman71c55c52014-08-19 14:31:59 -0400749 my_robust_list::robust_head.pending_next = 0;
Alex Perrycb7da4b2019-08-28 19:35:56 -0700750 errno = -ret;
751 PLOG(FATAL) << "FUTEX_CMP_REQUEUE_PI(" << c << ", 1, " << number_requeue
752 << ", " << &m->futex << ", *" << c << ") failed";
Brian2a4294f2019-06-12 20:23:32 -0700753 } else {
754 return;
Brian Silverman71c55c52014-08-19 14:31:59 -0400755 }
Brian Silvermandc1eb272014-08-19 14:25:59 -0400756 }
757}
758
759} // namespace
760
int mutex_lock(aos_mutex *m) {
  return mutex_get(m, true, NULL);
}
int mutex_lock_timeout(aos_mutex *m, const struct timespec *timeout) {
  return mutex_get(m, true, timeout);
}
int mutex_grab(aos_mutex *m) {
  return mutex_get(m, false, NULL);
}

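// A rough sketch of handling the return-code convention used above (0 = got
// the lock, 1 = got it but the previous owner died, 2 = interrupted by a
// signal, 3 = timed out; recover_shared_state is hypothetical):
//
//   switch (mutex_lock_timeout(&m, &timeout)) {
//     case 0: break;
//     case 1: recover_shared_state(); break;  // we still hold the lock
//     case 2: /* signal arrived; maybe retry */ break;
//     case 3: /* timed out */ break;
//   }
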
void mutex_unlock(aos_mutex *m) {
  RunObservers run_observers(m, true);
  const uint32_t tid = get_tid();
  if (kPrintOperations) {
    printf("%" PRId32 ": %p unlock\n", tid, m);
  }

  const uint32_t value = __atomic_load_n(&m->futex, __ATOMIC_SEQ_CST);
  if (__builtin_expect((value & FUTEX_TID_MASK) != tid, false)) {
    my_robust_list::robust_head.pending_next = 0;
    check_cached_tid(tid);
    if ((value & FUTEX_TID_MASK) == 0) {
      LOG(FATAL) << "multiple unlock of aos_mutex " << m << " by " << tid;
    } else {
      LOG(FATAL) << "aos_mutex " << m << " is locked by "
                 << (value & FUTEX_TID_MASK) << ", not " << tid;
    }
  }

  my_robust_list::Remover remover(m);
  unlock_pthread_mutex(m);

  // If the atomic TID->0 transition fails (ie FUTEX_WAITERS is set),
  if (!compare_and_swap(&m->futex, tid, 0)) {
    // The kernel handles everything else.
    const int ret = sys_futex_unlock_pi(&m->futex);
    if (ret != 0) {
      my_robust_list::robust_head.pending_next = 0;
      errno = -ret;
      PLOG(FATAL) << "FUTEX_UNLOCK_PI(" << (&m->futex) << ") failed";
    }
  } else {
    // There aren't any waiters, so no need to call into the kernel.
  }
}

int mutex_trylock(aos_mutex *m) {
  RunObservers run_observers(m, true);
  const uint32_t tid = get_tid();
  if (kPrintOperations) {
    printf("%" PRId32 ": %p trylock\n", tid, m);
  }
  my_robust_list::Adder adder(m);

  // Try an atomic 0->TID transition.
  uint32_t c = compare_and_swap_val(&m->futex, 0, tid);

  if (c != 0) {
    if (__builtin_expect((c & FUTEX_OWNER_DIED) == 0, true)) {
      // Somebody else had it locked; we failed.
      return 4;
    } else {
      // FUTEX_OWNER_DIED was set, so we have to call into the kernel to deal
      // with resetting it.
      const int ret = sys_futex_wait(FUTEX_TRYLOCK_PI, &m->futex, 0, NULL);
      if (ret == 0) {
        adder.Add();
        // Only clear the owner died if somebody else didn't do the recovery
        // and then unlock before our TRYLOCK happened.
        return mutex_finish_lock(m);
      } else {
        // EWOULDBLOCK means that somebody else beat us to it.
        if (__builtin_expect(ret == -EWOULDBLOCK, true)) {
          return 4;
        }
        my_robust_list::robust_head.pending_next = 0;
        errno = -ret;
        PLOG(FATAL) << "FUTEX_TRYLOCK_PI(" << (&m->futex)
                    << ", 0, NULL) failed";
      }
    }
  }

  lock_pthread_mutex(m);
  adder.Add();
  return 0;
}

bool mutex_islocked(const aos_mutex *m) {
  const uint32_t tid = get_tid();

  const uint32_t value = __atomic_load_n(&m->futex, __ATOMIC_RELAXED);
  return (value & FUTEX_TID_MASK) == tid;
}

void death_notification_init(aos_mutex *m) {
  const uint32_t tid = get_tid();
  if (kPrintOperations) {
    printf("%" PRId32 ": %p death_notification start\n", tid, m);
  }
  my_robust_list::Adder adder(m);
  {
    RunObservers run_observers(m, true);
    CHECK(compare_and_swap(&m->futex, 0, tid));
  }
  adder.Add();
}

void death_notification_release(aos_mutex *m) {
  RunObservers run_observers(m, true);

#ifndef NDEBUG
  // Verify it's "locked", like it should be.
  {
    const uint32_t tid = get_tid();
    if (kPrintOperations) {
      printf("%" PRId32 ": %p death_notification release\n", tid, m);
    }
    const uint32_t value = __atomic_load_n(&m->futex, __ATOMIC_SEQ_CST);
    assert((value & ~FUTEX_WAITERS) == tid);
  }
#endif

  my_robust_list::Remover remover(m);
  ANNOTATE_HAPPENS_BEFORE(m);
  const int ret = sys_futex_unlock_pi(&m->futex);
  if (ret != 0) {
    my_robust_list::robust_head.pending_next = 0;
    errno = -ret;
    PLOG(FATAL) << "FUTEX_UNLOCK_PI(" << &m->futex << ") failed";
  }
}

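// A rough sketch of the death-notification pattern implied by the two
// functions above (the watcher logic is hypothetical): the owning thread
// "locks" m once at startup; because m is on the robust list, the kernel sets
// FUTEX_OWNER_DIED and wakes waiters if that thread dies, so a peer that later
// acquires m can detect the death via the owner-died return value:
//
//   // Owner thread:
//   death_notification_init(&m);
//   ...
//   death_notification_release(&m);  // clean shutdown
//
//   // Watcher:
//   if (mutex_grab(&m) == 1) { /* owner died without releasing */ }
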
int condition_wait(aos_condition *c, aos_mutex *m, struct timespec *end_time) {
  RunObservers run_observers(c, false);
  const uint32_t tid = get_tid();
  const uint32_t wait_start = __atomic_load_n(c, __ATOMIC_SEQ_CST);

  mutex_unlock(m);

  my_robust_list::Adder adder(m);

  while (true) {
    // Wait in the kernel iff the value of it doesn't change (ie somebody else
    // does a wake) from before we unlocked the mutex.
    int ret = sys_futex_wait_requeue_pi(c, wait_start, end_time, &m->futex);

    if (ret != 0) {
      // Timed out waiting. Signal that back up to the user.
      if (__builtin_expect(ret == -ETIMEDOUT, true)) {
        // We have to relock it ourself because the kernel didn't do it.
        const int r = mutex_do_get(m, false, nullptr, tid);
        assert(__builtin_expect(r == 0 || r == 1, true));
        adder.Add();

        // OWNER_DIED takes priority. Pass it on if we found it.
        if (r == 1) return r;
        // Otherwise communicate that we timed out.
        return -1;
      }

      // If it failed because somebody else did a wake and changed the value
      // before we actually made it to sleep.
      if (__builtin_expect(ret == -EAGAIN, true)) {
        // There's no need to unconditionally set FUTEX_WAITERS here if we're
        // using REQUEUE_PI because the kernel automatically does that in the
        // REQUEUE_PI iff it requeued anybody.
        // If we're not using REQUEUE_PI, then everything is just normal locks
        // etc, so there's no need to do anything special there either.

        // We have to relock it ourself because the kernel didn't do it.
        const int r = mutex_do_get(m, false, nullptr, tid);
        assert(__builtin_expect(r == 0 || r == 1, true));
        adder.Add();
        return r;
      }
      // Try again if it was because of a signal.
      if (__builtin_expect((ret == -EINTR), true)) {
        continue;
      }
      my_robust_list::robust_head.pending_next = 0;
      errno = -ret;
      PLOG(FATAL) << "FUTEX_WAIT_REQUEUE_PI(" << c << ", " << wait_start << ", "
                  << (&m->futex) << ") failed";
    } else {
      // Record that the kernel relocked it for us.
      lock_pthread_mutex(m);

      // We succeeded in waiting, and the kernel took care of locking the mutex
      // for us and setting FUTEX_WAITERS iff it needed to (for REQUEUE_PI).

      adder.Add();

      const uint32_t value = __atomic_load_n(&m->futex, __ATOMIC_RELAXED);
      if (__builtin_expect((value & FUTEX_OWNER_DIED) != 0, false)) {
        __atomic_and_fetch(&m->futex, ~FUTEX_OWNER_DIED, __ATOMIC_RELAXED);
        return 1;
      } else {
        return 0;
      }
    }
  }
}

void condition_signal(aos_condition *c, aos_mutex *m) {
  condition_wake(c, m, 0);
}

void condition_broadcast(aos_condition *c, aos_mutex *m) {
  condition_wake(c, m, INT_MAX);
}

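// A rough usage sketch of the condition-variable API above (the predicate loop
// is the caller's responsibility, as with pthreads):
//
//   mutex_grab(&m);
//   while (!predicate()) {
//     const int r = condition_wait(&c, &m, nullptr);
//     if (r == 1) { /* previous owner died; recover shared state */ }
//     // r == -1 only happens with a non-NULL end_time (timeout).
//   }
//   mutex_unlock(&m);
//
//   // Waker side:
//   condition_signal(&c, &m);  // or condition_broadcast(&c, &m)
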
int futex_wait_timeout(aos_futex *m, const struct timespec *timeout) {
  RunObservers run_observers(m, false);
  const int ret = sys_futex_wait(FUTEX_WAIT, m, 0, timeout);
  if (ret != 0) {
    if (ret == -EINTR) {
      return 1;
    } else if (ret == -ETIMEDOUT) {
      return 2;
    } else if (ret != -EWOULDBLOCK) {
      errno = -ret;
      return -1;
    }
  }
  ANNOTATE_HAPPENS_AFTER(m);
  return 0;
}

int futex_wait(aos_futex *m) { return futex_wait_timeout(m, NULL); }

int futex_set_value(aos_futex *m, uint32_t value) {
  RunObservers run_observers(m, false);
  ANNOTATE_HAPPENS_BEFORE(m);
  __atomic_store_n(m, value, __ATOMIC_SEQ_CST);
  const int r = sys_futex_wake(m, INT_MAX - 4096);
  if (__builtin_expect(
          static_cast<unsigned int>(r) > static_cast<unsigned int>(-4096),
          false)) {
    errno = -r;
    return -1;
  } else {
    return r;
  }
}

int futex_set(aos_futex *m) {
  return futex_set_value(m, 1);
}

int futex_unset(aos_futex *m) {
  return !__atomic_exchange_n(m, 0, __ATOMIC_SEQ_CST);
}

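// A rough sketch of using the raw-futex API above as a one-shot notification
// between two processes sharing f (illustration only):
//
//   // Waiter:
//   aos_futex f = 0;                 // 0 = unset
//   if (futex_wait(&f) == 0) { /* f was set */ }
//
//   // Notifier:
//   futex_set(&f);                   // stores 1 and wakes waiters
//
//   // Reset for reuse:
//   futex_unset(&f);                 // returns true iff it was set
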
namespace aos {
namespace linux_code {
namespace ipc_lib {

// Sets functions to run before and after all futex operations.
// This is important when doing robustness testing because the memory has to be
// made writable for the whole futex operation, otherwise it never succeeds.
void SetFutexAccessorObservers(FutexAccessorObserver before,
                               FutexAccessorObserver after) {
  before_observer = before;
  after_observer = after;
}

// Sets an extra offset between mutexes and the value we use for them in the
// robust list (only the forward pointers). This is used to work around a kernel
// bug by keeping a second set of mutexes which is always writable so the kernel
// won't go into an infinite loop when trying to unlock them.
void SetRobustListOffset(ptrdiff_t offset) {
  my_robust_list::SetRobustListOffset(offset);
}

// Returns true iff there are any mutexes locked by the current thread.
// This is mainly useful for testing.
bool HaveLockedMutexes() {
  return my_robust_list::HaveLockedMutexes();
}

}  // namespace ipc_lib
}  // namespace linux_code
}  // namespace aos