/*-
 * Copyright (c) 2009-2010 Brad Penoff
 * Copyright (c) 2009-2010 Humaira Kamal
 * Copyright (c) 2011-2012 Irene Ruengeler
 * Copyright (c) 2011-2012 Michael Tuexen
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _USER_ATOMIC_H_
#define _USER_ATOMIC_H_

/* __Userspace__ version of sys/i386/include/atomic.h goes here */

/* TODO In the future, we might want to avoid i386-specific assembly here.
 * The options include:
 * - implement the operations generically (but maybe not truly atomically?) in userspace
 * - have ifdefs for __Userspace_arch_ perhaps (the OS alone isn't enough...)
 * One portable direction, using C11 <stdatomic.h>, is sketched below after the includes.
 */

#include <stdio.h>
#include <sys/types.h>

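/* A minimal sketch of the "generic" option mentioned in the TODO above,
 * assuming a C11 compiler with <stdatomic.h>.  This is illustrative only and
 * is not part of the current implementation; the casts assume that a plain
 * int and an _Atomic int have the same representation.
 */
#if 0 /* illustrative C11 sketch */
#include <stdatomic.h>

/* Atomically add/subtract V to/from the integer at P. */
#define atomic_add_int(P, V)      (void)atomic_fetch_add((_Atomic int *)(P), (V))
#define atomic_subtract_int(P, V) (void)atomic_fetch_sub((_Atomic int *)(P), (V))

/* Atomically add v to *p and return the previous value of *p. */
#define atomic_fetchadd_int(p, v) atomic_fetch_add((_Atomic int *)(p), (v))

/* if (*dst == exp) *dst = src; returns 0 on failure, non-zero on success. */
static inline int
atomic_cmpset_int_c11(int *dst, int exp, int src)
{
        return (int)atomic_compare_exchange_strong((_Atomic int *)dst, &exp, src);
}
#endif /* illustrative C11 sketch */
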
#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_Windows)
#if defined(__Userspace_os_Windows)
/* InterlockedExchangeAdd() returns the *previous* value of *addr. */
#define atomic_add_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_fetchadd_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_subtract_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, -((LONG)val))
/* InterlockedCompareExchange() returns the initial value of *dst, so compare
 * it against exp to get the 0-on-failure / non-zero-on-success convention
 * used by the other implementations in this header.
 */
#define atomic_cmpset_int(dst, exp, src) (InterlockedCompareExchange((LPLONG)dst, src, exp) == (LONG)(exp))
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (InterlockedExchangeAdd((LPLONG)addr, (-1L)) == 1)
#else
#include <libkern/OSAtomic.h>
/* Note: OSAtomicAdd32Barrier() returns the *new* value of *addr (so
 * atomic_fetchadd_int() returns the new value on this platform), which is
 * why the refcount check below compares against 0 rather than 1.
 */
#define atomic_add_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_fetchadd_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_subtract_int(addr, val) OSAtomicAdd32Barrier(-val, (int32_t *)addr)
#define atomic_cmpset_int(dst, exp, src) OSAtomicCompareAndSwapIntBarrier(exp, src, (int *)dst)
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 0)
#endif

#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
        int32_t newval; \
        newval = atomic_fetchadd_int(addr, -val); \
        if (newval < 0) { \
                panic("Counter goes negative"); \
        } \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
        int32_t newval; \
        newval = atomic_fetchadd_int(addr, -val); \
        if (newval < 0) { \
                *addr = 0; \
        } \
}
#endif
#if defined(__Userspace_os_Windows)
static void atomic_init() {} /* empty when we are not using atomic_mtx */
#else
static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif

#else
/* Using gcc built-in functions for atomic memory operations.
   Reference: http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
   Requires gcc version 4.1.0 or later; on x86, compile with -march=i486 (or newer).
*/

/* Atomically add V to *P. */
#define atomic_add_int(P, V) (void) __sync_fetch_and_add(P, V)

/* Atomically subtract V from *P. */
#define atomic_subtract_int(P, V) (void) __sync_fetch_and_sub(P, V)

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) __sync_fetch_and_add(p, v)
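
/* A small usage sketch (illustrative only; example_counter and
 * example_next_id() are hypothetical and not part of this header).  Because
 * atomic_fetchadd_int() returns the value *before* the addition, it can hand
 * out unique, monotonically increasing identifiers without extra locking.
 */
#if 0 /* illustrative example */
static unsigned int example_counter;

static inline unsigned int
example_next_id(void)
{
        /* Each caller sees a distinct pre-increment value. */
        return (unsigned int)atomic_fetchadd_int(&example_counter, 1);
}
#endif /* illustrative example */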

/* Following explanation from src/sys/i386/include/atomic.h,
 * for atomic compare and set
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#define atomic_cmpset_int(dst, exp, src) __sync_bool_compare_and_swap(dst, exp, src)
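
/* A hedged sketch of the usual compare-and-set retry loop (illustrative only;
 * example_add_capped() is hypothetical).  It shows how the 0-on-failure /
 * non-zero-on-success convention documented above is typically consumed:
 * recompute the desired value and retry until the swap succeeds.
 */
#if 0 /* illustrative example */
static inline void
example_add_capped(volatile u_int *p, u_int inc, u_int cap)
{
        u_int oldval, newval;

        do {
                oldval = *p;
                newval = oldval + inc;
                if (newval > cap) {
                        newval = cap;   /* saturate at the cap instead of exceeding it */
                }
        } while (atomic_cmpset_int(p, oldval, newval) == 0);
}
#endif /* illustrative example */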

#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 1)
#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
        int32_t oldval; \
        oldval = atomic_fetchadd_int(addr, -val); \
        if (oldval < val) { \
                panic("Counter goes negative"); \
        } \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
        int32_t oldval; \
        oldval = atomic_fetchadd_int(addr, -val); \
        if (oldval < val) { \
                *addr = 0; \
        } \
}
#endif
static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif
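
/* A minimal sketch of how the reference-count helper above is meant to be
 * used (illustrative only; struct example_obj and example_obj_release() are
 * hypothetical and not part of this header).
 */
#if 0 /* illustrative example */
#include <stdlib.h>

struct example_obj {
        int refcount;
        /* ... payload ... */
};

static inline void
example_obj_release(struct example_obj *obj)
{
        /* Drop one reference; free the object only if this was the last one. */
        if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&obj->refcount)) {
                free(obj);
        }
}
#endif /* illustrative example */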

#if 0 /* using libatomic_ops */
#include "user_include/atomic_ops.h"

/* Atomically add incr to *P, and return the original value of *P. */
#define atomic_add_int(P, V) AO_fetch_and_add((AO_t*)P, V)

#define atomic_subtract_int(P, V) AO_fetch_and_add((AO_t*)P, -(V))

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) AO_fetch_and_add((AO_t*)p, v)

/* Atomically compare *addr to old_val, and replace *addr by new_val
   if the first comparison succeeds.  Returns nonzero if the comparison
   succeeded and *addr was updated.
*/
/* The following explanation from src/sys/i386/include/atomic.h matches
   that of AO_compare_and_swap above.
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#define atomic_cmpset_int(dst, exp, src) AO_compare_and_swap((AO_t*)dst, exp, src)

static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif /* closing #if for libatomic */

#if 0 /* using atomic_mtx */

#include <pthread.h>

extern userland_mutex_t atomic_mtx;

#if defined (__Userspace_os_Windows)
static inline void atomic_init() {
        InitializeCriticalSection(&atomic_mtx);
}
static inline void atomic_destroy() {
        DeleteCriticalSection(&atomic_mtx);
}
static inline void atomic_lock() {
        EnterCriticalSection(&atomic_mtx);
}
static inline void atomic_unlock() {
        LeaveCriticalSection(&atomic_mtx);
}
#else
static inline void atomic_init() {
        pthread_mutexattr_t mutex_attr;

        pthread_mutexattr_init(&mutex_attr);
#ifdef INVARIANTS
        pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
        pthread_mutex_init(&atomic_mtx, &mutex_attr);
        pthread_mutexattr_destroy(&mutex_attr);
}
static inline void atomic_destroy() {
        (void)pthread_mutex_destroy(&atomic_mtx);
}
static inline void atomic_lock() {
#ifdef INVARIANTS
        KASSERT(pthread_mutex_lock(&atomic_mtx) == 0, ("atomic_lock: atomic_mtx already locked"))
#else
        (void)pthread_mutex_lock(&atomic_mtx);
#endif
}
static inline void atomic_unlock() {
#ifdef INVARIANTS
        KASSERT(pthread_mutex_unlock(&atomic_mtx) == 0, ("atomic_unlock: atomic_mtx not locked"))
#else
        (void)pthread_mutex_unlock(&atomic_mtx);
#endif
}
#endif
/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */

#define MPLOCKED "lock ; "

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile void *n, u_int v)
{
        int *p = (int *) n;
        atomic_lock();
        __asm __volatile(
        "       " MPLOCKED "            "
        "       xaddl   %0, %1 ;        "
        "# atomic_fetchadd_int"
        : "+r" (v),                     /* 0 (result) */
          "=m" (*p)                     /* 1 */
        : "m" (*p));                    /* 2 */
        atomic_unlock();

        return (v);
}


#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
        u_char res;

        atomic_lock();
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       cmpl    %3,%4 ;         "
        "       jne     1f ;            "
        "       movl    %2,%1 ;         "
        "1:                             "
        "       sete    %0 ;            "
        "       popfl ;                 "
        "# atomic_cmpset_int"
        : "=q" (res),                   /* 0 */
          "=m" (*dst)                   /* 1 */
        : "r" (src),                    /* 2 */
          "r" (exp),                    /* 3 */
          "m" (*dst)                    /* 4 */
        : "memory");
        atomic_unlock();

        return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
        u_char res;

        atomic_lock();
        __asm __volatile(
        "       " MPLOCKED "            "
        "       cmpxchgl %2,%1 ;        "
        "       sete    %0 ;            "
        "1:                             "
        "# atomic_cmpset_int"
        : "=a" (res),                   /* 0 */
          "=m" (*dst)                   /* 1 */
        : "r" (src),                    /* 2 */
          "a" (exp),                    /* 3 */
          "m" (*dst)                    /* 4 */
        : "memory");
        atomic_unlock();

        return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

#define atomic_add_int(P, V) do {       \
        atomic_lock();                  \
        (*(u_int *)(P) += (V));         \
        atomic_unlock();                \
} while(0)
#define atomic_subtract_int(P, V) do {  \
        atomic_lock();                  \
        (*(u_int *)(P) -= (V));         \
        atomic_unlock();                \
} while(0)

#endif /* closing #if for atomic_mtx */
#endif /* _USER_ATOMIC_H_ */