/*
| 2 | * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson |
| 3 | * |
| 4 | * Redistribution and use in source and binary forms, with or without |
| 5 | * modification, are permitted provided that the following conditions |
| 6 | * are met: |
| 7 | * 1. Redistributions of source code must retain the above copyright |
| 8 | * notice, this list of conditions and the following disclaimer. |
| 9 | * 2. Redistributions in binary form must reproduce the above copyright |
| 10 | * notice, this list of conditions and the following disclaimer in the |
| 11 | * documentation and/or other materials provided with the distribution. |
| 12 | * 3. The name of the author may not be used to endorse or promote products |
| 13 | * derived from this software without specific prior written permission. |
| 14 | * |
| 15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| 16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| 17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
| 18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
| 20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
| 24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 25 | */ |
| 26 | #ifndef _EVTHREAD_INTERNAL_H_ |
| 27 | #define _EVTHREAD_INTERNAL_H_ |
| 28 | |
| 29 | #ifdef __cplusplus |
| 30 | extern "C" { |
| 31 | #endif |
| 32 | |
| 33 | #include "event2/thread.h" |
| 34 | #include "event2/event-config.h" |
| 35 | #include "util-internal.h" |
| 36 | |
| 37 | struct event_base; |
| 38 | |
| 39 | #ifndef WIN32 |
| 40 | /* On Windows, the way we currently make DLLs, it's not allowed for us to |
| 41 | * have shared global structures. Thus, we only do the direct-call-to-function |
| 42 | * code path if we know that the local shared library system supports it. |
| 43 | */ |
| 44 | #define EVTHREAD_EXPOSE_STRUCTS |
| 45 | #endif |
| 46 | |
#if ! defined(_EVENT_DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
/* Global function pointers to lock-related functions. NULL if locking isn't
   enabled. */
extern struct evthread_lock_callbacks _evthread_lock_fns;
extern struct evthread_condition_callbacks _evthread_cond_fns;
/* Function returning the calling thread's ID; NULL until an id function has
 * been configured. */
extern unsigned long (*_evthread_id_fn)(void);
/* Nonzero iff lock debugging is enabled; checked by EVLOCK_ASSERT_LOCKED
 * below. */
extern int _evthread_lock_debugging_enabled;
| 54 | |
/** Return the ID of the current thread, or 1 if threading isn't enabled. */
#define EVTHREAD_GET_ID() \
	(_evthread_id_fn ? _evthread_id_fn() : 1)

/** Return true iff we're in the thread that is currently (or most recently)
 * running a given event_base's loop.  With no id function configured we
 * cannot tell threads apart, so every caller counts as the owner.
 * Requires lock. */
#define EVBASE_IN_THREAD(base) \
	(_evthread_id_fn == NULL || \
	(base)->th_owner_id == _evthread_id_fn())

/** Return true iff we need to notify the base's main thread about changes to
 * its state, because it's currently running the main loop in another
 * thread. Requires lock. */
#define EVBASE_NEED_NOTIFY(base) \
	(_evthread_id_fn != NULL && \
	(base)->running_loop && \
	(base)->th_owner_id != _evthread_id_fn())
| 72 | |
/** Allocate a new lock, and store it in lockvar, a void*.  Sets lockvar to
    NULL if locking is not enabled. */
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
	((lockvar) = _evthread_lock_fns.alloc ? \
	    _evthread_lock_fns.alloc(locktype) : NULL)

/** Free a given lock, if it is present and locking is enabled.  The value is
 * copied into a temporary first so 'lockvar' is evaluated only once. */
#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
	do { \
		void *_lock_tmp_ = (lockvar); \
		if (_lock_tmp_ && _evthread_lock_fns.free) \
			_evthread_lock_fns.free(_lock_tmp_, (locktype)); \
	} while (0)

/** Acquire a lock; a NULL lockvar (locking disabled) is a no-op.  'mode' is
 * passed through to the configured lock callback. */
#define EVLOCK_LOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			_evthread_lock_fns.lock(mode, lockvar); \
	} while (0)

/** Release a lock acquired with EVLOCK_LOCK; no-op when lockvar is NULL. */
#define EVLOCK_UNLOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			_evthread_lock_fns.unlock(mode, lockvar); \
	} while (0)
| 100 | |
/* NOTE(review): a token-identical copy of _EVLOCK_SORTLOCKS used to be
 * defined here.  It duplicated the definition in the shared,
 * lock-impl-independent section near the end of this header, which is the
 * one both code paths rely on; the redundant copy has been removed. */
| 110 | |
/** Lock an event_base, if it is set up for locking. Acquires the lock
    in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do {				\
		EVLOCK_LOCK((base)->lockvar, 0);			\
	} while (0)

/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do {				\
		EVLOCK_UNLOCK((base)->lockvar, 0);			\
	} while (0)

/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
 * locked and held by us.  A no-op unless _evthread_lock_debugging_enabled
 * was set, so it costs only a flag check in normal builds. */
#define EVLOCK_ASSERT_LOCKED(lock)					\
	do {								\
		if ((lock) && _evthread_lock_debugging_enabled) {	\
			EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
		}							\
	} while (0)
| 130 | |
| 131 | /** Try to grab the lock for 'lockvar' without blocking, and return 1 if we |
| 132 | * manage to get it. */ |
| 133 | static inline int EVLOCK_TRY_LOCK(void *lock); |
| 134 | static inline int |
| 135 | EVLOCK_TRY_LOCK(void *lock) |
| 136 | { |
| 137 | if (lock && _evthread_lock_fns.lock) { |
| 138 | int r = _evthread_lock_fns.lock(EVTHREAD_TRY, lock); |
| 139 | return !r; |
| 140 | } else { |
| 141 | /* Locking is disabled either globally or for this thing; |
| 142 | * of course we count as having the lock. */ |
| 143 | return 1; |
| 144 | } |
| 145 | } |
| 146 | |
/** Allocate a new condition variable and store it in the void *, condvar.
 * Sets condvar to NULL if condition variables are not enabled. */
#define EVTHREAD_ALLOC_COND(condvar)					\
	do {								\
		(condvar) = _evthread_cond_fns.alloc_condition ?	\
		    _evthread_cond_fns.alloc_condition(0) : NULL;	\
	} while (0)
/** Deallocate and free a condition variable in condvar; no-op if NULL. */
#define EVTHREAD_FREE_COND(cond)					\
	do {								\
		if (cond)						\
			_evthread_cond_fns.free_condition((cond));	\
	} while (0)
/** Signal one thread waiting on cond (second argument 0 = wake one). */
#define EVTHREAD_COND_SIGNAL(cond)					\
	( (cond) ? _evthread_cond_fns.signal_condition((cond), 0) : 0 )
/** Signal all threads waiting on cond (second argument 1 = broadcast). */
#define EVTHREAD_COND_BROADCAST(cond)					\
	( (cond) ? _evthread_cond_fns.signal_condition((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled.  Must be called while
 * holding 'lock'.  The lock will be released until the condition is
 * signalled, at which point it will be acquired again.  Returns 0 for
 * success, -1 for failure.  With no condition variable, returns 0
 * immediately. */
#define EVTHREAD_COND_WAIT(cond, lock)					\
	( (cond) ? _evthread_cond_fns.wait_condition((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed.  Returns 1
 * on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv)			\
	( (cond) ? _evthread_cond_fns.wait_condition((cond), (lock), (tv)) : 0 )

/** True iff locking functions have been configured. */
#define EVTHREAD_LOCKING_ENABLED()		\
	(_evthread_lock_fns.lock != NULL)
| 179 | |
#elif ! defined(_EVENT_DISABLE_THREAD_SUPPORT)

/* Hidden-structs variant (see the EVTHREAD_EXPOSE_STRUCTS comment above):
 * instead of reading the global callback structures directly, everything
 * goes through these accessor functions, which are defined elsewhere in the
 * library and mirror the callback fields used by the other branch. */
unsigned long _evthreadimpl_get_id(void);
int _evthreadimpl_is_lock_debugging_enabled(void);
void *_evthreadimpl_lock_alloc(unsigned locktype);
void _evthreadimpl_lock_free(void *lock, unsigned locktype);
int _evthreadimpl_lock_lock(unsigned mode, void *lock);
int _evthreadimpl_lock_unlock(unsigned mode, void *lock);
void *_evthreadimpl_cond_alloc(unsigned condtype);
void _evthreadimpl_cond_free(void *cond);
int _evthreadimpl_cond_signal(void *cond, int broadcast);
int _evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv);
int _evthreadimpl_locking_enabled(void);
| 193 | |
/** Return the ID of the current thread. */
#define EVTHREAD_GET_ID() _evthreadimpl_get_id()
/** Return true iff we're in the thread that owns a given event_base's loop.
 * Requires lock. */
#define EVBASE_IN_THREAD(base)				\
	((base)->th_owner_id == _evthreadimpl_get_id())
/** Return true iff we need to notify the base's main thread about changes,
 * because its loop is running in another thread.  Requires lock. */
#define EVBASE_NEED_NOTIFY(base)			 \
	((base)->running_loop &&			 \
	    ((base)->th_owner_id != _evthreadimpl_get_id()))

/** Allocate a new lock, and store it in lockvar, a void*.  Sets lockvar to
 * NULL if locking is not enabled. */
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype)		\
	((lockvar) = _evthreadimpl_lock_alloc(locktype))

/** Free a given lock, if it is present; 'lockvar' is evaluated only once. */
#define EVTHREAD_FREE_LOCK(lockvar, locktype)				\
	do {								\
		void *_lock_tmp_ = (lockvar);				\
		if (_lock_tmp_)						\
			_evthreadimpl_lock_free(_lock_tmp_, (locktype)); \
	} while (0)

/** Acquire a lock; a NULL lockvar (locking disabled) is a no-op. */
#define EVLOCK_LOCK(lockvar,mode)					\
	do {								\
		if (lockvar)						\
			_evthreadimpl_lock_lock(mode, lockvar);		\
	} while (0)

/** Release a lock; no-op when lockvar is NULL. */
#define EVLOCK_UNLOCK(lockvar,mode)					\
	do {								\
		if (lockvar)						\
			_evthreadimpl_lock_unlock(mode, lockvar);	\
	} while (0)
| 224 | |
/** Lock an event_base, if it is set up for locking. Acquires the lock
    in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do {				\
		EVLOCK_LOCK((base)->lockvar, 0);			\
	} while (0)

/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do {				\
		EVLOCK_UNLOCK((base)->lockvar, 0);			\
	} while (0)

/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
 * locked and held by us.  A no-op unless lock debugging was enabled, so it
 * costs only a function call plus a flag check in normal builds. */
#define EVLOCK_ASSERT_LOCKED(lock)					\
	do {								\
		if ((lock) && _evthreadimpl_is_lock_debugging_enabled()) { \
			EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
		}							\
	} while (0)
| 244 | |
| 245 | /** Try to grab the lock for 'lockvar' without blocking, and return 1 if we |
| 246 | * manage to get it. */ |
| 247 | static inline int EVLOCK_TRY_LOCK(void *lock); |
| 248 | static inline int |
| 249 | EVLOCK_TRY_LOCK(void *lock) |
| 250 | { |
| 251 | if (lock) { |
| 252 | int r = _evthreadimpl_lock_lock(EVTHREAD_TRY, lock); |
| 253 | return !r; |
| 254 | } else { |
| 255 | /* Locking is disabled either globally or for this thing; |
| 256 | * of course we count as having the lock. */ |
| 257 | return 1; |
| 258 | } |
| 259 | } |
| 260 | |
/** Allocate a new condition variable and store it in the void *, condvar.
 * May set condvar to NULL if condition variables are not enabled. */
#define EVTHREAD_ALLOC_COND(condvar)			\
	do {						\
		(condvar) = _evthreadimpl_cond_alloc(0);	\
	} while (0)
/** Deallocate and free a condition variable in condvar; no-op if NULL. */
#define EVTHREAD_FREE_COND(cond)				\
	do {							\
		if (cond)					\
			_evthreadimpl_cond_free((cond));	\
	} while (0)
/** Signal one thread waiting on cond (broadcast argument 0). */
#define EVTHREAD_COND_SIGNAL(cond)					\
	( (cond) ? _evthreadimpl_cond_signal((cond), 0) : 0 )
/** Signal all threads waiting on cond (broadcast argument 1). */
#define EVTHREAD_COND_BROADCAST(cond)					\
	( (cond) ? _evthreadimpl_cond_signal((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled.  Must be called while
 * holding 'lock'.  The lock will be released until the condition is
 * signalled, at which point it will be acquired again.  Returns 0 for
 * success, -1 for failure.  With no condition variable, returns 0
 * immediately. */
#define EVTHREAD_COND_WAIT(cond, lock)					\
	( (cond) ? _evthreadimpl_cond_wait((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed.  Returns 1
 * on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv)			\
	( (cond) ? _evthreadimpl_cond_wait((cond), (lock), (tv)) : 0 )

/** True iff locking functions have been configured. */
#define EVTHREAD_LOCKING_ENABLED()		\
	(_evthreadimpl_locking_enabled())
| 291 | |
#else /* _EVENT_DISABLE_THREAD_SUPPORT */

/* Thread support compiled out: there is exactly one thread (id 1), every
 * synchronization operation is a no-op statement, and trying a lock always
 * succeeds. */
#define EVTHREAD_GET_ID()	1
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) _EVUTIL_NIL_STMT
#define EVTHREAD_FREE_LOCK(lockvar, locktype) _EVUTIL_NIL_STMT

#define EVLOCK_LOCK(lockvar, mode) _EVUTIL_NIL_STMT
#define EVLOCK_UNLOCK(lockvar, mode) _EVUTIL_NIL_STMT
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) _EVUTIL_NIL_STMT
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) _EVUTIL_NIL_STMT

/* A single thread always counts as the owner and never needs notifying. */
#define EVBASE_IN_THREAD(base)	1
#define EVBASE_NEED_NOTIFY(base) 0
#define EVBASE_ACQUIRE_LOCK(base, lock) _EVUTIL_NIL_STMT
#define EVBASE_RELEASE_LOCK(base, lock) _EVUTIL_NIL_STMT
#define EVLOCK_ASSERT_LOCKED(lock) _EVUTIL_NIL_STMT

#define EVLOCK_TRY_LOCK(lock) 1

#define EVTHREAD_ALLOC_COND(condvar) _EVUTIL_NIL_STMT
#define EVTHREAD_FREE_COND(cond) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_SIGNAL(cond) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_BROADCAST(cond) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_WAIT(cond, lock) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) _EVUTIL_NIL_STMT

#define EVTHREAD_LOCKING_ENABLED() 0
| 319 | |
| 320 | #endif |
| 321 | |
/* This code is shared between both lock impls */
#if ! defined(_EVENT_DISABLE_THREAD_SUPPORT)
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order.
 * Used by EVLOCK_LOCK2/EVLOCK_UNLOCK2 so that two locks are always taken in
 * the same global order, preventing lock-order deadlocks.  NULL pointers are
 * left where they are. */
#define _EVLOCK_SORTLOCKS(lockvar1, lockvar2)	\
	do {					\
		if (lockvar1 && lockvar2 && lockvar1 > lockvar2) {	\
			void *tmp = lockvar1;	\
			lockvar1 = lockvar2;	\
			lockvar2 = tmp;		\
		}				\
	} while (0)
| 333 | |
/** Acquire both lock1 and lock2.  Always acquires locks in the same
 * (pointerwise ascending) order, so that two threads locking two locks with
 * LOCK2 will not deadlock.  If both arguments name the same lock, it is
 * acquired only once. */
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2)				\
	do {								\
		void *_lock1_tmplock = (lock1);				\
		void *_lock2_tmplock = (lock2);				\
		_EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock);	\
		EVLOCK_LOCK(_lock1_tmplock,mode1);			\
		if (_lock2_tmplock != _lock1_tmplock)			\
			EVLOCK_LOCK(_lock2_tmplock,mode2);		\
	} while (0)
/** Release both lock1 and lock2, in the reverse of the acquisition order
 * used by EVLOCK_LOCK2. */
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2)				\
	do {								\
		void *_lock1_tmplock = (lock1);				\
		void *_lock2_tmplock = (lock2);				\
		_EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock);	\
		if (_lock2_tmplock != _lock1_tmplock)			\
			EVLOCK_UNLOCK(_lock2_tmplock,mode2);		\
		EVLOCK_UNLOCK(_lock1_tmplock,mode1);			\
	} while (0)
| 355 | |
/* Return nonzero iff the debug wrapper 'lock' is currently held by this
 * thread (only meaningful when lock debugging is enabled). */
int _evthread_is_debug_lock_held(void *lock);
/* Given a debug wrapper lock, return the underlying real lock it wraps. */
void *_evthread_debug_get_real_lock(void *lock);

/* (Re)wrap or allocate a global lock, honoring the 'enable_locks' setting;
 * used by EVTHREAD_SETUP_GLOBAL_LOCK below.  Returns the lock to use, or
 * NULL on allocation failure. */
void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
    int enable_locks);
| 361 | |
/** Set up the global lock 'lockvar' of the given locktype.  Expects an int
 * variable named 'enable_locks' in the calling scope, and an enclosing
 * function returning int: on allocation failure this warns and makes the
 * enclosing function return -1.
 *
 * Note: no semicolon after "while (0)" -- the caller supplies it, so the
 * macro expands to a single statement and stays safe inside an unbraced
 * if/else. */
#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype)			\
	do {								\
		lockvar = evthread_setup_global_lock_(lockvar,		\
		    (locktype), enable_locks);				\
		if (!lockvar) {						\
			event_warn("Couldn't allocate %s", #lockvar);	\
			return -1;					\
		}							\
	} while (0)
| 371 | |
/* Per-module hooks that set up (or debug-wrap) each module's global locks.
 * Each returns 0 on success, -1 on failure. */
int event_global_setup_locks_(const int enable_locks);
int evsig_global_setup_locks_(const int enable_locks);
int evutil_secure_rng_global_setup_locks_(const int enable_locks);
| 375 | |
| 376 | #endif |
| 377 | |
| 378 | #ifdef __cplusplus |
| 379 | } |
| 380 | #endif |
| 381 | |
| 382 | #endif /* _EVTHREAD_INTERNAL_H_ */ |