/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"

#ifdef WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>

#include "event-internal.h"
#include "evmap-internal.h"
#include "mm-internal.h"
#include "changelist-internal.h"

/** An entry for an evmap_io list: notes all the events that want to read or
    write on a given fd, and the number of each.
 */
struct evmap_io {
    struct event_list events;
    ev_uint16_t nread;
    ev_uint16_t nwrite;
};

/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
    struct event_list events;
};

/* On some platforms, fds start at 0 and increment by 1 as they are
   allocated, and old numbers get reused.  For these platforms, we
   implement io maps just like signal maps: as an array of pointers to
   struct evmap_io.  But on other platforms (Windows), sockets are not
   0-indexed, not necessarily consecutive, and not necessarily reused.
   There, we use a hashtable to implement evmap_io.
*/
#ifdef EVMAP_USE_HT
struct event_map_entry {
    HT_ENTRY(event_map_entry) map_node;
    evutil_socket_t fd;
    union { /* This is a union in case we need to make more things that can
               be in the hashtable. */
        struct evmap_io evmap_io;
    } ent;
};

/* Helper used by the event_io_map hashtable code; tries to return a good hash
 * of the fd in e->fd. */
static inline unsigned
hashsocket(struct event_map_entry *e)
{
    /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
     * matter.  Our hashtable implementation really likes low-order bits,
     * though, so let's do the rotate-and-add trick. */
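    /* (h >> 2) | (h << 30) rotates h right by two bits; the hardcoded
     * shift of 30 presumes a 32-bit unsigned.  Adding the rotation mixes
     * the high bits down into the low-order bits the table indexes on. */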
    unsigned h = (unsigned) e->fd;
    h += (h >> 2) | (h << 30);
    return h;
}

/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
 * have the same e->fd. */
static inline int
eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
{
    return e1->fd == e2->fd;
}

HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
    0.5, mm_malloc, mm_realloc, mm_free)

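/* Set 'x' to the 'type' field of the hashtable entry for fd 'slot' in
 * 'map', or to NULL if 'slot' has no entry. */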
#define GET_IO_SLOT(x, map, slot, type) \
    do { \
        struct event_map_entry _key, *_ent; \
        _key.fd = slot; \
        _ent = HT_FIND(event_io_map, map, &_key); \
        (x) = _ent ? &_ent->ent.type : NULL; \
    } while (0)

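/* As GET_IO_SLOT, but construct (with 'ctor') and insert the entry for
 * 'slot' if it is missing, allocating 'fdinfo_len' extra bytes after the
 * structure.  Makes the enclosing function return -1 on allocation
 * failure. */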
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
    do { \
        struct event_map_entry _key, *_ent; \
        _key.fd = slot; \
        _HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
            event_map_entry, &_key, ptr, \
            { \
                _ent = *ptr; \
            }, \
            { \
                _ent = mm_calloc(1, sizeof(struct event_map_entry)+fdinfo_len); \
                if (EVUTIL_UNLIKELY(_ent == NULL)) \
                    return (-1); \
                _ent->fd = slot; \
                (ctor)(&_ent->ent.type); \
                _HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
            }); \
        (x) = &_ent->ent.type; \
    } while (0)

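/** Set up 'ctx' as an empty io map (hashtable implementation). */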
void
evmap_io_initmap(struct event_io_map *ctx)
{
    HT_INIT(event_io_map, ctx);
}

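/** Remove and free every entry in io map 'ctx'. */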
void
evmap_io_clear(struct event_io_map *ctx)
{
    struct event_map_entry **ent, **next, *this;
    for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
        this = *ent;
        next = HT_NEXT_RMV(event_io_map, ctx, ent);
        mm_free(this);
    }
    HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
#endif

/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type) \
    (x) = (struct type *)((map)->entries[slot])
/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type', and initializing the new
   value by calling the function 'ctor' on it.  Makes the function
   return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
    do { \
        if ((map)->entries[slot] == NULL) { \
            (map)->entries[slot] = \
                mm_calloc(1, sizeof(struct type)+fdinfo_len); \
            if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
                return (-1); \
            (ctor)((struct type *)(map)->entries[slot]); \
        } \
        (x) = (struct type *)((map)->entries[slot]); \
    } while (0)

/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) \
    GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap(struct event_io_map* ctx)
{
    evmap_signal_initmap(ctx);
}
void
evmap_io_clear(struct event_io_map* ctx)
{
    evmap_signal_clear(ctx);
}
#endif


/** Expand 'map' with new entries of width 'msize' until it is big enough
    to store a value in 'slot'.
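    For example, a 32-entry map asked to hold slot 100 doubles to 64 and
    then to 128 entries, and the newly added tail is zero-filled.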
 */
static int
evmap_make_space(struct event_signal_map *map, int slot, int msize)
{
    if (map->nentries <= slot) {
        int nentries = map->nentries ? map->nentries : 32;
        void **tmp;

        while (nentries <= slot)
            nentries <<= 1;

        tmp = (void **)mm_realloc(map->entries, nentries * msize);
        if (tmp == NULL)
            return (-1);

        memset(&tmp[map->nentries], 0,
            (nentries - map->nentries) * msize);

        map->nentries = nentries;
        map->entries = tmp;
    }

    return (0);
}

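/** Set up 'ctx' as an empty signal map. */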
void
evmap_signal_initmap(struct event_signal_map *ctx)
{
    ctx->nentries = 0;
    ctx->entries = NULL;
}

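/** Free every entry in signal map 'ctx', and the entry table itself. */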
void
evmap_signal_clear(struct event_signal_map *ctx)
{
    if (ctx->entries != NULL) {
        int i;
        for (i = 0; i < ctx->nentries; ++i) {
            if (ctx->entries[i] != NULL)
                mm_free(ctx->entries[i]);
        }
        mm_free(ctx->entries);
        ctx->entries = NULL;
    }
    ctx->nentries = 0;
}


/* code specific to file descriptors */

/** Constructor for struct evmap_io */
static void
evmap_io_init(struct evmap_io *entry)
{
    TAILQ_INIT(&entry->events);
    entry->nread = 0;
    entry->nwrite = 0;
}


/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_add(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
    const struct eventop *evsel = base->evsel;
    struct event_io_map *io = &base->io;
    struct evmap_io *ctx = NULL;
    int nread, nwrite, retval = 0;
    short res = 0, old = 0;
    struct event *old_ev;

    EVUTIL_ASSERT(fd == ev->ev_fd);

    if (fd < 0)
        return 0;

#ifndef EVMAP_USE_HT
    if (fd >= io->nentries) {
        if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
            return (-1);
    }
#endif
    GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
        evsel->fdinfo_len);

    nread = ctx->nread;
    nwrite = ctx->nwrite;

    if (nread)
        old |= EV_READ;
    if (nwrite)
        old |= EV_WRITE;

    if (ev->ev_events & EV_READ) {
        if (++nread == 1)
            res |= EV_READ;
    }
    if (ev->ev_events & EV_WRITE) {
        if (++nwrite == 1)
            res |= EV_WRITE;
    }
    if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff)) {
        event_warnx("Too many events reading or writing on fd %d",
            (int)fd);
        return -1;
    }
    if (EVENT_DEBUG_MODE_IS_ON() &&
        (old_ev = TAILQ_FIRST(&ctx->events)) &&
        (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
        event_warnx("Tried to mix edge-triggered and non-edge-triggered"
            " events on fd %d", (int)fd);
        return -1;
    }

    if (res) {
        void *extra = ((char*)ctx) + sizeof(struct evmap_io);
        /* XXX(niels): we cannot mix edge-triggered and
         * level-triggered, we should probably assert on
         * this. */
        if (evsel->add(base, ev->ev_fd,
            old, (ev->ev_events & EV_ET) | res, extra) == -1)
            return (-1);
        retval = 1;
    }

    ctx->nread = (ev_uint16_t) nread;
    ctx->nwrite = (ev_uint16_t) nwrite;
    TAILQ_INSERT_TAIL(&ctx->events, ev, ev_io_next);

    return (retval);
}

/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_del(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
    const struct eventop *evsel = base->evsel;
    struct event_io_map *io = &base->io;
    struct evmap_io *ctx;
    int nread, nwrite, retval = 0;
    short res = 0, old = 0;

    if (fd < 0)
        return 0;

    EVUTIL_ASSERT(fd == ev->ev_fd);

#ifndef EVMAP_USE_HT
    if (fd >= io->nentries)
        return (-1);
#endif

    GET_IO_SLOT(ctx, io, fd, evmap_io);

    nread = ctx->nread;
    nwrite = ctx->nwrite;

    if (nread)
        old |= EV_READ;
    if (nwrite)
        old |= EV_WRITE;

    if (ev->ev_events & EV_READ) {
        if (--nread == 0)
            res |= EV_READ;
        EVUTIL_ASSERT(nread >= 0);
    }
    if (ev->ev_events & EV_WRITE) {
        if (--nwrite == 0)
            res |= EV_WRITE;
        EVUTIL_ASSERT(nwrite >= 0);
    }

    if (res) {
        void *extra = ((char*)ctx) + sizeof(struct evmap_io);
        if (evsel->del(base, ev->ev_fd, old, res, extra) == -1)
            return (-1);
        retval = 1;
    }

    ctx->nread = (ev_uint16_t) nread;
    ctx->nwrite = (ev_uint16_t) nwrite;
    TAILQ_REMOVE(&ctx->events, ev, ev_io_next);

    return (retval);
}

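/** Activate every event registered on 'fd' that is waiting for any of the
 * conditions in 'events'. */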
void
evmap_io_active(struct event_base *base, evutil_socket_t fd, short events)
{
    struct event_io_map *io = &base->io;
    struct evmap_io *ctx;
    struct event *ev;

#ifndef EVMAP_USE_HT
    EVUTIL_ASSERT(fd < io->nentries);
#endif
    GET_IO_SLOT(ctx, io, fd, evmap_io);

    EVUTIL_ASSERT(ctx);
    TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
        if (ev->ev_events & events)
            event_active_nolock(ev, ev->ev_events & events, 1);
    }
}

/* code specific to signals */

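/** Constructor for struct evmap_signal */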
static void
evmap_signal_init(struct evmap_signal *entry)
{
    TAILQ_INIT(&entry->events);
}


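/* Add 'ev' to the list of events waiting on signal 'sig'; if it is the
 * first event there, tell the signal backend to start watching 'sig'.
 * Return -1 on error, 1 on success. */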
int
evmap_signal_add(struct event_base *base, int sig, struct event *ev)
{
    const struct eventop *evsel = base->evsigsel;
    struct event_signal_map *map = &base->sigmap;
    struct evmap_signal *ctx = NULL;

    if (sig >= map->nentries) {
        if (evmap_make_space(
            map, sig, sizeof(struct evmap_signal *)) == -1)
            return (-1);
    }
    GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
        base->evsigsel->fdinfo_len);

    if (TAILQ_EMPTY(&ctx->events)) {
        if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
            == -1)
            return (-1);
    }

    TAILQ_INSERT_TAIL(&ctx->events, ev, ev_signal_next);

    return (1);
}

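/* Remove 'ev' from the list of events waiting on signal 'sig'; if it is
 * the only event left there, tell the signal backend to stop watching
 * 'sig'.  Return -1 on error, 1 on success. */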
int
evmap_signal_del(struct event_base *base, int sig, struct event *ev)
{
    const struct eventop *evsel = base->evsigsel;
    struct event_signal_map *map = &base->sigmap;
    struct evmap_signal *ctx;

    if (sig >= map->nentries)
        return (-1);

    GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

    if (TAILQ_FIRST(&ctx->events) == TAILQ_LAST(&ctx->events, event_list)) {
        if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
            return (-1);
    }

    TAILQ_REMOVE(&ctx->events, ev, ev_signal_next);

    return (1);
}

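/** Activate every event waiting on signal 'sig', with 'ncalls' calls
 * pending for each. */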
void
evmap_signal_active(struct event_base *base, evutil_socket_t sig, int ncalls)
{
    struct event_signal_map *map = &base->sigmap;
    struct evmap_signal *ctx;
    struct event *ev;

    EVUTIL_ASSERT(sig < map->nentries);
    GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

    TAILQ_FOREACH(ev, &ctx->events, ev_signal_next)
        event_active_nolock(ev, EV_SIGNAL, ncalls);
}

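/** Return a pointer to the per-fd data stored just past the evmap_io for
 * 'fd' in 'map', or NULL if 'fd' has no entry. */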
void *
evmap_io_get_fdinfo(struct event_io_map *map, evutil_socket_t fd)
{
    struct evmap_io *ctx;
    GET_IO_SLOT(ctx, map, fd, evmap_io);
    if (ctx)
        return ((char*)ctx) + sizeof(struct evmap_io);
    else
        return NULL;
}

/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
    int idxplus1; /* this is the index +1, so that memset(0) will make it
                   * a no-such-element */
};

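/** Set up 'changelist' as empty, with no storage allocated. */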
void
event_changelist_init(struct event_changelist *changelist)
{
    changelist->changes = NULL;
    changelist->changes_size = 0;
    changelist->n_changes = 0;
}

/** Helper: return the changelist_fdinfo corresponding to a given change. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
    char *ptr;
    if (change->read_change & EV_CHANGE_SIGNAL) {
        struct evmap_signal *ctx;
        GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
        ptr = ((char*)ctx) + sizeof(struct evmap_signal);
    } else {
        struct evmap_io *ctx;
        GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
        ptr = ((char*)ctx) + sizeof(struct evmap_io);
    }
    return (void*)ptr;
}

#ifdef DEBUG_CHANGELIST
/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_check(struct event_base *base)
{
    int i;
    struct event_changelist *changelist = &base->changelist;

    EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
    for (i = 0; i < changelist->n_changes; ++i) {
        struct event_change *c = &changelist->changes[i];
        struct event_changelist_fdinfo *f;
        EVUTIL_ASSERT(c->fd >= 0);
        f = event_change_get_fdinfo(base, c);
        EVUTIL_ASSERT(f);
        EVUTIL_ASSERT(f->idxplus1 == i + 1);
    }

    for (i = 0; i < base->io.nentries; ++i) {
        struct evmap_io *io = base->io.entries[i];
        struct event_changelist_fdinfo *f;
        if (!io)
            continue;
        f = (void*)
            ( ((char*)io) + sizeof(struct evmap_io) );
        if (f->idxplus1) {
            struct event_change *c = &changelist->changes[f->idxplus1 - 1];
            EVUTIL_ASSERT(c->fd == i);
        }
    }
}
#else
#define event_changelist_check(base) ((void)0)
#endif

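/** Clear all pending changes from 'changelist', resetting each fd's or
 * signal's idxplus1 marker to "no entry". */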
void
event_changelist_remove_all(struct event_changelist *changelist,
    struct event_base *base)
{
    int i;

    event_changelist_check(base);

    for (i = 0; i < changelist->n_changes; ++i) {
        struct event_change *ch = &changelist->changes[i];
        struct event_changelist_fdinfo *fdinfo =
            event_change_get_fdinfo(base, ch);
        EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
        fdinfo->idxplus1 = 0;
    }

    changelist->n_changes = 0;

    event_changelist_check(base);
}

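/** Free all storage held by 'changelist', and reset it to empty. */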
void
event_changelist_freemem(struct event_changelist *changelist)
{
    if (changelist->changes)
        mm_free(changelist->changes);
    event_changelist_init(changelist); /* zero it all out. */
}

/** Increase the size of 'changelist' to hold more changes. */
static int
event_changelist_grow(struct event_changelist *changelist)
{
    int new_size;
    struct event_change *new_changes;
    if (changelist->changes_size < 64)
        new_size = 64;
    else
        new_size = changelist->changes_size * 2;

    new_changes = mm_realloc(changelist->changes,
        new_size * sizeof(struct event_change));

    if (EVUTIL_UNLIKELY(new_changes == NULL))
        return (-1);

    changelist->changes = new_changes;
    changelist->changes_size = new_size;

    return (0);
}

/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
    struct event_change *change;

    if (fdinfo->idxplus1 == 0) {
        int idx;
        EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

        if (changelist->n_changes == changelist->changes_size) {
            if (event_changelist_grow(changelist) < 0)
                return NULL;
        }

        idx = changelist->n_changes++;
        change = &changelist->changes[idx];
        fdinfo->idxplus1 = idx + 1;

        memset(change, 0, sizeof(struct event_change));
        change->fd = fd;
        change->old_events = old_events;
    } else {
        change = &changelist->changes[fdinfo->idxplus1 - 1];
        EVUTIL_ASSERT(change->fd == fd);
    }
    return change;
}

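/* Record an "add" of 'events' on 'fd' in the changelist, rather than
 * applying it to the backend immediately.  'old' is the set of events
 * previously active on 'fd'; 'p' is the fd's event_changelist_fdinfo.
 * Return 0 on success, -1 on error. */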
int
event_changelist_add(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
    struct event_changelist *changelist = &base->changelist;
    struct event_changelist_fdinfo *fdinfo = p;
    struct event_change *change;

    event_changelist_check(base);

    change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
    if (!change)
        return -1;

    /* An add replaces any previous delete, but doesn't result in a no-op,
     * since the delete might fail (because the fd had been closed since
     * the last add, for instance). */

    if (events & (EV_READ|EV_SIGNAL)) {
        change->read_change = EV_CHANGE_ADD |
            (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
    }
    if (events & EV_WRITE) {
        change->write_change = EV_CHANGE_ADD |
            (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
    }

    event_changelist_check(base);
    return (0);
}

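/* Counterpart of event_changelist_add(): record a "del" of 'events' on
 * 'fd', cancelling out a pending "add" where doing so is a safe no-op.
 * Return 0 on success, -1 on error. */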
int
event_changelist_del(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
    struct event_changelist *changelist = &base->changelist;
    struct event_changelist_fdinfo *fdinfo = p;
    struct event_change *change;

    event_changelist_check(base);
    change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
    event_changelist_check(base);
    if (!change)
        return -1;

    /* A delete removes any previous add, rather than replacing it:
       on those platforms where "add, delete, dispatch" is not the same
       as "no-op, dispatch", we want the no-op behavior.

       As well as checking the current operation, we should also check
       the original set of events, to make sure we're not ignoring
       the case where the add operation is present on an event that
       was already set.

       If we have a no-op item, we could remove it from the list
       entirely, but really there's not much point: skipping the no-op
       change when we do the dispatch later is far cheaper than rejuggling
       the array now.

       As this stands, it also lets through deletions of events that are
       not currently set.
     */

    if (events & (EV_READ|EV_SIGNAL)) {
        if (!(change->old_events & (EV_READ | EV_SIGNAL)) &&
            (change->read_change & EV_CHANGE_ADD))
            change->read_change = 0;
        else
            change->read_change = EV_CHANGE_DEL;
    }
    if (events & EV_WRITE) {
        if (!(change->old_events & EV_WRITE) &&
            (change->write_change & EV_CHANGE_ADD))
            change->write_change = 0;
        else
            change->write_change = EV_CHANGE_DEL;
    }

    event_changelist_check(base);
    return (0);
}

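/** Sanity check: assert that every inserted io or signal event appears
 * exactly once in the corresponding evmap, and vice versa. */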
void
evmap_check_integrity(struct event_base *base)
{
#define EVLIST_X_SIGFOUND 0x1000
#define EVLIST_X_IOFOUND 0x2000

    evutil_socket_t i;
    struct event *ev;
    struct event_io_map *io = &base->io;
    struct event_signal_map *sigmap = &base->sigmap;
#ifdef EVMAP_USE_HT
    struct event_map_entry **mapent;
#endif
    int nsignals, ntimers, nio;
    nsignals = ntimers = nio = 0;

    TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
        EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
        EVUTIL_ASSERT(ev->ev_flags & EVLIST_INIT);
        ev->ev_flags &= ~(EVLIST_X_SIGFOUND|EVLIST_X_IOFOUND);
    }

#ifdef EVMAP_USE_HT
    HT_FOREACH(mapent, event_io_map, io) {
        struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
        i = (*mapent)->fd;
#else
    for (i = 0; i < io->nentries; ++i) {
        struct evmap_io *ctx = io->entries[i];

        if (!ctx)
            continue;
#endif

        TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
            EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_IOFOUND));
            EVUTIL_ASSERT(ev->ev_fd == i);
            ev->ev_flags |= EVLIST_X_IOFOUND;
            nio++;
        }
    }

    for (i = 0; i < sigmap->nentries; ++i) {
        struct evmap_signal *ctx = sigmap->entries[i];
        if (!ctx)
            continue;

        TAILQ_FOREACH(ev, &ctx->events, ev_signal_next) {
            EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_SIGFOUND));
            EVUTIL_ASSERT(ev->ev_fd == i);
            ev->ev_flags |= EVLIST_X_SIGFOUND;
            nsignals++;
        }
    }

    TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
        if (ev->ev_events & (EV_READ|EV_WRITE)) {
            EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_IOFOUND);
            --nio;
        }
        if (ev->ev_events & EV_SIGNAL) {
            EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_SIGFOUND);
            --nsignals;
        }
    }

    EVUTIL_ASSERT(nio == 0);
    EVUTIL_ASSERT(nsignals == 0);
    /* There is no "EVUTIL_ASSERT(ntimers == 0)": eventqueue is only for
     * pending signals and io events.
     */
}