/*
 * Copyright (c) 2007-2009 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef HAVE_CONFIG_H
#include "event-config.h"
#endif

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>

#include "event-internal.h"
#include "evmap-internal.h"
#include "mm-internal.h"

/** An entry for an evmap_io list: notes all the events that want to read or
	write on a given fd, and the number of each. */
struct evmap_io {
	struct event_list events;
	unsigned int nread;
	unsigned int nwrite;
};

/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_list events;
};

/* On some platforms, fds start at 0 and increment by 1 as they are
   allocated, and old numbers get used.  For these platforms, we implement
   io maps just like signal maps: as an array of pointers to struct
   evmap_io.  But on other platforms (windows), sockets are not 0-indexed,
   not necessarily consecutive, and not necessarily reused.  There, we use
   a hashtable to implement evmap_io. */
#ifdef EVMAP_USE_HT
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node;
	evutil_socket_t fd;
	union { /* This is a union in case we need to make more things that
		   can be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
};

static inline unsigned
hashsocket(struct event_map_entry *e)
{
	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
	 * matter.  Our hashtable implementation really likes low-order bits,
	 * though, so let's do the rotate-and-add trick.
	 */
	unsigned h = (unsigned) e->fd;
	h += (h >> 2) | (h << 30);
	return h;
}

static inline int
eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
{
	return e1->fd == e2->fd;
}

HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket);
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
    0.5, mm_malloc, mm_realloc, mm_free);

#define GET_IO_SLOT(x, map, slot, type) \
	do { \
		struct event_map_entry _key, *_ent; \
		_key.fd = slot; \
		_ent = HT_FIND(event_io_map, map, &_key); \
		(x) = _ent ? &_ent->ent.type : NULL; \
	} while (0);

#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
	do { \
		struct event_map_entry _key, *_ent; \
		_key.fd = slot; \
		_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &_key, ptr, \
		    { \
			    _ent = *ptr; \
		    }, \
		    { \
			    _ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    assert(_ent); \
			    _ent->fd = slot; \
			    (ctor)(&_ent->ent.type); \
			    _HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
		    }); \
		(x) = &_ent->ent.type; \
	} while (0)

void
evmap_io_initmap(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}

void
evmap_io_clear(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
}
#endif

/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type) \
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type', and initializing the new
   value by calling the function 'ctor' on it. */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
	do { \
		if ((map)->entries[slot] == NULL) { \
			assert(ctor != NULL); \
			(map)->entries[slot]=mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			assert((map)->entries[slot] != NULL); \
			(ctor)((struct type *)(map)->entries[slot]); \
		} \
		(x) = (struct type *)((map)->entries[slot]); \
	} while (0)

/* If we aren't using hashtables, then define the IO_SLOT macros and
   functions as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) \
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap(struct event_io_map* ctx)
{
	evmap_signal_initmap(ctx);
}
void
evmap_io_clear(struct event_io_map* ctx)
{
	evmap_signal_clear(ctx);
}
#endif
/** Expand 'map' with new entries of width 'msize' until it is big enough
	to store a value in 'slot'. */
static int
evmap_make_space(struct event_signal_map *map, int slot, int msize)
{
	if (map->nentries <= slot) {
		int nentries = map->nentries ? map->nentries : 32;
		void **tmp;

		while (nentries <= slot)
			nentries <<= 1;

		tmp = (void **)mm_realloc(map->entries, nentries * msize);
		if (tmp == NULL)
			return (-1);

		memset(&tmp[map->nentries], 0,
		    (nentries - map->nentries) * msize);

		map->nentries = nentries;
		map->entries = tmp;
	}

	return (0);
}

void
evmap_signal_initmap(struct event_signal_map *ctx)
{
	ctx->nentries = 0;
	ctx->entries = NULL;
}

void
evmap_signal_clear(struct event_signal_map *ctx)
{
	if (ctx->entries != NULL) {
		int i;
		for (i = 0; i < ctx->nentries; ++i) {
			if (ctx->entries[i] != NULL)
				mm_free(ctx->entries[i]);
		}
		mm_free(ctx->entries);
		ctx->entries = NULL;
	}
	ctx->nentries = 0;
}

/* code specific to file descriptors */

/** Constructor for struct evmap_io */
static void
evmap_io_init(struct evmap_io *entry)
{
	TAILQ_INIT(&entry->events);
	entry->nread = 0;
	entry->nwrite = 0;
}
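/* Note (added for clarity): evmap_io_add() and evmap_io_del() below keep
 * per-fd interest counts in nread/nwrite.  The backend's add/del callback
 * is only invoked when the first reader or writer appears on an fd, or when
 * the last one is removed; 'old' carries the interest that was already
 * registered and 'res' the change being requested. */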
int
evmap_io_add(struct event_base *base, int fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx = NULL;
	int nread, nwrite;
	short res = 0, old = 0;

	assert(fd == ev->ev_fd); /*XXX(nickm) always true? */
	/*XXX(nickm) Should we assert that ev is not already inserted, or should
	 * we make this function idempotent? */

#ifndef EVMAP_USE_HT
	if (fd >= io->nentries) {
		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
			return (-1);
	}
#endif
	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
	    evsel->fdinfo_len);

	nread = ctx->nread;
	nwrite = ctx->nwrite;

	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;

	if (ev->ev_events & EV_READ) {
		if (++nread == 1)
			res |= EV_READ;
	}
	if (ev->ev_events & EV_WRITE) {
		if (++nwrite == 1)
			res |= EV_WRITE;
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		/* XXX(niels): we cannot mix edge-triggered and
		 * level-triggered, we should probably assert on
		 * this. */
		if (evsel->add(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1)
			return (-1);
	}

	ctx->nread = nread;
	ctx->nwrite = nwrite;
	TAILQ_INSERT_TAIL(&ctx->events, ev, ev_io_next);

	return (0);
}

int
evmap_io_del(struct event_base *base, int fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	int nread, nwrite;
	short res = 0, old = 0;

	assert(fd == ev->ev_fd); /*XXX(nickm) always true? */
	/*XXX(nickm) Should we assert that ev is not already inserted, or should
	 * we make this function idempotent? */

#ifndef EVMAP_USE_HT
	if (fd >= io->nentries)
		return (-1);
#endif

	GET_IO_SLOT(ctx, io, fd, evmap_io);

	nread = ctx->nread;
	nwrite = ctx->nwrite;

	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;

	if (ev->ev_events & EV_READ) {
		if (--nread == 0)
			res |= EV_READ;
		assert(nread >= 0);
	}
	if (ev->ev_events & EV_WRITE) {
		if (--nwrite == 0)
			res |= EV_WRITE;
		assert(nwrite >= 0);
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		if (evsel->del(base, ev->ev_fd, old, res, extra) == -1)
			return (-1);
	}

	ctx->nread = nread;
	ctx->nwrite = nwrite;
	TAILQ_REMOVE(&ctx->events, ev, ev_io_next);

	return (0);
}

void
evmap_io_active(struct event_base *base, int fd, short events)
{
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	struct event *ev;

#ifndef EVMAP_USE_HT
	assert(fd < io->nentries);
#endif
	GET_IO_SLOT(ctx, io, fd, evmap_io);

	assert(ctx);
	TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
		if (ev->ev_events & events)
			event_active(ev, ev->ev_events & events, 1);
	}
}

/* code specific to signals */

static void
evmap_signal_init(struct evmap_signal *entry)
{
	TAILQ_INIT(&entry->events);
}

int
evmap_signal_add(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx = NULL;

	if (sig >= map->nentries) {
		if (evmap_make_space(
			map, sig, sizeof(struct evmap_signal *)) == -1)
			return (-1);
	}
	GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
	    0);

	if (TAILQ_EMPTY(&ctx->events)) {
		if (evsel->add(base, EVENT_SIGNAL(ev), 0, EV_SIGNAL, NULL)
		    == -1)
			return (-1);
	}

	TAILQ_INSERT_TAIL(&ctx->events, ev, ev_signal_next);

	return (0);
}

int
evmap_signal_del(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;

	if (sig >= map->nentries)
		return (-1);

	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	if (TAILQ_FIRST(&ctx->events) == TAILQ_LAST(&ctx->events, event_list)) {
		if (evsel->del(base, EVENT_SIGNAL(ev), 0, EV_SIGNAL, NULL)
		    == -1)
			return (-1);
	}

	TAILQ_REMOVE(&ctx->events, ev, ev_signal_next);

	return (0);
}

void
evmap_signal_active(struct event_base *base, int sig, int ncalls)
{
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;
	struct event *ev;

	assert(sig < map->nentries);
	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	TAILQ_FOREACH(ev, &ctx->events, ev_signal_next)
		event_active(ev, EV_SIGNAL, ncalls);
}

void *
evmap_io_get_fdinfo(struct event_io_map *map, evutil_socket_t fd)
{
	struct evmap_io *ctx;
	GET_IO_SLOT(ctx, map, fd, evmap_io);
	if (ctx)
		return ((char*)ctx) + sizeof(struct evmap_io);
	else
		return NULL;
}
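/* Note on per-fd backend data (added for clarity): each evmap_io slot is
 * allocated with evsel->fdinfo_len extra bytes (the fdinfo_len argument to
 * the *_AND_CTOR macros above), so backend-specific state lives immediately
 * after the struct evmap_io.  The 'extra' pointers passed to evsel->add()
 * and evsel->del(), and the pointer returned by evmap_io_get_fdinfo(), all
 * refer to that trailing region.
 *
 * Illustrative sketch only (not part of libevent; 'example_add' is a
 * hypothetical backend callback whose parameter list mirrors the evsel->add()
 * calls made in this file): a backend that declares fdinfo_len ==
 * sizeof(int) could stash a per-fd index there:
 *
 *	static int
 *	example_add(struct event_base *base, evutil_socket_t fd,
 *	    short old, short events, void *fdinfo)
 *	{
 *		int *idx = fdinfo;	// points just past struct evmap_io
 *		...
 *	}
 */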