mirror of https://github.com/libevent/libevent.git
synced 2025-01-31 09:12:55 +08:00
27308aae4d
This is necessary or useful for a few reasons:

1) Sometimes applications will add and delete the same event more than once between calls to dispatch. Processing these changes immediately is needless, and potentially expensive (especially if we're on a system that makes one syscall per changed event). Yes, this actually happens in practice for non-pathological code, such as in cases where the user's callback conditionally re-adds a non-persistent event, or where draining a buffer turns off writing and invokes a user callback which adds more data, which in turn re-enables writing.

2) Sometimes we can coalesce multiple changes on the same fd into a single syscall if we know about them in advance. For example, epoll can do an add and a delete at the same time, but only if we have found out about both of them before we tell epoll.

3) Sometimes adding an event that we immediately delete can cause unintended consequences: in kqueue, this makes pending events get reported spuriously.
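
The sketch below illustrates the coalescing idea from point 2 in isolation; the names (struct pending_change, queue_change) are hypothetical and are not libevent's actual changelist API, which is built on the event_changelist structure declared further down in this header and on the backends that consume it.

struct pending_change {
	int fd;
	short add_events;	/* events added since the last flush */
	short del_events;	/* events deleted since the last flush */
};

/* Record a change instead of issuing a syscall right away.  A later delete
 * cancels an earlier add (and vice versa), so an event that is added and
 * then deleted between two dispatches never reaches the kernel at all. */
static void
queue_change(struct pending_change *c, short events, int is_add)
{
	if (is_add) {
		c->add_events |= events;
		c->del_events &= ~events;
	} else {
		c->del_events |= events;
		c->add_events &= ~events;
	}
}

/* At dispatch time a backend would walk the pending changes and issue at most
 * one syscall per fd for the net effect only (e.g. a single epoll_ctl that
 * both enables reading and disables writing). */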
251 lines
7.9 KiB
C
/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2009 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _EVENT_INTERNAL_H_
#define _EVENT_INTERNAL_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "event-config.h"
#include <sys/queue.h>
#include "event2/event_struct.h"
#include "minheap-internal.h"
#include "evsignal-internal.h"
#include "mm-internal.h"
#include "defer-internal.h"

/* map union members back */

/* mutually exclusive */
#define ev_signal_next	_ev.ev_signal.ev_signal_next

#define ev_io_next	_ev.ev_io.ev_io_next
#define ev_io_timeout	_ev.ev_io.ev_timeout

/* used only by signals */
#define ev_ncalls	_ev.ev_signal.ev_ncalls
#define ev_pncalls	_ev.ev_signal.ev_pncalls

/* Possible event closures. */
#define EV_CLOSURE_NONE 0
#define EV_CLOSURE_SIGNAL 1
#define EV_CLOSURE_PERSIST 2

/** Structure to define the backend of a given event_base. */
struct eventop {
	/** The name of this backend. */
	const char *name;
	/** Set up an event_base to use this backend. */
	void *(*init)(struct event_base *);
	/** Enable reading/writing on a given fd. */
	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Disable reading/writing on a given fd. */
	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Function to implement the core of an event loop.  It must see which
	    added events are ready, and cause event_active to be called for each
	    active event (usually via event_io_active or such).
	 */
	int (*dispatch)(struct event_base *, struct timeval *);
	/** Function to clean up and free our data from the event_base. */
	void (*dealloc)(struct event_base *);
	/** Set if we need to reinitialize the event base after we fork. */
	int need_reinit;
	/** Bit-array of supported event_method_features */
	enum event_method_feature features;
	/** Length of extra information we should record for each fd that
	    has one or more active events.
	 */
	size_t fdinfo_len;
};
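
/*
 * Illustration only, not part of libevent: a backend is selected by pointing
 * event_base.evsel at a filled-in eventop.  A hypothetical do-nothing
 * "nullop" backend, shown here just to make the shape of the interface
 * concrete, would look roughly like this (field order matches the struct
 * above):
 *
 *	static void *nullop_init(struct event_base *base) { return base; }
 *	static int nullop_add(struct event_base *base, evutil_socket_t fd,
 *	    short old, short events, void *fdinfo) { return 0; }
 *	static int nullop_del(struct event_base *base, evutil_socket_t fd,
 *	    short old, short events, void *fdinfo) { return 0; }
 *	static int nullop_dispatch(struct event_base *base, struct timeval *tv)
 *	{ return 0; }
 *	static void nullop_dealloc(struct event_base *base) { }
 *
 *	static const struct eventop nullop_ops = {
 *		"null",
 *		nullop_init,
 *		nullop_add,
 *		nullop_del,
 *		nullop_dispatch,
 *		nullop_dealloc,
 *		0,	(need_reinit)
 *		0,	(features)
 *		0	(fdinfo_len)
 *	};
 */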

#ifdef WIN32
/* If we're on win32, then file descriptors are not nice low densely packed
   integers.  Instead, they are pointer-like windows handles, and we want to
   use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif

#ifdef EVMAP_USE_HT
#include "ht-internal.h"
struct event_map_entry;
HT_HEAD(event_io_map, event_map_entry);
#else
#define event_io_map event_signal_map
#endif

/* Used to map signal numbers to a list of events.  If EVMAP_USE_HT is not
   defined, this is also used as event_io_map, to map fds to a list of events.
*/
struct event_signal_map {
	void **entries;
	int nentries;
};
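
/*
 * Illustration only: entries[] is indexed directly by signal number (or by
 * fd, when this type doubles as event_io_map), and nentries is the allocated
 * length, so a lookup is a bounds check plus an index; the real accessors and
 * the code that grows entries[] live in evmap.c.  With a hypothetical map
 * pointer:
 *
 *	void *slot = (sig >= 0 && sig < map->nentries) ?
 *	    map->entries[sig] : NULL;
 */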

struct common_timeout_list {
	struct event_list events;
	struct timeval duration;
	struct event timeout_event;
	struct event_base *base;
};

struct event_change;

struct event_changelist {
	struct event_change *changes;
	int n_changes;
	int changes_size;
};

struct event_base {
	/** Function pointers and other data to describe this event_base's
	 * backend. */
	const struct eventop *evsel;
	/** Pointer to backend-specific data. */
	void *evbase;

	/** List of changes to tell backend about at next dispatch.  Only used
	 * by the O(1) backends. */
	struct event_changelist changelist;

	/* signal handling info */
	const struct eventop *evsigsel;
	void *evsigbase;
	struct evsig_info sig;

	int event_count;	/**< counts number of total events */
	int event_count_active;	/**< counts number of active events */

	int event_gotterm;	/**< Set to terminate loop once done
				 * processing events. */
	int event_break;	/**< Set to exit loop immediately */

	/* Active event management. */
	/** An array of nactivequeues queues for active events (ones that
	 * have triggered, and whose callbacks need to be called).  Low
	 * priority numbers are more important, and stall higher ones.
	 */
	struct event_list *activequeues;
	int nactivequeues;

	struct common_timeout_list **common_timeout_queues;
	int n_common_timeouts;
	int n_common_timeouts_allocated;

	/** The event whose callback is executing right now */
	struct event *current_event;

	struct deferred_cb_queue defer_queue;

	/** Mapping from file descriptors to enabled events */
	struct event_io_map io;

	/** Mapping from signal numbers to enabled events. */
	struct event_signal_map sigmap;

	/** All events that have been enabled (added) in this event_base */
	struct event_list eventqueue;

	struct timeval event_tv;

	/** Priority queue of events with timeouts. */
	struct min_heap timeheap;

	struct timeval tv_cache;

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
	/* threading support */
	/** The thread currently running the event_loop for this base */
	unsigned long th_owner_id;
	/** A lock to prevent conflicting accesses to this event_base */
	void *th_base_lock;
	/** A lock to prevent event_del from deleting an event while its
	 * callback is executing. */
	void *current_event_lock;
#endif

#ifdef WIN32
	struct event_iocp_port *iocp;
#endif

	enum event_base_config_flag flags;

	/* Notify main thread to wake up, break, etc. */
	int th_notify_fd[2];
	struct event th_notify;
	int (*th_notify_fn)(struct event_base *base);
};

struct event_config_entry {
	TAILQ_ENTRY(event_config_entry) (next);

	const char *avoid_method;
};

/** Internal structure: describes the configuration we want for an event_base
 * that we're about to allocate. */
struct event_config {
	TAILQ_HEAD(event_configq, event_config_entry) entries;

	enum event_method_feature require_features;
	enum event_base_config_flag flags;
};

/* Internal use only: Functions that might be missing from <sys/queue.h> */
#ifndef _EVENT_HAVE_TAILQFOREACH
#define TAILQ_FIRST(head)		((head)->tqh_first)
#define TAILQ_END(head)			NULL
#define TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
#define TAILQ_FOREACH(var, head, field) \
	for ((var) = TAILQ_FIRST(head);	\
	     (var) != TAILQ_END(head);	\
	     (var) = TAILQ_NEXT(var, field))
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do {		\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;	\
	(elm)->field.tqe_next = (listelm);			\
	*(listelm)->field.tqe_prev = (elm);			\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;	\
} while (0)
#endif /* TAILQ_FOREACH */
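
/*
 * Usage note (illustration only): these fallbacks let the rest of the code
 * walk event lists the same way on every platform.  For example, iterating
 * every event added to a base (assuming the ev_next link declared for
 * struct event in event2/event_struct.h):
 *
 *	struct event *ev;
 *	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
 *		... examine ev ...
 *	}
 */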

#define N_ACTIVE_CALLBACKS(base)					\
	((base)->event_count_active + (base)->defer_queue.active_count)

int _evsig_set_handler(struct event_base *base, int evsignal,
    void (*fn)(int));
int _evsig_restore_handler(struct event_base *base, int evsignal);

void event_active_nolock(struct event *ev, int res, short count);

#ifdef __cplusplus
}
#endif

#endif /* _EVENT_INTERNAL_H_ */