/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <ctype.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <limits.h>

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
#include "util-internal.h"

#ifdef EVENT__HAVE_WORKING_KQUEUE
#include "kqueue-internal.h"
#endif

#ifdef EVENT__HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef EVENT__HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef EVENT__HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef EVENT__HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef EVENT__HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef _WIN32
extern const struct eventop win32ops;
#endif

/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef EVENT__HAVE_EPOLL
	&epollops,
#endif
#ifdef EVENT__HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef EVENT__HAVE_POLL
	&pollops,
#endif
#ifdef EVENT__HAVE_SELECT
	&selectops,
#endif
#ifdef _WIN32
	&win32ops,
#endif
	NULL
};

/* Global state; deprecated */
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

/* Global state */

static void *event_self_cbarg_ptr_ = NULL;

/* Prototypes */
static void event_queue_insert_active(struct event_base *, struct event_callback *);
static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
static void event_queue_insert_timeout(struct event_base *, struct event *);
static void event_queue_insert_inserted(struct event_base *, struct event *);
static void event_queue_remove_active(struct event_base *, struct event_callback *);
static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
static void event_queue_remove_timeout(struct event_base *, struct event *);
static void event_queue_remove_inserted(struct event_base *, struct event *);
static void event_queue_make_later_events_active(struct event_base *base);

static int evthread_make_base_notifiable_nolock_(struct event_base *base);
static int event_del_(struct event *ev, int blocking);

#ifdef USE_REINSERT_TIMEOUT
/* This code seems buggy; only turn it on if we find out what the trouble is. */
static void event_queue_reinsert_timeout(struct event_base *, struct event *, int was_common, int is_common, int old_timeout_idx);
#endif

static int event_haveevents(struct event_base *);

static int event_process_active(struct event_base *);

static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);

static inline void event_signal_closure(struct event_base *, struct event *ev);
static inline void event_persist_closure(struct event_base *, struct event *ev);

static int evthread_notify_base(struct event_base *base);

static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev);

#ifndef EVENT__DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
 */

struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;
	const struct event *ptr;
	unsigned added : 1;
};

static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
	/* We need to do this silliness to convince compilers that we
	 * honestly mean to cast e->ptr to an integer, and discard any
	 * part of it that doesn't fit in an unsigned.
	 */
	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
	/* Our hashtable implementation is pretty sensitive to low bits,
	 * and every struct event is over 64 bytes in size, so we can
	 * just say >>6. */
	return (u >> 6);
}

static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
	return a->ptr == b->ptr;
}

int event_debug_mode_on_ = 0;

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
/**
 * @brief debug mode variable which is set for any function/structure that needs
 *        to be shared across threads (if thread support is enabled).
 *
 * When and if evthreads are initialized, this variable will be evaluated,
 * and if set to something other than zero, this means the evthread setup
 * functions were called out of order.
 *
 * See: "Locks and threading" in the documentation.
 */
int event_debug_created_threadable_ctx_ = 0;
#endif

/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *event_debug_map_lock_ = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
	HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)

/* Macro: record that ev is now setup (that is, ready for an add) */
#define event_debug_note_setup_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) { \
			dent->added = 0; \
		} else { \
			dent = mm_malloc(sizeof(*dent)); \
			if (!dent) \
				event_err(1, \
				    "Out of memory in debugging code"); \
			dent->ptr = (ev); \
			dent->added = 0; \
			HT_INSERT(event_debug_map, &global_debug_map, dent); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	event_debug_mode_too_late = 1; \
	} while (0)
/* Macro: record that ev is no longer setup */
#define event_debug_note_teardown_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
		if (dent) \
			mm_free(dent); \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	event_debug_mode_too_late = 1; \
	} while (0)
/* Macro: record that ev is now added */
#define event_debug_note_add_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) { \
			dent->added = 1; \
		} else { \
			event_errx(EVENT_ERR_ABORT_, \
			    "%s: noting an add on a non-setup event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT \
			    ", flags: 0x%x)", \
			    __func__, (ev), (ev)->ev_events, \
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	event_debug_mode_too_late = 1; \
	} while (0)
/* Macro: record that ev is no longer added */
#define event_debug_note_del_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) { \
			dent->added = 0; \
		} else { \
			event_errx(EVENT_ERR_ABORT_, \
			    "%s: noting a del on a non-setup event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT \
			    ", flags: 0x%x)", \
			    __func__, (ev), (ev)->ev_events, \
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	event_debug_mode_too_late = 1; \
	} while (0)
/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
#define event_debug_assert_is_setup_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (!dent) { \
			event_errx(EVENT_ERR_ABORT_, \
			    "%s called on a non-initialized event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT \
			    ", flags: 0x%x)", \
			    __func__, (ev), (ev)->ev_events, \
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	} while (0)
/* Macro: assert that ev is not added (i.e., okay to tear down or set
 * up again) */
#define event_debug_assert_not_added_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent && dent->added) { \
			event_errx(EVENT_ERR_ABORT_, \
			    "%s called on an already added event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT", " \
			    "flags: 0x%x)", \
			    __func__, (ev), (ev)->ev_events, \
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	} while (0)
#else
#define event_debug_note_setup_(ev) \
	((void)0)
#define event_debug_note_teardown_(ev) \
	((void)0)
#define event_debug_note_add_(ev) \
	((void)0)
#define event_debug_note_del_(ev) \
	((void)0)
#define event_debug_assert_is_setup_(ev) \
	((void)0)
#define event_debug_assert_not_added_(ev) \
	((void)0)
#endif

#define EVENT_BASE_ASSERT_LOCKED(base) \
	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL 5

/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
		return -1;
	}

	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
	    < tp->tv_sec) {
		struct timeval tv;
		evutil_gettimeofday(&tv, NULL);
		evutil_timersub(&tv, tp, &base->tv_clock_diff);
		base->last_updated_clock_diff = tp->tv_sec;
	}

	return 0;
}

int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		r = evutil_gettimeofday(tv, NULL);
	} else {
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
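
/*
 * Illustrative sketch (not part of libevent): how a callback might use the
 * cached timestamp above instead of making a fresh gettimeofday() call on
 * every invocation.  The example_ name is hypothetical.
 */
#if 0
static void
example_log_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	struct timeval now;

	/* Returns the time cached at the start of this loop iteration, or the
	 * real wall-clock time if no cached value is available. */
	if (event_base_gettimeofday_cached(base, &now) == 0)
		printf("callback at %ld.%06ld\n",
		    (long)now.tv_sec, (long)now.tv_usec);
}
#endif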

/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
		gettime(base, &base->tv_cache);
}

int
event_base_update_cache_time(struct event_base *base)
{
	if (!base) {
		base = current_base;
		if (!current_base)
			return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->running_loop)
		update_time_cache(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}

static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}

static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
	return &ev->ev_evcallback;
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new_with_config(NULL);

	if (base == NULL) {
		event_errx(1, "%s: Unable to construct event_base", __func__);
		return NULL;
	}

	current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (cfg) {
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	return base;
}
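
/*
 * Illustrative sketch (not part of libevent): building a base from an explicit
 * event_config, as event_base_new() does above, but steering backend
 * selection.  "select" and EV_FEATURE_O1 come from the public API; the
 * example_ function name is hypothetical.
 */
#if 0
static struct event_base *
example_base_without_select(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (!cfg)
		return NULL;
	/* Never pick the select backend, and insist on an O(1) backend
	 * (epoll, kqueue, ...) if one was compiled in. */
	event_config_avoid_method(cfg, "select");
	event_config_require_features(cfg, EV_FEATURE_O1);
	base = event_base_new_with_config(cfg);
	event_config_free(cfg);
	return base;	/* NULL if no backend satisfied the constraints */
}
#endif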

/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
    const char *method)
{
	struct event_config_entry *entry;

	TAILQ_FOREACH(entry, &cfg->entries, next) {
		if (entry->avoid_method != NULL &&
		    strcmp(entry->avoid_method, method) == 0)
			return (1);
	}

	return (0);
}

/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
	char environment[64];
	int i;

	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
	for (i = 8; environment[i] != '\0'; ++i)
		environment[i] = EVUTIL_TOUPPER_(environment[i]);
	/* Note that evutil_getenv_() ignores the environment entirely if
	 * we're setuid */
	return (evutil_getenv_(environment) != NULL);
}

int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}

void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_)
		event_errx(1, "%s was called twice!", __func__);
	if (event_debug_mode_too_late)
		event_errx(1, "%s must be called *before* creating any events "
		    "or event_bases", __func__);

	event_debug_mode_on_ = 1;

	HT_INIT(event_debug_map, &global_debug_map);
#endif
}
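
/*
 * Illustrative sketch (not part of libevent): debug mode has to be switched on
 * before the first event or event_base is created, otherwise the checks above
 * abort the process.  The example_ function name is hypothetical.
 */
#if 0
static struct event_base *
example_base_with_debug_checks(void)
{
	/* Must come first: once event_debug_mode_too_late is set, enabling
	 * debug mode is a fatal error. */
	event_enable_debug_mode();
	return event_base_new();
}
#endif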

void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	struct event_debug_entry **ent, *victim;

	EVLOCK_LOCK(event_debug_map_lock_, 0);
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

	event_debug_mode_on_ = 0;
#endif
}

struct event_base *
event_base_new_with_config(const struct event_config *cfg)
{
	int i;
	struct event_base *base;
	int should_check_environment;

#ifndef EVENT__DISABLE_DEBUG_MODE
	event_debug_mode_too_late = 1;
#endif

	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
		event_warn("%s: calloc", __func__);
		return NULL;
	}

	if (cfg)
		base->flags = cfg->flags;

	should_check_environment =
	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));

	{
		struct timeval tmp;
		int precise_time =
		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
		int flags;
		if (should_check_environment && !precise_time) {
			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
			base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
		}
		flags = precise_time ? EV_MONOT_PRECISE : 0;
		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);

		gettime(base, &tmp);
	}

	min_heap_ctor_(&base->timeheap);

	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;
	base->th_notify_fd[0] = -1;
	base->th_notify_fd[1] = -1;

	TAILQ_INIT(&base->active_later_queue);

	evmap_io_initmap_(&base->io);
	evmap_signal_initmap_(&base->sigmap);
	event_changelist_init_(&base->changelist);

	base->evbase = NULL;

	if (cfg) {
		memcpy(&base->max_dispatch_time,
		    &cfg->max_dispatch_interval, sizeof(struct timeval));
		base->limit_callbacks_after_prio =
		    cfg->limit_callbacks_after_prio;
	} else {
		base->max_dispatch_time.tv_sec = -1;
		base->limit_callbacks_after_prio = 1;
	}
	if (cfg && cfg->max_dispatch_callbacks >= 0) {
		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
	} else {
		base->max_dispatch_callbacks = INT_MAX;
	}
	if (base->max_dispatch_callbacks == INT_MAX &&
	    base->max_dispatch_time.tv_sec == -1)
		base->limit_callbacks_after_prio = INT_MAX;

	for (i = 0; eventops[i] && !base->evbase; i++) {
		if (cfg != NULL) {
			/* determine if this backend should be avoided */
			if (event_config_is_avoided_method(cfg,
			    eventops[i]->name))
				continue;
			if ((eventops[i]->features & cfg->require_features)
			    != cfg->require_features)
				continue;
		}

		/* also obey the environment variables */
		if (should_check_environment &&
		    event_is_method_disabled(eventops[i]->name))
			continue;

		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL) {
		event_warnx("%s: no event mechanism available",
		    __func__);
		base->evsel = NULL;
		event_base_free(base);
		return NULL;
	}

	if (evutil_getenv_("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	if (event_base_priority_init(base, 1) < 0) {
		event_base_free(base);
		return NULL;
	}

	/* prepare for threading */

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
	event_debug_created_threadable_ctx_ = 1;
#endif

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (EVTHREAD_LOCKING_ENABLED() &&
	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
		int r;
		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
		EVTHREAD_ALLOC_COND(base->current_event_cond);
		r = evthread_make_base_notifiable(base);
		if (r < 0) {
			event_warnx("%s: Unable to make base notifiable.", __func__);
			event_base_free(base);
			return NULL;
		}
	}
#endif

#ifdef _WIN32
	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
		event_base_start_iocp_(base, cfg->n_cpus_hint);
#endif

	return (base);
}

int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
	if (base->iocp)
		return 0;
	base->iocp = event_iocp_port_launch_(n_cpus);
	if (!base->iocp) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}

void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
	int rv;

	if (!base->iocp)
		return;
	rv = event_iocp_shutdown_(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}

static int
event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    int run_finalizers)
{
	int result = 0;

	if (evcb->evcb_flags & EVLIST_INIT) {
		struct event *ev = event_callback_to_event(evcb);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
			result = 1;
		}
	} else {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		event_callback_cancel_nolock_(base, evcb, 1);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		result = 1;
	}

	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			struct event *ev = event_callback_to_event(evcb);
			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
			break;
		}
		case EV_CLOSURE_CB_FINALIZE:
			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
			break;
		default:
			break;
		}
	}
	return result;
}

static int event_base_free_queues_(struct event_base *base, int run_finalizers)
{
	int deleted = 0, i;

	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb, *next;
		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
			next = TAILQ_NEXT(evcb, evcb_active_next);
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
			evcb = next;
		}
	}

	{
		struct event_callback *evcb;
		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
		}
	}

	return deleted;
}

static void
event_base_free_(struct event_base *base, int run_finalizers)
{
	int i, n_deleted = 0;
	struct event *ev;
	/* XXXX grab the lock? If there is contention when one thread frees
	 * the base, then the contending thread will be very sad soon. */

	/* event_base_free(NULL) is how to free the current_base if we
	 * made it with event_init and forgot to hold a reference to it. */
	if (base == NULL && current_base)
		base = current_base;
	/* Don't actually free NULL. */
	if (base == NULL) {
		event_warnx("%s: no base to free", __func__);
		return;
	}
	/* XXX(niels) - check for internal events first */

#ifdef _WIN32
	event_base_stop_iocp_(base);
#endif

	/* threading fds if we have them */
	if (base->th_notify_fd[0] != -1) {
		event_del(&base->th_notify);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Delete all non-internal events. */
	evmap_delete_all_(base);

	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		event_del(&ctl->timeout_event); /* Internal; doesn't count */
		event_debug_unassign(&ctl->timeout_event);
		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
			struct event *next = TAILQ_NEXT(ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
		mm_free(ctl);
	}
	if (base->common_timeout_queues)
		mm_free(base->common_timeout_queues);

	for (;;) {
		/* A finalizer can register yet another finalizer from inside a
		 * finalizer, and if that finalizer lands in active_later_queue
		 * it can in turn add callbacks to activequeues; we would then
		 * return with events still in activequeues, which is not what
		 * we want (we even have an assertion for this).
		 *
		 * A simple case is a bufferevent with an underlying bufferevent
		 * (i.e. filters).
		 */
		int i = event_base_free_queues_(base, run_finalizers);
		if (!i) {
			break;
		}
		n_deleted += i;
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
		    __func__, n_deleted));

	while (LIST_FIRST(&base->once_events)) {
		struct event_once *eonce = LIST_FIRST(&base->once_events);
		LIST_REMOVE(eonce, next_once);
		mm_free(eonce);
	}

	if (base->evsel != NULL && base->evsel->dealloc != NULL)
		base->evsel->dealloc(base);

	for (i = 0; i < base->nactivequeues; ++i)
		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
	min_heap_dtor_(&base->timeheap);

	mm_free(base->activequeues);

	evmap_io_clear_(&base->io);
	evmap_signal_clear_(&base->sigmap);
	event_changelist_freemem_(&base->changelist);

	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
	EVTHREAD_FREE_COND(base->current_event_cond);

	/* If we're freeing current_base, there won't be a current_base. */
	if (base == current_base)
		current_base = NULL;
	mm_free(base);
}

void
event_base_free_nofinalize(struct event_base *base)
{
	event_base_free_(base, 0);
}

void
event_base_free(struct event_base *base)
{
	event_base_free_(base, 1);
}

/* Fake eventop; used to disable the backend temporarily inside event_reinit
 * so that we can call event_del() on an event without telling the backend.
 */
static int
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
	return 0;
}
const struct eventop nil_eventop = {
	"nil",
	NULL, /* init: unused. */
	NULL, /* add: unused. */
	nil_backend_del, /* del: used, so needs to be killed. */
	NULL, /* dispatch: unused. */
	NULL, /* dealloc: unused. */
	0, 0, 0
};

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel;
	int res = 0;
	int was_notifiable = 0;
	int had_signal_added = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	evsel = base->evsel;

	/* check if this event mechanism requires reinit on the backend */
	if (evsel->need_reinit) {
		/* We're going to call event_del() on our notify events (the
		 * ones that tell about signals and wakeup events). But we
		 * don't actually want to tell the backend to change its
		 * state, since it might still share some resource (a kqueue,
		 * an epoll fd) with the parent process, and we don't want to
		 * delete the fds from _that_ backend, so we temporarily stub
		 * out the evsel with a replacement.
		 */
		base->evsel = &nil_eventop;
	}

	/* We need to create a new signal-notification fd and a new
	 * thread-notification fd.  Otherwise, we'll still share those with
	 * the parent process, which would make any notification sent to them
	 * get received by one or both of the event loops, more or less at
	 * random.
	 */
	if (base->sig.ev_signal_added) {
		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
		event_debug_unassign(&base->sig.ev_signal);
		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
		had_signal_added = 1;
		base->sig.ev_signal_added = 0;
	}
	if (base->sig.ev_signal_pair[0] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
	if (base->sig.ev_signal_pair[1] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
	if (base->th_notify_fn != NULL) {
		was_notifiable = 1;
		base->th_notify_fn = NULL;
	}
	if (base->th_notify_fd[0] != -1) {
		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Replace the original evsel. */
	base->evsel = evsel;

	if (evsel->need_reinit) {
		/* Reconstruct the backend through brute-force, so that we do
		 * not share any structures with the parent process. For some
		 * backends, this is necessary: epoll and kqueue, for
		 * instance, have events associated with a kernel
		 * structure. If we didn't reinitialize, we'd share that
		 * structure with the parent process, and any changes made by
		 * the parent would affect our backend's behavior (and vice
		 * versa).
		 */
		if (base->evsel->dealloc != NULL)
			base->evsel->dealloc(base);
		base->evbase = evsel->init(base);
		if (base->evbase == NULL) {
			event_errx(1,
			    "%s: could not reinitialize event mechanism",
			    __func__);
			res = -1;
			goto done;
		}

		/* Empty out the changelist (if any): we are starting from a
		 * blank slate. */
		event_changelist_freemem_(&base->changelist);

		/* Tell the event maps to re-inform the backend about all
		 * pending events. This will make the signal notification
		 * event get re-created if necessary. */
		if (evmap_reinit_(base) < 0)
			res = -1;
	} else {
		res = evsig_init_(base);
		if (res == 0 && had_signal_added) {
			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
			if (res == 0)
				base->sig.ev_signal_added = 1;
		}
	}

	/* If we were notifiable before, and nothing just exploded, become
	 * notifiable again. */
	if (was_notifiable && res == 0)
		res = evthread_make_base_notifiable_nolock_(base);

done:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (res);
}
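
/*
 * Illustrative sketch (not part of libevent): the intended call pattern for
 * event_reinit() is in the child process right after fork(), before the child
 * re-enters the loop.  The example_ function name is hypothetical and the
 * sketch assumes a POSIX fork().
 */
#if 0
static void
example_fork_and_continue(struct event_base *base)
{
	pid_t pid = fork();
	if (pid == 0) {
		/* Child: rebuild backend state (epoll fd, kqueue, notification
		 * fds) so we stop sharing it with the parent. */
		if (event_reinit(base) < 0)
			event_warnx("event_reinit() failed");
		event_base_dispatch(base);
	}
	/* Parent keeps using 'base' unchanged. */
}
#endif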

/* Get the monotonic time for this event_base's timer */
int
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
{
	int rv = -1;

	if (base && tv) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return rv;
}
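
/*
 * Illustrative sketch (not part of libevent): measuring an interval with the
 * base's monotonic clock, which is unaffected by wall-clock adjustments.  The
 * example_ function name is hypothetical.
 */
#if 0
static void
example_measure_interval(struct event_base *base)
{
	struct timeval start, end, diff;
	if (event_gettime_monotonic(base, &start) < 0)
		return;
	/* ... do some work ... */
	if (event_gettime_monotonic(base, &end) < 0)
		return;
	evutil_timersub(&end, &start, &diff);
	printf("elapsed: %ld.%06ld s\n", (long)diff.tv_sec, (long)diff.tv_usec);
}
#endif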

const char **
event_get_supported_methods(void)
{
	static const char **methods = NULL;
	const struct eventop **method;
	const char **tmp;
	int i = 0, k;

	/* count all methods */
	for (method = &eventops[0]; *method != NULL; ++method) {
		++i;
	}

	/* allocate one more than we need for the NULL pointer */
	tmp = mm_calloc((i + 1), sizeof(char *));
	if (tmp == NULL)
		return (NULL);

	/* populate the array with the supported methods */
	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
		tmp[i++] = eventops[k]->name;
	}
	tmp[i] = NULL;

	if (methods != NULL)
		mm_free((char**)methods);

	methods = tmp;

	return (methods);
}
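
/*
 * Illustrative sketch (not part of libevent): listing the compiled-in
 * backends.  The returned array is NULL-terminated; the example_ function
 * name is hypothetical.
 */
#if 0
static void
example_print_backends(void)
{
	int i;
	const char **methods = event_get_supported_methods();
	if (!methods)
		return;
	for (i = 0; methods[i] != NULL; ++i)
		printf("supported backend: %s\n", methods[i]);
}
#endif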

struct event_config *
event_config_new(void)
{
	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));

	if (cfg == NULL)
		return (NULL);

	TAILQ_INIT(&cfg->entries);
	cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks = INT_MAX;
	cfg->limit_callbacks_after_prio = 1;

	return (cfg);
}

static void
event_config_entry_free(struct event_config_entry *entry)
{
	if (entry->avoid_method != NULL)
		mm_free((char *)entry->avoid_method);
	mm_free(entry);
}

void
event_config_free(struct event_config *cfg)
{
	struct event_config_entry *entry;

	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
		TAILQ_REMOVE(&cfg->entries, entry, next);
		event_config_entry_free(entry);
	}
	mm_free(cfg);
}

int
event_config_set_flag(struct event_config *cfg, int flag)
{
	if (!cfg)
		return -1;
	cfg->flags |= flag;
	return 0;
}

int
event_config_avoid_method(struct event_config *cfg, const char *method)
{
	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
	if (entry == NULL)
		return (-1);

	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
		mm_free(entry);
		return (-1);
	}

	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);

	return (0);
}

int
event_config_require_features(struct event_config *cfg,
    int features)
{
	if (!cfg)
		return (-1);
	cfg->require_features = features;
	return (0);
}

int
event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
{
	if (!cfg)
		return (-1);
	cfg->n_cpus_hint = cpus;
	return (0);
}

int
event_config_set_max_dispatch_interval(struct event_config *cfg,
    const struct timeval *max_interval, int max_callbacks, int min_priority)
{
	if (max_interval)
		memcpy(&cfg->max_dispatch_interval, max_interval,
		    sizeof(struct timeval));
	else
		cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks =
	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
	if (min_priority < 0)
		min_priority = 0;
	cfg->limit_callbacks_after_prio = min_priority;
	return (0);
}
|
|
|
|
|
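/* Illustrative usage sketch (not compiled into this file): the setters above
 * only record preferences on the event_config; they take effect when the
 * config is handed to event_base_new_with_config().  This assumes only the
 * public event2/event.h API; the helper name and the chosen numbers are
 * hypothetical. */
#if 0
#include <event2/event.h>

static struct event_base *
make_configured_base(void)
{
	struct timeval msec_50 = { 0, 50000 };
	struct event_config *cfg = event_config_new();
	struct event_base *base;

	if (!cfg)
		return NULL;
	/* Skip select, and require an O(1) backend such as epoll or kqueue. */
	event_config_avoid_method(cfg, "select");
	event_config_require_features(cfg, EV_FEATURE_O1);
	/* Let no single pass over the active queues run longer than ~50 msec
	 * or more than 64 callbacks before rechecking for new events. */
	event_config_set_max_dispatch_interval(cfg, &msec_50, 64, 0);

	base = event_base_new_with_config(cfg);
	event_config_free(cfg);	/* the base keeps its own copy of the settings */
	return base;
}
#endif
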
int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i, r;
	r = -1;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
	    || npriorities >= EVENT_MAX_PRIORITIES)
		goto err;

	if (npriorities == base->nactivequeues)
		goto ok;

	if (base->nactivequeues) {
		mm_free(base->activequeues);
		base->nactivequeues = 0;
	}

	/* Allocate our priority queues */
	base->activequeues = (struct evcallback_list *)
	    mm_calloc(npriorities, sizeof(struct evcallback_list));
	if (base->activequeues == NULL) {
		event_warn("%s: calloc", __func__);
		goto err;
	}
	base->nactivequeues = npriorities;

	for (i = 0; i < base->nactivequeues; ++i) {
		TAILQ_INIT(&base->activequeues[i]);
	}

ok:
	r = 0;
err:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (r);
}

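/* Illustrative sketch (not compiled into this file) of caller-side use of the
 * priority API above: numerically lower priorities are dispatched first, so
 * latency-sensitive events get the small numbers.  Assumes the public
 * event2/event.h API; the helper name is hypothetical. */
#if 0
#include <event2/event.h>

static void
setup_priorities(struct event_base *base, struct event *urgent, struct event *bulk)
{
	/* Three queues: 0 (dispatched first) .. 2 (dispatched last).  Must be
	 * called while no events are active on this base. */
	event_base_priority_init(base, 3);
	event_priority_set(urgent, 0);
	event_priority_set(bulk, 2);
}
#endif
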
int
event_base_get_npriorities(struct event_base *base)
{
	int n;
	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	n = base->nactivequeues;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (n);
}

int
event_base_get_num_events(struct event_base *base, unsigned int type)
{
	int r = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (type & EVENT_BASE_COUNT_ACTIVE)
		r += base->event_count_active;

	if (type & EVENT_BASE_COUNT_VIRTUAL)
		r += base->virtual_event_count;

	if (type & EVENT_BASE_COUNT_ADDED)
		r += base->event_count;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return r;
}

int
event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
{
	int r = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (type & EVENT_BASE_COUNT_ACTIVE) {
		r += base->event_count_active_max;
		if (clear)
			base->event_count_active_max = 0;
	}

	if (type & EVENT_BASE_COUNT_VIRTUAL) {
		r += base->virtual_event_count_max;
		if (clear)
			base->virtual_event_count_max = 0;
	}

	if (type & EVENT_BASE_COUNT_ADDED) {
		r += base->event_count_max;
		if (clear)
			base->event_count_max = 0;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return r;
}

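/* Illustrative sketch (not compiled into this file) of using the counters
 * above for periodic instrumentation.  Assumes the public event2/event.h API;
 * the helper name is hypothetical. */
#if 0
#include <event2/event.h>
#include <stdio.h>

static void
log_base_stats(struct event_base *base)
{
	int added  = event_base_get_num_events(base, EVENT_BASE_COUNT_ADDED);
	int active = event_base_get_num_events(base, EVENT_BASE_COUNT_ACTIVE);
	/* Read the high-water mark of active events and reset it. */
	int peak   = event_base_get_max_events(base, EVENT_BASE_COUNT_ACTIVE, 1);
	printf("events: %d added, %d active, %d peak active\n",
	    added, active, peak);
}
#endif
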
/* Returns true iff we're currently watching any events. */
static int
event_haveevents(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	return (base->virtual_event_count > 0 || base->event_count > 0);
}

/* "closure" function called when processing active signal events */
static inline void
event_signal_closure(struct event_base *base, struct event *ev)
{
	short ncalls;
	int should_break;

	/* Allows deletes to work */
	ncalls = ev->ev_ncalls;
	if (ncalls != 0)
		ev->ev_pncalls = &ncalls;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	while (ncalls) {
		ncalls--;
		ev->ev_ncalls = ncalls;
		if (ncalls == 0)
			ev->ev_pncalls = NULL;
		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);

		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		should_break = base->event_break;
		EVBASE_RELEASE_LOCK(base, th_base_lock);

		if (should_break) {
			if (ncalls != 0)
				ev->ev_pncalls = NULL;
			return;
		}
	}
}

/* Common timeouts are special timeouts that are handled as queues rather than
 * in the minheap.  This is more efficient than the minheap if we happen to
 * know that we're going to get several thousands of timeout events all with
 * the same timeout value.
 *
 * Since all our timeout handling code assumes timevals can be copied,
 * assigned, etc, we can't use "magic pointer" to encode these common
 * timeouts.  Searching through a list to see if every timeout is common could
 * also get inefficient.  Instead, we take advantage of the fact that tv_usec
 * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
 */

#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
#define COMMON_TIMEOUT_MASK     0xf0000000
#define COMMON_TIMEOUT_MAGIC    0x50000000

#define COMMON_TIMEOUT_IDX(tv) \
	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)

/** Return true iff 'tv' is a common timeout in 'base' */
static inline int
is_common_timeout(const struct timeval *tv,
    const struct event_base *base)
{
	int idx;
	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
		return 0;
	idx = COMMON_TIMEOUT_IDX(tv);
	return idx < base->n_common_timeouts;
}

/* True iff tv1 and tv2 have the same common-timeout index, or if neither
 * one is a common timeout. */
static inline int
is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
{
	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
	    (tv2->tv_usec & ~MICROSECONDS_MASK);
}

/** Requires that 'tv' is a common timeout.  Return the corresponding
 * common_timeout_list. */
static inline struct common_timeout_list *
get_common_timeout_list(struct event_base *base, const struct timeval *tv)
{
	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
}
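
/* Worked example of the encoding described above (illustrative numbers only):
 * for the common timeout registered at index 3 with a 250 msec duration, the
 * stored tv_usec would be
 *
 *	COMMON_TIMEOUT_MAGIC | (3 << COMMON_TIMEOUT_IDX_SHIFT) | 250000
 *	  = 0x50000000       | 0x00300000                      | 0x0003d090
 *	  = 0x5033d090
 *
 * is_common_timeout() checks the top nibble against 0x5, COMMON_TIMEOUT_IDX()
 * recovers the index 3, and masking with MICROSECONDS_MASK recovers the real
 * 250000 microseconds. */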

#if 0
static inline int
common_timeout_ok(const struct timeval *tv,
    struct event_base *base)
{
	const struct timeval *expect =
	    &get_common_timeout_list(base, tv)->duration;
	return tv->tv_sec == expect->tv_sec &&
	    tv->tv_usec == expect->tv_usec;
}
#endif

/* Add the timeout for the first event in given common timeout list to the
 * event_base's minheap. */
static void
common_timeout_schedule(struct common_timeout_list *ctl,
    const struct timeval *now, struct event *head)
{
	struct timeval timeout = head->ev_timeout;
	timeout.tv_usec &= MICROSECONDS_MASK;
	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
}

/* Callback: invoked when the timeout for a common timeout queue triggers.
 * This means that (at least) the first event in that queue should be run,
 * and the timeout should be rescheduled if there are more events. */
static void
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
{
	struct timeval now;
	struct common_timeout_list *ctl = arg;
	struct event_base *base = ctl->base;
	struct event *ev = NULL;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	gettime(base, &now);
	while (1) {
		ev = TAILQ_FIRST(&ctl->events);
		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
		    (ev->ev_timeout.tv_sec == now.tv_sec &&
			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
			break;
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
	if (ev)
		common_timeout_schedule(ctl, &now, ev);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

#define MAX_COMMON_TIMEOUTS 256

const struct timeval *
event_base_init_common_timeout(struct event_base *base,
    const struct timeval *duration)
{
	int i;
	struct timeval tv;
	const struct timeval *result=NULL;
	struct common_timeout_list *new_ctl;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (duration->tv_usec > 1000000) {
		memcpy(&tv, duration, sizeof(struct timeval));
		if (is_common_timeout(duration, base))
			tv.tv_usec &= MICROSECONDS_MASK;
		tv.tv_sec += tv.tv_usec / 1000000;
		tv.tv_usec %= 1000000;
		duration = &tv;
	}
	for (i = 0; i < base->n_common_timeouts; ++i) {
		const struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		if (duration->tv_sec == ctl->duration.tv_sec &&
		    duration->tv_usec ==
		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
			result = &ctl->duration;
			goto done;
		}
	}
	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
		event_warnx("%s: Too many common timeouts already in use; "
		    "we only support %d per event_base", __func__,
		    MAX_COMMON_TIMEOUTS);
		goto done;
	}
	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
		int n = base->n_common_timeouts < 16 ? 16 :
		    base->n_common_timeouts*2;
		struct common_timeout_list **newqueues =
		    mm_realloc(base->common_timeout_queues,
			n*sizeof(struct common_timeout_queue *));
		if (!newqueues) {
			event_warn("%s: realloc",__func__);
			goto done;
		}
		base->n_common_timeouts_allocated = n;
		base->common_timeout_queues = newqueues;
	}
	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
	if (!new_ctl) {
		event_warn("%s: calloc",__func__);
		goto done;
	}
	TAILQ_INIT(&new_ctl->events);
	new_ctl->duration.tv_sec = duration->tv_sec;
	new_ctl->duration.tv_usec =
	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
	evtimer_assign(&new_ctl->timeout_event, base,
	    common_timeout_callback, new_ctl);
	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&new_ctl->timeout_event, 0);
	new_ctl->base = base;
	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
	result = &new_ctl->duration;

done:
	if (result)
		EVUTIL_ASSERT(is_common_timeout(result, base));

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return result;
}

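/* Illustrative usage sketch (not compiled into this file): when many events
 * share the same interval, registering it as a common timeout keeps them in
 * one ordered queue instead of the minheap.  Assumes the public
 * event2/event.h API; the helper name is hypothetical. */
#if 0
#include <event2/event.h>

static void
add_with_common_timeout(struct event_base *base, struct event *ev)
{
	struct timeval ten_sec = { 10, 0 };
	const struct timeval *tv_common =
	    event_base_init_common_timeout(base, &ten_sec);
	/* Fall back to the plain timeval if registration failed. */
	event_add(ev, tv_common ? tv_common : &ten_sec);
}
#endif
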
/* Closure function invoked when we're activating a persistent event. */
static inline void
event_persist_closure(struct event_base *base, struct event *ev)
{
	void (*evcb_callback)(evutil_socket_t, short, void *);

	// Other fields of *ev that must be stored before executing
	evutil_socket_t evcb_fd;
	short evcb_res;
	void *evcb_arg;

	/* reschedule the persistent event if we have a timeout. */
	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
		/* If there was a timeout, we want it to run at an interval of
		 * ev_io_timeout after the last time it was _scheduled_ for,
		 * not ev_io_timeout after _now_.  If it fired for another
		 * reason, though, the timeout ought to start ticking _now_. */
		struct timeval run_at, relative_to, delay, now;
		ev_uint32_t usec_mask = 0;
		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
			&ev->ev_io_timeout));
		gettime(base, &now);
		if (is_common_timeout(&ev->ev_timeout, base)) {
			delay = ev->ev_io_timeout;
			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
			delay.tv_usec &= MICROSECONDS_MASK;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
				relative_to.tv_usec &= MICROSECONDS_MASK;
			} else {
				relative_to = now;
			}
		} else {
			delay = ev->ev_io_timeout;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
			} else {
				relative_to = now;
			}
		}
		evutil_timeradd(&relative_to, &delay, &run_at);
		if (evutil_timercmp(&run_at, &now, <)) {
			/* Looks like we missed at least one invocation due to
			 * a clock jump, not running the event loop for a
			 * while, really slow callbacks, or
			 * something. Reschedule relative to now.
			 */
			evutil_timeradd(&now, &delay, &run_at);
		}
		run_at.tv_usec |= usec_mask;
		event_add_nolock_(ev, &run_at, 1);
	}

	// Save our callback before we release the lock
	evcb_callback = ev->ev_callback;
	evcb_fd = ev->ev_fd;
	evcb_res = ev->ev_res;
	evcb_arg = ev->ev_arg;

	// Release the lock
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	// Execute the callback
	(evcb_callback)(evcb_fd, evcb_res, evcb_arg);
}

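/* Concretely (illustrative numbers, not taken from the code above): a
 * persistent event with a 5-second ev_io_timeout whose deadline was t=100 and
 * which fires because of that timeout is rescheduled for t=105 (deadline +
 * delay); if it instead fires early at t=102 because of I/O readiness, the
 * next deadline becomes t=107 (now + delay); and if the computed run_at has
 * already passed, e.g. after a clock jump or a very slow callback, it falls
 * back to now + 5. */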
/*
  Helper for event_process_active to process all the events in a single queue,
  releasing the lock as we go.  This function requires that the lock be held
  when it's invoked.  Returns -1 if we get a signal or an event_break that
  means we should stop processing any active events now.  Otherwise returns
  the number of non-internal event_callbacks that we processed.
*/
static int
event_process_active_single_queue(struct event_base *base,
    struct evcallback_list *activeq,
    int max_to_process, const struct timeval *endtime)
{
	struct event_callback *evcb;
	int count = 0;

	EVUTIL_ASSERT(activeq != NULL);

	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
		struct event *ev=NULL;
		if (evcb->evcb_flags & EVLIST_INIT) {
			ev = event_callback_to_event(evcb);

			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
				event_queue_remove_active(base, evcb);
			else
				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
			event_debug((
			    "event_process_active: event: %p, %s%s%scall %p",
			    ev,
			    ev->ev_res & EV_READ ? "EV_READ " : " ",
			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
			    ev->ev_callback));
		} else {
			event_queue_remove_active(base, evcb);
			event_debug(("event_process_active: event_callback %p, "
				"closure %d, call %p",
				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
		}

		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
			++count;

		base->current_event = evcb;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		base->current_event_waiters = 0;
#endif

		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_SIGNAL:
			EVUTIL_ASSERT(ev != NULL);
			event_signal_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT_PERSIST:
			EVUTIL_ASSERT(ev != NULL);
			event_persist_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT: {
			void (*evcb_callback)(evutil_socket_t, short, void *);
			EVUTIL_ASSERT(ev != NULL);
			evcb_callback = *ev->ev_callback;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
		}
		break;
		case EV_CLOSURE_CB_SELF: {
			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_selfcb(evcb, evcb->evcb_arg);
		}
		break;
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			void (*evcb_evfinalize)(struct event *, void *);
			int evcb_closure = evcb->evcb_closure;
			EVUTIL_ASSERT(ev != NULL);
			base->current_event = NULL;
			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_evfinalize(ev, ev->ev_arg);
			event_debug_note_teardown_(ev);
			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
		}
		break;
		case EV_CLOSURE_CB_FINALIZE: {
			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
			base->current_event = NULL;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_cbfinalize(evcb, evcb->evcb_arg);
		}
		break;
		default:
			EVUTIL_ASSERT(0);
		}

		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		base->current_event = NULL;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		if (base->current_event_waiters) {
			base->current_event_waiters = 0;
			EVTHREAD_COND_BROADCAST(base->current_event_cond);
		}
#endif

		if (base->event_break)
			return -1;
		if (count >= max_to_process)
			return count;
		if (count && endtime) {
			struct timeval now;
			update_time_cache(base);
			gettime(base, &now);
			if (evutil_timercmp(&now, endtime, >=))
				return count;
		}
		if (base->event_continue)
			break;
	}
	return count;
}

/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */

static int
event_process_active(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	struct evcallback_list *activeq = NULL;
	int i, c = 0;
	const struct timeval *endtime;
	struct timeval tv;
	const int maxcb = base->max_dispatch_callbacks;
	const int limit_after_prio = base->limit_callbacks_after_prio;
	if (base->max_dispatch_time.tv_sec >= 0) {
		update_time_cache(base);
		gettime(base, &tv);
		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
		endtime = &tv;
	} else {
		endtime = NULL;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
			base->event_running_priority = i;
			activeq = &base->activequeues[i];
			if (i < limit_after_prio)
				c = event_process_active_single_queue(base, activeq,
				    INT_MAX, NULL);
			else
				c = event_process_active_single_queue(base, activeq,
				    maxcb, endtime);
			if (c < 0) {
				goto done;
			} else if (c > 0)
				break; /* Processed a real event; do not
					* consider lower-priority events */
			/* If we get here, all of the events we processed
			 * were internal.  Continue. */
		}
	}

done:
	base->event_running_priority = -1;

	return c;
}

/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(const struct event_base *base)
{
	EVUTIL_ASSERT(base);
	return (base->evsel->name);
}

/** Callback: used to implement event_base_loopexit by telling the event_base
 * that it's time to exit its loop. */
static void
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}

int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	int r = 0;
	if (event_base == NULL)
		return (-1);

	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	event_base->event_break = 1;

	if (EVBASE_NEED_NOTIFY(event_base)) {
		r = evthread_notify_base(event_base);
	} else {
		r = (0);
	}
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return r;
}

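/* Illustrative sketch (not compiled into this file) of the two ways to leave
 * the loop from application code: loopexit lets active callbacks finish,
 * optionally after a delay, while loopbreak stops right after the current
 * callback.  Assumes the public event2/event.h API; the helper and callback
 * names are hypothetical. */
#if 0
#include <event2/event.h>

static void
stop_examples(struct event_base *base)
{
	struct timeval ten_sec = { 10, 0 };
	event_base_loopexit(base, &ten_sec);	/* exit roughly 10 seconds from now */
	event_base_loopexit(base, NULL);	/* exit after the pending callbacks run */
}

static void
on_fatal_error(evutil_socket_t fd, short what, void *arg)
{
	event_base_loopbreak((struct event_base *)arg);	/* exit as soon as possible */
}
#endif
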
int
event_base_loopcontinue(struct event_base *event_base)
{
	int r = 0;
	if (event_base == NULL)
		return (-1);

	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	event_base->event_continue = 1;

	if (EVBASE_NEED_NOTIFY(event_base)) {
		r = evthread_notify_base(event_base);
	} else {
		r = (0);
	}
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return r;
}

int
event_base_got_break(struct event_base *event_base)
{
	int res;
	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	res = event_base->event_break;
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return res;
}

int
event_base_got_exit(struct event_base *event_base)
{
	int res;
	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	res = event_base->event_gotterm;
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return res;
}

/* not thread safe */

int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done, retval = 0;

	/* Grab the lock.  We will release it inside evsel.dispatch, and again
	 * as we invoke user callbacks. */
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (base->running_loop) {
		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
		    " can run on each event_base at once.", __func__);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		return -1;
	}

	base->running_loop = 1;

	clear_time_cache(base);

	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
		evsig_set_base_(base);

	done = 0;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	base->th_owner_id = EVTHREAD_GET_ID();
#endif

	base->event_gotterm = base->event_break = 0;

	while (!done) {
		base->event_continue = 0;
		base->n_deferreds_queued = 0;

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			break;
		}

		if (base->event_break) {
			break;
		}

		tv_p = &tv;
		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
			event_debug(("%s: no events registered.", __func__));
			retval = 1;
			goto done;
		}

		event_queue_make_later_events_active(base);

		clear_time_cache(base);

		res = evsel->dispatch(base, tv_p);

		if (res == -1) {
			event_debug(("%s: dispatch returned unsuccessfully.",
				__func__));
			retval = -1;
			goto done;
		}

		update_time_cache(base);

		timeout_process(base);

		if (N_ACTIVE_CALLBACKS(base)) {
			int n = event_process_active(base);
			if ((flags & EVLOOP_ONCE)
			    && N_ACTIVE_CALLBACKS(base) == 0
			    && n != 0)
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}
	event_debug(("%s: asked to terminate loop.", __func__));

done:
	clear_time_cache(base);
	base->running_loop = 0;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return (retval);
}

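/* Illustrative sketch (not compiled into this file) of the loop flags handled
 * above: event_base_dispatch() is event_base_loop(base, 0).  Assumes the
 * public event2/event.h API; the helper name is hypothetical. */
#if 0
#include <event2/event.h>

static void
loop_examples(struct event_base *base)
{
	event_base_loop(base, EVLOOP_ONCE);	/* block, run one batch of callbacks, return */
	event_base_loop(base, EVLOOP_NONBLOCK);	/* poll, run whatever is ready, return */
	/* Keep looping even while no events are added (the "retval = 1" case above): */
	event_base_loop(base, EVLOOP_NO_EXIT_ON_EMPTY);
}
#endif
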
/* One-time callback to implement event_base_once: invokes the user callback,
 * then deletes the allocated storage */
static void
event_once_cb(evutil_socket_t fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
	LIST_REMOVE(eonce, next_once);
	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
	event_debug_unassign(&eonce->ev);
	mm_free(eonce);
}

/* not threadsafe, event scheduled once. */
int
event_once(evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	int res = 0;
	int activate = 0;

	/* We cannot support signals that just fire once, or persistent
	 * events. */
	if (events & (EV_SIGNAL|EV_PERSIST))
		return (-1);

	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);

		if (tv == NULL || ! evutil_timerisset(tv)) {
			/* If the event is going to become active immediately,
			 * don't put it on the timeout queue.  This is one
			 * idiom for scheduling a callback, so let's make
			 * it fast (and order-preserving). */
			activate = 1;
		}
	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
		events &= EV_READ|EV_WRITE|EV_CLOSED;

		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		mm_free(eonce);
		return (-1);
	}

	if (res == 0) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		if (activate)
			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
		else
			res = event_add_nolock_(&eonce->ev, tv, 0);

		if (res != 0) {
			mm_free(eonce);
			return (res);
		} else {
			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
		}
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return (0);
}

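/* Illustrative sketch (not compiled into this file): event_base_once()
 * schedules a one-shot callback without the caller having to manage a struct
 * event.  Assumes the public event2/event.h API; the helper and callback
 * names are hypothetical. */
#if 0
#include <event2/event.h>

static void
greet_cb(evutil_socket_t fd, short what, void *arg)
{
	/* Runs exactly once; the internal struct event_once is freed for us. */
}

static int
schedule_once(struct event_base *base)
{
	struct timeval half_sec = { 0, 500000 };
	return event_base_once(base, -1, EV_TIMEOUT, greet_cb, NULL, &half_sec);
}
#endif
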
int
event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
{
	if (!base)
		base = current_base;
	if (arg == &event_self_cbarg_ptr_)
		arg = ev;

	event_debug_assert_not_added_(ev);

	ev->ev_base = base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	if (events & EV_SIGNAL) {
		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
			event_warnx("%s: EV_SIGNAL is not compatible with "
			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
			return -1;
		}
		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
	} else {
		if (events & EV_PERSIST) {
			evutil_timerclear(&ev->ev_io_timeout);
			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
		} else {
			ev->ev_closure = EV_CLOSURE_EVENT;
		}
	}

	min_heap_elem_init_(ev);

	if (base != NULL) {
		/* by default, we put new events into the middle priority */
		ev->ev_pri = base->nactivequeues / 2;
	}

	event_debug_note_setup_(ev);

	return 0;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	event_debug_assert_is_setup_(ev);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}

void
event_set(struct event *ev, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *), void *arg)
{
	int r;
	r = event_assign(ev, current_base, fd, events, callback, arg);
	EVUTIL_ASSERT(r == 0);
}

void *
event_self_cbarg(void)
{
	return &event_self_cbarg_ptr_;
}

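/* Illustrative sketch (not compiled into this file): passing
 * event_self_cbarg() to event_new()/event_assign() makes the callback receive
 * its own struct event, which is otherwise impossible to supply before the
 * event exists.  Assumes the public event2/event.h API; the helper and
 * callback names are hypothetical. */
#if 0
#include <event2/event.h>

static void
tick_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event *self = arg;	/* the event itself, via event_self_cbarg() */
	struct timeval one_sec = { 1, 0 };
	event_add(self, &one_sec);	/* re-arm ourselves for another tick */
}

static struct event *
make_ticker(struct event_base *base)
{
	return event_new(base, -1, EV_TIMEOUT, tick_cb, event_self_cbarg());
}
#endif
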
struct event *
event_base_get_running_event(struct event_base *base)
{
	struct event *ev = NULL;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (EVBASE_IN_THREAD(base)) {
		struct event_callback *evcb = base->current_event;
		if (evcb->evcb_flags & EVLIST_INIT)
			ev = event_callback_to_event(evcb);
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return ev;
}

2008-04-25 01:18:18 +00:00
|
|
|
struct event *
|
|
|
|
event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
|
|
|
|
{
|
|
|
|
struct event *ev;
|
|
|
|
ev = mm_malloc(sizeof(struct event));
|
2008-05-03 22:14:44 +00:00
|
|
|
if (ev == NULL)
|
|
|
|
return (NULL);
|
2009-10-27 04:25:45 +00:00
|
|
|
if (event_assign(ev, base, fd, events, cb, arg) < 0) {
|
|
|
|
mm_free(ev);
|
|
|
|
return (NULL);
|
|
|
|
}
|
2008-05-03 22:14:44 +00:00
|
|
|
|
|
|
|
return (ev);
|
2008-04-25 01:18:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
event_free(struct event *ev)
|
|
|
|
{
|
2013-03-28 14:13:19 -04:00
|
|
|
	/* This is disabled, so that events which have been finalized can still be a
|
|
|
|
	 * valid target for event_free(). */
|
|
|
|
// event_debug_assert_is_setup_(ev);
|
2010-01-22 00:34:37 -05:00
|
|
|
|
2008-04-25 01:29:15 +00:00
|
|
|
/* make sure that this event won't be coming back to haunt us. */
|
2008-04-25 01:18:18 +00:00
|
|
|
event_del(ev);
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_note_teardown_(ev);
|
2008-04-25 01:18:18 +00:00
|
|
|
mm_free(ev);
|
2010-01-22 00:34:37 -05:00
|
|
|
|
|
|
|
}
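/*
 * Illustrative sketch, not part of event.c: the usual lifetime of a
 * heap-allocated event.  "read_cb", "sock", and "arg" are assumed to come
 * from the caller; <event2/event.h> is assumed to be included.
 */
static struct event *
watch_socket(struct event_base *base, evutil_socket_t sock,
    event_callback_fn read_cb, void *arg)
{
	struct timeval ten_sec = { 10, 0 };
	struct event *ev = event_new(base, sock, EV_READ | EV_PERSIST,
	    read_cb, arg);
	if (ev == NULL)
		return NULL;
	if (event_add(ev, &ten_sec) < 0) {
		event_free(ev);		/* event_free() deletes the event first */
		return NULL;
	}
	return ev;			/* caller eventually calls event_free(ev) */
}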
|
|
|
|
|
|
|
|
void
|
|
|
|
event_debug_unassign(struct event *ev)
|
|
|
|
{
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_assert_not_added_(ev);
|
|
|
|
event_debug_note_teardown_(ev);
|
2010-01-22 00:34:37 -05:00
|
|
|
|
|
|
|
ev->ev_flags &= ~EVLIST_INIT;
|
2008-04-25 01:18:18 +00:00
|
|
|
}
|
|
|
|
|
2013-03-28 14:13:19 -04:00
|
|
|
#define EVENT_FINALIZE_FREE_ 0x10000
|
2013-04-26 11:57:40 -04:00
|
|
|
static int
|
2013-03-28 14:13:19 -04:00
|
|
|
event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
|
|
|
|
{
|
2013-04-30 22:57:25 -04:00
|
|
|
ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
|
2013-03-28 14:13:19 -04:00
|
|
|
EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
|
|
|
|
|
|
|
|
event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
|
|
|
|
ev->ev_closure = closure;
|
|
|
|
ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
|
|
|
|
event_active_nolock_(ev, EV_FINALIZE, 1);
|
|
|
|
ev->ev_flags |= EVLIST_FINALIZING;
|
2013-04-26 11:57:40 -04:00
|
|
|
return 0;
|
2013-03-28 14:13:19 -04:00
|
|
|
}
|
|
|
|
|
2013-04-26 11:57:40 -04:00
|
|
|
static int
|
2013-03-28 14:13:19 -04:00
|
|
|
event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
|
|
|
|
{
|
2013-04-26 11:57:40 -04:00
|
|
|
int r;
|
2013-03-28 14:13:19 -04:00
|
|
|
struct event_base *base = ev->ev_base;
|
|
|
|
if (EVUTIL_FAILURE_CHECK(!base)) {
|
|
|
|
event_warnx("%s: event has no event_base set.", __func__);
|
2013-04-26 11:57:40 -04:00
|
|
|
return -1;
|
2013-03-28 14:13:19 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
2013-04-26 11:57:40 -04:00
|
|
|
r = event_finalize_nolock_(base, flags, ev, cb);
|
2013-03-28 14:13:19 -04:00
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
2013-04-26 11:57:40 -04:00
|
|
|
return r;
|
2013-03-28 14:13:19 -04:00
|
|
|
}
|
|
|
|
|
2013-04-26 11:57:40 -04:00
|
|
|
int
|
2013-03-28 14:13:19 -04:00
|
|
|
event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
|
|
|
|
{
|
|
|
|
return event_finalize_impl_(flags, ev, cb);
|
|
|
|
}
|
|
|
|
|
2013-04-26 11:57:40 -04:00
|
|
|
int
|
2013-03-28 14:13:19 -04:00
|
|
|
event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
|
|
|
|
{
|
|
|
|
return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
|
|
|
|
}
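/*
 * Illustrative sketch, not part of event.c: tearing down an event whose
 * callback may still be running in the loop thread.  The finalizer runs
 * from the event loop once the event can no longer fire, and libevent frees
 * the event itself afterwards.  "struct conn" is a hypothetical
 * caller-owned object.
 */
struct conn {
	struct event *ev;
	void *state;
};

static void
conn_finalize_cb(struct event *ev, void *arg)
{
	struct conn *c = arg;
	free(c->state);		/* safe: the event callback cannot run again */
	free(c);		/* the event memory is released by libevent */
}

static void
conn_close(struct conn *c)
{
	event_free_finalize(0, c->ev, conn_finalize_cb);
}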
|
|
|
|
|
|
|
|
void
|
|
|
|
event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
|
|
|
|
{
|
|
|
|
struct event *ev = NULL;
|
|
|
|
if (evcb->evcb_flags & EVLIST_INIT) {
|
|
|
|
ev = event_callback_to_event(evcb);
|
|
|
|
event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
|
|
|
|
} else {
|
|
|
|
event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
|
|
|
|
}
|
|
|
|
|
|
|
|
evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
|
|
|
|
evcb->evcb_cb_union.evcb_cbfinalize = cb;
|
|
|
|
event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
|
|
|
|
evcb->evcb_flags |= EVLIST_FINALIZING;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
|
|
|
|
{
|
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
|
|
|
event_callback_finalize_nolock_(base, flags, evcb, cb);
|
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided
|
|
|
|
* callback will be invoked on *one of them*, after they have *all* been
|
|
|
|
* finalized. */
|
|
|
|
int
|
|
|
|
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
|
|
|
|
{
|
|
|
|
int n_pending = 0, i;
|
|
|
|
|
|
|
|
if (base == NULL)
|
|
|
|
base = current_base;
|
|
|
|
|
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
|
|
|
|
|
|
|
event_debug(("%s: %d events finalizing", __func__, n_cbs));
|
|
|
|
|
|
|
|
/* At most one can be currently executing; the rest we just
|
|
|
|
* cancel... But we always make sure that the finalize callback
|
|
|
|
* runs. */
|
|
|
|
for (i = 0; i < n_cbs; ++i) {
|
|
|
|
struct event_callback *evcb = evcbs[i];
|
|
|
|
if (evcb == base->current_event) {
|
|
|
|
event_callback_finalize_nolock_(base, 0, evcb, cb);
|
|
|
|
++n_pending;
|
|
|
|
} else {
|
|
|
|
event_callback_cancel_nolock_(base, evcb, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (n_pending == 0) {
|
|
|
|
/* Just do the first one. */
|
|
|
|
event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
|
|
|
|
}
|
|
|
|
|
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2004-09-19 21:08:09 +00:00
|
|
|
/*
|
|
|
|
 * Sets the priority of an event - if an event is already active,
|
|
|
|
 * changing the priority will fail.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int
|
|
|
|
event_priority_set(struct event *ev, int pri)
|
|
|
|
{
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_assert_is_setup_(ev);
|
2010-01-22 00:34:37 -05:00
|
|
|
|
2007-12-24 22:49:30 +00:00
|
|
|
if (ev->ev_flags & EVLIST_ACTIVE)
|
|
|
|
return (-1);
|
|
|
|
if (pri < 0 || pri >= ev->ev_base->nactivequeues)
|
|
|
|
return (-1);
|
|
|
|
|
|
|
|
ev->ev_pri = pri;
|
|
|
|
|
|
|
|
return (0);
|
2002-04-09 15:14:06 +00:00
|
|
|
}
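/*
 * Illustrative sketch, not part of event.c: priorities only exist after
 * event_base_priority_init() has sized the active queues, and an event's
 * priority must be changed while it is not active.  The queue count and the
 * two events are hypothetical.
 */
static int
setup_priorities(struct event_base *base, struct event *urgent,
    struct event *bulk)
{
	if (event_base_priority_init(base, 3) < 0)	/* queues 0..2 */
		return -1;
	if (event_priority_set(urgent, 0) < 0)		/* 0 runs first */
		return -1;
	return event_priority_set(bulk, 2);
}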
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Checks if a specific event is pending or scheduled.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int
|
2010-02-02 15:44:10 -05:00
|
|
|
event_pending(const struct event *ev, short event, struct timeval *tv)
|
2002-04-09 15:14:06 +00:00
|
|
|
{
|
|
|
|
int flags = 0;
|
|
|
|
|
2012-11-16 11:55:27 -05:00
|
|
|
if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
|
|
|
|
event_warnx("%s: event has no event_base set.", __func__);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-06-15 01:01:05 +08:00
|
|
|
EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_assert_is_setup_(ev);
|
2010-01-22 00:34:37 -05:00
|
|
|
|
2002-04-09 15:14:06 +00:00
|
|
|
if (ev->ev_flags & EVLIST_INSERTED)
|
Implemented EV_CLOSED event for epoll backend (EPOLLRDHUP).
- Added new EV_CLOSED event - detects premature connection close
by clients without the necessity of reading all the pending
data. Does not depend on EV_READ and/or EV_WRITE.
- Added new EV_FEATURE_EARLY_CLOSED feature for epoll.
Must be supported for listening to EV_CLOSED event.
- Added new regression test: test-closed.c
- All regression tests passed (test/regress and test/test.sh)
- strace output of test-closed using EV_CLOSED:
socketpair(PF_LOCAL, SOCK_STREAM, 0, [6, 7]) = 0
sendto(6, "test string\0", 12, 0, NULL, 0) = 12
shutdown(6, SHUT_WR) = 0
epoll_ctl(3, EPOLL_CTL_ADD, 7, {EPOLLRDHUP, {u32=7, u64=7}}) = 0
epoll_wait(3, {{EPOLLRDHUP, {u32=7, u64=7}}}, 32, 3000) = 1
epoll_ctl(3, EPOLL_CTL_MOD, 7, {EPOLLRDHUP, {u32=7, u64=7}}) = 0
fstat(1, {st_mode=S_IFCHR|0620, st_rdev=makedev(136, 4), ...})
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYM...
write(1, "closed_cb: detected connection close "..., 45) = 45
2014-01-17 23:20:42 -02:00
|
|
|
flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
|
2012-04-06 03:00:40 -04:00
|
|
|
if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
|
2007-12-24 22:49:30 +00:00
|
|
|
flags |= ev->ev_res;
|
2002-04-09 15:14:06 +00:00
|
|
|
if (ev->ev_flags & EVLIST_TIMEOUT)
|
|
|
|
flags |= EV_TIMEOUT;
|
|
|
|
|
2014-01-17 23:20:42 -02:00
|
|
|
event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
|
2002-04-09 15:14:06 +00:00
|
|
|
|
|
|
|
/* See if there is a timeout that we should report */
|
2006-03-28 04:33:41 +00:00
|
|
|
if (tv != NULL && (flags & event & EV_TIMEOUT)) {
|
2009-11-09 17:16:30 +00:00
|
|
|
struct timeval tmp = ev->ev_timeout;
|
|
|
|
tmp.tv_usec &= MICROSECONDS_MASK;
|
2011-03-03 15:34:22 -05:00
|
|
|
		/* correctly remap to real time */
|
|
|
|
evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
|
2006-03-28 04:33:41 +00:00
|
|
|
}
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2012-06-15 01:01:05 +08:00
|
|
|
EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
|
|
|
|
|
2002-04-09 15:14:06 +00:00
|
|
|
return (flags & event);
|
|
|
|
}
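/*
 * Illustrative sketch, not part of event.c: event_pending() tests the
 * requested flags; with EV_TIMEOUT and a non-NULL tv it also reports the
 * (real-time remapped) expiry of a pending timeout.  The wrapper name is
 * hypothetical.
 */
static int
timer_is_pending(const struct event *timer, struct timeval *expiry_out)
{
	return event_pending(timer, EV_TIMEOUT, expiry_out);
}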
|
|
|
|
|
2008-05-05 15:45:30 +00:00
|
|
|
int
|
2010-11-23 13:08:07 -05:00
|
|
|
event_initialized(const struct event *ev)
|
2008-05-05 15:45:30 +00:00
|
|
|
{
|
|
|
|
if (!(ev->ev_flags & EVLIST_INIT))
|
|
|
|
return 0;
|
2010-11-23 13:08:07 -05:00
|
|
|
|
2008-05-05 15:45:30 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2010-01-19 14:01:36 -05:00
|
|
|
void
|
|
|
|
event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
|
|
|
|
{
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_assert_is_setup_(event);
|
2010-01-22 00:34:37 -05:00
|
|
|
|
2010-01-19 14:01:36 -05:00
|
|
|
if (base_out)
|
|
|
|
*base_out = event->ev_base;
|
|
|
|
if (fd_out)
|
|
|
|
*fd_out = event->ev_fd;
|
|
|
|
if (events_out)
|
|
|
|
*events_out = event->ev_events;
|
|
|
|
if (callback_out)
|
|
|
|
*callback_out = event->ev_callback;
|
|
|
|
if (arg_out)
|
|
|
|
*arg_out = event->ev_arg;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t
|
|
|
|
event_get_struct_event_size(void)
|
|
|
|
{
|
|
|
|
return sizeof(struct event);
|
|
|
|
}
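/*
 * Illustrative sketch, not part of event.c: callers that cannot include
 * event2/event_struct.h can size their own storage at run time and
 * initialize it with event_assign().  The helper name is hypothetical.
 */
static struct event *
alloc_and_assign(struct event_base *base, evutil_socket_t fd,
    event_callback_fn cb, void *arg)
{
	struct event *ev = malloc(event_get_struct_event_size());
	if (ev != NULL && event_assign(ev, base, fd, EV_READ, cb, arg) < 0) {
		free(ev);
		ev = NULL;
	}
	return ev;
}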
|
|
|
|
|
2008-05-05 15:45:30 +00:00
|
|
|
evutil_socket_t
|
2010-01-19 14:01:36 -05:00
|
|
|
event_get_fd(const struct event *ev)
|
2008-05-05 15:45:30 +00:00
|
|
|
{
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_assert_is_setup_(ev);
|
2008-05-05 15:45:30 +00:00
|
|
|
return ev->ev_fd;
|
|
|
|
}
|
|
|
|
|
2009-05-05 01:09:03 +00:00
|
|
|
struct event_base *
|
2010-01-19 14:01:36 -05:00
|
|
|
event_get_base(const struct event *ev)
|
2009-05-05 01:09:03 +00:00
|
|
|
{
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_assert_is_setup_(ev);
|
2009-05-05 01:09:03 +00:00
|
|
|
return ev->ev_base;
|
|
|
|
}
|
|
|
|
|
2010-01-19 14:01:36 -05:00
|
|
|
short
|
|
|
|
event_get_events(const struct event *ev)
|
|
|
|
{
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_assert_is_setup_(ev);
|
2010-01-19 14:01:36 -05:00
|
|
|
return ev->ev_events;
|
|
|
|
}
|
|
|
|
|
|
|
|
event_callback_fn
|
|
|
|
event_get_callback(const struct event *ev)
|
|
|
|
{
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_assert_is_setup_(ev);
|
2010-01-19 14:01:36 -05:00
|
|
|
return ev->ev_callback;
|
|
|
|
}
|
|
|
|
|
|
|
|
void *
|
|
|
|
event_get_callback_arg(const struct event *ev)
|
|
|
|
{
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_assert_is_setup_(ev);
|
2010-01-19 14:01:36 -05:00
|
|
|
return ev->ev_arg;
|
|
|
|
}
|
|
|
|
|
2012-05-08 17:46:46 -04:00
|
|
|
int
|
|
|
|
event_get_priority(const struct event *ev)
|
|
|
|
{
|
|
|
|
event_debug_assert_is_setup_(ev);
|
|
|
|
return ev->ev_pri;
|
|
|
|
}
|
|
|
|
|
2002-04-09 15:14:06 +00:00
|
|
|
int
|
2008-05-17 02:14:17 +00:00
|
|
|
event_add(struct event *ev, const struct timeval *tv)
|
2008-03-02 21:18:33 +00:00
|
|
|
{
|
|
|
|
int res;
|
|
|
|
|
2010-08-23 12:01:45 -04:00
|
|
|
if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
|
|
|
|
event_warnx("%s: event has no event_base set.", __func__);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
|
2008-05-05 15:45:30 +00:00
|
|
|
|
2012-06-28 12:00:57 -04:00
|
|
|
res = event_add_nolock_(ev, tv, 0);
|
2008-03-02 21:18:33 +00:00
|
|
|
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
|
2008-03-02 21:18:33 +00:00
|
|
|
|
|
|
|
return (res);
|
|
|
|
}
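/*
 * Illustrative sketch, not part of event.c: the timeout handed to
 * event_add() is a relative interval, and re-adding an already-pending
 * event reschedules the timeout rather than failing.  The interval is
 * hypothetical.
 */
static int
reset_idle_timer(struct event *idle_ev)
{
	struct timeval thirty_sec = { 30, 0 };
	return event_add(idle_ev, &thirty_sec);	/* restarts the countdown */
}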
|
|
|
|
|
2010-03-11 00:38:46 -05:00
|
|
|
/* Helper callback: wake an event_base from another thread. This version
|
|
|
|
* works by writing a byte to one end of a socketpair, so that the event_base
|
|
|
|
* listening on the other end will wake up as the corresponding event
|
|
|
|
* triggers */
|
2009-01-19 01:34:14 +00:00
|
|
|
static int
|
2009-01-19 20:37:24 +00:00
|
|
|
evthread_notify_base_default(struct event_base *base)
|
2009-01-19 01:34:14 +00:00
|
|
|
{
|
|
|
|
char buf[1];
|
|
|
|
int r;
|
2009-01-19 20:22:47 +00:00
|
|
|
buf[0] = (char) 0;
|
2011-05-25 19:50:56 -04:00
|
|
|
#ifdef _WIN32
|
2009-01-19 01:34:14 +00:00
|
|
|
r = send(base->th_notify_fd[1], buf, 1, 0);
|
2009-01-19 19:46:03 +00:00
|
|
|
#else
|
|
|
|
r = write(base->th_notify_fd[1], buf, 1);
|
|
|
|
#endif
|
2013-01-11 16:37:34 -08:00
|
|
|
return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
|
2009-01-19 01:34:14 +00:00
|
|
|
}
|
|
|
|
|
2012-04-03 16:15:49 -04:00
|
|
|
#ifdef EVENT__HAVE_EVENTFD
|
2010-03-11 00:38:46 -05:00
|
|
|
/* Helper callback: wake an event_base from another thread. This version
|
|
|
|
* assumes that you have a working eventfd() implementation. */
|
2009-01-19 20:37:24 +00:00
|
|
|
static int
|
|
|
|
evthread_notify_base_eventfd(struct event_base *base)
|
|
|
|
{
|
|
|
|
ev_uint64_t msg = 1;
|
|
|
|
int r;
|
|
|
|
do {
|
|
|
|
r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
|
|
|
|
} while (r < 0 && errno == EAGAIN);
|
|
|
|
|
|
|
|
return (r < 0) ? -1 : 0;
|
|
|
|
}
|
2012-04-03 16:15:49 -04:00
|
|
|
#endif
|
|
|
|
|
2009-01-19 20:37:24 +00:00
|
|
|
|
2010-03-11 00:38:46 -05:00
|
|
|
/** Tell the thread currently running the event_loop for base (if any) that it
|
|
|
|
* needs to stop waiting in its dispatch function (if it is) and process all
|
2012-04-06 11:05:35 -04:00
|
|
|
* active callbacks. */
|
2009-01-19 20:37:24 +00:00
|
|
|
static int
|
|
|
|
evthread_notify_base(struct event_base *base)
|
|
|
|
{
|
2010-09-08 13:22:55 -04:00
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
2009-01-19 20:37:24 +00:00
|
|
|
if (!base->th_notify_fn)
|
|
|
|
return -1;
|
2010-09-08 13:22:55 -04:00
|
|
|
if (base->is_notify_pending)
|
|
|
|
return 0;
|
|
|
|
base->is_notify_pending = 1;
|
2009-01-19 20:37:24 +00:00
|
|
|
return base->th_notify_fn(base);
|
|
|
|
}
|
|
|
|
|
2012-11-16 16:15:03 -05:00
|
|
|
/* Implementation function to remove a timeout on a currently pending event.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
event_remove_timer_nolock_(struct event *ev)
|
|
|
|
{
|
|
|
|
struct event_base *base = ev->ev_base;
|
|
|
|
|
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
|
|
|
event_debug_assert_is_setup_(ev);
|
|
|
|
|
|
|
|
event_debug(("event_remove_timer_nolock: event: %p", ev));
|
|
|
|
|
|
|
|
/* If it's not pending on a timeout, we don't need to do anything. */
|
|
|
|
if (ev->ev_flags & EVLIST_TIMEOUT) {
|
|
|
|
event_queue_remove_timeout(base, ev);
|
2013-02-12 15:10:50 -05:00
|
|
|
evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
|
2012-11-16 16:15:03 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
event_remove_timer(struct event *ev)
|
|
|
|
{
|
|
|
|
int res;
|
|
|
|
|
|
|
|
if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
|
|
|
|
event_warnx("%s: event has no event_base set.", __func__);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
|
|
|
|
|
|
|
|
res = event_remove_timer_nolock_(ev);
|
|
|
|
|
|
|
|
EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
|
|
|
|
|
|
|
|
return (res);
|
|
|
|
}
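/*
 * Illustrative sketch, not part of event.c: event_remove_timer() strips only
 * the timeout; a pending EV_READ/EV_WRITE registration stays in place, so
 * the event can still fire for I/O but will no longer time out.
 */
static void
disable_read_timeout(struct event *read_ev)
{
	event_remove_timer(read_ev);
}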
|
|
|
|
|
2010-03-11 00:38:46 -05:00
|
|
|
/* Implementation function to add an event. Works just like event_add,
|
|
|
|
* except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
|
|
|
|
* we treat tv as an absolute time, not as an interval to add to the current
|
|
|
|
* time */
|
2012-06-28 12:00:57 -04:00
|
|
|
int
|
|
|
|
event_add_nolock_(struct event *ev, const struct timeval *tv,
|
2009-11-09 18:30:57 +00:00
|
|
|
int tv_is_absolute)
|
2002-04-09 15:14:06 +00:00
|
|
|
{
|
2004-11-25 09:50:18 +00:00
|
|
|
struct event_base *base = ev->ev_base;
|
2008-03-02 21:18:33 +00:00
|
|
|
int res = 0;
|
2009-10-02 03:03:58 +00:00
|
|
|
int notify = 0;
|
2004-11-25 09:50:18 +00:00
|
|
|
|
2010-02-23 15:14:57 -05:00
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_assert_is_setup_(ev);
|
2010-01-22 00:34:37 -05:00
|
|
|
|
2005-03-29 07:03:10 +00:00
|
|
|
event_debug((
|
2014-01-17 23:20:42 -02:00
|
|
|
"event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
|
2002-04-09 15:14:06 +00:00
|
|
|
ev,
|
2012-11-01 17:38:34 -04:00
|
|
|
EV_SOCK_ARG(ev->ev_fd),
|
2002-04-09 15:14:06 +00:00
|
|
|
ev->ev_events & EV_READ ? "EV_READ " : " ",
|
|
|
|
ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
|
2014-01-17 23:20:42 -02:00
|
|
|
ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
|
2002-04-09 15:14:06 +00:00
|
|
|
tv ? "EV_TIMEOUT " : " ",
|
|
|
|
ev->ev_callback));
|
|
|
|
|
2009-10-26 20:00:43 +00:00
|
|
|
EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2013-03-28 14:13:19 -04:00
|
|
|
if (ev->ev_flags & EVLIST_FINALIZING) {
|
|
|
|
/* XXXX debug */
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
2008-07-25 01:29:54 +00:00
|
|
|
/*
|
|
|
|
	 * prepare for timeout insertion further below; if we get a
|
|
|
|
* failure on any step, we should not change any state.
|
|
|
|
*/
|
|
|
|
if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
|
2012-02-29 15:07:33 -05:00
|
|
|
if (min_heap_reserve_(&base->timeheap,
|
|
|
|
1 + min_heap_size_(&base->timeheap)) == -1)
|
2008-07-25 01:29:54 +00:00
|
|
|
return (-1); /* ENOMEM == errno */
|
|
|
|
}
|
|
|
|
|
2010-07-05 14:39:39 -04:00
|
|
|
/* If the main thread is currently executing a signal event's
|
|
|
|
* callback, and we are not the main thread, then we want to wait
|
|
|
|
* until the callback is done before we mess with the event, or else
|
|
|
|
* we can race on ev_ncalls and ev_pncalls below. */
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifndef EVENT__DISABLE_THREAD_SUPPORT
|
2012-04-05 12:38:18 -04:00
|
|
|
if (base->current_event == event_to_event_callback(ev) &&
|
|
|
|
(ev->ev_events & EV_SIGNAL)
|
2010-08-17 13:18:18 -04:00
|
|
|
&& !EVBASE_IN_THREAD(base)) {
|
|
|
|
++base->current_event_waiters;
|
|
|
|
EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
|
|
|
|
}
|
|
|
|
#endif
|
2010-07-05 14:39:39 -04:00
|
|
|
|
2014-01-17 23:20:42 -02:00
|
|
|
if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
|
2012-04-06 03:00:40 -04:00
|
|
|
!(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
|
2014-01-17 23:20:42 -02:00
|
|
|
if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
|
2012-02-29 15:07:33 -05:00
|
|
|
res = evmap_io_add_(base, ev->ev_fd, ev);
|
2008-12-23 16:37:01 +00:00
|
|
|
else if (ev->ev_events & EV_SIGNAL)
|
2012-02-29 15:07:33 -05:00
|
|
|
res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
|
2008-07-25 01:29:54 +00:00
|
|
|
if (res != -1)
|
2011-02-23 00:59:20 -05:00
|
|
|
event_queue_insert_inserted(base, ev);
|
2009-10-02 03:03:58 +00:00
|
|
|
if (res == 1) {
|
|
|
|
/* evmap says we need to notify the main thread. */
|
|
|
|
notify = 1;
|
|
|
|
res = 0;
|
|
|
|
}
|
2008-07-25 01:29:54 +00:00
|
|
|
}
|
|
|
|
|
2009-01-27 21:10:31 +00:00
|
|
|
/*
|
2009-10-16 13:19:57 +00:00
|
|
|
* we should change the timeout state only if the previous event
|
2008-07-25 01:29:54 +00:00
|
|
|
* addition succeeded.
|
|
|
|
*/
|
|
|
|
if (res != -1 && tv != NULL) {
|
2002-04-09 15:14:06 +00:00
|
|
|
struct timeval now;
|
2009-11-09 17:16:30 +00:00
|
|
|
int common_timeout;
|
2012-03-26 23:28:21 -04:00
|
|
|
#ifdef USE_REINSERT_TIMEOUT
|
2012-03-23 18:42:56 -04:00
|
|
|
int was_common;
|
|
|
|
int old_timeout_idx;
|
2012-03-26 23:28:21 -04:00
|
|
|
#endif
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2009-01-22 02:33:38 +00:00
|
|
|
/*
|
|
|
|
* for persistent timeout events, we remember the
|
|
|
|
* timeout value and re-add the event.
|
2009-11-09 18:30:57 +00:00
|
|
|
*
|
|
|
|
* If tv_is_absolute, this was already set.
|
2009-01-22 02:33:38 +00:00
|
|
|
*/
|
2012-04-05 12:38:18 -04:00
|
|
|
if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
|
2009-01-22 02:33:38 +00:00
|
|
|
ev->ev_io_timeout = *tv;
|
|
|
|
|
2012-03-26 23:28:21 -04:00
|
|
|
#ifndef USE_REINSERT_TIMEOUT
|
|
|
|
if (ev->ev_flags & EVLIST_TIMEOUT) {
|
|
|
|
event_queue_remove_timeout(base, ev);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2003-04-10 19:14:03 +00:00
|
|
|
/* Check if it is active due to a timeout. Rescheduling
|
|
|
|
* this timeout before the callback can be executed
|
|
|
|
* removes it from the active list. */
|
2007-12-24 22:49:30 +00:00
|
|
|
if ((ev->ev_flags & EVLIST_ACTIVE) &&
|
|
|
|
(ev->ev_res & EV_TIMEOUT)) {
|
2008-07-11 15:49:04 +00:00
|
|
|
if (ev->ev_events & EV_SIGNAL) {
|
2008-05-03 21:37:33 +00:00
|
|
|
				/* See if we are just actively executing
|
|
|
|
* this event in a loop
|
|
|
|
*/
|
|
|
|
if (ev->ev_ncalls && ev->ev_pncalls) {
|
|
|
|
/* Abort loop */
|
|
|
|
*ev->ev_pncalls = 0;
|
|
|
|
}
|
2003-04-10 19:14:03 +00:00
|
|
|
}
|
2009-01-27 21:10:31 +00:00
|
|
|
|
2012-04-05 12:38:18 -04:00
|
|
|
event_queue_remove_active(base, event_to_event_callback(ev));
|
2003-04-10 19:14:03 +00:00
|
|
|
}
|
|
|
|
|
2008-05-03 18:23:44 +00:00
|
|
|
gettime(base, &now);
|
2009-11-09 18:30:57 +00:00
|
|
|
|
2009-11-09 17:16:30 +00:00
|
|
|
common_timeout = is_common_timeout(tv, base);
|
2012-03-26 23:28:21 -04:00
|
|
|
#ifdef USE_REINSERT_TIMEOUT
|
2012-03-23 18:42:56 -04:00
|
|
|
was_common = is_common_timeout(&ev->ev_timeout, base);
|
|
|
|
old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
|
2012-03-26 23:28:21 -04:00
|
|
|
#endif
|
2012-03-23 18:42:56 -04:00
|
|
|
|
2009-11-09 18:30:57 +00:00
|
|
|
if (tv_is_absolute) {
|
|
|
|
ev->ev_timeout = *tv;
|
|
|
|
} else if (common_timeout) {
|
2009-11-09 17:16:30 +00:00
|
|
|
struct timeval tmp = *tv;
|
|
|
|
tmp.tv_usec &= MICROSECONDS_MASK;
|
|
|
|
evutil_timeradd(&now, &tmp, &ev->ev_timeout);
|
|
|
|
ev->ev_timeout.tv_usec |=
|
|
|
|
(tv->tv_usec & ~MICROSECONDS_MASK);
|
|
|
|
} else {
|
|
|
|
evutil_timeradd(&now, tv, &ev->ev_timeout);
|
|
|
|
}
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2005-03-29 07:03:10 +00:00
|
|
|
event_debug((
|
2011-08-11 13:25:24 -05:00
|
|
|
"event_add: event %p, timeout in %d seconds %d useconds, call %p",
|
|
|
|
ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2012-03-26 23:28:21 -04:00
|
|
|
#ifdef USE_REINSERT_TIMEOUT
|
2012-03-23 18:42:56 -04:00
|
|
|
event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
|
2012-03-26 23:28:21 -04:00
|
|
|
#else
|
|
|
|
event_queue_insert_timeout(base, ev);
|
|
|
|
#endif
|
2011-02-02 20:05:41 -05:00
|
|
|
|
2009-11-09 17:16:30 +00:00
|
|
|
if (common_timeout) {
|
|
|
|
struct common_timeout_list *ctl =
|
|
|
|
get_common_timeout_list(base, &ev->ev_timeout);
|
|
|
|
if (ev == TAILQ_FIRST(&ctl->events)) {
|
|
|
|
common_timeout_schedule(ctl, &now, ev);
|
|
|
|
}
|
|
|
|
} else {
|
2013-03-05 11:29:33 -08:00
|
|
|
struct event* top = NULL;
|
2009-11-09 17:16:30 +00:00
|
|
|
/* See if the earliest timeout is now earlier than it
|
|
|
|
* was before: if so, we will need to tell the main
|
2013-03-05 11:29:33 -08:00
|
|
|
* thread to wake up earlier than it would otherwise.
|
|
|
|
* We double check the timeout of the top element to
|
|
|
|
* handle time distortions due to system suspension.
|
|
|
|
*/
|
2012-02-29 15:07:33 -05:00
|
|
|
if (min_heap_elt_is_top_(ev))
|
2009-11-09 17:16:30 +00:00
|
|
|
notify = 1;
|
2013-03-05 11:29:33 -08:00
|
|
|
else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
|
|
|
|
evutil_timercmp(&top->ev_timeout, &now, <))
|
|
|
|
notify = 1;
|
2009-10-02 03:03:58 +00:00
|
|
|
}
|
2002-04-09 15:14:06 +00:00
|
|
|
}
|
|
|
|
|
2008-03-02 21:18:33 +00:00
|
|
|
/* if we are not in the right thread, we need to wake up the loop */
|
2010-09-08 13:02:58 -04:00
|
|
|
if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
|
2009-01-19 20:22:47 +00:00
|
|
|
evthread_notify_base(base);
|
2008-03-02 21:18:33 +00:00
|
|
|
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_note_add_(ev);
|
2010-01-22 00:34:37 -05:00
|
|
|
|
2008-03-02 21:18:33 +00:00
|
|
|
return (res);
|
2002-04-09 15:14:06 +00:00
|
|
|
}
|
|
|
|
|
2013-03-28 14:13:19 -04:00
|
|
|
static int
|
|
|
|
event_del_(struct event *ev, int blocking)
|
2008-03-02 21:18:33 +00:00
|
|
|
{
|
|
|
|
int res;
|
|
|
|
|
2010-08-23 12:01:45 -04:00
|
|
|
if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
|
|
|
|
event_warnx("%s: event has no event_base set.", __func__);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
|
2009-01-27 21:10:31 +00:00
|
|
|
|
2013-03-28 14:13:19 -04:00
|
|
|
res = event_del_nolock_(ev, blocking);
|
2008-03-02 21:18:33 +00:00
|
|
|
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
|
2008-03-02 21:18:33 +00:00
|
|
|
|
|
|
|
return (res);
|
|
|
|
}
|
|
|
|
|
2012-06-28 12:00:57 -04:00
|
|
|
int
|
2013-03-28 14:13:19 -04:00
|
|
|
event_del(struct event *ev)
|
|
|
|
{
|
|
|
|
return event_del_(ev, EVENT_DEL_AUTOBLOCK);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
event_del_block(struct event *ev)
|
|
|
|
{
|
|
|
|
return event_del_(ev, EVENT_DEL_BLOCK);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
event_del_noblock(struct event *ev)
|
|
|
|
{
|
|
|
|
return event_del_(ev, EVENT_DEL_NOBLOCK);
|
|
|
|
}
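/*
 * Illustrative sketch, not part of event.c: when another thread releases the
 * callback argument right after deleting an event, event_del_block() ensures
 * the callback is not still running in the loop thread.  Never call it from
 * inside the event's own callback.  The helper name is hypothetical.
 */
static void
stop_and_release(struct event *ev, void *arg_owned_by_caller)
{
	event_del_block(ev);		/* waits for a mid-flight callback */
	free(arg_owned_by_caller);	/* now safe to release */
}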
|
|
|
|
|
2013-04-26 11:36:43 -04:00
|
|
|
/** Helper for event_del: always called with th_base_lock held.
|
|
|
|
*
|
|
|
|
* "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
|
|
|
|
* EVEN_IF_FINALIZING} values. See those for more information.
|
|
|
|
*/
|
2013-03-28 14:13:19 -04:00
|
|
|
int
|
|
|
|
event_del_nolock_(struct event *ev, int blocking)
|
2002-04-09 15:14:06 +00:00
|
|
|
{
|
2004-12-14 03:36:12 +00:00
|
|
|
struct event_base *base;
|
2009-10-02 03:03:58 +00:00
|
|
|
int res = 0, notify = 0;
|
2004-11-25 09:50:18 +00:00
|
|
|
|
2012-11-01 17:38:34 -04:00
|
|
|
event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
|
|
|
|
ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2004-12-14 03:36:12 +00:00
|
|
|
/* An event without a base has not been added */
|
|
|
|
if (ev->ev_base == NULL)
|
2005-02-22 15:47:53 +00:00
|
|
|
return (-1);
|
2004-12-14 03:36:12 +00:00
|
|
|
|
2010-02-23 15:14:57 -05:00
|
|
|
EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
|
|
|
|
|
2013-03-28 14:13:19 -04:00
|
|
|
if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
|
|
|
|
if (ev->ev_flags & EVLIST_FINALIZING) {
|
|
|
|
/* XXXX Debug */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-07-14 16:54:48 +00:00
|
|
|
/* If the main thread is currently executing this event's callback,
|
|
|
|
* and we are not the main thread, then we want to wait until the
|
|
|
|
* callback is done before we start removing the event. That way,
|
|
|
|
* when this function returns, it will be safe to free the
|
|
|
|
* user-supplied argument. */
|
2004-12-14 03:36:12 +00:00
|
|
|
base = ev->ev_base;
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifndef EVENT__DISABLE_THREAD_SUPPORT
|
2013-03-28 14:13:19 -04:00
|
|
|
if (blocking != EVENT_DEL_NOBLOCK &&
|
|
|
|
base->current_event == event_to_event_callback(ev) &&
|
|
|
|
!EVBASE_IN_THREAD(base) &&
|
|
|
|
(blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
|
2010-08-17 13:18:18 -04:00
|
|
|
++base->current_event_waiters;
|
|
|
|
EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
|
|
|
|
}
|
|
|
|
#endif
|
2004-12-14 03:36:12 +00:00
|
|
|
|
2009-10-26 20:00:43 +00:00
|
|
|
EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2002-04-10 03:15:19 +00:00
|
|
|
	/* See if we are just actively executing this event in a loop */
|
2008-07-11 15:49:04 +00:00
|
|
|
if (ev->ev_events & EV_SIGNAL) {
|
2008-05-03 21:37:33 +00:00
|
|
|
if (ev->ev_ncalls && ev->ev_pncalls) {
|
|
|
|
/* Abort loop */
|
|
|
|
*ev->ev_pncalls = 0;
|
|
|
|
}
|
2002-04-10 03:15:19 +00:00
|
|
|
}
|
|
|
|
|
2009-10-02 03:03:58 +00:00
|
|
|
if (ev->ev_flags & EVLIST_TIMEOUT) {
|
|
|
|
/* NOTE: We never need to notify the main thread because of a
|
|
|
|
* deleted timeout event: all that could happen if we don't is
|
|
|
|
* that the dispatch loop might wake up too early. But the
|
|
|
|
* point of notifying the main thread _is_ to wake up the
|
|
|
|
* dispatch loop early anyway, so we wouldn't gain anything by
|
|
|
|
* doing it.
|
|
|
|
*/
|
2011-02-23 00:59:20 -05:00
|
|
|
event_queue_remove_timeout(base, ev);
|
2009-10-02 03:03:58 +00:00
|
|
|
}
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2007-12-24 22:49:30 +00:00
|
|
|
if (ev->ev_flags & EVLIST_ACTIVE)
|
2012-04-05 12:38:18 -04:00
|
|
|
event_queue_remove_active(base, event_to_event_callback(ev));
|
2012-04-06 03:00:40 -04:00
|
|
|
else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
|
|
|
|
event_queue_remove_active_later(base, event_to_event_callback(ev));
|
2002-04-09 15:14:06 +00:00
|
|
|
|
|
|
|
if (ev->ev_flags & EVLIST_INSERTED) {
|
2011-02-23 00:59:20 -05:00
|
|
|
event_queue_remove_inserted(base, ev);
|
2014-01-17 23:20:42 -02:00
|
|
|
if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
|
2012-02-29 15:07:33 -05:00
|
|
|
res = evmap_io_del_(base, ev->ev_fd, ev);
|
2008-12-23 16:37:01 +00:00
|
|
|
else
|
2012-02-29 15:07:33 -05:00
|
|
|
res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
|
2009-10-02 03:03:58 +00:00
|
|
|
if (res == 1) {
|
|
|
|
/* evmap says we need to notify the main thread. */
|
|
|
|
notify = 1;
|
|
|
|
res = 0;
|
|
|
|
}
|
2002-04-09 15:14:06 +00:00
|
|
|
}
|
|
|
|
|
2008-03-02 21:18:33 +00:00
|
|
|
/* if we are not in the right thread, we need to wake up the loop */
|
2010-09-08 13:02:58 -04:00
|
|
|
if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
|
2009-01-19 20:22:47 +00:00
|
|
|
evthread_notify_base(base);
|
2009-01-19 01:34:14 +00:00
|
|
|
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_note_del_(ev);
|
2010-01-22 00:34:37 -05:00
|
|
|
|
2008-03-02 21:18:33 +00:00
|
|
|
return (res);
|
2002-04-09 15:14:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2002-04-10 02:10:47 +00:00
|
|
|
event_active(struct event *ev, int res, short ncalls)
|
2002-04-09 15:14:06 +00:00
|
|
|
{
|
2010-08-23 12:01:45 -04:00
|
|
|
if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
|
|
|
|
event_warnx("%s: event has no event_base set.", __func__);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
|
2008-03-02 21:18:33 +00:00
|
|
|
|
2012-02-29 15:07:32 -05:00
|
|
|
event_debug_assert_is_setup_(ev);
|
2010-01-22 00:34:37 -05:00
|
|
|
|
2012-02-29 15:07:33 -05:00
|
|
|
event_active_nolock_(ev, res, ncalls);
|
2009-01-19 01:34:14 +00:00
|
|
|
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
|
2008-03-02 21:18:33 +00:00
|
|
|
}
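/*
 * Illustrative sketch, not part of event.c: event_active() injects a result
 * without the fd or timer firing; it takes the base lock and wakes the loop,
 * so it may be called from another thread.  ncalls only matters for signal
 * events, so 0 is passed here.
 */
static void
poke_event(struct event *ev)
{
	event_active(ev, EV_WRITE, 0);	/* callback will see EV_WRITE */
}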
|
|
|
|
|
|
|
|
|
2009-10-27 05:16:32 +00:00
|
|
|
void
|
2012-02-29 15:07:33 -05:00
|
|
|
event_active_nolock_(struct event *ev, int res, short ncalls)
|
2008-03-02 21:18:33 +00:00
|
|
|
{
|
|
|
|
struct event_base *base;
|
|
|
|
|
2012-11-01 17:38:34 -04:00
|
|
|
event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
|
|
|
|
ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
|
2010-09-21 22:44:39 -04:00
|
|
|
|
2012-04-06 03:00:40 -04:00
|
|
|
base = ev->ev_base;
|
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
2010-09-21 22:44:39 -04:00
|
|
|
|
2013-03-28 14:13:19 -04:00
|
|
|
if (ev->ev_flags & EVLIST_FINALIZING) {
|
|
|
|
/* XXXX debug */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-04-06 03:00:40 -04:00
|
|
|
switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
|
|
|
|
default:
|
|
|
|
case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
|
|
|
|
EVUTIL_ASSERT(0);
|
|
|
|
break;
|
|
|
|
case EVLIST_ACTIVE:
|
|
|
|
/* We get different kinds of events, add them together */
|
2007-12-24 22:49:30 +00:00
|
|
|
ev->ev_res |= res;
|
2002-06-11 18:38:37 +00:00
|
|
|
return;
|
2012-04-06 03:00:40 -04:00
|
|
|
case EVLIST_ACTIVE_LATER:
|
|
|
|
ev->ev_res |= res;
|
|
|
|
break;
|
|
|
|
case 0:
|
|
|
|
ev->ev_res = res;
|
|
|
|
break;
|
2007-12-24 22:49:30 +00:00
|
|
|
}
|
2002-06-11 18:38:37 +00:00
|
|
|
|
2012-04-30 17:30:48 -04:00
|
|
|
if (ev->ev_pri < base->event_running_priority)
|
|
|
|
base->event_continue = 1;
|
|
|
|
|
2008-07-11 15:49:04 +00:00
|
|
|
if (ev->ev_events & EV_SIGNAL) {
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifndef EVENT__DISABLE_THREAD_SUPPORT
|
2012-04-05 12:38:18 -04:00
|
|
|
if (base->current_event == event_to_event_callback(ev) &&
|
|
|
|
!EVBASE_IN_THREAD(base)) {
|
2010-08-17 13:18:18 -04:00
|
|
|
++base->current_event_waiters;
|
|
|
|
EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
|
|
|
|
}
|
|
|
|
#endif
|
2008-05-03 21:37:33 +00:00
|
|
|
ev->ev_ncalls = ncalls;
|
|
|
|
ev->ev_pncalls = NULL;
|
|
|
|
}
|
|
|
|
|
2012-04-05 12:38:18 -04:00
|
|
|
event_callback_activate_nolock_(base, event_to_event_callback(ev));
|
|
|
|
}
|
|
|
|
|
2012-04-06 03:00:40 -04:00
|
|
|
void
|
|
|
|
event_active_later_(struct event *ev, int res)
|
|
|
|
{
|
|
|
|
EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
|
|
|
|
event_active_later_nolock_(ev, res);
|
|
|
|
EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
event_active_later_nolock_(struct event *ev, int res)
|
|
|
|
{
|
|
|
|
struct event_base *base = ev->ev_base;
|
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
|
|
|
|
|
|
|
if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
|
|
|
|
/* We get different kinds of events, add them together */
|
|
|
|
ev->ev_res |= res;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
ev->ev_res = res;
|
|
|
|
|
|
|
|
event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
|
|
|
|
}
|
|
|
|
|
2012-04-06 04:33:19 -04:00
|
|
|
int
|
|
|
|
event_callback_activate_(struct event_base *base,
|
|
|
|
struct event_callback *evcb)
|
|
|
|
{
|
|
|
|
int r;
|
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
|
|
|
r = event_callback_activate_nolock_(base, evcb);
|
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2012-04-05 12:38:18 -04:00
|
|
|
event_callback_activate_nolock_(struct event_base *base,
|
|
|
|
struct event_callback *evcb)
|
|
|
|
{
|
2012-04-06 04:33:19 -04:00
|
|
|
int r = 1;
|
|
|
|
|
2013-03-28 14:13:19 -04:00
|
|
|
if (evcb->evcb_flags & EVLIST_FINALIZING)
|
|
|
|
return 0;
|
|
|
|
|
2012-04-06 04:33:19 -04:00
|
|
|
switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
|
|
|
|
default:
|
|
|
|
EVUTIL_ASSERT(0);
|
|
|
|
case EVLIST_ACTIVE_LATER:
|
2012-04-06 03:00:40 -04:00
|
|
|
event_queue_remove_active_later(base, evcb);
|
2012-04-06 04:33:19 -04:00
|
|
|
r = 0;
|
|
|
|
break;
|
|
|
|
case EVLIST_ACTIVE:
|
|
|
|
return 0;
|
|
|
|
case 0:
|
|
|
|
break;
|
|
|
|
}
|
2012-04-06 03:00:40 -04:00
|
|
|
|
2012-04-05 12:38:18 -04:00
|
|
|
event_queue_insert_active(base, evcb);
|
2010-12-01 21:28:03 -05:00
|
|
|
|
|
|
|
if (EVBASE_NEED_NOTIFY(base))
|
|
|
|
evthread_notify_base(base);
|
2012-04-06 04:33:19 -04:00
|
|
|
|
|
|
|
return r;
|
2002-04-09 15:14:06 +00:00
|
|
|
}
|
|
|
|
|
2015-03-24 17:29:40 -07:00
|
|
|
int
|
2012-04-06 03:00:40 -04:00
|
|
|
event_callback_activate_later_nolock_(struct event_base *base,
|
|
|
|
struct event_callback *evcb)
|
|
|
|
{
|
|
|
|
if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
|
2015-03-24 17:29:40 -07:00
|
|
|
return 0;
|
2012-04-06 03:00:40 -04:00
|
|
|
|
|
|
|
event_queue_insert_active_later(base, evcb);
|
|
|
|
if (EVBASE_NEED_NOTIFY(base))
|
|
|
|
evthread_notify_base(base);
|
2015-03-24 17:45:52 -07:00
|
|
|
return 1;
|
2012-04-06 03:00:40 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
event_callback_init_(struct event_base *base,
|
|
|
|
struct event_callback *cb)
|
|
|
|
{
|
|
|
|
memset(cb, 0, sizeof(*cb));
|
|
|
|
cb->evcb_pri = base->nactivequeues - 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
event_callback_cancel_(struct event_base *base,
|
|
|
|
struct event_callback *evcb)
|
|
|
|
{
|
|
|
|
int r;
|
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
2013-03-28 14:13:19 -04:00
|
|
|
r = event_callback_cancel_nolock_(base, evcb, 0);
|
2012-04-06 03:00:40 -04:00
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
event_callback_cancel_nolock_(struct event_base *base,
|
2013-03-28 14:13:19 -04:00
|
|
|
struct event_callback *evcb, int even_if_finalizing)
|
2012-04-06 03:00:40 -04:00
|
|
|
{
|
2013-03-28 14:13:19 -04:00
|
|
|
if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
|
|
|
|
return 0;
|
|
|
|
|
2012-04-06 03:00:40 -04:00
|
|
|
if (evcb->evcb_flags & EVLIST_INIT)
|
2013-03-28 14:13:19 -04:00
|
|
|
return event_del_nolock_(event_callback_to_event(evcb),
|
|
|
|
even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
|
2012-04-06 03:00:40 -04:00
|
|
|
|
|
|
|
switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
|
|
|
|
default:
|
|
|
|
case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
|
|
|
|
EVUTIL_ASSERT(0);
|
|
|
|
break;
|
|
|
|
case EVLIST_ACTIVE:
|
|
|
|
/* We get different kinds of events, add them together */
|
|
|
|
event_queue_remove_active(base, evcb);
|
|
|
|
return 0;
|
|
|
|
case EVLIST_ACTIVE_LATER:
|
|
|
|
event_queue_remove_active_later(base, evcb);
|
|
|
|
break;
|
|
|
|
case 0:
|
|
|
|
break;
|
|
|
|
}
|
Restore our priority-inversion-prevention code with deferreds
Back when deferred_cb stuff had its own queue, the queue was always
executed, but we never ran more than 16 callbacks per iteration.
That made for two problems:
1: Because deferred_cb stuff would always run, and had no priority,
it could cause priority inversion.
2: It doesn't respect the max_dispatch_interval code.
Then, when I refactored deferred_cb to be a special case of
event_callback, that solved the above issues, but made for two more
issues:
3: Because deferred_cb stuff would always get the default priority,
it could cause low-priority bufferevents to get too much priority.
4: With code like bufferevent_pair, it's easy to get into a
situation where two deferreds keep adding one another, preventing
the event loop from ever actually scanning for more events.
This commit fixes the above by giving deferreds a better notion of
priorities, and by limiting the number of deferreds that can be
added to the _current_ loop iteration's active queues. (Extra
deferreds are put into the active_later state.)
That isn't an all-purpose priority inversion solution, of course: for
that, you may need to mess around with max_dispatch_interval.
2012-05-09 11:06:06 -04:00
|
|
|
|
2012-04-06 03:00:40 -04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-04-10 14:22:33 +00:00
|
|
|
void
|
2012-05-09 11:06:06 -04:00
|
|
|
event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
|
2009-04-10 14:22:33 +00:00
|
|
|
{
|
2012-04-06 04:33:19 -04:00
|
|
|
memset(cb, 0, sizeof(*cb));
|
|
|
|
cb->evcb_cb_union.evcb_selfcb = fn;
|
|
|
|
cb->evcb_arg = arg;
|
2012-05-09 11:06:06 -04:00
|
|
|
cb->evcb_pri = priority;
|
2012-04-06 04:33:19 -04:00
|
|
|
cb->evcb_closure = EV_CLOSURE_CB_SELF;
|
2009-04-10 14:22:33 +00:00
|
|
|
}
|
|
|
|
|
2012-05-09 11:06:06 -04:00
|
|
|
void
|
|
|
|
event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
|
|
|
|
{
|
|
|
|
cb->evcb_pri = priority;
|
|
|
|
}
|
|
|
|
|
2009-04-10 14:22:33 +00:00
|
|
|
void
|
2012-04-06 04:33:19 -04:00
|
|
|
event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
|
2009-04-10 14:22:33 +00:00
|
|
|
{
|
2012-04-06 04:33:19 -04:00
|
|
|
if (!base)
|
|
|
|
base = current_base;
|
|
|
|
event_callback_cancel_(base, cb);
|
2009-04-10 14:22:33 +00:00
|
|
|
}
|
|
|
|
|
2012-05-09 11:06:06 -04:00
|
|
|
#define MAX_DEFERREDS_QUEUED 32
|
2012-04-06 04:33:19 -04:00
|
|
|
int
|
|
|
|
event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
|
2009-04-10 14:22:33 +00:00
|
|
|
{
|
2012-05-09 11:06:06 -04:00
|
|
|
int r = 1;
|
2012-04-06 04:33:19 -04:00
|
|
|
if (!base)
|
|
|
|
base = current_base;
|
2012-05-09 11:06:06 -04:00
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
|
|
|
if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
|
2015-03-24 17:29:40 -07:00
|
|
|
r = event_callback_activate_later_nolock_(base, cb);
|
2012-05-09 11:06:06 -04:00
|
|
|
} else {
|
|
|
|
r = event_callback_activate_nolock_(base, cb);
|
2015-03-24 17:29:40 -07:00
|
|
|
if (r) {
|
|
|
|
++base->n_deferreds_queued;
|
|
|
|
}
|
2012-05-09 11:06:06 -04:00
|
|
|
}
|
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
|
|
|
return r;
|
2009-04-10 14:22:33 +00:00
|
|
|
}
|
|
|
|
|
2007-07-30 22:41:00 +00:00
|
|
|
static int
|
|
|
|
timeout_next(struct event_base *base, struct timeval **tv_p)
|
2002-04-09 15:14:06 +00:00
|
|
|
{
|
2009-10-21 03:54:00 +00:00
|
|
|
/* Caller must hold th_base_lock */
|
2002-04-09 15:14:06 +00:00
|
|
|
struct timeval now;
|
|
|
|
struct event *ev;
|
2007-07-30 22:41:00 +00:00
|
|
|
struct timeval *tv = *tv_p;
|
2008-03-02 21:18:33 +00:00
|
|
|
int res = 0;
|
|
|
|
|
2012-02-29 15:07:33 -05:00
|
|
|
ev = min_heap_top_(&base->timeheap);
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2008-03-02 21:18:33 +00:00
|
|
|
if (ev == NULL) {
|
2007-07-30 22:41:00 +00:00
|
|
|
/* if no time-based events are active, wait for I/O */
|
|
|
|
*tv_p = NULL;
|
2008-03-02 21:18:33 +00:00
|
|
|
goto out;
|
2002-04-09 15:14:06 +00:00
|
|
|
}
|
|
|
|
|
2008-05-03 18:23:44 +00:00
|
|
|
if (gettime(base, &now) == -1) {
|
2008-03-02 21:18:33 +00:00
|
|
|
res = -1;
|
|
|
|
goto out;
|
|
|
|
}
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2007-11-07 06:01:57 +00:00
|
|
|
if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
|
|
|
|
evutil_timerclear(tv);
|
2008-03-02 21:18:33 +00:00
|
|
|
goto out;
|
2002-04-09 15:14:06 +00:00
|
|
|
}
|
|
|
|
|
2007-11-07 06:01:57 +00:00
|
|
|
evutil_timersub(&ev->ev_timeout, &now, tv);
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2009-10-26 20:00:43 +00:00
|
|
|
EVUTIL_ASSERT(tv->tv_sec >= 0);
|
|
|
|
EVUTIL_ASSERT(tv->tv_usec >= 0);
|
2011-08-11 13:25:24 -05:00
|
|
|
event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
|
2008-03-02 21:18:33 +00:00
|
|
|
|
|
|
|
out:
|
|
|
|
return (res);
|
2002-04-09 15:14:06 +00:00
|
|
|
}
|
|
|
|
|
2010-03-11 00:38:46 -05:00
|
|
|
/* Activate every event whose timeout has elapsed. */
|
2009-04-08 16:57:38 +00:00
|
|
|
static void
|
2004-11-25 09:50:18 +00:00
|
|
|
timeout_process(struct event_base *base)
|
2002-04-09 15:14:06 +00:00
|
|
|
{
|
2009-10-21 03:54:00 +00:00
|
|
|
/* Caller must hold lock. */
|
2002-04-09 15:14:06 +00:00
|
|
|
struct timeval now;
|
2007-11-03 18:04:53 +00:00
|
|
|
struct event *ev;
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2012-02-29 15:07:33 -05:00
|
|
|
if (min_heap_empty_(&base->timeheap)) {
|
2007-09-20 18:27:01 +00:00
|
|
|
return;
|
2008-03-02 21:18:33 +00:00
|
|
|
}
|
2007-09-20 18:27:01 +00:00
|
|
|
|
2008-05-03 18:23:44 +00:00
|
|
|
gettime(base, &now);
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2012-02-29 15:07:33 -05:00
|
|
|
while ((ev = min_heap_top_(&base->timeheap))) {
|
2007-11-07 06:01:57 +00:00
|
|
|
if (evutil_timercmp(&ev->ev_timeout, &now, >))
|
2002-04-09 15:14:06 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* delete this event from the I/O queues */
|
2013-03-28 14:13:19 -04:00
|
|
|
event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
|
2002-04-09 15:14:06 +00:00
|
|
|
|
2011-08-11 16:53:01 -05:00
|
|
|
event_debug(("timeout_process: event: %p, call %p",
|
|
|
|
ev, ev->ev_callback));
|
2012-02-29 15:07:33 -05:00
|
|
|
event_active_nolock_(ev, EV_TIMEOUT, 1);
|
2002-04-09 15:14:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-02-23 01:08:54 -05:00
|
|
|
#if (EVLIST_INTERNAL >> 4) != 1
|
|
|
|
#error "Mismatch for value of EVLIST_INTERNAL"
|
|
|
|
#endif
|
2013-12-30 14:06:20 -05:00
|
|
|
|
|
|
|
#ifndef MAX
|
|
|
|
#define MAX(a,b) (((a)>(b))?(a):(b))
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
|
|
|
|
|
2011-02-23 01:08:54 -05:00
|
|
|
/* These are a fancy way to spell
|
2012-04-05 12:38:18 -04:00
|
|
|
if (flags & EVLIST_INTERNAL)
|
2011-02-23 01:08:54 -05:00
|
|
|
base->event_count--/++;
|
|
|
|
*/
|
2012-04-05 12:38:18 -04:00
|
|
|
#define DECR_EVENT_COUNT(base,flags) \
|
|
|
|
((base)->event_count -= (~((flags) >> 4) & 1))
|
2013-12-30 14:06:20 -05:00
|
|
|
#define INCR_EVENT_COUNT(base,flags) do { \
|
|
|
|
((base)->event_count += (~((flags) >> 4) & 1)); \
|
2014-01-05 16:29:52 -05:00
|
|
|
MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \
|
2013-12-30 14:06:20 -05:00
|
|
|
} while (0)
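/*
 * Worked example of the counting trick above (explanatory note): the #if
 * check guarantees EVLIST_INTERNAL is the 0x10 bit, so ((flags) >> 4) & 1
 * is 1 exactly for internal events.  Complementing that bit yields 0 for
 * internal events and 1 for everything else, so internal events are never
 * counted in event_count.
 */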
|
2011-02-23 01:08:54 -05:00
|
|
|
|
2009-07-21 19:20:25 +00:00
|
|
|
static void
|
2011-02-23 00:59:20 -05:00
|
|
|
event_queue_remove_inserted(struct event_base *base, struct event *ev)
|
2002-04-09 15:14:06 +00:00
|
|
|
{
|
2010-02-23 15:14:57 -05:00
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
2011-02-23 00:59:20 -05:00
|
|
|
if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
|
2012-11-02 10:58:02 -04:00
|
|
|
event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
|
|
|
|
ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
|
2009-11-20 15:46:04 -05:00
|
|
|
return;
|
|
|
|
}
|
2012-04-05 12:38:18 -04:00
|
|
|
DECR_EVENT_COUNT(base, ev->ev_flags);
|
2011-02-23 00:59:20 -05:00
|
|
|
ev->ev_flags &= ~EVLIST_INSERTED;
|
|
|
|
}
|
|
|
|
static void
|
2012-04-05 12:38:18 -04:00
|
|
|
event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
|
2011-02-23 00:59:20 -05:00
|
|
|
{
|
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
2012-04-05 12:38:18 -04:00
|
|
|
if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
|
|
|
|
event_errx(1, "%s: %p not on queue %x", __func__,
|
|
|
|
evcb, EVLIST_ACTIVE);
|
2009-11-20 15:46:04 -05:00
|
|
|
return;
|
2002-04-09 15:14:06 +00:00
|
|
|
}
|
2012-04-05 12:38:18 -04:00
|
|
|
DECR_EVENT_COUNT(base, evcb->evcb_flags);
|
|
|
|
evcb->evcb_flags &= ~EVLIST_ACTIVE;
|
2011-02-23 00:59:20 -05:00
|
|
|
base->event_count_active--;
|
2012-04-05 12:38:18 -04:00
|
|
|
|
|
|
|
TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
|
|
|
|
evcb, evcb_active_next);
|
2002-04-09 15:14:06 +00:00
|
|
|
}
|
2011-02-02 20:05:41 -05:00
|
|
|
static void
|
2012-04-06 03:00:40 -04:00
|
|
|
event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
|
|
|
|
{
|
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
|
|
|
if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
|
|
|
|
event_errx(1, "%s: %p not on queue %x", __func__,
|
|
|
|
evcb, EVLIST_ACTIVE_LATER);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
DECR_EVENT_COUNT(base, evcb->evcb_flags);
|
|
|
|
evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
|
|
|
|
base->event_count_active--;
|
|
|
|
|
|
|
|
TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
|
|
|
|
}
|
|
|
|
static void
|
2011-02-23 00:59:20 -05:00
|
|
|
event_queue_remove_timeout(struct event_base *base, struct event *ev)
|
2011-02-02 20:05:41 -05:00
|
|
|
{
|
2011-02-23 00:59:20 -05:00
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
|
|
|
if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
|
2012-11-01 17:38:34 -04:00
|
|
|
event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
|
2012-11-01 18:12:07 -04:00
|
|
|
ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
|
2011-02-02 20:05:41 -05:00
|
|
|
return;
|
|
|
|
}
|
2012-04-05 12:38:18 -04:00
|
|
|
DECR_EVENT_COUNT(base, ev->ev_flags);
|
2011-02-23 00:59:20 -05:00
|
|
|
ev->ev_flags &= ~EVLIST_TIMEOUT;
|
2004-05-24 00:19:52 +00:00
|
|
|
|
2011-02-23 00:59:20 -05:00
|
|
|
if (is_common_timeout(&ev->ev_timeout, base)) {
|
|
|
|
struct common_timeout_list *ctl =
|
|
|
|
get_common_timeout_list(base, &ev->ev_timeout);
|
|
|
|
TAILQ_REMOVE(&ctl->events, ev,
|
|
|
|
ev_timeout_pos.ev_next_with_common_timeout);
|
|
|
|
} else {
|
2012-02-29 15:07:33 -05:00
|
|
|
min_heap_erase_(&base->timeheap, ev);
|
2002-04-09 15:14:06 +00:00
|
|
|
}
|
|
|
|
}
|
2011-02-02 20:05:41 -05:00
|
|
|
|
2012-03-26 23:28:21 -04:00
|
|
|
#ifdef USE_REINSERT_TIMEOUT
|
2011-08-08 16:20:53 -04:00
|
|
|
/* Remove and reinsert 'ev' into the timeout queue. */
|
|
|
|
static void
|
2012-03-23 18:42:56 -04:00
|
|
|
event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
|
|
|
|
int was_common, int is_common, int old_timeout_idx)
|
2011-08-08 16:20:53 -04:00
|
|
|
{
|
2012-03-23 18:42:56 -04:00
|
|
|
struct common_timeout_list *ctl;
|
2011-08-08 16:20:53 -04:00
|
|
|
if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
|
|
|
|
event_queue_insert_timeout(base, ev);
|
|
|
|
return;
|
2011-02-02 20:05:41 -05:00
|
|
|
}
|
|
|
|
|
2012-03-23 18:42:56 -04:00
|
|
|
switch ((was_common<<1) | is_common) {
|
|
|
|
case 3: /* Changing from one common timeout to another */
|
|
|
|
ctl = base->common_timeout_queues[old_timeout_idx];
|
2011-02-02 20:05:41 -05:00
|
|
|
TAILQ_REMOVE(&ctl->events, ev,
|
|
|
|
ev_timeout_pos.ev_next_with_common_timeout);
|
2012-03-23 18:42:56 -04:00
|
|
|
ctl = get_common_timeout_list(base, &ev->ev_timeout);
|
2011-02-02 20:05:41 -05:00
|
|
|
insert_common_timeout_inorder(ctl, ev);
|
2012-03-23 18:42:56 -04:00
|
|
|
break;
|
|
|
|
case 2: /* Was common; is no longer common */
|
|
|
|
ctl = base->common_timeout_queues[old_timeout_idx];
|
|
|
|
TAILQ_REMOVE(&ctl->events, ev,
|
|
|
|
ev_timeout_pos.ev_next_with_common_timeout);
|
|
|
|
min_heap_push_(&base->timeheap, ev);
|
|
|
|
break;
|
|
|
|
case 1: /* Wasn't common; has become common. */
|
|
|
|
min_heap_erase_(&base->timeheap, ev);
|
|
|
|
ctl = get_common_timeout_list(base, &ev->ev_timeout);
|
|
|
|
insert_common_timeout_inorder(ctl, ev);
|
|
|
|
break;
|
|
|
|
case 0: /* was in heap; is still on heap. */
|
2012-02-29 15:07:33 -05:00
|
|
|
min_heap_adjust_(&base->timeheap, ev);
|
2012-03-23 18:42:56 -04:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
EVUTIL_ASSERT(0); /* unreachable */
|
|
|
|
break;
|
2011-02-02 20:05:41 -05:00
|
|
|
}
|
|
|
|
}
|
2012-03-26 23:28:21 -04:00
|
|
|
#endif
|
2011-02-02 20:05:41 -05:00
|
|
|
|
2010-03-11 00:38:46 -05:00
|
|
|
/* Add 'ev' to the common timeout list 'ctl'. */
|
2009-11-09 18:30:48 +00:00
|
|
|
static void
|
|
|
|
insert_common_timeout_inorder(struct common_timeout_list *ctl,
|
|
|
|
struct event *ev)
|
|
|
|
{
|
|
|
|
struct event *e;
|
2010-03-11 00:38:46 -05:00
|
|
|
/* By all logic, we should just be able to append 'ev' to the end of
|
|
|
|
* ctl->events, since the timeout on each 'ev' is set to {the common
|
|
|
|
* timeout} + {the time when we add the event}, and so the events
|
|
|
|
* should arrive in order of their timeouts. But just in case
|
|
|
|
* there's some wacky threading issue going on, we do a search from
|
|
|
|
* the end of the list to find the right insertion point.
|
|
|
|
*/
|
2009-11-09 18:30:48 +00:00
|
|
|
TAILQ_FOREACH_REVERSE(e, &ctl->events,
|
2010-05-03 11:37:16 -04:00
|
|
|
event_list, ev_timeout_pos.ev_next_with_common_timeout) {
|
2009-11-09 18:30:48 +00:00
|
|
|
/* This timercmp is a little sneaky, since both ev and e have
|
|
|
|
* magic values in tv_usec. Fortunately, they ought to have
|
|
|
|
* the _same_ magic values in tv_usec. Let's assert for that.
|
|
|
|
*/
|
2009-11-09 18:30:57 +00:00
|
|
|
EVUTIL_ASSERT(
|
|
|
|
is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
|
2009-11-09 18:30:48 +00:00
|
|
|
if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
|
|
|
|
TAILQ_INSERT_AFTER(&ctl->events, e, ev,
|
|
|
|
ev_timeout_pos.ev_next_with_common_timeout);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
TAILQ_INSERT_HEAD(&ctl->events, ev,
|
|
|
|
ev_timeout_pos.ev_next_with_common_timeout);
|
|
|
|
}
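/*
 * Illustrative sketch, not part of libevent: how an application ends up on
 * one of these common-timeout lists.  Assumes the public
 * event_base_init_common_timeout()/event_add() API; "example_" names are
 * hypothetical.
 */
#if 0
#include <event2/event.h>

static void
example_add_with_common_timeout(struct event_base *base, struct event *ev)
{
	/* Many events sharing one 10-second timeout are kept on a single
	 * ordered list instead of in the min-heap. */
	struct timeval ten_sec = { 10, 0 };
	const struct timeval *common =
	    event_base_init_common_timeout(base, &ten_sec);

	event_add(ev, common ? common : &ten_sec);
}
#endif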
|
|
|
|
|
2009-07-21 19:20:25 +00:00
|
|
|
static void
|
2011-02-23 00:59:20 -05:00
|
|
|
event_queue_insert_inserted(struct event_base *base, struct event *ev)
|
2002-04-09 15:14:06 +00:00
|
|
|
{
|
2010-02-23 15:14:57 -05:00
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
|
|
|
|
2011-02-23 00:59:20 -05:00
|
|
|
if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
|
2012-11-01 18:12:07 -04:00
|
|
|
event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
|
|
|
|
ev, EV_SOCK_ARG(ev->ev_fd));
|
2011-02-23 00:59:20 -05:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-04-05 12:38:18 -04:00
|
|
|
INCR_EVENT_COUNT(base, ev->ev_flags);
|
2011-02-23 00:59:20 -05:00
|
|
|
|
|
|
|
ev->ev_flags |= EVLIST_INSERTED;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2012-04-05 12:38:18 -04:00
|
|
|
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
|
2011-02-23 00:59:20 -05:00
|
|
|
{
|
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
2007-12-24 22:49:30 +00:00
|
|
|
|
2012-04-05 12:38:18 -04:00
|
|
|
if (evcb->evcb_flags & EVLIST_ACTIVE) {
|
2011-02-23 00:59:20 -05:00
|
|
|
/* Double insertion is possible for active events */
|
2009-11-20 15:46:04 -05:00
|
|
|
return;
|
2007-12-24 22:49:30 +00:00
|
|
|
}
|
2004-09-19 21:08:09 +00:00
|
|
|
|
2012-04-05 12:38:18 -04:00
|
|
|
INCR_EVENT_COUNT(base, evcb->evcb_flags);
|
2004-05-24 00:19:52 +00:00
|
|
|
|
2012-04-05 12:38:18 -04:00
|
|
|
evcb->evcb_flags |= EVLIST_ACTIVE;
|
2011-02-23 00:59:20 -05:00
|
|
|
|
|
|
|
base->event_count_active++;
|
2013-12-30 14:06:20 -05:00
|
|
|
MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
|
2012-05-09 11:06:06 -04:00
|
|
|
EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
|
2012-04-05 12:38:18 -04:00
|
|
|
TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
|
|
|
|
evcb, evcb_active_next);
|
2011-02-23 00:59:20 -05:00
|
|
|
}
|
|
|
|
|
2012-04-06 03:00:40 -04:00
|
|
|
static void
|
|
|
|
event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
|
|
|
|
{
|
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
|
|
|
if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
|
|
|
|
/* Double insertion is possible */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
INCR_EVENT_COUNT(base, evcb->evcb_flags);
|
|
|
|
evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
|
|
|
|
base->event_count_active++;
|
2013-12-30 14:06:20 -05:00
|
|
|
MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
|
2012-05-09 11:06:06 -04:00
|
|
|
EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
|
2012-04-06 03:00:40 -04:00
|
|
|
TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
|
|
|
|
}
|
|
|
|
|
2011-02-23 00:59:20 -05:00
|
|
|
static void
|
|
|
|
event_queue_insert_timeout(struct event_base *base, struct event *ev)
|
|
|
|
{
|
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
|
|
|
|
|
|
|
if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
|
2012-11-01 18:12:07 -04:00
|
|
|
event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
|
|
|
|
ev, EV_SOCK_ARG(ev->ev_fd));
|
2011-02-23 00:59:20 -05:00
|
|
|
return;
|
2004-03-27 17:42:49 +00:00
|
|
|
}
|
2011-02-23 00:59:20 -05:00
|
|
|
|
2012-04-05 12:38:18 -04:00
|
|
|
INCR_EVENT_COUNT(base, ev->ev_flags);
|
2011-02-23 00:59:20 -05:00
|
|
|
|
|
|
|
ev->ev_flags |= EVLIST_TIMEOUT;
|
|
|
|
|
|
|
|
if (is_common_timeout(&ev->ev_timeout, base)) {
|
|
|
|
struct common_timeout_list *ctl =
|
|
|
|
get_common_timeout_list(base, &ev->ev_timeout);
|
|
|
|
insert_common_timeout_inorder(ctl, ev);
|
|
|
|
} else {
|
2012-02-29 15:07:33 -05:00
|
|
|
min_heap_push_(&base->timeheap, ev);
|
2002-04-09 15:14:06 +00:00
|
|
|
}
|
|
|
|
}
|
2005-02-25 05:28:57 +00:00
|
|
|
|
2012-04-06 03:00:40 -04:00
|
|
|
static void
|
|
|
|
event_queue_make_later_events_active(struct event_base *base)
|
|
|
|
{
|
|
|
|
struct event_callback *evcb;
|
|
|
|
EVENT_BASE_ASSERT_LOCKED(base);
|
|
|
|
|
|
|
|
while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
|
|
|
|
TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
|
|
|
|
evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
|
2012-05-09 11:06:06 -04:00
|
|
|
EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
|
2012-04-06 03:00:40 -04:00
|
|
|
TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
|
2012-05-09 11:06:06 -04:00
|
|
|
base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
|
2012-04-06 03:00:40 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-02-25 05:28:57 +00:00
|
|
|
/* Functions for debugging */
|
|
|
|
|
|
|
|
const char *
|
|
|
|
event_get_version(void)
|
|
|
|
{
|
2012-02-29 15:07:31 -05:00
|
|
|
return (EVENT__VERSION);
|
2005-02-25 05:28:57 +00:00
|
|
|
}
|
|
|
|
|
2009-04-17 17:22:32 +00:00
|
|
|
ev_uint32_t
|
|
|
|
event_get_version_number(void)
|
|
|
|
{
|
2012-02-29 15:07:31 -05:00
|
|
|
return (EVENT__NUMERIC_VERSION);
|
2009-04-17 17:22:32 +00:00
|
|
|
}
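/*
 * Illustrative sketch, not part of libevent: reading the two version
 * accessors above.  The byte layout of the numeric version (major, minor,
 * patch in the top three bytes) is an assumption; check event2/event.h.
 */
#if 0
#include <event2/event.h>
#include <stdio.h>

static void
example_print_version(void)
{
	ev_uint32_t v = event_get_version_number();

	printf("libevent %s (0x%08x)\n", event_get_version(), (unsigned)v);
	printf("major=%u minor=%u patch=%u\n",
	    (unsigned)(v >> 24), (unsigned)((v >> 16) & 0xff),
	    (unsigned)((v >> 8) & 0xff));
}
#endif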
|
|
|
|
|
2009-01-27 21:10:31 +00:00
|
|
|
/*
|
2005-02-25 05:28:57 +00:00
|
|
|
* No thread-safe interface needed - the information should be the same
|
|
|
|
* for all threads.
|
|
|
|
*/
|
|
|
|
|
|
|
|
const char *
|
|
|
|
event_get_method(void)
|
|
|
|
{
|
|
|
|
return (current_base->evsel->name);
|
|
|
|
}
|
2007-11-25 17:14:19 +00:00
|
|
|
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifndef EVENT__DISABLE_MM_REPLACEMENT
|
2012-02-29 15:07:32 -05:00
|
|
|
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
|
|
|
|
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
|
|
|
|
static void (*mm_free_fn_)(void *p) = NULL;
|
2007-11-25 17:14:19 +00:00
|
|
|
|
|
|
|
void *
|
2010-05-04 12:57:40 -04:00
|
|
|
event_mm_malloc_(size_t sz)
|
2007-11-25 17:14:19 +00:00
|
|
|
{
|
Add argument checks to some memory functions in `event.c'.
Add a zero check to the function `event_mm_malloc_',
i.e. simply return NULL if the sz argument is zero.
On failure, set errno to ENOMEM and return NULL.
Add a zero check to the function `event_mm_calloc_',
i.e. simply return NULL if either argument is zero.
Also add an unsigned integer multiplication check, and if an integer
overflow would occur, set errno to ENOMEM and return NULL.
On failure, set errno to ENOMEM and return NULL.
Add a NULL check to the function `event_mm_strdup_',
i.e. set errno to EINVAL and return NULL.
Also add an unsigned integer addition check, and if an integer
overflow would occur, set errno to ENOMEM and return NULL.
If a memory allocation error occurs, again set errno to ENOMEM
and return NULL.
Add unit tests to `test/regress_util.c'.
2011-10-14 17:16:03 -04:00
|
|
|
if (sz == 0)
|
|
|
|
return NULL;
|
|
|
|
|
2012-02-29 15:07:32 -05:00
|
|
|
if (mm_malloc_fn_)
|
|
|
|
return mm_malloc_fn_(sz);
|
2007-11-25 17:14:19 +00:00
|
|
|
else
|
|
|
|
return malloc(sz);
|
|
|
|
}
|
|
|
|
|
|
|
|
void *
|
2010-05-04 12:57:40 -04:00
|
|
|
event_mm_calloc_(size_t count, size_t size)
|
2007-11-25 17:14:19 +00:00
|
|
|
{
|
2011-10-14 17:16:03 -04:00
|
|
|
if (count == 0 || size == 0)
|
|
|
|
return NULL;
|
|
|
|
|
2012-02-29 15:07:32 -05:00
|
|
|
if (mm_malloc_fn_) {
|
2007-11-25 17:14:19 +00:00
|
|
|
size_t sz = count * size;
|
2011-10-14 17:16:03 -04:00
|
|
|
void *p = NULL;
|
|
|
|
if (count > EV_SIZE_MAX / size)
|
|
|
|
goto error;
|
2012-02-29 15:07:32 -05:00
|
|
|
p = mm_malloc_fn_(sz);
|
2007-11-25 17:14:19 +00:00
|
|
|
if (p)
|
2011-10-14 17:16:03 -04:00
|
|
|
return memset(p, 0, sz);
|
2012-02-28 14:49:47 -05:00
|
|
|
} else {
|
|
|
|
void *p = calloc(count, size);
|
|
|
|
#ifdef _WIN32
|
|
|
|
/* Windows calloc doesn't reliably set ENOMEM */
|
|
|
|
if (p == NULL)
|
|
|
|
goto error;
|
|
|
|
#endif
|
|
|
|
return p;
|
|
|
|
}
|
2011-10-14 17:16:03 -04:00
|
|
|
|
|
|
|
error:
|
|
|
|
errno = ENOMEM;
|
|
|
|
return NULL;
|
2007-11-25 17:14:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
char *
|
2010-05-04 12:57:40 -04:00
|
|
|
event_mm_strdup_(const char *str)
|
2007-11-25 17:14:19 +00:00
|
|
|
{
|
2011-10-14 17:16:03 -04:00
|
|
|
if (!str) {
|
|
|
|
errno = EINVAL;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2012-02-29 15:07:32 -05:00
|
|
|
if (mm_malloc_fn_) {
|
2007-11-25 17:14:19 +00:00
|
|
|
size_t ln = strlen(str);
|
2011-10-14 17:16:03 -04:00
|
|
|
void *p = NULL;
|
|
|
|
if (ln == EV_SIZE_MAX)
|
|
|
|
goto error;
|
2012-02-29 15:07:32 -05:00
|
|
|
p = mm_malloc_fn_(ln+1);
|
2007-11-25 17:14:19 +00:00
|
|
|
if (p)
|
2011-10-14 17:16:03 -04:00
|
|
|
return memcpy(p, str, ln+1);
|
2007-11-25 17:14:19 +00:00
|
|
|
} else
|
2011-05-25 19:50:56 -04:00
|
|
|
#ifdef _WIN32
|
2007-11-25 17:14:19 +00:00
|
|
|
return _strdup(str);
|
|
|
|
#else
|
|
|
|
return strdup(str);
|
|
|
|
#endif
|
2011-10-14 17:16:03 -04:00
|
|
|
|
|
|
|
error:
|
|
|
|
errno = ENOMEM;
|
|
|
|
return NULL;
|
2007-11-25 17:14:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void *
|
2010-05-04 12:57:40 -04:00
|
|
|
event_mm_realloc_(void *ptr, size_t sz)
|
2007-11-25 17:14:19 +00:00
|
|
|
{
|
2012-02-29 15:07:32 -05:00
|
|
|
if (mm_realloc_fn_)
|
|
|
|
return mm_realloc_fn_(ptr, sz);
|
2007-11-25 17:14:19 +00:00
|
|
|
else
|
|
|
|
return realloc(ptr, sz);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2010-05-04 12:57:40 -04:00
|
|
|
event_mm_free_(void *ptr)
|
2007-11-25 17:14:19 +00:00
|
|
|
{
|
2012-02-29 15:07:32 -05:00
|
|
|
if (mm_free_fn_)
|
|
|
|
mm_free_fn_(ptr);
|
2007-11-25 17:14:19 +00:00
|
|
|
else
|
|
|
|
free(ptr);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
|
|
|
|
void *(*realloc_fn)(void *ptr, size_t sz),
|
|
|
|
void (*free_fn)(void *ptr))
|
|
|
|
{
|
2012-02-29 15:07:32 -05:00
|
|
|
mm_malloc_fn_ = malloc_fn;
|
|
|
|
mm_realloc_fn_ = realloc_fn;
|
|
|
|
mm_free_fn_ = free_fn;
|
2007-11-25 17:14:19 +00:00
|
|
|
}
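/*
 * Illustrative sketch, not part of libevent: installing replacement
 * allocators through the setter above.  It should run before anything else
 * in the library allocates memory; "counting_*" and "example_*" names are
 * hypothetical.
 */
#if 0
#include <event2/event.h>
#include <stdio.h>
#include <stdlib.h>

static void *counting_malloc(size_t sz)
{
	fprintf(stderr, "alloc %lu\n", (unsigned long)sz);
	return malloc(sz);
}
static void *counting_realloc(void *p, size_t sz) { return realloc(p, sz); }
static void counting_free(void *p) { free(p); }

static void
example_install_allocators(void)
{
	/* Every later event_mm_*_ call will go through these functions. */
	event_set_mem_functions(counting_malloc, counting_realloc,
	    counting_free);
}
#endif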
|
2009-04-17 06:56:57 +00:00
|
|
|
#endif
|
2009-02-12 22:19:54 +00:00
|
|
|
|
2012-04-03 16:15:49 -04:00
|
|
|
#ifdef EVENT__HAVE_EVENTFD
|
2008-03-02 21:18:33 +00:00
|
|
|
static void
|
2010-03-05 12:47:46 -05:00
|
|
|
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
|
2008-03-02 21:18:33 +00:00
|
|
|
{
|
2009-01-19 20:37:24 +00:00
|
|
|
ev_uint64_t msg;
|
2010-04-09 15:28:26 -04:00
|
|
|
ev_ssize_t r;
|
2010-09-08 13:22:55 -04:00
|
|
|
struct event_base *base = arg;
|
2009-01-19 20:37:24 +00:00
|
|
|
|
2010-04-09 15:28:26 -04:00
|
|
|
r = read(fd, (void*) &msg, sizeof(msg));
|
|
|
|
if (r<0 && errno != EAGAIN) {
|
|
|
|
event_sock_warn(fd, "Error reading from eventfd");
|
|
|
|
}
|
2010-09-08 13:22:55 -04:00
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
|
|
|
base->is_notify_pending = 0;
|
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
2009-01-19 20:37:24 +00:00
|
|
|
}
|
2012-04-03 16:15:49 -04:00
|
|
|
#endif
|
2009-01-19 01:34:14 +00:00
|
|
|
|
2009-01-19 20:37:24 +00:00
|
|
|
static void
|
|
|
|
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
|
|
|
|
{
|
2010-07-05 13:24:12 -04:00
|
|
|
unsigned char buf[1024];
|
2010-09-08 13:22:55 -04:00
|
|
|
struct event_base *base = arg;
|
2011-05-25 19:50:56 -04:00
|
|
|
#ifdef _WIN32
|
2009-01-19 20:22:47 +00:00
|
|
|
while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
|
|
|
|
;
|
2009-01-19 19:46:03 +00:00
|
|
|
#else
|
2009-01-19 20:22:47 +00:00
|
|
|
while (read(fd, (char*)buf, sizeof(buf)) > 0)
|
|
|
|
;
|
2009-01-19 19:46:03 +00:00
|
|
|
#endif
|
2010-09-08 13:22:55 -04:00
|
|
|
|
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
|
|
|
base->is_notify_pending = 0;
|
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
2008-03-02 21:18:33 +00:00
|
|
|
}
|
|
|
|
|
2009-01-19 01:34:14 +00:00
|
|
|
int
|
|
|
|
evthread_make_base_notifiable(struct event_base *base)
|
|
|
|
{
|
2012-06-28 12:00:57 -04:00
|
|
|
int r;
|
2009-01-19 01:34:14 +00:00
|
|
|
if (!base)
|
|
|
|
return -1;
|
|
|
|
|
2012-06-28 12:00:57 -04:00
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
|
|
|
r = evthread_make_base_notifiable_nolock_(base);
|
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
evthread_make_base_notifiable_nolock_(struct event_base *base)
|
|
|
|
{
|
|
|
|
void (*cb)(evutil_socket_t, short, void *);
|
|
|
|
int (*notify)(struct event_base *);
|
|
|
|
|
2011-05-27 22:54:16 -04:00
|
|
|
if (base->th_notify_fn != NULL) {
|
|
|
|
/* The base is already notifiable: we're doing fine. */
|
2009-01-19 01:34:14 +00:00
|
|
|
return 0;
|
2011-05-27 22:54:16 -04:00
|
|
|
}
|
2009-01-19 01:34:14 +00:00
|
|
|
|
2010-09-17 00:34:13 -04:00
|
|
|
#if defined(EVENT__HAVE_WORKING_KQUEUE)
|
|
|
|
if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
|
|
|
|
base->th_notify_fn = event_kq_notify_base_;
|
|
|
|
/* No need to add an event here; the backend can wake
|
|
|
|
* itself up just fine. */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2012-04-03 16:15:49 -04:00
|
|
|
#ifdef EVENT__HAVE_EVENTFD
|
2012-02-29 15:07:33 -05:00
|
|
|
base->th_notify_fd[0] = evutil_eventfd_(0,
|
2012-02-10 16:15:10 -05:00
|
|
|
EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
|
2009-01-19 20:37:24 +00:00
|
|
|
if (base->th_notify_fd[0] >= 0) {
|
2012-02-10 16:15:10 -05:00
|
|
|
base->th_notify_fd[1] = -1;
|
2009-01-19 20:37:24 +00:00
|
|
|
notify = evthread_notify_base_eventfd;
|
|
|
|
cb = evthread_notify_drain_eventfd;
|
2012-04-03 16:15:49 -04:00
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
|
2012-02-10 16:15:10 -05:00
|
|
|
notify = evthread_notify_base_default;
|
|
|
|
cb = evthread_notify_drain_default;
|
|
|
|
} else {
|
|
|
|
return -1;
|
2009-01-19 01:34:14 +00:00
|
|
|
}
|
2008-03-02 21:18:33 +00:00
|
|
|
|
2009-01-19 20:37:24 +00:00
|
|
|
base->th_notify_fn = notify;
|
|
|
|
|
2008-03-02 21:18:33 +00:00
|
|
|
/* prepare an event that we can use for wakeup */
|
2009-01-22 17:48:55 +00:00
|
|
|
event_assign(&base->th_notify, base, base->th_notify_fd[0],
|
|
|
|
EV_READ|EV_PERSIST, cb, base);
|
2008-05-02 16:28:25 +00:00
|
|
|
|
2008-03-02 21:18:33 +00:00
|
|
|
/* we need to mark this as an internal event */
|
|
|
|
base->th_notify.ev_flags |= EVLIST_INTERNAL;
|
2010-09-17 00:24:50 -04:00
|
|
|
event_priority_set(&base->th_notify, 0);
|
2008-03-02 21:18:33 +00:00
|
|
|
|
2012-06-28 12:00:57 -04:00
|
|
|
return event_add_nolock_(&base->th_notify, NULL, 0);
|
2008-03-02 21:18:33 +00:00
|
|
|
}
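/*
 * Illustrative sketch, not part of libevent internals: the usual way a base
 * becomes notifiable is that threading support is enabled before the base
 * is created; activating an event from another thread then wakes the
 * dispatching thread through the eventfd/pipe installed above.  Assumes the
 * pthreads helper from event2/thread.h; "example_" names are hypothetical.
 */
#if 0
#include <event2/event.h>
#include <event2/thread.h>

static struct event_base *
example_threaded_base(void)
{
	/* Must run before event_base_new() so the base gets locks and can
	 * make itself notifiable when needed. */
	if (evthread_use_pthreads() < 0)
		return NULL;
	return event_base_new();
}

static void
example_wake_from_worker(struct event *ev)
{
	/* Safe to call from another thread; unblocks the event loop. */
	event_active(ev, EV_TIMEOUT, 1);
}
#endif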
|
|
|
|
|
2012-02-11 21:01:53 -05:00
|
|
|
int
|
2012-09-07 09:58:24 -04:00
|
|
|
event_base_foreach_event_nolock_(struct event_base *base,
|
2012-09-07 09:47:50 -04:00
|
|
|
event_base_foreach_event_cb fn, void *arg)
|
2008-05-15 03:49:03 +00:00
|
|
|
{
|
2012-02-11 21:01:53 -05:00
|
|
|
int r, i;
|
|
|
|
unsigned u;
|
|
|
|
struct event *ev;
|
2008-05-15 03:49:03 +00:00
|
|
|
|
2012-02-11 21:01:53 -05:00
|
|
|
/* Start out with all the EVLIST_INSERTED events. */
|
2012-02-29 15:07:33 -05:00
|
|
|
if ((r = evmap_foreach_event_(base, fn, arg)))
|
2012-02-11 21:01:53 -05:00
|
|
|
return r;
|
2008-05-15 03:49:03 +00:00
|
|
|
|
2012-02-11 21:01:53 -05:00
|
|
|
/* Okay, now we deal with those events that have timeouts and are in
|
|
|
|
* the min-heap. */
|
|
|
|
for (u = 0; u < base->timeheap.n; ++u) {
|
|
|
|
ev = base->timeheap.p[u];
|
|
|
|
if (ev->ev_flags & EVLIST_INSERTED) {
|
|
|
|
/* we already processed this one */
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if ((r = fn(base, ev, arg)))
|
|
|
|
return r;
|
2008-05-15 03:49:03 +00:00
|
|
|
}
|
2012-02-11 21:01:53 -05:00
|
|
|
|
|
|
|
/* Now for the events in one of the common-timeout queues; these
|
|
|
|
* are not in the min-heap. */
|
|
|
|
for (i = 0; i < base->n_common_timeouts; ++i) {
|
|
|
|
struct common_timeout_list *ctl =
|
|
|
|
base->common_timeout_queues[i];
|
|
|
|
TAILQ_FOREACH(ev, &ctl->events,
|
|
|
|
ev_timeout_pos.ev_next_with_common_timeout) {
|
|
|
|
if (ev->ev_flags & EVLIST_INSERTED) {
|
|
|
|
/* we already processed this one */
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if ((r = fn(base, ev, arg)))
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Finally, we deal with all the active events that we haven't touched
|
|
|
|
* yet. */
|
2008-05-15 03:49:03 +00:00
|
|
|
for (i = 0; i < base->nactivequeues; ++i) {
|
2012-04-05 12:38:18 -04:00
|
|
|
struct event_callback *evcb;
|
|
|
|
TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
|
|
|
|
if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
|
|
|
|
/* This isn't an event (evlist_init clear), or
|
|
|
|
* we already processed it. (inserted or
|
|
|
|
* timeout set.) */
|
2012-02-11 21:01:53 -05:00
|
|
|
continue;
|
|
|
|
}
|
2012-04-05 12:38:18 -04:00
|
|
|
ev = event_callback_to_event(evcb);
|
2012-02-11 21:01:53 -05:00
|
|
|
if ((r = fn(base, ev, arg)))
|
|
|
|
return r;
|
2008-05-15 03:49:03 +00:00
|
|
|
}
|
|
|
|
}
|
2012-02-11 21:01:53 -05:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Helper for event_base_dump_events: called on each event in the event base;
|
|
|
|
* dumps only the inserted events. */
|
|
|
|
static int
|
2012-09-07 09:47:50 -04:00
|
|
|
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
|
2012-02-11 21:01:53 -05:00
|
|
|
{
|
|
|
|
FILE *output = arg;
|
|
|
|
const char *gloss = (e->ev_events & EV_SIGNAL) ?
|
|
|
|
"sig" : "fd ";
|
|
|
|
|
|
|
|
if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
|
|
|
|
return 0;
|
|
|
|
|
Implemented EV_CLOSED event for epoll backend (EPOLLRDHUP).
- Added new EV_CLOSED event - detects premature connection close
by clients without the necessity of reading all the pending
data. Does not depend on EV_READ and/or EV_WRITE.
- Added new EV_FEATURE_EARLY_CLOSED feature for epoll.
Must be supported for listening to EV_CLOSED event.
- Added new regression test: test-closed.c
- All regression tests passed (test/regress and test/test.sh)
- strace output of test-closed using EV_CLOSED:
socketpair(PF_LOCAL, SOCK_STREAM, 0, [6, 7]) = 0
sendto(6, "test string\0", 12, 0, NULL, 0) = 12
shutdown(6, SHUT_WR) = 0
epoll_ctl(3, EPOLL_CTL_ADD, 7, {EPOLLRDHUP, {u32=7, u64=7}}) = 0
epoll_wait(3, {{EPOLLRDHUP, {u32=7, u64=7}}}, 32, 3000) = 1
epoll_ctl(3, EPOLL_CTL_MOD, 7, {EPOLLRDHUP, {u32=7, u64=7}}) = 0
fstat(1, {st_mode=S_IFCHR|0620, st_rdev=makedev(136, 4), ...})
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYM...
write(1, "closed_cb: detected connection close "..., 45) = 45
2014-01-17 23:20:42 -02:00
|
|
|
fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
|
2012-11-01 18:12:07 -04:00
|
|
|
(void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
|
2012-02-11 21:01:53 -05:00
|
|
|
(e->ev_events&EV_READ)?" Read":"",
|
|
|
|
(e->ev_events&EV_WRITE)?" Write":"",
|
2014-01-17 23:20:42 -02:00
|
|
|
(e->ev_events&EV_CLOSED)?" EOF":"",
|
2012-02-11 21:01:53 -05:00
|
|
|
(e->ev_events&EV_SIGNAL)?" Signal":"",
|
2012-03-23 17:53:08 -04:00
|
|
|
(e->ev_events&EV_PERSIST)?" Persist":"",
|
|
|
|
(e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
|
2012-02-11 21:01:53 -05:00
|
|
|
if (e->ev_flags & EVLIST_TIMEOUT) {
|
|
|
|
struct timeval tv;
|
|
|
|
tv.tv_sec = e->ev_timeout.tv_sec;
|
|
|
|
tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
|
|
|
|
evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
|
|
|
|
fprintf(output, " Timeout=%ld.%06d",
|
|
|
|
(long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
|
|
|
|
}
|
|
|
|
fputc('\n', output);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Helper for event_base_dump_events: called on each event in the event base;
|
|
|
|
* dumps only the active events. */
|
|
|
|
static int
|
2012-09-07 09:47:50 -04:00
|
|
|
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
|
2012-02-11 21:01:53 -05:00
|
|
|
{
|
|
|
|
FILE *output = arg;
|
|
|
|
const char *gloss = (e->ev_events & EV_SIGNAL) ?
|
|
|
|
"sig" : "fd ";
|
|
|
|
|
2012-04-06 03:00:40 -04:00
|
|
|
if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
|
2012-02-11 21:01:53 -05:00
|
|
|
return 0;
|
|
|
|
|
2014-01-17 23:20:42 -02:00
|
|
|
fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
|
2012-11-01 18:12:07 -04:00
|
|
|
(void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
|
2012-03-23 17:53:08 -04:00
|
|
|
(e->ev_res&EV_READ)?" Read":"",
|
|
|
|
(e->ev_res&EV_WRITE)?" Write":"",
|
2014-01-17 23:20:42 -02:00
|
|
|
(e->ev_res&EV_CLOSED)?" EOF":"",
|
2012-03-23 17:53:08 -04:00
|
|
|
(e->ev_res&EV_SIGNAL)?" Signal":"",
|
|
|
|
(e->ev_res&EV_TIMEOUT)?" Timeout":"",
|
2012-04-06 03:00:40 -04:00
|
|
|
(e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
|
|
|
|
(e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
|
2012-02-11 21:01:53 -05:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-09-07 09:58:24 -04:00
|
|
|
int
|
|
|
|
event_base_foreach_event(struct event_base *base,
|
2012-09-07 09:47:50 -04:00
|
|
|
event_base_foreach_event_cb fn, void *arg)
|
|
|
|
{
|
2012-09-07 09:58:24 -04:00
|
|
|
int r;
|
2012-09-07 09:47:50 -04:00
|
|
|
if ((!fn) || (!base)) {
|
2012-09-10 13:43:26 -04:00
|
|
|
return -1;
|
2012-09-07 09:47:50 -04:00
|
|
|
}
|
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
2012-09-07 09:58:24 -04:00
|
|
|
r = event_base_foreach_event_nolock_(base, fn, arg);
|
2012-09-07 09:47:50 -04:00
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
2012-09-07 09:58:24 -04:00
|
|
|
return r;
|
2012-09-07 09:47:50 -04:00
|
|
|
}
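/*
 * Illustrative sketch, not part of libevent: a foreach callback with the
 * signature used above.  Returning nonzero from the callback stops the
 * walk early, as the r checks in the nolock helper show; "example_" names
 * are hypothetical.
 */
#if 0
#include <event2/event.h>

static int
example_count_cb(const struct event_base *base, const struct event *ev,
    void *arg)
{
	int *count = arg;
	(void)base; (void)ev;
	++*count;
	return 0;	/* keep iterating */
}

static int
example_count_events(struct event_base *base)
{
	int count = 0;
	event_base_foreach_event(base, example_count_cb, &count);
	return count;
}
#endif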
|
|
|
|
|
|
|
|
|
2012-02-11 21:01:53 -05:00
|
|
|
void
|
|
|
|
event_base_dump_events(struct event_base *base, FILE *output)
|
|
|
|
{
|
2012-09-07 09:47:50 -04:00
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
2012-02-11 21:01:53 -05:00
|
|
|
fprintf(output, "Inserted events:\n");
|
2012-09-07 09:58:24 -04:00
|
|
|
event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
|
2012-02-11 21:01:53 -05:00
|
|
|
|
|
|
|
fprintf(output, "Active events:\n");
|
2012-09-07 09:58:24 -04:00
|
|
|
event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
|
2012-09-07 09:47:50 -04:00
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
2008-05-15 03:49:03 +00:00
|
|
|
}
|
2010-08-17 05:02:00 -07:00
|
|
|
|
2013-01-16 16:31:08 -08:00
|
|
|
void
|
|
|
|
event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
|
|
|
|
{
|
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
2014-01-17 23:20:42 -02:00
|
|
|
evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
|
2013-01-16 16:31:08 -08:00
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
|
|
|
}
|
|
|
|
|
2013-12-21 23:32:10 -05:00
|
|
|
void
|
|
|
|
event_base_active_by_signal(struct event_base *base, int sig)
|
|
|
|
{
|
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
|
|
|
evmap_signal_active_(base, sig, 1);
|
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
|
|
|
}
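/*
 * Illustrative sketch, not part of libevent: manually activating whatever
 * events are registered for an fd or a signal, without the backend having
 * reported anything; "example_" names are hypothetical.
 */
#if 0
#include <event2/event.h>
#include <signal.h>

static void
example_force_activation(struct event_base *base, evutil_socket_t fd)
{
	/* Run the read callbacks registered on fd as if it became readable. */
	event_base_active_by_fd(base, fd, EV_READ);

	/* Run any handlers registered for SIGHUP. */
	event_base_active_by_signal(base, SIGHUP);
}
#endif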
|
|
|
|
|
|
|
|
|
2010-08-17 05:02:00 -07:00
|
|
|
void
|
2012-02-29 15:07:33 -05:00
|
|
|
event_base_add_virtual_(struct event_base *base)
|
2010-08-17 05:02:00 -07:00
|
|
|
{
|
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
|
|
|
base->virtual_event_count++;
|
2013-12-30 14:06:20 -05:00
|
|
|
MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
|
2010-08-17 05:02:00 -07:00
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2012-02-29 15:07:33 -05:00
|
|
|
event_base_del_virtual_(struct event_base *base)
|
2010-08-17 05:02:00 -07:00
|
|
|
{
|
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
2010-09-08 20:33:21 -07:00
|
|
|
EVUTIL_ASSERT(base->virtual_event_count > 0);
|
2010-08-17 05:02:00 -07:00
|
|
|
base->virtual_event_count--;
|
2010-09-09 14:36:45 -04:00
|
|
|
if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
|
|
|
|
evthread_notify_base(base);
|
2010-08-17 05:02:00 -07:00
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
|
|
|
}
|
2011-04-22 12:01:25 -04:00
|
|
|
|
2011-07-12 12:25:41 -04:00
|
|
|
static void
|
|
|
|
event_free_debug_globals_locks(void)
|
|
|
|
{
|
|
|
|
#ifndef EVENT__DISABLE_THREAD_SUPPORT
|
|
|
|
#ifndef EVENT__DISABLE_DEBUG_MODE
|
|
|
|
if (event_debug_map_lock_ != NULL) {
|
|
|
|
EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
|
2012-03-26 17:35:21 -04:00
|
|
|
event_debug_map_lock_ = NULL;
|
2015-01-26 00:29:15 +03:00
|
|
|
evthreadimpl_disable_lock_debugging_();
|
2011-07-12 12:25:41 -04:00
|
|
|
}
|
|
|
|
#endif /* EVENT__DISABLE_DEBUG_MODE */
|
|
|
|
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
event_free_debug_globals(void)
|
|
|
|
{
|
|
|
|
event_free_debug_globals_locks();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
event_free_evsig_globals(void)
|
|
|
|
{
|
|
|
|
evsig_free_globals_();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
event_free_evutil_globals(void)
|
|
|
|
{
|
|
|
|
evutil_free_globals_();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
event_free_globals(void)
|
|
|
|
{
|
|
|
|
event_free_debug_globals();
|
|
|
|
event_free_evsig_globals();
|
|
|
|
event_free_evutil_globals();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
libevent_global_shutdown(void)
|
|
|
|
{
|
2015-01-08 04:45:27 +03:00
|
|
|
event_disable_debug_mode();
|
2011-07-12 12:25:41 -04:00
|
|
|
event_free_globals();
|
|
|
|
}
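/*
 * Illustrative sketch, not part of libevent: the global shutdown above is a
 * one-way teardown of library globals, so it belongs at the very end of a
 * process's use of libevent, after every base has been freed; "example_"
 * name is hypothetical.
 */
#if 0
#include <event2/event.h>

static void
example_run_and_teardown(void)
{
	struct event_base *base = event_base_new();

	if (base) {
		event_base_dispatch(base);
		event_base_free(base);
	}
	/* Only after all bases, events and bufferevents are gone. */
	libevent_global_shutdown();
}
#endif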
|
|
|
|
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifndef EVENT__DISABLE_THREAD_SUPPORT
|
2011-04-22 12:01:25 -04:00
|
|
|
int
|
|
|
|
event_global_setup_locks_(const int enable_locks)
|
|
|
|
{
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifndef EVENT__DISABLE_DEBUG_MODE
|
2012-02-29 15:07:32 -05:00
|
|
|
EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
|
2011-04-22 12:01:25 -04:00
|
|
|
#endif
|
|
|
|
if (evsig_global_setup_locks_(enable_locks) < 0)
|
|
|
|
return -1;
|
2013-02-07 17:20:08 -08:00
|
|
|
if (evutil_global_setup_locks_(enable_locks) < 0)
|
|
|
|
return -1;
|
2011-04-22 12:01:25 -04:00
|
|
|
if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
|
|
|
|
return -1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
2012-01-21 12:55:15 -05:00
|
|
|
|
|
|
|
void
|
2012-02-29 15:07:33 -05:00
|
|
|
event_base_assert_ok_(struct event_base *base)
|
2012-06-28 12:00:57 -04:00
|
|
|
{
|
|
|
|
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
|
|
|
|
event_base_assert_ok_nolock_(base);
|
|
|
|
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
event_base_assert_ok_nolock_(struct event_base *base)
|
2012-01-21 12:55:15 -05:00
|
|
|
{
|
|
|
|
int i;
|
2012-04-06 03:15:50 -04:00
|
|
|
int count;
|
2012-02-11 21:01:53 -05:00
|
|
|
|
|
|
|
/* First do checks on the per-fd and per-signal lists */
|
2012-02-29 15:07:33 -05:00
|
|
|
evmap_check_integrity_(base);
|
2012-01-21 12:55:15 -05:00
|
|
|
|
|
|
|
/* Check the heap property */
|
|
|
|
for (i = 1; i < (int)base->timeheap.n; ++i) {
|
|
|
|
int parent = (i - 1) / 2;
|
|
|
|
struct event *ev, *p_ev;
|
|
|
|
ev = base->timeheap.p[i];
|
|
|
|
p_ev = base->timeheap.p[parent];
|
2012-02-11 21:01:53 -05:00
|
|
|
EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
|
2012-01-21 12:55:15 -05:00
|
|
|
EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
|
|
|
|
EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check that the common timeouts are fine */
|
|
|
|
for (i = 0; i < base->n_common_timeouts; ++i) {
|
|
|
|
struct common_timeout_list *ctl = base->common_timeout_queues[i];
|
|
|
|
struct event *last=NULL, *ev;
|
2012-02-11 21:01:53 -05:00
|
|
|
|
|
|
|
EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
|
|
|
|
|
2012-01-21 12:55:15 -05:00
|
|
|
TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
|
|
|
|
if (last)
|
|
|
|
EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
|
2012-02-11 21:01:53 -05:00
|
|
|
EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
|
2012-01-21 12:55:15 -05:00
|
|
|
EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
|
|
|
|
EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
|
|
|
|
last = ev;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-02-11 21:01:53 -05:00
|
|
|
/* Check the active queues. */
|
2012-04-06 03:15:50 -04:00
|
|
|
count = 0;
|
2012-02-11 21:01:53 -05:00
|
|
|
for (i = 0; i < base->nactivequeues; ++i) {
|
2012-04-05 12:38:18 -04:00
|
|
|
struct event_callback *evcb;
|
|
|
|
EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
|
|
|
|
TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
|
2012-04-06 03:00:40 -04:00
|
|
|
EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
|
2012-04-05 12:38:18 -04:00
|
|
|
EVUTIL_ASSERT(evcb->evcb_pri == i);
|
2012-04-06 03:15:50 -04:00
|
|
|
++count;
|
2012-02-11 21:01:53 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-04-06 03:00:40 -04:00
|
|
|
{
|
|
|
|
struct event_callback *evcb;
|
|
|
|
TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
|
|
|
|
EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
|
2012-04-06 03:15:50 -04:00
|
|
|
++count;
|
2012-04-06 03:00:40 -04:00
|
|
|
}
|
|
|
|
}
|
2012-04-06 03:15:50 -04:00
|
|
|
EVUTIL_ASSERT(count == base->event_count_active);
|
2012-01-21 12:55:15 -05:00
|
|
|
}
|