Mirror of https://github.com/libevent/libevent.git (synced 2025-01-09 00:56:20 +08:00)

commit 3555befd1c: Merge branch '21_deadlock_fix_v2'

This merge introduces event finalization (EV_FINALIZE, event_finalize(), event_free_finalize(), event_del_block()/event_del_noblock(), event_base_free_nofinalize()) and rebuilds bufferevent teardown on top of it, so that deleting or freeing an event whose callback is running in another thread no longer risks deadlock.

18	buffer.c
@@ -3345,3 +3345,21 @@ evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
 }
 #endif
 
+int
+evbuffer_get_callbacks_(struct evbuffer *buffer, struct event_callback **cbs,
+    int max_cbs)
+{
+	int r = 0;
+	EVBUFFER_LOCK(buffer);
+	if (buffer->deferred_cbs) {
+		if (max_cbs < 1) {
+			r = -1;
+			goto done;
+		}
+		cbs[0] = &buffer->deferred;
+		r = 1;
+	}
+done:
+	EVBUFFER_UNLOCK(buffer);
+	return r;
+}
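The helper's contract, for readers tracing the new teardown path: it writes at most one entry (the buffer's deferred callback) into cbs and returns the number written, or -1 when max_cbs leaves no room. A minimal caller sketch follows, assuming libevent's internal headers; gather_buffer_callbacks() is a hypothetical name, not part of the commit:

#include "evbuffer-internal.h"	/* internal API: evbuffer_get_callbacks_() */

/* Sketch: collect the deferred callbacks of an input/output buffer pair
 * so they can be finalized as a group (mirrors the use in
 * bufferevent_decref_and_unlock_() later in this diff). */
static int
gather_buffer_callbacks(struct evbuffer *in, struct evbuffer *out,
    struct event_callback **cbs, int max_cbs)
{
	int n = 0, r;

	r = evbuffer_get_callbacks_(in, cbs + n, max_cbs - n);
	if (r > 0)
		n += r;	/* each buffer contributes at most one callback */
	r = evbuffer_get_callbacks_(out, cbs + n, max_cbs - n);
	if (r > 0)
		n += r;
	return n;	/* callbacks that must drain before teardown */
}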
bufferevent-internal.h
@@ -252,8 +252,13 @@ struct bufferevent_ops {
 	 */
 	int (*disable)(struct bufferevent *, short);
 
+	/** Detatches the bufferevent from related data structures. Called as
+	 * soon as its reference count reaches 0. */
+	void (*unlink)(struct bufferevent *);
+
 	/** Free any storage and deallocate any extra data or structures used
-	    in this implementation.
+	    in this implementation. Called when the bufferevent is
+	    finalized.
 	 */
 	void (*destruct)(struct bufferevent *);
 
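The ops change above splits teardown into two phases: unlink runs the moment the reference count reaches 0, while other pending callbacks may still fire; destruct runs only once the bufferevent has been finalized and no callback can still be executing. A skeletal backend sketch under that reading (hypothetical be_example_* names, not from the commit):

/* Two-phase teardown contract for a hypothetical backend. */
static void
be_example_unlink(struct bufferevent *bev)
{
	/* Refcount just hit zero: detach from shared structures (partner
	 * pointers, rate-limit group, pending timeouts) so nothing else
	 * can reach this bufferevent, but do not free storage yet. */
}

static void
be_example_destruct(struct bufferevent *bev)
{
	/* Finalization complete: no callback can still be running, so it
	 * is now safe to release backend-private storage. */
}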
@@ -356,9 +361,6 @@ int bufferevent_add_event_(struct event *ev, const struct timeval *tv);
  * the other "generic_timeout" functions will work on it. Call this from
  * the constructor function. */
void bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev);
-/** Internal use: Delete the ev_read and ev_write callbacks if they're pending.
- * Call this from the destructor function. */
-int bufferevent_del_generic_timeout_cbs_(struct bufferevent *bev);
 /** Internal use: Add or delete the generic timeout events as appropriate.
  * (If an event is enabled and a timeout is set, we add the event. Otherwise
  * we delete it.) Call this from anything that changes the timeout values,
bufferevent.c
@@ -54,6 +54,7 @@
 #include "event2/bufferevent_struct.h"
 #include "event2/bufferevent_compat.h"
 #include "event2/event.h"
+#include "event-internal.h"
 #include "log-internal.h"
 #include "mm-internal.h"
 #include "bufferevent-internal.h"
@@ -61,7 +62,7 @@
 #include "util-internal.h"
 
-static void bufferevent_cancel_all_(struct bufferevent *bev);
+static void bufferevent_finalize_cb_(struct event_callback *evcb, void *arg_);
 
 void
 bufferevent_suspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what)
@@ -640,7 +641,9 @@ bufferevent_decref_and_unlock_(struct bufferevent *bufev)
 {
 	struct bufferevent_private *bufev_private =
 	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
-	struct bufferevent *underlying;
+	int n_cbs = 0;
+#define MAX_CBS 16
+	struct event_callback *cbs[MAX_CBS];
 
 	EVUTIL_ASSERT(bufev_private->refcnt > 0);
 
@@ -649,6 +652,41 @@ bufferevent_decref_and_unlock_(struct bufferevent *bufev)
 		return 0;
 	}
 
+	if (bufev->be_ops->unlink)
+		bufev->be_ops->unlink(bufev);
+
+	/* Okay, we're out of references. Let's finalize this once all the
+	 * callbacks are done running. */
+	cbs[0] = &bufev->ev_read.ev_evcallback;
+	cbs[1] = &bufev->ev_write.ev_evcallback;
+	cbs[2] = &bufev_private->deferred;
+	n_cbs = 3;
+	if (bufev_private->rate_limiting) {
+		struct event *e = &bufev_private->rate_limiting->refill_bucket_event;
+		if (event_initialized(e))
+			cbs[n_cbs++] = &e->ev_evcallback;
+	}
+	n_cbs += evbuffer_get_callbacks_(bufev->input, cbs+n_cbs, MAX_CBS-n_cbs);
+	n_cbs += evbuffer_get_callbacks_(bufev->output, cbs+n_cbs, MAX_CBS-n_cbs);
+
+	event_callback_finalize_many_(bufev->ev_base, n_cbs, cbs,
+	    bufferevent_finalize_cb_);
+
+#undef MAX_CBS
+	BEV_UNLOCK(bufev);
+
+	return 1;
+}
+
+static void
+bufferevent_finalize_cb_(struct event_callback *evcb, void *arg_)
+{
+	struct bufferevent *bufev = arg_;
+	struct bufferevent *underlying;
+	struct bufferevent_private *bufev_private =
+	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+
+	BEV_LOCK(bufev);
+	underlying = bufferevent_get_underlying(bufev);
+
 	/* Clean up the shared info */
@@ -665,17 +703,13 @@ bufferevent_decref_and_unlock_(struct bufferevent *bufev)
 	if (bufev_private->rate_limiting) {
 		if (bufev_private->rate_limiting->group)
 			bufferevent_remove_from_rate_limit_group_internal_(bufev,0);
-		if (event_initialized(&bufev_private->rate_limiting->refill_bucket_event))
-			event_del(&bufev_private->rate_limiting->refill_bucket_event);
 		event_debug_unassign(&bufev_private->rate_limiting->refill_bucket_event);
 		mm_free(bufev_private->rate_limiting);
 		bufev_private->rate_limiting = NULL;
 	}
 
-	event_debug_unassign(&bufev->ev_read);
-	event_debug_unassign(&bufev->ev_write);
 
 	BEV_UNLOCK(bufev);
 
 	if (bufev_private->own_lock)
 		EVTHREAD_FREE_LOCK(bufev_private->lock,
 		    EVTHREAD_LOCKTYPE_RECURSIVE);
@@ -695,8 +729,6 @@ bufferevent_decref_and_unlock_(struct bufferevent *bufev)
 	 */
 	if (underlying)
 		bufferevent_decref_(underlying);
-
-	return 1;
 }
 
 int
@@ -844,23 +876,12 @@ bufferevent_generic_write_timeout_cb(evutil_socket_t fd, short event, void *ctx)
 void
 bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev)
 {
-	evtimer_assign(&bev->ev_read, bev->ev_base,
+	event_assign(&bev->ev_read, bev->ev_base, -1, EV_FINALIZE,
 	    bufferevent_generic_read_timeout_cb, bev);
-	evtimer_assign(&bev->ev_write, bev->ev_base,
+	event_assign(&bev->ev_write, bev->ev_base, -1, EV_FINALIZE,
 	    bufferevent_generic_write_timeout_cb, bev);
 }
 
-int
-bufferevent_del_generic_timeout_cbs_(struct bufferevent *bev)
-{
-	int r1,r2;
-	r1 = event_del(&bev->ev_read);
-	r2 = event_del(&bev->ev_write);
-	if (r1<0 || r2<0)
-		return -1;
-	return 0;
-}
-
 int
 bufferevent_generic_adj_timeouts_(struct bufferevent *bev)
 {
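A note on the evtimer_assign() to event_assign() rewrites above: evtimer_assign(ev, b, cb, arg) is a macro for event_assign((ev), (b), -1, 0, (cb), (arg)), so it has no slot for flags. Spelling out event_assign() is what lets the commit pass the new EV_FINALIZE flag:

/* event2/event.h defines the timer shorthand roughly as: */
#define evtimer_assign(ev, b, cb, arg) \
	event_assign((ev), (b), -1, 0, (cb), (arg))

/* ... so the replacement is the same call with EV_FINALIZE added: */
event_assign(&bev->ev_read, bev->ev_base, -1, EV_FINALIZE,
    bufferevent_generic_read_timeout_cb, bev);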
bufferevent_async.c
@@ -93,6 +93,7 @@ const struct bufferevent_ops bufferevent_ops_async = {
 	evutil_offsetof(struct bufferevent_async, bev.bev),
 	be_async_enable,
 	be_async_disable,
+	NULL, /* Unlink */
 	be_async_destruct,
 	bufferevent_generic_adj_timeouts_,
 	be_async_flush,
@@ -384,11 +385,6 @@ be_async_destruct(struct bufferevent *bev)
 		/* XXXX possible double-close */
 		evutil_closesocket(fd);
 	}
-	/* delete this in case non-blocking connect was used */
-	if (event_initialized(&bev->ev_write)) {
-		event_del(&bev->ev_write);
-		bufferevent_del_generic_timeout_cbs_(bev);
-	}
 }
 
 /* GetQueuedCompletionStatus doesn't reliably yield WSA error codes, so
bufferevent_filter.c
@@ -61,6 +61,7 @@
 /* prototypes */
 static int be_filter_enable(struct bufferevent *, short);
 static int be_filter_disable(struct bufferevent *, short);
+static void be_filter_unlink(struct bufferevent *);
 static void be_filter_destruct(struct bufferevent *);
 
 static void be_filter_readcb(struct bufferevent *, void *);
@@ -99,6 +100,7 @@ const struct bufferevent_ops bufferevent_ops_filter = {
 	evutil_offsetof(struct bufferevent_filtered, bev.bev),
 	be_filter_enable,
 	be_filter_disable,
+	be_filter_unlink,
 	be_filter_destruct,
 	bufferevent_generic_adj_timeouts_,
 	be_filter_flush,
@@ -214,12 +216,10 @@ bufferevent_filter_new(struct bufferevent *underlying,
 }
 
 static void
-be_filter_destruct(struct bufferevent *bev)
+be_filter_unlink(struct bufferevent *bev)
 {
 	struct bufferevent_filtered *bevf = upcast(bev);
 	EVUTIL_ASSERT(bevf);
-	if (bevf->free_context)
-		bevf->free_context(bevf->context);
 
 	if (bevf->bev.options & BEV_OPT_CLOSE_ON_FREE) {
 		/* Yes, there is also a decref in bufferevent_decref_.
@@ -242,8 +242,15 @@ be_filter_destruct(struct bufferevent *bev)
 			    BEV_SUSPEND_FILT_READ);
 		}
 	}
+}
 
-	bufferevent_del_generic_timeout_cbs_(bev);
+static void
+be_filter_destruct(struct bufferevent *bev)
+{
+	struct bufferevent_filtered *bevf = upcast(bev);
+	EVUTIL_ASSERT(bevf);
+	if (bevf->free_context)
+		bevf->free_context(bevf->context);
 }
 
 static int
bufferevent_openssl.c
@@ -326,6 +326,7 @@ struct bufferevent_openssl {
 
 static int be_openssl_enable(struct bufferevent *, short);
 static int be_openssl_disable(struct bufferevent *, short);
+static void be_openssl_unlink(struct bufferevent *);
 static void be_openssl_destruct(struct bufferevent *);
 static int be_openssl_adj_timeouts(struct bufferevent *);
 static int be_openssl_flush(struct bufferevent *bufev,
@@ -337,6 +338,7 @@ const struct bufferevent_ops bufferevent_ops_openssl = {
 	evutil_offsetof(struct bufferevent_openssl, bev.bev),
 	be_openssl_enable,
 	be_openssl_disable,
+	be_openssl_unlink,
 	be_openssl_destruct,
 	be_openssl_adj_timeouts,
 	be_openssl_flush,
@@ -977,9 +979,11 @@ set_open_callbacks(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
 		event_del(&bev->ev_write);
 	}
 	event_assign(&bev->ev_read, bev->ev_base, fd,
-	    EV_READ|EV_PERSIST, be_openssl_readeventcb, bev_ssl);
+	    EV_READ|EV_PERSIST|EV_FINALIZE,
+	    be_openssl_readeventcb, bev_ssl);
 	event_assign(&bev->ev_write, bev->ev_base, fd,
-	    EV_WRITE|EV_PERSIST, be_openssl_writeeventcb, bev_ssl);
+	    EV_WRITE|EV_PERSIST|EV_FINALIZE,
+	    be_openssl_writeeventcb, bev_ssl);
 	if (rpending)
 		r1 = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
 	if (wpending)
@@ -1079,9 +1083,11 @@ set_handshake_callbacks(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
 		event_del(&bev->ev_write);
 	}
 	event_assign(&bev->ev_read, bev->ev_base, fd,
-	    EV_READ|EV_PERSIST, be_openssl_handshakeeventcb, bev_ssl);
+	    EV_READ|EV_PERSIST|EV_FINALIZE,
+	    be_openssl_handshakeeventcb, bev_ssl);
 	event_assign(&bev->ev_write, bev->ev_base, fd,
-	    EV_WRITE|EV_PERSIST, be_openssl_handshakeeventcb, bev_ssl);
+	    EV_WRITE|EV_PERSIST|EV_FINALIZE,
+	    be_openssl_handshakeeventcb, bev_ssl);
 	if (fd >= 0) {
 		r1 = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
 		r2 = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
@@ -1176,17 +1182,10 @@ be_openssl_disable(struct bufferevent *bev, short events)
 }
 
 static void
-be_openssl_destruct(struct bufferevent *bev)
+be_openssl_unlink(struct bufferevent *bev)
 {
 	struct bufferevent_openssl *bev_ssl = upcast(bev);
 
-	if (bev_ssl->underlying) {
-		bufferevent_del_generic_timeout_cbs_(bev);
-	} else {
-		event_del(&bev->ev_read);
-		event_del(&bev->ev_write);
-	}
-
 	if (bev_ssl->bev.options & BEV_OPT_CLOSE_ON_FREE) {
 		if (bev_ssl->underlying) {
 			if (BEV_UPCAST(bev_ssl->underlying)->refcnt < 2) {
@@ -1194,17 +1193,11 @@ be_openssl_destruct(struct bufferevent *bev)
 				    "bufferevent with too few references");
 			} else {
 				bufferevent_free(bev_ssl->underlying);
-				bev_ssl->underlying = NULL;
+				/* We still have a reference to it, via our
+				 * BIO. So we don't drop this. */
+				// bev_ssl->underlying = NULL;
 			}
-		} else {
-			evutil_socket_t fd = -1;
-			BIO *bio = SSL_get_wbio(bev_ssl->ssl);
-			if (bio)
-				fd = BIO_get_fd(bio, NULL);
-			if (fd >= 0)
-				evutil_closesocket(fd);
 		}
-		SSL_free(bev_ssl->ssl);
 	} else {
 		if (bev_ssl->underlying) {
 			if (bev_ssl->underlying->errorcb == be_openssl_eventcb)
@@ -1216,6 +1209,24 @@ be_openssl_destruct(struct bufferevent *bev)
 	}
 }
 
+static void
+be_openssl_destruct(struct bufferevent *bev)
+{
+	struct bufferevent_openssl *bev_ssl = upcast(bev);
+
+	if (bev_ssl->bev.options & BEV_OPT_CLOSE_ON_FREE) {
+		if (! bev_ssl->underlying) {
+			evutil_socket_t fd = -1;
+			BIO *bio = SSL_get_wbio(bev_ssl->ssl);
+			if (bio)
+				fd = BIO_get_fd(bio, NULL);
+			if (fd >= 0)
+				evutil_closesocket(fd);
+		}
+		SSL_free(bev_ssl->ssl);
+	}
+}
+
 static int
 be_openssl_adj_timeouts(struct bufferevent *bev)
 {
bufferevent_pair.c
@@ -267,7 +267,7 @@ be_pair_disable(struct bufferevent *bev, short events)
 }
 
 static void
-be_pair_destruct(struct bufferevent *bev)
+be_pair_unlink(struct bufferevent *bev)
 {
 	struct bufferevent_pair *bev_p = upcast(bev);
 
@@ -275,8 +275,6 @@ be_pair_destruct(struct bufferevent *bev)
 		bev_p->partner->partner = NULL;
 		bev_p->partner = NULL;
 	}
-
-	bufferevent_del_generic_timeout_cbs_(bev);
 }
 
 static int
@@ -327,7 +325,8 @@ const struct bufferevent_ops bufferevent_ops_pair = {
 	evutil_offsetof(struct bufferevent_pair, bev.bev),
 	be_pair_enable,
 	be_pair_disable,
-	be_pair_destruct,
+	be_pair_unlink,
+	NULL, /* be_pair_destruct, */
 	bufferevent_generic_adj_timeouts_,
 	be_pair_flush,
 	NULL, /* ctrl */
bufferevent_ratelim.c
@@ -609,8 +609,8 @@ bufferevent_set_rate_limit(struct bufferevent *bev,
 		EVUTIL_ASSERT(event_initialized(&rlim->refill_bucket_event));
 		event_del(&rlim->refill_bucket_event);
 	}
-	evtimer_assign(&rlim->refill_bucket_event, bev->ev_base,
-	    bev_refill_callback_, bevp);
+	event_assign(&rlim->refill_bucket_event, bev->ev_base,
+	    -1, EV_FINALIZE, bev_refill_callback_, bevp);
 
 	if (rlim->limit.read_limit > 0) {
 		bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW);
@@ -654,7 +654,7 @@ bufferevent_rate_limit_group_new(struct event_base *base,
 
 	ev_token_bucket_init_(&g->rate_limit, cfg, tick, 0);
 
-	event_assign(&g->master_refill_event, base, -1, EV_PERSIST,
+	event_assign(&g->master_refill_event, base, -1, EV_PERSIST|EV_FINALIZE,
 	    bev_group_refill_callback_, g);
 	/*XXXX handle event_add failure */
 	event_add(&g->master_refill_event, &cfg->tick_timeout);
@@ -748,8 +748,8 @@ bufferevent_add_to_rate_limit_group(struct bufferevent *bev,
 			BEV_UNLOCK(bev);
 			return -1;
 		}
-		evtimer_assign(&rlim->refill_bucket_event, bev->ev_base,
-		    bev_refill_callback_, bevp);
+		event_assign(&rlim->refill_bucket_event, bev->ev_base,
+		    -1, EV_FINALIZE, bev_refill_callback_, bevp);
 		bevp->rate_limiting = rlim;
 	}
 
bufferevent_sock.c
@@ -90,6 +90,7 @@ const struct bufferevent_ops bufferevent_ops_socket = {
 	evutil_offsetof(struct bufferevent_private, bev),
 	be_socket_enable,
 	be_socket_disable,
+	NULL, /* unlink */
 	be_socket_destruct,
 	be_socket_adj_timeouts,
 	be_socket_flush,
@@ -338,9 +339,9 @@ bufferevent_socket_new(struct event_base *base, evutil_socket_t fd,
 		evbuffer_set_flags(bufev->output, EVBUFFER_FLAG_DRAINS_TO_FD);
 
 	event_assign(&bufev->ev_read, bufev->ev_base, fd,
-	    EV_READ|EV_PERSIST, bufferevent_readcb, bufev);
+	    EV_READ|EV_PERSIST|EV_FINALIZE, bufferevent_readcb, bufev);
 	event_assign(&bufev->ev_write, bufev->ev_base, fd,
-	    EV_WRITE|EV_PERSIST, bufferevent_writecb, bufev);
+	    EV_WRITE|EV_PERSIST|EV_FINALIZE, bufferevent_writecb, bufev);
 
 	evbuffer_add_cb(bufev->output, bufferevent_socket_outbuf_cb, bufev);
 
@@ -399,7 +400,7 @@ bufferevent_socket_connect(struct bufferevent *bev,
 		 * on a non-blocking connect() when ConnectEx() is unavailable. */
 		if (BEV_IS_ASYNC(bev)) {
 			event_assign(&bev->ev_write, bev->ev_base, fd,
-			    EV_WRITE|EV_PERSIST, bufferevent_writecb, bev);
+			    EV_WRITE|EV_PERSIST|EV_FINALIZE, bufferevent_writecb, bev);
 		}
 #endif
 		bufferevent_setfd(bev, fd);
@@ -589,9 +590,6 @@ be_socket_destruct(struct bufferevent *bufev)
 
 	fd = event_get_fd(&bufev->ev_read);
 
-	event_del(&bufev->ev_read);
-	event_del(&bufev->ev_write);
-
 	if ((bufev_p->options & BEV_OPT_CLOSE_ON_FREE) && fd >= 0)
 		EVUTIL_CLOSESOCKET(fd);
 }
@@ -637,9 +635,9 @@ be_socket_setfd(struct bufferevent *bufev, evutil_socket_t fd)
 	event_del(&bufev->ev_write);
 
 	event_assign(&bufev->ev_read, bufev->ev_base, fd,
-	    EV_READ|EV_PERSIST, bufferevent_readcb, bufev);
+	    EV_READ|EV_PERSIST|EV_FINALIZE, bufferevent_readcb, bufev);
 	event_assign(&bufev->ev_write, bufev->ev_base, fd,
-	    EV_WRITE|EV_PERSIST, bufferevent_writecb, bufev);
+	    EV_WRITE|EV_PERSIST|EV_FINALIZE, bufferevent_writecb, bufev);
 
 	if (fd >= 0)
 		bufferevent_enable(bufev, bufev->enabled);
evbuffer-internal.h
@@ -327,6 +327,11 @@ void evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev);
 
 void evbuffer_invoke_callbacks_(struct evbuffer *buf);
 
+
+int evbuffer_get_callbacks_(struct evbuffer *buffer,
+    struct event_callback **cbs,
+    int max_cbs);
+
 #ifdef __cplusplus
 }
 #endif
event-internal.h
@@ -59,11 +59,28 @@ extern "C" {
 #define ev_callback ev_evcallback.evcb_cb_union.evcb_callback
 #define ev_arg ev_evcallback.evcb_arg
 
-/* Possible values for evcb_closure in struct event_callback */
+/** @name Event closure codes
+
+    Possible values for evcb_closure in struct event_callback
+
+    @{
+ */
+/** A regular event. Uses the evcb_callback callback */
 #define EV_CLOSURE_EVENT 0
+/** A signal event. Uses the evcb_callback callback */
 #define EV_CLOSURE_EVENT_SIGNAL 1
+/** A persistent non-signal event. Uses the evcb_callback callback */
 #define EV_CLOSURE_EVENT_PERSIST 2
+/** A simple callback. Uses the evcb_selfcb callback. */
 #define EV_CLOSURE_CB_SELF 3
+/** A finalizing callback. Uses the evcb_cbfinalize callback. */
+#define EV_CLOSURE_CB_FINALIZE 4
+/** A finalizing event. Uses the evcb_evfinalize callback. */
+#define EV_CLOSURE_EVENT_FINALIZE 5
+/** A finalizing event that should get freed after. Uses the evcb_evfinalize
+ * callback. */
+#define EV_CLOSURE_EVENT_FINALIZE_FREE 6
+/** @} */
 
 /** Structure to define the backend of a given event_base. */
 struct eventop {
@@ -382,7 +399,21 @@ int evsig_restore_handler_(struct event_base *base, int evsignal);
 
 int event_add_nolock_(struct event *ev,
     const struct timeval *tv, int tv_is_absolute);
-int event_del_nolock_(struct event *ev);
+/** Argument for event_del_nolock_. Tells event_del not to block on the event
+ * if it's running in another thread. */
+#define EVENT_DEL_NOBLOCK 0
+/** Argument for event_del_nolock_. Tells event_del to block on the event
+ * if it's running in another thread, regardless of its value for EV_FINALIZE
+ */
+#define EVENT_DEL_BLOCK 1
+/** Argument for event_del_nolock_. Tells event_del to block on the event
+ * if it is running in another thread and it doesn't have EV_FINALIZE set.
+ */
+#define EVENT_DEL_AUTOBLOCK 2
+/** Argument for event_del_nolock_. Tells event_del to procede even if the
+ * event is set up for finalization rather for regular use.*/
+#define EVENT_DEL_EVEN_IF_FINALIZING 3
+int event_del_nolock_(struct event *ev, int blocking);
 int event_remove_timer_nolock_(struct event *ev);
 
 void event_active_nolock_(struct event *ev, int res, short count);
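A compact reading of the four modes, as a sketch (not committed code; event_del_nolock_() must be called with th_base_lock held):

/* Sketch: choosing a blocking mode for event_del_nolock_().
 *   EVENT_DEL_NOBLOCK            never wait for a running callback
 *   EVENT_DEL_BLOCK              always wait, even if EV_FINALIZE is set
 *   EVENT_DEL_AUTOBLOCK          wait unless the event has EV_FINALIZE
 *   EVENT_DEL_EVEN_IF_FINALIZING proceed even during finalization
 */
static int
del_for_teardown(struct event *ev, int called_from_callback)
{
	if (called_from_callback)
		/* Waiting on ourselves would self-deadlock. */
		return event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	return event_del_nolock_(ev, EVENT_DEL_AUTOBLOCK);
}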
@@ -391,12 +422,17 @@ int event_callback_activate_nolock_(struct event_base *, struct event_callback *
 int event_callback_cancel_(struct event_base *base,
     struct event_callback *evcb);
 
+void event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
+void event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
+int event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcb, void (*cb)(struct event_callback *, void *));
+
+
 void event_active_later_(struct event *ev, int res);
 void event_active_later_nolock_(struct event *ev, int res);
 void event_callback_activate_later_nolock_(struct event_base *base,
     struct event_callback *evcb);
 int event_callback_cancel_nolock_(struct event_base *base,
-    struct event_callback *evcb);
+    struct event_callback *evcb, int even_if_finalizing);
 void event_callback_init_(struct event_base *base,
     struct event_callback *cb);
 
290	event.c
@@ -142,7 +142,7 @@ static void event_queue_remove_inserted(struct event_base *, struct event *);
 static void event_queue_make_later_events_active(struct event_base *base);
 
 static int evthread_make_base_notifiable_nolock_(struct event_base *base);
-
+static int event_del_(struct event *ev, int blocking);
 
 #ifdef USE_REINSERT_TIMEOUT
 /* This code seems buggy; only turn it on if we find out what the trouble is. */
@@ -707,8 +707,46 @@ event_base_stop_iocp_(struct event_base *base)
 #endif
 }
 
-void
-event_base_free(struct event_base *base)
+static int
+event_base_cancel_single_callback_(struct event_base *base,
+    struct event_callback *evcb,
+    int run_finalizers)
+{
+	int result = 0;
+
+	if (evcb->evcb_flags & EVLIST_INIT) {
+		struct event *ev = event_callback_to_event(evcb);
+		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
+			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
+			result = 1;
+		}
+	} else {
+		event_callback_cancel_nolock_(base, evcb, 1);
+		result = 1;
+	}
+
+	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
+		switch (evcb->evcb_closure) {
+		case EV_CLOSURE_EVENT_FINALIZE:
+		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
+			struct event *ev = event_callback_to_event(evcb);
+			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
+			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
+				mm_free(ev);
+			break;
+		}
+		case EV_CLOSURE_CB_FINALIZE:
+			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
+			break;
+		default:
+			break;
+		}
+	}
+	return result;
+}
+
+static void
+event_base_free_(struct event_base *base, int run_finalizers)
 {
 	int i, n_deleted=0;
 	struct event *ev;
@@ -719,9 +757,6 @@ event_base_free(struct event_base *base)
 	 * made it with event_init and forgot to hold a reference to it. */
 	if (base == NULL && current_base)
 		base = current_base;
-	/* If we're freeing current_base, there won't be a current_base. */
-	if (base == current_base)
-		current_base = NULL;
 	/* Don't actually free NULL. */
 	if (base == NULL) {
 		event_warnx("%s: no base to free", __func__);
@@ -774,30 +809,14 @@ event_base_free(struct event_base *base)
 		struct event_callback *evcb, *next;
 		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
 			next = TAILQ_NEXT(evcb, evcb_active_next);
-			if (evcb->evcb_flags & EVLIST_INIT) {
-				ev = event_callback_to_event(evcb);
-				if (!(ev->ev_flags & EVLIST_INTERNAL)) {
-					event_del(ev);
-					++n_deleted;
-				}
-			} else {
-				event_callback_cancel_(base, evcb);
-				++n_deleted;
-			}
+			n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
 			evcb = next;
 		}
 	}
 	{
 		struct event_callback *evcb;
 		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
-			if (evcb->evcb_flags & EVLIST_INIT) {
-				ev = event_callback_to_event(evcb);
-				event_del(ev);
-				++n_deleted;
-			} else {
-				event_callback_cancel_(base, evcb);
-				++n_deleted;
-			}
+			n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
 		}
 	}
 
@@ -830,9 +849,24 @@ event_base_free(struct event_base *base)
 	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
 	EVTHREAD_FREE_COND(base->current_event_cond);
 
+	/* If we're freeing current_base, there won't be a current_base. */
+	if (base == current_base)
+		current_base = NULL;
 	mm_free(base);
 }
 
+void
+event_base_free_nofinalize(struct event_base *base)
+{
+	event_base_free_(base, 0);
+}
+
+void
+event_base_free(struct event_base *base)
+{
+	event_base_free_(base, 1);
+}
+
 /* Fake eventop; used to disable the backend temporarily inside event_reinit
  * so that we can call event_del() on an event without telling the backend.
  */
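The refactor gives shutdown two public entry points; a short usage sketch (the functions are the real API from this commit; the forked-child motivation is an assumption, not stated in the diff):

#include <event2/event.h>

/* Sketch: the two shutdown paths. */
static void
shutdown_base(struct event_base *base, int run_finalizers)
{
	if (run_finalizers)
		event_base_free(base);	/* pending finalizer callbacks are invoked */
	else
		/* e.g. a forked child that must not touch shared resources */
		event_base_free_nofinalize(base);
}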
@@ -885,7 +919,7 @@ event_reinit(struct event_base *base)
 	 * random.
 	 */
 	if (base->sig.ev_signal_added) {
-		event_del_nolock_(&base->sig.ev_signal);
+		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
 		event_debug_unassign(&base->sig.ev_signal);
 		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
 		if (base->sig.ev_signal_pair[0] != -1)
@@ -900,7 +934,7 @@ event_reinit(struct event_base *base)
 		base->th_notify_fn = NULL;
 	}
 	if (base->th_notify_fd[0] != -1) {
-		event_del_nolock_(&base->th_notify);
+		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
 		if (base->th_notify_fd[1] != -1)
 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
@@ -1279,7 +1313,7 @@ common_timeout_callback(evutil_socket_t fd, short what, void *arg)
 		    (ev->ev_timeout.tv_sec == now.tv_sec &&
 			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
 			break;
-		event_del_nolock_(ev);
+		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
 		event_active_nolock_(ev, EV_TIMEOUT, 1);
 	}
 	if (ev)
@@ -1434,10 +1468,10 @@ event_process_active_single_queue(struct event_base *base,
 		if (evcb->evcb_flags & EVLIST_INIT) {
 			ev = event_callback_to_event(evcb);
 
-			if (ev->ev_events & EV_PERSIST)
+			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
 				event_queue_remove_active(base, evcb);
 			else
-				event_del_nolock_(ev);
+				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
 			event_debug((
 			    "event_process_active: event: %p, %s%scall %p",
 			    ev,
@@ -1476,6 +1510,23 @@ event_process_active_single_queue(struct event_base *base,
 		EVBASE_RELEASE_LOCK(base, th_base_lock);
 		evcb->evcb_cb_union.evcb_selfcb(evcb, evcb->evcb_arg);
 		break;
+	case EV_CLOSURE_EVENT_FINALIZE:
+	case EV_CLOSURE_EVENT_FINALIZE_FREE:
+		base->current_event = NULL;
+		EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
+		EVBASE_RELEASE_LOCK(base, th_base_lock);
+		ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
+		if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
+			mm_free(ev);
+		event_debug_note_teardown_(ev);
+		break;
+	case EV_CLOSURE_CB_FINALIZE:
+		base->current_event = NULL;
+		EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
+		EVBASE_RELEASE_LOCK(base, th_base_lock);
+		base->current_event = NULL;
+		evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
+		break;
 	default:
 		EVUTIL_ASSERT(0);
 	}
@@ -1823,7 +1874,7 @@ event_base_once(struct event_base *base, evutil_socket_t fd, short events,
 	eonce->cb = callback;
 	eonce->arg = arg;
 
-	if (events == EV_TIMEOUT) {
+	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE)) == EV_TIMEOUT) {
 		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
 
 		if (tv == NULL || ! evutil_timerisset(tv)) {
@@ -1973,7 +2024,9 @@ event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(
 void
 event_free(struct event *ev)
 {
-	event_debug_assert_is_setup_(ev);
+	/* This is disabled, so that events which have been finalized be a
+	 * valid target for event_free(). That's */
+	// event_debug_assert_is_setup_(ev);
 
 	/* make sure that this event won't be coming back to haunt us. */
 	event_del(ev);
@@ -1991,6 +2044,111 @@ event_debug_unassign(struct event *ev)
 	ev->ev_flags &= ~EVLIST_INIT;
 }
 
+#define EVENT_FINALIZE_FREE_ 0x10000
+static int
+event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
+	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
+
+	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+	ev->ev_closure = closure;
+	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
+	event_active_nolock_(ev, EV_FINALIZE, 1);
+	ev->ev_flags |= EVLIST_FINALIZING;
+	return 0;
+}
+
+static int
+event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+	int r;
+	struct event_base *base = ev->ev_base;
+	if (EVUTIL_FAILURE_CHECK(!base)) {
+		event_warnx("%s: event has no event_base set.", __func__);
+		return -1;
+	}
+
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	r = event_finalize_nolock_(base, flags, ev, cb);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return r;
+}
+
+int
+event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+	return event_finalize_impl_(flags, ev, cb);
+}
+
+int
+event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
+}
+
+void
+event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
+{
+	struct event *ev = NULL;
+	if (evcb->evcb_flags & EVLIST_INIT) {
+		ev = event_callback_to_event(evcb);
+		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+	} else {
+		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
+	}
+
+	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
+	evcb->evcb_cb_union.evcb_cbfinalize = cb;
+	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
+	evcb->evcb_flags |= EVLIST_FINALIZING;
+}
+
+void
+event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
+{
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	event_callback_finalize_nolock_(base, flags, evcb, cb);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
+ * callback will be invoked on *one of them*, after they have *all* been
+ * finalized. */
+int
+event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
+{
+	int n_pending = 0, i;
+
+	if (base == NULL)
+		base = current_base;
+
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+	event_debug(("%s: %d events finalizing", __func__, n_cbs));
+
+	/* At most one can be currently executing; the rest we just
+	 * cancel... But we always make sure that the finalize callback
+	 * runs. */
+	for (i = 0; i < n_cbs; ++i) {
+		struct event_callback *evcb = evcbs[i];
+		if (evcb == base->current_event) {
+			event_callback_finalize_nolock_(base, 0, evcb, cb);
+			++n_pending;
+		} else {
+			event_callback_cancel_nolock_(base, evcb, 0);
+		}
+	}
+
+	if (n_pending == 0) {
+		/* Just do the first one. */
+		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
+	}
+
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return 0;
+}
+
 /*
  * Set's the priority of an event - if an event is already scheduled
  * changing the priority is going to fail.
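Restating the event_callback_finalize_many_() contract above: every callback in the array is canceled or redirected to finalization, and the supplied cleanup function runs exactly once, on whichever callback is currently executing (or on the first entry if none is). A hedged usage sketch follows; it assumes every callback was created with the same owner pointer as its argument, as the bufferevent code earlier in this diff arranges (struct my_owner and its field names are hypothetical):

/* Sketch (internal API): free one owner struct only after all of its
 * callbacks are guaranteed quiescent. */
static void
owner_finalize_cb(struct event_callback *evcb, void *arg)
{
	struct my_owner *owner = arg;	/* the evcb_arg of every registered cb */
	mm_free(owner);			/* safe: nothing can run anymore */
}

static void
owner_teardown(struct event_base *base, struct my_owner *owner)
{
	struct event_callback *cbs[3];
	cbs[0] = &owner->ev_a.ev_evcallback;
	cbs[1] = &owner->ev_b.ev_evcallback;
	cbs[2] = &owner->deferred;
	event_callback_finalize_many_(base, 3, cbs, owner_finalize_cb);
}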
@@ -2259,6 +2417,11 @@ event_add_nolock_(struct event *ev, const struct timeval *tv,
 
 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
 
+	if (ev->ev_flags & EVLIST_FINALIZING) {
+		/* XXXX debug */
+		return (-1);
+	}
+
 	/*
 	 * prepare for timeout insertion further below, if we get a
 	 * failure on any step, we should not change any state.
@@ -2403,8 +2566,8 @@ event_add_nolock_(struct event *ev, const struct timeval *tv,
 	return (res);
 }
 
-int
-event_del(struct event *ev)
+static int
+event_del_(struct event *ev, int blocking)
 {
 	int res;
 
@@ -2415,16 +2578,38 @@ event_del(struct event *ev)
 
 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
 
-	res = event_del_nolock_(ev);
+	res = event_del_nolock_(ev, blocking);
 
 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
 
 	return (res);
 }
 
-/* Helper for event_del: always called with th_base_lock held. */
-int
-event_del_nolock_(struct event *ev)
+int
+event_del(struct event *ev)
+{
+	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
+}
+
+int
+event_del_block(struct event *ev)
+{
+	return event_del_(ev, EVENT_DEL_BLOCK);
+}
+
+int
+event_del_noblock(struct event *ev)
+{
+	return event_del_(ev, EVENT_DEL_NOBLOCK);
+}
+
+/** Helper for event_del: always called with th_base_lock held.
+ *
+ * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
+ * EVEN_IF_FINALIZING} values. See those for more information.
+ */
+int
+event_del_nolock_(struct event *ev, int blocking)
 {
 	struct event_base *base;
 	int res = 0, notify = 0;
@@ -2438,6 +2623,13 @@ event_del_nolock_(struct event *ev)
 
 	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
 
+	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
+		if (ev->ev_flags & EVLIST_FINALIZING) {
+			/* XXXX Debug */
+			return 0;
+		}
+	}
+
 	/* If the main thread is currently executing this event's callback,
 	 * and we are not the main thread, then we want to wait until the
 	 * callback is done before we start removing the event. That way,
@@ -2445,8 +2637,10 @@ event_del_nolock_(struct event *ev)
 	 * user-supplied argument. */
 	base = ev->ev_base;
 #ifndef EVENT__DISABLE_THREAD_SUPPORT
-	if (base->current_event == event_to_event_callback(ev) &&
-	    !EVBASE_IN_THREAD(base)) {
+	if (blocking != EVENT_DEL_NOBLOCK &&
+	    base->current_event == event_to_event_callback(ev) &&
+	    !EVBASE_IN_THREAD(base) &&
+	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
 		++base->current_event_waiters;
 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
 	}
@@ -2529,6 +2723,11 @@ event_active_nolock_(struct event *ev, int res, short ncalls)
 	base = ev->ev_base;
 	EVENT_BASE_ASSERT_LOCKED(base);
 
+	if (ev->ev_flags & EVLIST_FINALIZING) {
+		/* XXXX debug */
+		return;
+	}
+
 	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
 	default:
 	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
@@ -2606,6 +2805,9 @@ event_callback_activate_nolock_(struct event_base *base,
 {
 	int r = 1;
 
+	if (evcb->evcb_flags & EVLIST_FINALIZING)
+		return 0;
+
 	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
 	default:
 		EVUTIL_ASSERT(0);
@@ -2653,17 +2855,21 @@ event_callback_cancel_(struct event_base *base,
 {
 	int r;
 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
-	r = event_callback_cancel_nolock_(base, evcb);
+	r = event_callback_cancel_nolock_(base, evcb, 0);
 	EVBASE_RELEASE_LOCK(base, th_base_lock);
 	return r;
 }
 
 int
 event_callback_cancel_nolock_(struct event_base *base,
-    struct event_callback *evcb)
+    struct event_callback *evcb, int even_if_finalizing)
 {
+	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
+		return 0;
+
 	if (evcb->evcb_flags & EVLIST_INIT)
-		return event_del_nolock_(event_callback_to_event(evcb));
+		return event_del_nolock_(event_callback_to_event(evcb),
+		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
 
 	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
 	default:
@@ -2784,7 +2990,7 @@ timeout_process(struct event_base *base)
 			break;
 
 		/* delete this event from the I/O queues */
-		event_del_nolock_(ev);
+		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
 
 		event_debug(("timeout_process: event: %p, call %p",
 		    ev, ev->ev_callback));
include/event2/event.h
@@ -599,10 +599,18 @@ struct event_base *event_base_new_with_config(const struct event_config *);
   Note that this function will not close any fds or free any memory passed
   to event_new as the argument to callback.
 
+  If there are any pending finalizer callbacks, this function will invoke
+  them.
+
   @param eb an event_base to be freed
  */
 void event_base_free(struct event_base *);
 
+/**
+   As event_free, but do not run finalizers.
+ */
+void event_base_free_nofinalize(struct event_base *);
+
 /** @name Log severities
  */
 /**@{*/
@@ -829,7 +837,16 @@ int event_base_got_break(struct event_base *);
  */
 #define EV_PERSIST 0x10
 /** Select edge-triggered behavior, if supported by the backend. */
 #define EV_ET 0x20
+/**
+ * If this option is provided, then event_del() will not block in one thread
+ * while waiting for the event callback to complete in another thread.
+ *
+ * To use this option safely, you may need to use event_finalize() or
+ * event_free_finalize() in order to safely tear down an event in a
+ * multithreaded application. See those functions for more information.
+ **/
+#define EV_FINALIZE 0x40
 /**@}*/
 
 /**
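A usage sketch for the new flag (public API; on_read and the surrounding names are illustrative):

#include <event2/event.h>

static void
on_read(evutil_socket_t fd, short what, void *arg)
{
	/* handle I/O; may run concurrently with a teardown elsewhere */
}

/* With EV_FINALIZE, event_del() from another thread will not block
 * waiting for on_read() to finish; pair this with event_finalize() or
 * event_free_finalize() at teardown (documented further below). */
static struct event *
make_event(struct event_base *base, evutil_socket_t fd, void *arg)
{
	return event_new(base, fd, EV_READ|EV_PERSIST|EV_FINALIZE,
	    on_read, arg);
}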
@@ -998,6 +1015,45 @@ int event_assign(struct event *, struct event_base *, evutil_socket_t, short, ev
  */
 void event_free(struct event *);
 
+/**
+   Callback type for event_finalize and event_free_finalize().
+ **/
+typedef void (*event_finalize_callback_fn)(struct event *, void *);
+/**
+   @name Finalization functions
+
+   These functions are used to safely tear down an event in a multithreaded
+   application.  If you construct your events with EV_FINALIZE to avoid
+   deadlocks, you will need a way to remove an event in the certainty that
+   it will definitely not be running its callback when you deallocate it
+   and its callback argument.
+
+   To do this, call one of event_finalize() or event_free_finalize with
+   0 for its first argument, the event to tear down as its second argument,
+   and a callback function as its third argument.  The callback will be
+   invoked as part of the event loop, with the event's priority.
+
+   After you call a finalizer function, event_add() and event_active() will
+   no longer work on the event, and event_del() will produce a no-op. You
+   must not try to change the event's fields with event_assign() or
+   event_set() while the finalize callback is in progress. Once the
+   callback has been invoked, you should treat the event structure as
+   containing uninitialized memory.
+
+   The event_free_finalize() function frees the event after it's finalized;
+   event_finalize() does not.
+
+   A finalizer callback must not make events pending or active.  It must not
+   add events, activate events, or attempt to "resucitate" the event being
+   finalized in any way.
+
+   @return 0 on succes, -1 on failure.
+ */
+/**@{*/
+int event_finalize(unsigned, struct event *, event_finalize_callback_fn);
+int event_free_finalize(unsigned, struct event *, event_finalize_callback_fn);
+/**@}*/
+
 /**
   Schedule a one-time event
 
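Pulling the documentation together, a minimal end-to-end sketch (the functions are the public API added by this commit; struct conn and its names are illustrative):

#include <event2/event.h>
#include <stdlib.h>

struct conn {
	struct event *ev;
	/* ... other per-connection state ... */
};

static void
conn_finalize_cb(struct event *ev, void *arg)
{
	struct conn *c = arg;
	/* Runs inside the event loop once ev's callback can no longer be
	 * executing anywhere; event_free_finalize() frees ev itself. */
	free(c);
}

static void
conn_close(struct conn *c)
{
	/* Instead of event_del() + event_free(), which could block on, or
	 * race with, a callback running in another thread: */
	event_free_finalize(0, c->ev, conn_finalize_cb);
}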
@@ -1071,6 +1127,18 @@ int event_remove_timer(struct event *ev);
  */
 int event_del(struct event *);
 
+/**
+   As event_del(), but never blocks while the event's callback is running
+   in another thread, even if the event was constructed without the
+   EV_FINALIZE flag.
+ */
+int event_del_noblock(struct event *ev);
+/**
+   As event_del(), but always blocks while the event's callback is running
+   in another thread, even if the event was constructed with the
+   EV_FINALIZE flag.
+ */
+int event_del_block(struct event *ev);
 
 /**
   Make an event active.
include/event2/event_struct.h
@@ -60,9 +60,10 @@ extern "C" {
 #define EVLIST_ACTIVE       0x08
 #define EVLIST_INTERNAL     0x10
 #define EVLIST_ACTIVE_LATER 0x20
+#define EVLIST_FINALIZING   0x40
 #define EVLIST_INIT         0x80
 
-#define EVLIST_ALL          0xbf
+#define EVLIST_ALL          0xff
 
 /* Fix so that people don't have to run with <sys/queue.h> */
 #ifndef TAILQ_ENTRY
@@ -101,6 +102,8 @@ struct name { \
 }
 #endif /* !LIST_HEAD */
 
+struct event;
+
 struct event_callback {
 	TAILQ_ENTRY(event_callback) evcb_active_next;
 	short evcb_flags;
@@ -108,8 +111,10 @@ struct event_callback {
 	ev_uint8_t evcb_closure;
 	/* allows us to adopt for different types of events */
 	union {
-		void (*evcb_callback)(evutil_socket_t, short, void *arg);
-		void (*evcb_selfcb)(struct event_callback *, void *arg);
+		void (*evcb_callback)(evutil_socket_t, short, void *);
+		void (*evcb_selfcb)(struct event_callback *, void *);
+		void (*evcb_evfinalize)(struct event *, void *);
+		void (*evcb_cbfinalize)(struct event_callback *, void *);
 	} evcb_cb_union;
 	void *evcb_arg;
 };
test/include.am
@@ -36,6 +36,7 @@ endif
 
 noinst_HEADERS+= \
 	test/regress.h \
+	test/regress_thread.h \
 	test/tinytest.h \
 	test/tinytest_local.h \
 	test/tinytest_macros.h
@@ -78,6 +79,7 @@ test_regress_SOURCES = \
 	test/regress_bufferevent.c \
 	test/regress_dns.c \
 	test/regress_et.c \
+	test/regress_finalize.c \
 	test/regress_http.c \
 	test/regress_listener.c \
 	test/regress_main.c \
test/regress.h
@@ -37,6 +37,7 @@ extern "C" {
 extern struct testcase_t main_testcases[];
 extern struct testcase_t evtag_testcases[];
 extern struct testcase_t evbuffer_testcases[];
+extern struct testcase_t finalize_testcases[];
 extern struct testcase_t bufferevent_testcases[];
 extern struct testcase_t bufferevent_iocp_testcases[];
 extern struct testcase_t util_testcases[];
test/regress_bufferevent.c
@@ -420,6 +420,7 @@ sender_writecb(struct bufferevent *bev, void *ctx)
 {
 	if (evbuffer_get_length(bufferevent_get_output(bev)) == 0) {
 		bufferevent_disable(bev,EV_READ|EV_WRITE);
+		TT_BLATHER(("Flushed %d: freeing it.", (int)bufferevent_getfd(bev)));
 		bufferevent_free(bev);
 	}
 }
@@ -462,6 +463,7 @@ reader_eventcb(struct bufferevent *bev, short what, void *ctx)
 		return;
 	}
 	if (what & BEV_EVENT_CONNECTED) {
+		TT_BLATHER(("connected on %d", (int)bufferevent_getfd(bev)));
 		bufferevent_enable(bev, EV_READ);
 	}
 	if (what & BEV_EVENT_EOF) {
@@ -472,6 +474,8 @@ reader_eventcb(struct bufferevent *bev, short what, void *ctx)
 		tt_str_op(buf, ==, TEST_STR);
 		if (++n_strings_read == 2)
 			event_base_loopexit(base, NULL);
+		TT_BLATHER(("EOF on %d: %d strings read.",
+			(int)bufferevent_getfd(bev), n_strings_read));
 	}
 end:
 	;
@@ -480,6 +484,7 @@ end:
 static void
 reader_readcb(struct bufferevent *bev, void *ctx)
 {
+	TT_BLATHER(("Read invoked on %d.", (int)bufferevent_getfd(bev)));
 	n_reads_invoked++;
 }
 
340	test/regress_finalize.c (new file; every line added)
@@ -0,0 +1,340 @@
/*
 * Copyright (c) 2013 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "tinytest.h"
#include "tinytest_macros.h"
#include <stdlib.h>

#include "event2/event.h"
#include "event2/util.h"
#include "event-internal.h"
#include "defer-internal.h"

#include "regress.h"
#include "regress_thread.h"

static void
timer_callback(evutil_socket_t fd, short what, void *arg)
{
	int *int_arg = arg;
	*int_arg += 1;
	(void)fd;
	(void)what;
}
static void
simple_callback(struct event_callback *evcb, void *arg)
{
	int *int_arg = arg;
	*int_arg += 1;
	(void)evcb;
}
static void
event_finalize_callback_1(struct event *ev, void *arg)
{
	int *int_arg = arg;
	*int_arg += 100;
	(void)ev;
}
static void
callback_finalize_callback_1(struct event_callback *evcb, void *arg)
{
	int *int_arg = arg;
	*int_arg += 100;
	(void)evcb;
}

static void
test_fin_cb_invoked(void *arg)
{
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;

	struct event *ev;
	struct event ev2;
	struct event_callback evcb;
	int cb_called = 0;
	int ev_called = 0;

	const struct timeval ten_sec = {10,0};

	event_deferred_cb_init_(&evcb, 0, simple_callback, &cb_called);
	ev = evtimer_new(base, timer_callback, &ev_called);
	/* Just finalize them; don't bother adding. */
	event_free_finalize(0, ev, event_finalize_callback_1);
	event_callback_finalize_(base, 0, &evcb, callback_finalize_callback_1);

	event_base_dispatch(base);

	tt_int_op(cb_called, ==, 100);
	tt_int_op(ev_called, ==, 100);

	ev_called = cb_called = 0;
	event_base_assert_ok_(base);

	/* Now try it when they're active. (actually, don't finalize: make
	 * sure activation can happen! */
	ev = evtimer_new(base, timer_callback, &ev_called);
	event_deferred_cb_init_(&evcb, 0, simple_callback, &cb_called);

	event_active(ev, EV_TIMEOUT, 1);
	event_callback_activate_(base, &evcb);

	event_base_dispatch(base);
	tt_int_op(cb_called, ==, 1);
	tt_int_op(ev_called, ==, 1);

	ev_called = cb_called = 0;
	event_base_assert_ok_(base);

	/* Great, it worked. Now activate and finalize and make sure only
	 * finalizing happens. */
	event_active(ev, EV_TIMEOUT, 1);
	event_callback_activate_(base, &evcb);
	event_free_finalize(0, ev, event_finalize_callback_1);
	event_callback_finalize_(base, 0, &evcb, callback_finalize_callback_1);

	event_base_dispatch(base);
	tt_int_op(cb_called, ==, 100);
	tt_int_op(ev_called, ==, 100);

	ev_called = 0;

	event_base_assert_ok_(base);

	/* Okay, now add but don't have it become active, and make sure *that*
	 * works. */
	ev = evtimer_new(base, timer_callback, &ev_called);
	event_add(ev, &ten_sec);
	event_free_finalize(0, ev, event_finalize_callback_1);

	event_base_dispatch(base);
	tt_int_op(ev_called, ==, 100);

	ev_called = 0;
	event_base_assert_ok_(base);

	/* Now try adding and deleting after finalizing. */
	ev = evtimer_new(base, timer_callback, &ev_called);
	evtimer_assign(&ev2, base, timer_callback, &ev_called);
	event_add(ev, &ten_sec);
	event_free_finalize(0, ev, event_finalize_callback_1);
	event_finalize(0, &ev2, event_finalize_callback_1);

	event_add(&ev2, &ten_sec);
	event_del(ev);
	event_active(&ev2, EV_TIMEOUT, 1);

	event_base_dispatch(base);
	tt_int_op(ev_called, ==, 200);

	event_base_assert_ok_(base);

end:
	;
}

static void *
tfff_malloc(size_t n)
{
	return malloc(n);
}
static void *tfff_p1=NULL, *tfff_p2=NULL;
static int tfff_p1_freed=0, tfff_p2_freed=0;
static void
tfff_free(void *p)
{
	if (! p)
		return;
	if (p == tfff_p1)
		++tfff_p1_freed;
	if (p == tfff_p2)
		++tfff_p2_freed;
	free(p);
}
static void *
tfff_realloc(void *p, size_t sz)
{
	return realloc(p,sz);
}

static void
test_fin_free_finalize(void *arg)
{
	struct event_base *base = NULL;

	struct event *ev, *ev2;
	int ev_called = 0;
	int ev2_called = 0;

	(void)arg;

	event_set_mem_functions(tfff_malloc, tfff_realloc, tfff_free);

	base = event_base_new();

	ev = evtimer_new(base, timer_callback, &ev_called);
	ev2 = evtimer_new(base, timer_callback, &ev2_called);
	tfff_p1 = ev;
	tfff_p2 = ev2;
	event_free_finalize(0, ev, event_finalize_callback_1);
	event_finalize(0, ev2, event_finalize_callback_1);

	event_base_dispatch(base);

	tt_int_op(ev_called, ==, 100);
	tt_int_op(ev2_called, ==, 100);

	event_base_assert_ok_(base);
	tt_int_op(tfff_p1_freed, ==, 1);
	tt_int_op(tfff_p2_freed, ==, 0);

	event_free(ev2);

end:
	if (base)
		event_base_free(base);
}

/* For test_fin_within_cb */
struct event_and_count {
	struct event *ev;
	struct event *ev2;
	int count;
};
static void
event_finalize_callback_2(struct event *ev, void *arg)
{
	struct event_and_count *evc = arg;
	evc->count += 100;
	event_free(ev);
}
static void
timer_callback_2(evutil_socket_t fd, short what, void *arg)
{
	struct event_and_count *evc = arg;
	event_finalize(0, evc->ev, event_finalize_callback_2);
	event_finalize(0, evc->ev2, event_finalize_callback_2);
	++ evc->count;
	(void)fd;
	(void)what;
}

static void
test_fin_within_cb(void *arg)
{
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;

	struct event_and_count evc1, evc2;
	evc1.count = evc2.count = 0;
	evc2.ev2 = evc1.ev = evtimer_new(base, timer_callback_2, &evc1);
	evc1.ev2 = evc2.ev = evtimer_new(base, timer_callback_2, &evc2);

	/* Activate both.  The first one will have its callback run, which
	 * will finalize both of them, preventing the second one's callback
	 * from running. */
	event_active(evc1.ev, EV_TIMEOUT, 1);
	event_active(evc2.ev, EV_TIMEOUT, 1);

	event_base_dispatch(base);
	tt_int_op(evc1.count, ==, 101);
	tt_int_op(evc2.count, ==, 100);

	event_base_assert_ok_(base);
	/* Now try with EV_PERSIST events. */
	evc1.count = evc2.count = 0;
	evc2.ev2 = evc1.ev = event_new(base, -1, EV_PERSIST, timer_callback_2, &evc1);
	evc1.ev2 = evc2.ev = event_new(base, -1, EV_PERSIST, timer_callback_2, &evc2);

	event_active(evc1.ev, EV_TIMEOUT, 1);
	event_active(evc2.ev, EV_TIMEOUT, 1);

	event_base_dispatch(base);
	tt_int_op(evc1.count, ==, 101);
	tt_int_op(evc2.count, ==, 100);

	event_base_assert_ok_(base);
end:
	;
}

#if 0
static void
timer_callback_3(evutil_socket_t *fd, short what, void *arg)
{
	(void)fd;
	(void)what;

}
static void
test_fin_many(void *arg)
{
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;

	struct event *ev1, *ev2;
	struct event_callback evcb1, evcb2;
	int ev1_count = 0, ev2_count = 0;
	int evcb1_count = 0, evcb2_count = 0;
	struct event_callback *array[4];

	int n;

	/* First attempt: call finalize_many with no events running */
	ev1 = evtimer_new(base, timer_callback, &ev1_count);
	ev1 = evtimer_new(base, timer_callback, &ev2_count);
	event_deferred_cb_init_(&evcb1, 0, simple_callback, &evcb1_called);
	event_deferred_cb_init_(&evcb2, 0, simple_callback, &evcb2_called);
	array[0] = &ev1->ev_evcallback;
	array[1] = &ev2->ev_evcallback;
	array[2] = &evcb1;
	array[3] = &evcb2;

	n = event_callback_finalize_many(base, 4, array,
	    callback_finalize_callback_1);

}
#endif

#define TEST(name, flags)					\
	{ #name, test_fin_##name, (flags), &basic_setup, NULL }

struct testcase_t finalize_testcases[] = {

	TEST(cb_invoked, TT_FORK|TT_NEED_BASE),
	TEST(free_finalize, TT_FORK),
	TEST(within_cb, TT_FORK|TT_NEED_BASE),
	// TEST(many, TT_FORK|TT_NEED_BASE),

	END_OF_TESTCASES
};
test/regress_main.c
@@ -371,6 +371,7 @@ struct testgroup_t testgroups[] = {
 	{ "main/", main_testcases },
 	{ "heap/", minheap_testcases },
 	{ "et/", edgetriggered_testcases },
+	{ "finalize/", finalize_testcases },
 	{ "evbuffer/", evbuffer_testcases },
 	{ "signal/", signal_testcases },
 	{ "util/", util_testcases },
|
||||
#include "regress.h"
|
||||
#include "tinytest_macros.h"
|
||||
#include "time-internal.h"
|
||||
|
||||
#ifdef EVENT__HAVE_PTHREADS
|
||||
#define THREAD_T pthread_t
|
||||
#define THREAD_FN void *
|
||||
#define THREAD_RETURN() return (NULL)
|
||||
#define THREAD_START(threadvar, fn, arg) \
|
||||
pthread_create(&(threadvar), NULL, fn, arg)
|
||||
#define THREAD_JOIN(th) pthread_join(th, NULL)
|
||||
#else
|
||||
#define THREAD_T HANDLE
|
||||
#define THREAD_FN unsigned __stdcall
|
||||
#define THREAD_RETURN() return (0)
|
||||
#define THREAD_START(threadvar, fn, arg) do { \
|
||||
uintptr_t threadhandle = _beginthreadex(NULL,0,fn,(arg),0,NULL); \
|
||||
(threadvar) = (HANDLE) threadhandle; \
|
||||
} while (0)
|
||||
#define THREAD_JOIN(th) WaitForSingleObject(th, INFINITE)
|
||||
#endif
|
||||
#include "regress_thread.h"
|
||||
|
||||
struct cond_wait {
|
||||
void *lock;
|
||||
|
48	test/regress_thread.h (new file; every line added)
@@ -0,0 +1,48 @@
/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef REGRESS_THREAD_H_INCLUDED_
#define REGRESS_THREAD_H_INCLUDED_

#ifdef EVENT__HAVE_PTHREADS
#define THREAD_T pthread_t
#define THREAD_FN void *
#define THREAD_RETURN() return (NULL)
#define THREAD_START(threadvar, fn, arg) \
	pthread_create(&(threadvar), NULL, fn, arg)
#define THREAD_JOIN(th) pthread_join(th, NULL)
#else
#define THREAD_T HANDLE
#define THREAD_FN unsigned __stdcall
#define THREAD_RETURN() return (0)
#define THREAD_START(threadvar, fn, arg) do { \
	uintptr_t threadhandle = _beginthreadex(NULL,0,fn,(arg),0,NULL); \
	(threadvar) = (HANDLE) threadhandle; \
} while (0)
#define THREAD_JOIN(th) WaitForSingleObject(th, INFINITE)
#endif

#endif
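How the regression tests consume these portability macros, as a sketch in the spirit of regress_thread.c (worker and spawn_and_join are illustrative names; <pthread.h> or <windows.h> must already be included, as in the tests):

#include "regress_thread.h"

static THREAD_FN
worker(void *arg)
{
	/* ... exercise the event_base from a second thread ... */
	THREAD_RETURN();
}

static void
spawn_and_join(void *arg)
{
	THREAD_T thread;
	THREAD_START(thread, worker, arg);
	THREAD_JOIN(thread);
}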