mirror of https://github.com/libevent/libevent.git
Avoid race-condition when initializing global locks
Previously, we did things like if (!lock) EVTHREAD_ALLOC_LOCK(lock,0); for the evsig base global lock, the arc4random lock, and the debug_map lock. But that check-then-allocate pattern is potentially racy: two threads can both see the lock as unset and each allocate its own copy. Instead, we move the responsibility for global lock initialization to the functions where we set up the lock callbacks. (Rationale: We already require that you set up the locking callbacks before you create any event_base, and that you do so exactly once.)
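To make the race concrete, here is a minimal pthreads sketch of the check-then-act bug the message describes. This is an illustration only, not libevent code; the names (global_lock, lazy_init_racy) are made up:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t *global_lock = NULL;

/* The old pattern: every caller lazily allocates the global lock.
 * Two threads can both observe global_lock == NULL, both allocate,
 * and then synchronize on *different* mutexes, so the lock protects
 * nothing (and one allocation leaks). */
static void
lazy_init_racy(void)
{
	if (global_lock == NULL) {
		pthread_mutex_t *m = malloc(sizeof(*m));
		pthread_mutex_init(m, NULL);
		global_lock = m;	/* a second writer silently wins */
	}
}

The fix below allocates every global lock at the one point that is documented to run exactly once, before any other thread can touch libevent: when the locking callbacks are installed.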
commit b683cae3cb
parent b4f89f00c6
@@ -335,6 +335,7 @@ int _evsig_set_handler(struct event_base *base, int evsignal,
     void (*fn)(int));
 int _evsig_restore_handler(struct event_base *base, int evsignal);


 void event_active_nolock(struct event *ev, int res, short count);

 /* FIXME document. */
event.c | 22
@@ -182,7 +182,9 @@ eq_debug_entry(const struct event_debug_entry *a,
 int _event_debug_mode_on = 0;
 /* Set if it's too late to enable event_debug_mode. */
 static int event_debug_mode_too_late = 0;
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
 static void *_event_debug_map_lock = NULL;
+#endif
 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
 	HT_INITIALIZER();

@@ -514,8 +516,6 @@ event_enable_debug_mode(void)
 	_event_debug_mode_on = 1;

 	HT_INIT(event_debug_map, &global_debug_map);
-
-	EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
 #endif
 }

@@ -545,9 +545,6 @@ event_base_new_with_config(const struct event_config *cfg)

 #ifndef _EVENT_DISABLE_DEBUG_MODE
 	event_debug_mode_too_late = 1;
-	if (_event_debug_mode_on && !_event_debug_map_lock) {
-		EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
-	}
 #endif

 	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {

@@ -2813,3 +2810,18 @@ event_base_del_virtual(struct event_base *base)
 		evthread_notify_base(base);
 	EVBASE_RELEASE_LOCK(base, th_base_lock);
 }
+
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+int
+event_global_setup_locks_(const int enable_locks)
+{
+#ifndef _EVENT_DISABLE_DEBUG_MODE
+	EVTHREAD_SETUP_GLOBAL_LOCK(_event_debug_map_lock, 0);
+#endif
+	if (evsig_global_setup_locks_(enable_locks) < 0)
+		return -1;
+	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
+		return -1;
+	return 0;
+}
+#endif
evthread-internal.h

@@ -345,6 +345,24 @@ EVLOCK_TRY_LOCK(void *lock)

 int _evthread_is_debug_lock_held(void *lock);
 void *_evthread_debug_get_real_lock(void *lock);

+void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
+    int enable_locks);
+
+#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype)			\
+	do {								\
+		lockvar = evthread_setup_global_lock_(lockvar,		\
+		    (locktype), enable_locks);				\
+		if (!lockvar) {						\
+			event_warn("Couldn't allocate %s", #lockvar);	\
+			return -1;					\
+		}							\
+	} while (0);
+
+int event_global_setup_locks_(const int enable_locks);
+int evsig_global_setup_locks_(const int enable_locks);
+int evutil_secure_rng_global_setup_locks_(const int enable_locks);
+
 #endif

 #ifdef __cplusplus
evthread.c | 61
@@ -81,7 +81,7 @@ evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
 	}
 	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
 		memcpy(target, cbs, sizeof(_evthread_lock_fns));
-		return 0;
+		return event_global_setup_locks_(1);
 	} else {
 		return -1;
 	}

@@ -246,6 +246,9 @@ evthread_enable_lock_debuging(void)
 	    sizeof(struct evthread_condition_callbacks));
 	_evthread_cond_fns.wait_condition = debug_cond_wait;
 	_evthread_lock_debugging_enabled = 1;
+
+	/* XXX return value should get checked. */
+	event_global_setup_locks_(0);
 }

 int

@@ -269,6 +272,62 @@ _evthread_debug_get_real_lock(void *lock_)
 	return lock->lock;
 }

+void *
+evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
+{
+	/* there are four cases here:
+	   1) we're turning on debugging; locking is not on.
+	   2) we're turning on debugging; locking is on.
+	   3) we're turning on locking; debugging is not on.
+	   4) we're turning on locking; debugging is on. */
+
+	if (!enable_locks && _original_lock_fns.alloc == NULL) {
+		/* Case 1: allocate a debug lock. */
+		EVUTIL_ASSERT(lock_ == NULL);
+		return debug_lock_alloc(locktype);
+	} else if (!enable_locks && _original_lock_fns.alloc != NULL) {
+		/* Case 2: wrap the lock in a debug lock. */
+		struct debug_lock *lock;
+		EVUTIL_ASSERT(lock_ != NULL);

+		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
+			/* We can't wrap it: We need a recursive lock */
+			_original_lock_fns.free(lock_, locktype);
+			return debug_lock_alloc(locktype);
+		}
+		lock = mm_malloc(sizeof(struct debug_lock));
+		if (!lock) {
+			_original_lock_fns.free(lock_, locktype);
+			return NULL;
+		}
+		lock->lock = lock_;
+		lock->locktype = locktype;
+		lock->count = 0;
+		lock->held_by = 0;
+		return lock;
+	} else if (enable_locks && ! _evthread_lock_debugging_enabled) {
+		/* Case 3: allocate a regular lock */
+		EVUTIL_ASSERT(lock_ == NULL);
+		return _evthread_lock_fns.alloc(locktype);
+	} else {
+		/* Case 4: Fill in a debug lock with a real lock */
+		struct debug_lock *lock = lock_;
+		EVUTIL_ASSERT(enable_locks &&
+		    _evthread_lock_debugging_enabled);
+		EVUTIL_ASSERT(lock->locktype == locktype);
+		EVUTIL_ASSERT(lock->lock == NULL);
+		lock->lock = _original_lock_fns.alloc(
+		    locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
+		if (!lock->lock) {
+			lock->count = -200;
+			mm_free(lock);
+			return NULL;
+		}
+		return lock;
+	}
+}
+
+
 #ifndef EVTHREAD_EXPOSE_STRUCTS
 unsigned long
 _evthreadimpl_get_id()
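Seen from the application side, the contract this relies on is the existing rule that the lock callbacks are installed exactly once, before any event_base is created. A hypothetical caller using the stock evthread_use_pthreads() helper from event2/thread.h (which installs the pthreads callbacks and so, after this commit, also sets up the global locks) might look like this; link with -levent_pthreads:

#include <event2/thread.h>
#include <event2/event.h>

int
main(void)
{
	/* Must happen first, exactly once: installs the lock callbacks
	 * and (with this commit) eagerly allocates the debug-map, evsig,
	 * and arc4random global locks, so no later lazy init can race. */
	if (evthread_use_pthreads() == -1)
		return 1;

	struct event_base *base = event_base_new(); /* locks already exist */
	if (!base)
		return 1;

	event_base_free(base);
	return 0;
}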
evutil_rand.c

@@ -49,6 +49,11 @@ evutil_secure_rng_init(void)
 	(void) arc4random();
 	return 0;
 }
+int
+evutil_secure_rng_global_setup_locks_(const int enable_locks)
+{
+	return 0;
+}

 #ifndef _EVENT_HAVE_ARC4RANDOM_BUF
 static void

@@ -93,13 +98,19 @@ static void *arc4rand_lock;

 #include "./arc4random.c"

+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+int
+evutil_secure_rng_global_setup_locks_(const int enable_locks)
+{
+	EVTHREAD_SETUP_GLOBAL_LOCK(arc4rand_lock, 0);
+	return 0;
+}
+#endif
+
 int
 evutil_secure_rng_init(void)
 {
 	int val;
-	if (!arc4rand_lock) {
-		EVTHREAD_ALLOC_LOCK(arc4rand_lock, 0);
-	}

 	_ARC4_LOCK();
 	if (!arc4_seeded_ok)
signal.c | 14
@@ -168,11 +168,6 @@ evsig_cb(evutil_socket_t fd, short what, void *arg)
 int
 evsig_init(struct event_base *base)
 {
-#ifndef _EVENT_DISABLE_THREAD_SUPPORT
-	if (! evsig_base_lock)
-		EVTHREAD_ALLOC_LOCK(evsig_base_lock, 0);
-#endif
-
 	/*
 	 * Our signal handler is going to write to one end of the socket
 	 * pair to wake up our event loop. The event loop then scans for

@@ -436,3 +431,12 @@ evsig_dealloc(struct event_base *base)
 		base->sig.sh_old = NULL;
 	}
 }
+
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+int
+evsig_global_setup_locks_(const int enable_locks)
+{
+	EVTHREAD_SETUP_GLOBAL_LOCK(evsig_base_lock, 0);
+	return 0;
+}
+#endif