Improved optional lock debugging.
There were a couple of places in the code where we manually kept lock counts to make sure we never accessed resources without holding a lock, and never released a lock we didn't hold. The lock-debugging code already keeps a count on _every_ lock when lock debugging is enabled, so there is no need to maintain these separate per-structure counts. This patch rewrites the ASSERT_FOO_LOCKED macros to all use a common EVLOCK_ASSERT_LOCKED(). We also teach the lock-debugging code to track exactly which thread holds each lock, so that EVLOCK_ASSERT_LOCKED() means "locked by this thread."
parent 2df1f82bfa
commit 0cd3bb9f3a
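The pattern described in the commit message can be illustrated with a small standalone sketch (plain C with pthreads; the names dbg_lock, dbg_lock_acquire, and so on are hypothetical, not libevent's). Instead of each subsystem keeping its own lock_count field, a debugging wrapper around the lock records how many times it is held and by which thread, so an assertion can simply ask the wrapper whether the current thread holds it.

/* Standalone sketch of the idea above; all names are hypothetical.
 * Build: cc sketch.c -pthread */
#include <assert.h>
#include <pthread.h>

struct dbg_lock {
	pthread_mutex_t mutex;
	int count;          /* how many times the lock is currently held */
	pthread_t held_by;  /* which thread holds it; valid when count > 0 */
};

static void dbg_lock_acquire(struct dbg_lock *dl)
{
	pthread_mutex_lock(&dl->mutex);
	++dl->count;
	dl->held_by = pthread_self();
}

static void dbg_lock_release(struct dbg_lock *dl)
{
	/* Releasing a lock we do not hold is a bug; catch it here. */
	assert(dl->count > 0 && pthread_equal(dl->held_by, pthread_self()));
	--dl->count;
	pthread_mutex_unlock(&dl->mutex);
}

/* "Locked by this thread": callers no longer need their own counters. */
static void dbg_assert_locked(struct dbg_lock *dl)
{
	assert(dl->count > 0 && pthread_equal(dl->held_by, pthread_self()));
}

int main(void)
{
	struct dbg_lock dl = { PTHREAD_MUTEX_INITIALIZER, 0 };
	dbg_lock_acquire(&dl);
	dbg_assert_locked(&dl);  /* passes: this thread holds the lock */
	dbg_lock_release(&dl);
	return 0;
}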
@@ -121,9 +121,6 @@ struct evbuffer {
 	/** Used to implement deferred callbacks. */
 	struct deferred_cb_queue *cb_queue;
 
-	/** For debugging: how many times have we acquired the lock for this
-	 * evbuffer? */
-	int lock_count;
 	/** A reference count on this evbuffer. When the reference count
 	 * reaches 0, the buffer is destroyed. Manipulated with
 	 * evbuffer_incref and evbuffer_decref_and_unlock and
@@ -202,47 +199,24 @@ struct evbuffer_chain_reference {
 /** Return a pointer to extra data allocated along with an evbuffer. */
 #define EVBUFFER_CHAIN_EXTRA(t, c) (t *)((struct evbuffer_chain *)(c) + 1)
 
-/** Assert that somebody (hopefully us) is holding the lock on an evbuffer */
+/** Assert that we are holding the lock on an evbuffer */
 #define ASSERT_EVBUFFER_LOCKED(buffer)			\
-	do {						\
-		EVUTIL_ASSERT((buffer)->lock_count > 0); \
-	} while (0)
-/** Assert that nobody is holding the lock on an evbuffer */
-#define ASSERT_EVBUFFER_UNLOCKED(buffer)		\
-	do {						\
-		EVUTIL_ASSERT((buffer)->lock_count == 0); \
-	} while (0)
-#define _EVBUFFER_INCREMENT_LOCK_COUNT(buffer)		\
-	do {						\
-		((struct evbuffer*)(buffer))->lock_count++; \
-	} while (0)
-#define _EVBUFFER_DECREMENT_LOCK_COUNT(buffer)		\
-	do {						\
-		ASSERT_EVBUFFER_LOCKED(buffer);		\
-		((struct evbuffer*)(buffer))->lock_count--; \
-	} while (0)
+	EVLOCK_ASSERT_LOCKED((buffer)->lock)
 
 #define EVBUFFER_LOCK(buffer)						\
 	do {								\
 		EVLOCK_LOCK((buffer)->lock, 0);				\
-		_EVBUFFER_INCREMENT_LOCK_COUNT(buffer);			\
 	} while(0)
 #define EVBUFFER_UNLOCK(buffer)						\
 	do {								\
-		_EVBUFFER_DECREMENT_LOCK_COUNT(buffer);			\
 		EVLOCK_UNLOCK((buffer)->lock, 0);			\
 	} while(0)
 
 #define EVBUFFER_LOCK2(buffer1, buffer2)				\
 	do {								\
 		EVLOCK_LOCK2((buffer1)->lock, (buffer2)->lock, 0, 0);	\
-		_EVBUFFER_INCREMENT_LOCK_COUNT(buffer1);		\
-		_EVBUFFER_INCREMENT_LOCK_COUNT(buffer2);		\
 	} while(0)
 #define EVBUFFER_UNLOCK2(buffer1, buffer2)				\
 	do {								\
-		_EVBUFFER_DECREMENT_LOCK_COUNT(buffer1);		\
-		_EVBUFFER_DECREMENT_LOCK_COUNT(buffer2);		\
 		EVLOCK_UNLOCK2((buffer1)->lock, (buffer2)->lock, 0, 0);	\
 	} while(0)
 
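For reference, the code path that exercises these macros from the outside is the public evbuffer locking API (evbuffer_enable_locking plus evbuffer_lock/evbuffer_unlock). A minimal sketch, assuming a libevent 2.0 or later build with pthread support on a POSIX system; the operations below take and release the buffer's lock through EVBUFFER_LOCK/EVBUFFER_UNLOCK internally.

/* Minimal sketch of the public evbuffer locking API; assumes libevent 2.0+
 * built with pthreads.  Build: cc demo.c -levent -levent_pthreads */
#include <event2/buffer.h>
#include <event2/thread.h>

int main(void)
{
	struct evbuffer *buf;

	/* Install pthread-based lock callbacks so buffers can really lock. */
	if (evthread_use_pthreads() < 0)
		return 1;

	buf = evbuffer_new();
	/* NULL asks libevent to allocate a lock for this buffer; after this,
	 * the evbuffer operations below lock and unlock it internally. */
	evbuffer_enable_locking(buf, NULL);

	evbuffer_lock(buf);              /* explicit lock around a sequence */
	evbuffer_add(buf, "hello", 5);
	evbuffer_unlock(buf);

	evbuffer_free(buf);
	return 0;
}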
evdns.c (24 lines changed)
@@ -265,7 +265,6 @@ struct evdns_server_port {
 
 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
 	void *lock;
-	int lock_count;
 #endif
 };
 
@@ -362,7 +361,6 @@ struct evdns_base {
 
 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
 	void *lock;
-	int lock_count;
 #endif
 };
 
@@ -418,22 +416,12 @@ static int strtoint(const char *const str);
 #define EVDNS_UNLOCK(base) _EVUTIL_NIL_STMT
 #define ASSERT_LOCKED(base) _EVUTIL_NIL_STMT
 #else
-#define EVDNS_LOCK(base)			\
-	do {					\
-		if ((base)->lock) {		\
-			EVLOCK_LOCK((base)->lock, 0); \
-		}				\
-		++(base)->lock_count;		\
-	} while (0)
-#define EVDNS_UNLOCK(base)			\
-	do {					\
-		EVUTIL_ASSERT((base)->lock_count > 0); \
-		--(base)->lock_count;		\
-		if ((base)->lock) {		\
-			EVLOCK_UNLOCK((base)->lock, 0); \
-		}				\
-	} while (0)
-#define ASSERT_LOCKED(base) EVUTIL_ASSERT((base)->lock_count > 0)
+#define EVDNS_LOCK(base)			\
+	EVLOCK_LOCK((base)->lock, 0)
+#define EVDNS_UNLOCK(base)			\
+	EVLOCK_UNLOCK((base)->lock, 0)
+#define ASSERT_LOCKED(base)			\
+	EVLOCK_ASSERT_LOCKED((base)->lock)
 #endif
 
 #define CLOSE_SOCKET(s) EVUTIL_CLOSESOCKET(s)
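One reason the simplified EVDNS_LOCK/EVDNS_UNLOCK no longer need the explicit `if ((base)->lock)` test is that the generic EVLOCK_LOCK/EVLOCK_UNLOCK helpers already treat a NULL lock pointer as a no-op, so the check does not have to be repeated at each call site. A standalone sketch of that convention (hypothetical MY_LOCK/MY_UNLOCK names, plain pthreads, not libevent code):

/* Standalone sketch (hypothetical names) of a NULL-tolerant lock macro:
 * subsystems whose lock pointer may be NULL can use one-line lock/unlock
 * wrappers without guarding every call site.  Build: cc demo.c -pthread */
#include <stdio.h>
#include <pthread.h>

#define MY_LOCK(lockptr) do {				\
		if (lockptr)				\
			pthread_mutex_lock(lockptr);	\
	} while (0)
#define MY_UNLOCK(lockptr) do {				\
		if (lockptr)				\
			pthread_mutex_unlock(lockptr);	\
	} while (0)

struct my_base {
	pthread_mutex_t *lock;	/* NULL when threading is not in use */
};

int main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	struct my_base single_threaded = { NULL };
	struct my_base threaded = { &m };

	MY_LOCK(single_threaded.lock);    /* no-op: lock pointer is NULL */
	MY_UNLOCK(single_threaded.lock);

	MY_LOCK(threaded.lock);           /* actually locks the mutex */
	puts("holding the lock");
	MY_UNLOCK(threaded.lock);
	return 0;
}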
@@ -40,6 +40,7 @@ struct event_base;
    enabled. */
 extern struct evthread_lock_callbacks _evthread_lock_fns;
 extern unsigned long (*_evthread_id_fn)(void);
+extern int _evthread_lock_debugging_enabled;
 
 /** True iff the given event_base is set up to use locking */
 #define EVBASE_USING_LOCKS(base) \
@@ -129,6 +130,18 @@ extern unsigned long (*_evthread_id_fn)(void);
 		if (EVBASE_USING_LOCKS(base))				\
 			_evthread_lock_fns.unlock(0, (base)->lockvar);	\
 	} while (0)
+
+/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
+ * locked and held by us. */
+#define EVLOCK_ASSERT_LOCKED(lock)					\
+	do {								\
+		if ((lock) && _evthread_lock_debugging_enabled) {	\
+			EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
+		}							\
+	} while (0)
+
+int _evthread_is_debug_lock_held(void *lock);
+
 #else /* _EVENT_DISABLE_THREAD_SUPPORT */
 
 #define EVTHREAD_GET_ID() 1
@@ -143,7 +156,7 @@ extern unsigned long (*_evthread_id_fn)(void);
 #define EVBASE_IN_THREAD(base) 1
 #define EVBASE_ACQUIRE_LOCK(base, lock) _EVUTIL_NIL_STMT
 #define EVBASE_RELEASE_LOCK(base, lock) _EVUTIL_NIL_STMT
-
+#define EVLOCK_ASSERT_LOCKED(lock) _EVUTIL_NIL_STMT
 #endif
 
 #ifdef __cplusplus
evthread.c (30 lines changed)
@@ -36,6 +36,7 @@
 #include "log-internal.h"
 #include "mm-internal.h"
 #include "util-internal.h"
+#include "evthread-internal.h"
 
 /* globals */
 int _evthread_lock_debugging_enabled = 0;
@@ -150,6 +151,7 @@ evthread_set_lock_create_callbacks(void *(*alloc_fn)(void),
 
 struct debug_lock {
 	unsigned locktype;
+	unsigned long held_by;
 	/* XXXX if we ever use read-write locks, we will need a separate
 	 * lock to protect count. */
 	int count;
@@ -171,6 +173,7 @@ debug_lock_alloc(unsigned locktype)
 	}
 	result->locktype = locktype;
 	result->count = 0;
+	result->held_by = 0;
 	return result;
 }
 
@@ -204,6 +207,13 @@ debug_lock_lock(unsigned mode, void *lock_)
 		++lock->count;
 		if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
 			EVUTIL_ASSERT(lock->count == 1);
+		if (_evthread_id_fn) {
+			unsigned long me;
+			me = _evthread_id_fn();
+			if (lock->count > 1)
+				EVUTIL_ASSERT(lock->held_by == me);
+			lock->held_by = me;
+		}
 	}
 	return res;
 }
@@ -217,6 +227,12 @@ debug_lock_unlock(unsigned mode, void *lock_)
 		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
 	else
 		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
+	if (_evthread_id_fn) {
+		unsigned long me = _evthread_id_fn();
+		EVUTIL_ASSERT(lock->held_by == me);
+		if (lock->count == 1)
+			lock->held_by = 0;
+	}
 	--lock->count;
 	EVUTIL_ASSERT(lock->count >= 0);
 	if (_original_lock_fns.unlock)
@@ -244,4 +260,18 @@ evthread_enable_lock_debuging(void)
 	_evthread_lock_debugging_enabled = 1;
 }
 
+int
+_evthread_is_debug_lock_held(void *lock_)
+{
+	struct debug_lock *lock = lock_;
+	if (! lock->count)
+		return 0;
+	if (_evthread_id_fn) {
+		unsigned long me = _evthread_id_fn();
+		if (lock->held_by != me)
+			return 0;
+	}
+	return 1;
+}
+
 #endif
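Putting the pieces together, the debugging path added here is only active when the application opts in. A minimal sketch, assuming a libevent 2.0 build with pthread support (note the historical one-'g' spelling of evthread_enable_lock_debuging). With debugging enabled, locks allocated afterwards are wrapped in struct debug_lock, so a double unlock or an unlock from the wrong thread trips an assertion instead of silently corrupting state.

/* Minimal sketch: turn on lock debugging at startup; assumes libevent 2.0+
 * with pthreads.  Build: cc demo.c -levent -levent_pthreads */
#include <event2/buffer.h>
#include <event2/thread.h>

int main(void)
{
	struct evbuffer *buf;

	if (evthread_use_pthreads() < 0)
		return 1;
	/* Must be called before any locks are created. */
	evthread_enable_lock_debuging();

	buf = evbuffer_new();
	evbuffer_enable_locking(buf, NULL);

	evbuffer_lock(buf);
	evbuffer_unlock(buf);
	/* A second evbuffer_unlock(buf) here would now trip the debug-lock
	 * assertions (lock not held by this thread) and abort the program. */

	evbuffer_free(buf);
	return 0;
}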