Document many internal functions and pieces of code.

svn:r1181
This commit is contained in:
Nick Mathewson 2009-04-17 06:55:08 +00:00
parent 433e2339ca
commit 838d0a81c3
13 changed files with 339 additions and 38 deletions

View File

@ -81,8 +81,12 @@ BUILT_SOURCES = event-config.h
event-config.h: config.h event-config.h: config.h
echo '/* event-config.h' > $@ echo '/* event-config.h' > $@
echo ' * Generated by autoconf; post-processed by libevent.' >> $@ echo ' *' >> $@
echo ' * Do not edit this file.' >> $@ echo ' * This file was generated by autoconf when libevent was built, and post-' >> $@
echo ' * processed by Libevent so that its macros would have a uniform prefix.' >> $@
echo ' *' >> $@
echo ' * DO NOT EDIT THIS FILE.' >> $@
echo ' *' >> $@
echo ' * Do not rely on macros in this file existing in later versions.'>> $@ echo ' * Do not rely on macros in this file existing in later versions.'>> $@
echo ' */' >> $@ echo ' */' >> $@
echo '#ifndef _EVENT_CONFIG_H_' >> $@ echo '#ifndef _EVENT_CONFIG_H_' >> $@

View File

@ -1,3 +1,35 @@
/*
* Copyright (c) 2009 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
@file buffer_iocp.c
This module implements overlapped read and write functions for evbuffer
objects on Windows.
*/
#include <windows.h> #include <windows.h>
#include <assert.h> #include <assert.h>
@ -15,25 +47,38 @@
#define MAX_WSABUFS 16 #define MAX_WSABUFS 16
/** Wrapper for an OVERLAPPED that holds the necessary info to notice
when an overlapped read or write is done on an evbuffer.
**/
struct buffer_overlapped { struct buffer_overlapped {
struct event_overlapped event_overlapped; struct event_overlapped event_overlapped;
/** The first pinned chain in the buffer. */
struct evbuffer_chain *first_pinned; struct evbuffer_chain *first_pinned;
/** The buffer itself. */
struct evbuffer_overlapped *buf; struct evbuffer_overlapped *buf;
/** How many chains are pinned; how many of the fields in buffers
* are we using. */
int n_buffers; int n_buffers;
WSABUF buffers[MAX_WSABUFS]; WSABUF buffers[MAX_WSABUFS];
}; };
/** An evbuffer that can handle overlapped IO. */
struct evbuffer_overlapped { struct evbuffer_overlapped {
struct evbuffer buffer; struct evbuffer buffer;
/** The socket that we're doing overlapped IO on. */
evutil_socket_t fd; evutil_socket_t fd;
/** True iff we have scheduled a write. */
unsigned write_in_progress : 1; unsigned write_in_progress : 1;
/** True iff we have scheduled a read. */
unsigned read_in_progress : 1; unsigned read_in_progress : 1;
struct buffer_overlapped read_info; struct buffer_overlapped read_info;
struct buffer_overlapped write_info; struct buffer_overlapped write_info;
}; };
/** Given an evbuffer, return the corresponding evbuffer_overlapped structure,
 * or NULL if the evbuffer isn't overlapped. */
static inline struct evbuffer_overlapped * static inline struct evbuffer_overlapped *
upcast_evbuffer(struct evbuffer *buf) upcast_evbuffer(struct evbuffer *buf)
{ {
@ -48,6 +93,7 @@ upcast_overlapped(struct event_overlapped *o)
return EVUTIL_UPCAST(o, struct buffer_overlapped, event_overlapped); return EVUTIL_UPCAST(o, struct buffer_overlapped, event_overlapped);
} }
/** Unpin all the chains noted as pinned in 'eo'. */
static void static void
pin_release(struct event_overlapped *eo, unsigned flag) pin_release(struct event_overlapped *eo, unsigned flag)
{ {
@ -62,6 +108,7 @@ pin_release(struct event_overlapped *eo, unsigned flag)
} }
} }
/** IOCP callback invoked when a read operation is finished. */
static void static void
read_completed(struct event_overlapped *eo, uintptr_t _, ssize_t nBytes) read_completed(struct event_overlapped *eo, uintptr_t _, ssize_t nBytes)
{ {
@ -95,6 +142,7 @@ read_completed(struct event_overlapped *eo, uintptr_t _, ssize_t nBytes)
_evbuffer_decref_and_unlock(evbuf); _evbuffer_decref_and_unlock(evbuf);
} }
/** IOCP callback invoked when a write operation is finished. */
static void static void
write_completed(struct event_overlapped *eo, uintptr_t _, ssize_t nBytes) write_completed(struct event_overlapped *eo, uintptr_t _, ssize_t nBytes)
{ {

View File

@ -37,15 +37,34 @@ struct deferred_cb;
typedef void (*deferred_cb_fn)(struct deferred_cb *, void *); typedef void (*deferred_cb_fn)(struct deferred_cb *, void *);
/** A deferred_cb is a callback that can be scheduled to run as part of
* an event_base's event_loop, rather than running immediately. */
struct deferred_cb { struct deferred_cb {
/** Links to the adjacent active (pending) deferred_cb objects. */
TAILQ_ENTRY (deferred_cb) (cb_next); TAILQ_ENTRY (deferred_cb) (cb_next);
/** True iff this deferred_cb is pending in an event_base. */
unsigned queued : 1; unsigned queued : 1;
/** The function to execute when the callback runs. */
deferred_cb_fn cb; deferred_cb_fn cb;
/** The function's second argument. */
void *arg; void *arg;
}; };
/**
Initialize an empty, non-pending deferred_cb.
@param deferred The deferred_cb structure to initialize.
@param cb The function to run when the deferred_cb executes.
@param arg The function's second argument.
*/
void event_deferred_cb_init(struct deferred_cb *, deferred_cb_fn, void *); void event_deferred_cb_init(struct deferred_cb *, deferred_cb_fn, void *);
/**
Cancel a deferred_cb if it is currently scheduled in an event_base.
*/
void event_deferred_cb_cancel(struct event_base *, struct deferred_cb *); void event_deferred_cb_cancel(struct event_base *, struct deferred_cb *);
/**
Activate a deferred_cb if it is not currently scheduled in an event_base.
*/
void event_deferred_cb_schedule(struct event_base *, struct deferred_cb *); void event_deferred_cb_schedule(struct event_base *, struct deferred_cb *);
#ifdef __cplusplus #ifdef __cplusplus

View File

@ -39,11 +39,14 @@ extern "C" {
/* minimum allocation for a chain. */ /* minimum allocation for a chain. */
#define MIN_BUFFER_SIZE 256 #define MIN_BUFFER_SIZE 256
/** A single evbuffer callback for an evbuffer. */ /** A single evbuffer callback for an evbuffer. This function will be invoked
* when bytes are added to or removed from the evbuffer. */
struct evbuffer_cb_entry { struct evbuffer_cb_entry {
/** Structures to implement a doubly-linked queue of callbacks */ /** Structures to implement a doubly-linked queue of callbacks */
TAILQ_ENTRY(evbuffer_cb_entry) next; TAILQ_ENTRY(evbuffer_cb_entry) next;
/** The callback function to invoke when this callback is called */ /** The callback function to invoke when this callback is called.
If EVBUFFER_CB_OBSOLETE is set in flags, the cb_obsolete field is
valid; otherwise, cb_func is valid. */
union { union {
evbuffer_cb_func cb_func; evbuffer_cb_func cb_func;
evbuffer_cb cb_obsolete; evbuffer_cb cb_obsolete;
@ -61,39 +64,75 @@ struct evbuffer_cb_entry {
struct evbuffer_chain; struct evbuffer_chain;
struct evbuffer { struct evbuffer {
/** The first chain in this buffer's linked list of chains. */
struct evbuffer_chain *first; struct evbuffer_chain *first;
/** The last chain in this buffer's linked list of chains. */
struct evbuffer_chain *last; struct evbuffer_chain *last;
/** The next-to-last chain in this buffer's linked list of chains.
* NULL if the buffer has 0 or 1 chains. Used in case there's an
* ongoing read that needs to be split across multiple chains: we want
* to add a new chain as a read target, but we don't want to lose our
* pointer to the next-to-last chain if the read turns out to be
* incomplete.
*/
struct evbuffer_chain *previous_to_last; struct evbuffer_chain *previous_to_last;
size_t total_len; /* total length of all buffers */ /** Total amount of bytes stored in all chains.*/
size_t total_len;
evbuffer_cb cb;
void *cbarg;
/** Number of bytes we have added to the buffer since we last tried to
* invoke callbacks. */
size_t n_add_for_cb; size_t n_add_for_cb;
/** Number of bytes we have removed from the buffer since we last
* tried to invoke callbacks. */
size_t n_del_for_cb; size_t n_del_for_cb;
#ifndef _EVENT_DISABLE_THREAD_SUPPORT #ifndef _EVENT_DISABLE_THREAD_SUPPORT
/** A lock used to mediate access to this buffer. */
void *lock; void *lock;
#endif #endif
/** True iff we should free the lock field when we free this
* evbuffer. */
unsigned own_lock : 1; unsigned own_lock : 1;
/** True iff we should not allow changes to the front of the buffer
* (drains or prepends). */
unsigned freeze_start : 1; unsigned freeze_start : 1;
/** True iff we should not allow changes to the end of the buffer
* (appends) */
unsigned freeze_end : 1; unsigned freeze_end : 1;
/** True iff this evbuffer's callbacks are not invoked immediately
* upon a change in the buffer, but instead are deferred to be invoked
* from the event_base's loop. Useful for preventing enormous stack
* overflows when we have mutually recursive callbacks, and for
* serializing callbacks in a single thread. */
unsigned deferred_cbs : 1; unsigned deferred_cbs : 1;
#ifdef WIN32 #ifdef WIN32
/** True iff this buffer is set up for overlapped IO. */
unsigned is_overlapped : 1; unsigned is_overlapped : 1;
#endif #endif
/** An event_base associated with this evbuffer. Used to implement
* deferred callbacks. */
struct event_base *ev_base; struct event_base *ev_base;
/** For debugging: how many times have we acquired the lock for this
* evbuffer? */
int lock_count; int lock_count;
/** A reference count on this evbuffer. When the reference count
* reaches 0, the buffer is destroyed. Manipulated with
* evbuffer_incref and evbuffer_decref_and_unlock and
* evbuffer_free. */
int refcnt; int refcnt;
/** A deferred_cb handle to make all of this buffer's callbacks
* invoked from the event loop. */
struct deferred_cb deferred; struct deferred_cb deferred;
/** A doubly-linked-list of callback functions */
TAILQ_HEAD(evbuffer_cb_queue, evbuffer_cb_entry) callbacks; TAILQ_HEAD(evbuffer_cb_queue, evbuffer_cb_entry) callbacks;
}; };
/** A single item in an evbuffer. */
struct evbuffer_chain { struct evbuffer_chain {
/** points to next buffer in the chain */ /** points to next buffer in the chain */
struct evbuffer_chain *next; struct evbuffer_chain *next;
@ -150,12 +189,15 @@ struct evbuffer_chain_reference {
}; };
#define EVBUFFER_CHAIN_SIZE sizeof(struct evbuffer_chain) #define EVBUFFER_CHAIN_SIZE sizeof(struct evbuffer_chain)
/** Return a pointer to extra data allocated along with an evbuffer chain. */
#define EVBUFFER_CHAIN_EXTRA(t, c) (t *)((struct evbuffer_chain *)(c) + 1) #define EVBUFFER_CHAIN_EXTRA(t, c) (t *)((struct evbuffer_chain *)(c) + 1)
/** Assert that somebody (hopefully us) is holding the lock on an evbuffer */
#define ASSERT_EVBUFFER_LOCKED(buffer) \ #define ASSERT_EVBUFFER_LOCKED(buffer) \
do { \ do { \
assert((buffer)->lock_count > 0); \ assert((buffer)->lock_count > 0); \
} while (0) } while (0)
/** Assert that nobody is holding the lock on an evbuffer */
#define ASSERT_EVBUFFER_UNLOCKED(buffer) \ #define ASSERT_EVBUFFER_UNLOCKED(buffer) \
do { \ do { \
assert((buffer)->lock_count == 0); \ assert((buffer)->lock_count == 0); \
@ -196,13 +238,27 @@ struct evbuffer_chain_reference {
EVTHREAD_WRITE, EVTHREAD_WRITE); \ EVTHREAD_WRITE, EVTHREAD_WRITE); \
} while(0) } while(0)
/** Increase the reference count of buf by one. */
void _evbuffer_incref(struct evbuffer *buf); void _evbuffer_incref(struct evbuffer *buf);
/** Pin a single buffer chain using a given flag. A pinned chunk may not be
* moved or freed until it is unpinned. */
void _evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag); void _evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag);
/** Unpin a single buffer chain using a given flag. */
void _evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag); void _evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag);
/** As evbuffer_free, but requires that we hold a lock on the buffer, and
* releases the lock before freeing it and the buffer. */
void _evbuffer_decref_and_unlock(struct evbuffer *buffer); void _evbuffer_decref_and_unlock(struct evbuffer *buffer);
/** As evbuffer_expand, but does not guarantee that the newly allocated memory
* is contiguous. Instead, it may be split across two chunks. */
int _evbuffer_expand_fast(struct evbuffer *, size_t); int _evbuffer_expand_fast(struct evbuffer *, size_t);
#ifdef _EVENT_HAVE_SYS_UIO_H #ifdef _EVENT_HAVE_SYS_UIO_H
/** Helper: prepares for a readv/WSARecv call by expanding the buffer to
* hold enough memory to read 'howmuch' bytes in possibly noncontiguous memory.
* Sets up the one or two iovecs in 'vecs' to point to the free memory and its
extent, and *chainp to point to the first chain that we'll try to read into.
* Returns the number of vecs used.
*/
int _evbuffer_read_setup_vecs(struct evbuffer *buf, ssize_t howmuch, int _evbuffer_read_setup_vecs(struct evbuffer *buf, ssize_t howmuch,
struct iovec *vecs, struct evbuffer_chain **chainp); struct iovec *vecs, struct evbuffer_chain **chainp);
#elif defined(WIN32) #elif defined(WIN32)
@ -210,10 +266,6 @@ int _evbuffer_read_setup_vecs(struct evbuffer *buf, ssize_t howmuch,
WSABUF *vecs, struct evbuffer_chain **chainp); WSABUF *vecs, struct evbuffer_chain **chainp);
#endif #endif
#ifdef WIN32
struct evbuffer *evbuffer_overlapped_new(evutil_socket_t fd);
#endif
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@ -48,20 +48,38 @@ extern "C" {
#define ev_ncalls _ev.ev_signal.ev_ncalls #define ev_ncalls _ev.ev_signal.ev_ncalls
#define ev_pncalls _ev.ev_signal.ev_pncalls #define ev_pncalls _ev.ev_signal.ev_pncalls
/** Structure to define the backend of a given event_base. */
struct eventop { struct eventop {
/** The name of this backend. */
const char *name; const char *name;
/** Set up an event_base to use this backend.*/
void *(*init)(struct event_base *); void *(*init)(struct event_base *);
/** Enable reading/writing on a given fd. */
int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo); int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
/** Disable reading/writing on a given fd. */
int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo); int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
/** Function to implement the core of an event loop. It must see which
added events are ready, and cause event_active to be called for each
active event (usually via event_io_active or such).
*/
int (*dispatch)(struct event_base *, struct timeval *); int (*dispatch)(struct event_base *, struct timeval *);
/** Function to clean up and free our data from the event_base. */
void (*dealloc)(struct event_base *); void (*dealloc)(struct event_base *);
/* set if we need to reinitialize the event base */ /** Set if we need to reinitialize the event base after we fork. */
int need_reinit; int need_reinit;
/** Bit-array of supported event_method_features */
enum event_method_feature features; enum event_method_feature features;
/** Length of extra information we should record for each fd that
has one or more active events.
*/
size_t fdinfo_len; size_t fdinfo_len;
}; };
#ifdef WIN32 #ifdef WIN32
/* If we're on win32, then file descriptors are not nice low densely packed
integers. Instead, they are pointer-like windows handles, and we want to
use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT #define EVMAP_USE_HT
#endif #endif
@ -82,45 +100,57 @@ struct event_signal_map {
}; };
struct event_base { struct event_base {
/** Function pointers and other data to describe this event_base's
* backend. */
const struct eventop *evsel; const struct eventop *evsel;
/** Pointer to backend-specific data. */
void *evbase; void *evbase;
/* signal handling info */ /* signal handling info */
const struct eventop *evsigsel; const struct eventop *evsigsel;
void *evsigbase; void *evsigbase;
struct evsig_info sig; struct evsig_info sig;
int event_count; /* counts number of total events */ int event_count; /**< counts number of total events */
int event_count_active; /* counts number of active events */ int event_count_active; /**< counts number of active events */
int event_gotterm; /* Set to terminate loop */ int event_gotterm; /**< Set to terminate loop once done
int event_break; /* Set to terminate loop immediately */ * processing events. */
int event_break; /**< Set to exit loop immediately */
/* active event management */ /* Active event management. */
/** An array of nactivequeues queues for active events (ones that
* have triggered, and whose callbacks need to be called). Low
* priority numbers are more important, and stall higher ones.
*/
struct event_list **activequeues; struct event_list **activequeues;
int nactivequeues; int nactivequeues;
/* deferred callback management */ /** Deferred callback management: a list of deferred callbacks to
* run after processing the active events. */
TAILQ_HEAD (deferred_cb_list, deferred_cb) deferred_cb_list; TAILQ_HEAD (deferred_cb_list, deferred_cb) deferred_cb_list;
/* for mapping io activity to events */ /** Mapping from file descriptors to enabled events */
struct event_io_map io; struct event_io_map io;
/* for mapping signal activity to events */ /** Mapping from signal numbers to enabled events. */
struct event_signal_map sigmap; struct event_signal_map sigmap;
/** All events that have been enabled (added) in this event_base */
struct event_list eventqueue; struct event_list eventqueue;
struct timeval event_tv; struct timeval event_tv;
/** Priority queue of events with timeouts. */
struct min_heap timeheap; struct min_heap timeheap;
struct timeval tv_cache; struct timeval tv_cache;
#ifndef _EVENT_DISABLE_THREAD_SUPPORT #ifndef _EVENT_DISABLE_THREAD_SUPPORT
/* threading support */ /* threading support */
/** The thread currently running the event_loop for this base */
unsigned long th_owner_id; unsigned long th_owner_id;
/** A lock to prevent conflicting accesses to this event_base */
void *th_base_lock; void *th_base_lock;
#endif #endif
@ -136,6 +166,8 @@ struct event_config_entry {
const char *avoid_method; const char *avoid_method;
}; };
/** Internal structure: describes the configuration we want for an event_base
* that we're about to allocate. */
struct event_config { struct event_config {
TAILQ_HEAD(event_configq, event_config_entry) entries; TAILQ_HEAD(event_configq, event_config_entry) entries;

View File

@ -27,7 +27,7 @@
#define _EVTHREAD_INTERNAL_H_ #define _EVTHREAD_INTERNAL_H_
#ifdef __cplusplus #ifdef __cplusplus
//extern "C" { extern "C" {
#endif #endif
#include "event-config.h" #include "event-config.h"
@ -36,41 +36,55 @@
struct event_base; struct event_base;
#ifndef _EVENT_DISABLE_THREAD_SUPPORT #ifndef _EVENT_DISABLE_THREAD_SUPPORT
/* Global function pointers to lock-related functions. NULL if locking isn't
enabled. */
extern void (*_evthread_locking_fn)(int mode, void *lock); extern void (*_evthread_locking_fn)(int mode, void *lock);
extern unsigned long (*_evthread_id_fn)(void); extern unsigned long (*_evthread_id_fn)(void);
extern void *(*_evthread_lock_alloc_fn)(void); extern void *(*_evthread_lock_alloc_fn)(void);
extern void (*_evthread_lock_free_fn)(void *); extern void (*_evthread_lock_free_fn)(void *);
/** True iff the given event_base is set up to use locking */
#define EVBASE_USING_LOCKS(base) \ #define EVBASE_USING_LOCKS(base) \
(base != NULL && (base)->th_base_lock != NULL) (base != NULL && (base)->th_base_lock != NULL)
/** Return the ID of the current thread, or 1 if threading isn't enabled. */
#define EVTHREAD_GET_ID() \ #define EVTHREAD_GET_ID() \
(_evthread_id_fn ? _evthread_id_fn() : 1) (_evthread_id_fn ? _evthread_id_fn() : 1)
/** Return true iff we're in the thread that is currently running a given
* event_base's loop. */
#define EVBASE_IN_THREAD(base) \ #define EVBASE_IN_THREAD(base) \
(_evthread_id_fn == NULL || \ (_evthread_id_fn == NULL || \
(base)->th_owner_id == _evthread_id_fn()) (base)->th_owner_id == _evthread_id_fn())
/** Allocate a new lock, and store it in lockvar, a void*. Sets lockvar to
NULL if locking is not enabled. */
#define EVTHREAD_ALLOC_LOCK(lockvar) \ #define EVTHREAD_ALLOC_LOCK(lockvar) \
((lockvar) = _evthread_lock_alloc_fn ? \ ((lockvar) = _evthread_lock_alloc_fn ? \
_evthread_lock_alloc_fn() : NULL) _evthread_lock_alloc_fn() : NULL)
/** Free a given lock, if it is present and locking is enabled. */
#define EVTHREAD_FREE_LOCK(lockvar) \ #define EVTHREAD_FREE_LOCK(lockvar) \
do { \ do { \
if (lockvar && _evthread_lock_free_fn) \ if (lockvar && _evthread_lock_free_fn) \
_evthread_lock_free_fn(lockvar); \ _evthread_lock_free_fn(lockvar); \
} while (0) } while (0)
/** Acquire a lock. */
#define EVLOCK_LOCK(lock,mode) \ #define EVLOCK_LOCK(lock,mode) \
do { \ do { \
if (lock) \ if (lock) \
_evthread_locking_fn(EVTHREAD_LOCK|mode, lock); \ _evthread_locking_fn(EVTHREAD_LOCK|mode, lock); \
} while (0) } while (0)
/** Release a lock */
#define EVLOCK_UNLOCK(lock,mode) \ #define EVLOCK_UNLOCK(lock,mode) \
do { \ do { \
if (lock) \ if (lock) \
_evthread_locking_fn(EVTHREAD_UNLOCK|mode, lock); \ _evthread_locking_fn(EVTHREAD_UNLOCK|mode, lock); \
} while (0) } while (0)
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
#define _EVLOCK_SORTLOCKS(lockvar1, lockvar2) \ #define _EVLOCK_SORTLOCKS(lockvar1, lockvar2) \
do { \ do { \
if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \ if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
@ -80,6 +94,8 @@ extern void (*_evthread_lock_free_fn)(void *);
} \ } \
} while (0) } while (0)
/** Acquire both lock1 and lock2. Always allocates locks in the same order,
* so that two threads locking two locks with LOCK2 will not deadlock. */
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) \ #define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) \
do { \ do { \
void *_lock1_tmplock = (lock1); \ void *_lock1_tmplock = (lock1); \
@ -90,6 +106,7 @@ extern void (*_evthread_lock_free_fn)(void *);
EVLOCK_LOCK(_lock2_tmplock,mode2); \ EVLOCK_LOCK(_lock2_tmplock,mode2); \
} while (0) } while (0)
/** Release both lock1 and lock2. */
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) \ #define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) \
do { \ do { \
void *_lock1_tmplock = (lock1); \ void *_lock1_tmplock = (lock1); \
@ -101,12 +118,15 @@ extern void (*_evthread_lock_free_fn)(void *);
} while (0) } while (0)
/** Lock an event_base, if it is set up for locking. Acquires the lock
in the base structure whose field is named 'lock'. */
#define EVBASE_ACQUIRE_LOCK(base, mode, lock) do { \ #define EVBASE_ACQUIRE_LOCK(base, mode, lock) do { \
if (EVBASE_USING_LOCKS(base)) \ if (EVBASE_USING_LOCKS(base)) \
_evthread_locking_fn(EVTHREAD_LOCK | mode, \ _evthread_locking_fn(EVTHREAD_LOCK | mode, \
(base)->lock); \ (base)->lock); \
} while (0) } while (0)
/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, mode, lock) do { \ #define EVBASE_RELEASE_LOCK(base, mode, lock) do { \
if (EVBASE_USING_LOCKS(base)) \ if (EVBASE_USING_LOCKS(base)) \
_evthread_locking_fn(EVTHREAD_UNLOCK | mode, \ _evthread_locking_fn(EVTHREAD_UNLOCK | mode, \

View File

@ -427,7 +427,7 @@ ht_string_hash(const char *s)
/* /*
* Copyright 2005, Nick Mathewson. Implementation logic is adapted from code * Copyright 2005, Nick Mathewson. Implementation logic is adapted from code
* by Cristopher Clark, retrofit to allow drop-in memory management, and to * by Cristopher Clark, retrofit to allow drop-in memory management, and to
* use the same interface as Niels Provos's HT_H. I'm not sure whether this * use the same interface as Niels Provos's tree.h. I'm not sure whether this
* is a derived work any more, but whether it is or not, the license below * is a derived work any more, but whether it is or not, the license below
* applies. * applies.
* *

View File

@ -119,10 +119,17 @@ void evbuffer_free(struct evbuffer *buf);
*/ */
int evbuffer_enable_locking(struct evbuffer *buf, void *lock); int evbuffer_enable_locking(struct evbuffer *buf, void *lock);
/* DOCDOC */ /**
Acquire the lock on an evbuffer. Has no effect if locking was not enabled
with evbuffer_enable_locking.
*/
void evbuffer_lock(struct evbuffer *buf); void evbuffer_lock(struct evbuffer *buf);
void evbuffer_unlock(struct evbuffer *buf);
/**
Release the lock on an evbuffer. Has no effect if locking was not enabled
with evbuffer_enable_locking.
*/
void evbuffer_unlock(struct evbuffer *buf);
/** /**
Returns the total number of bytes stored in the event buffer Returns the total number of bytes stored in the event buffer
@ -493,7 +500,7 @@ int evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg
@param buffer the evbuffer that the callback is watching. @param buffer the evbuffer that the callback is watching.
@param cb the callback whose status we want to change. @param cb the callback whose status we want to change.
@param flags EVBUFFER_CB_ENABLED to enable the callback, or @param flags EVBUFFER_CB_ENABLED to enable the callback, or
EVBUFFER_CB_DISABLEDD to disable it. EVBUFFER_CB_DISABLED to disable it.
@return 0 on success, -1 on failure. @return 0 on success, -1 on failure.
*/ */
int evbuffer_cb_set_flags(struct evbuffer *buffer, int evbuffer_cb_set_flags(struct evbuffer *buffer,

View File

@ -115,9 +115,14 @@ typedef void (*evbuffercb)(struct bufferevent *bev, void *ctx);
/* XXXX we should rename this to bufferevent_error_cb; see above. */ /* XXXX we should rename this to bufferevent_error_cb; see above. */
typedef void (*everrorcb)(struct bufferevent *bev, short what, void *ctx); typedef void (*everrorcb)(struct bufferevent *bev, short what, void *ctx);
/** Options that can be specified when creating a bufferevent */
enum bufferevent_options { enum bufferevent_options {
/** If set, we close the underlying file
* descriptor/bufferevent/whatever when this bufferevent is freed. */
BEV_OPT_CLOSE_ON_FREE = (1<<0), BEV_OPT_CLOSE_ON_FREE = (1<<0),
/** If set, and threading is enabled, operations on this bufferevent
* are protected by a lock */
BEV_OPT_THREADSAFE = (1<<1), BEV_OPT_THREADSAFE = (1<<1),
}; };
@ -389,14 +394,18 @@ typedef enum bufferevent_filter_result (*bufferevent_filter_cb)(
struct evbuffer *src, struct evbuffer *dst, ssize_t dst_limit, struct evbuffer *src, struct evbuffer *dst, ssize_t dst_limit,
enum bufferevent_flush_mode mode, void *ctx); enum bufferevent_flush_mode mode, void *ctx);
struct bufferevent_filter; /**
Allocate a new filtering bufferevent on top of an existing bufferevent.
enum bufferevent_filter_options { @param underlying the underlying bufferevent.
BEV_FILT_FREE_UNDERLYING = (1<<0), @param input_filter The filter to apply to data we read from the underlying
}; bufferevent
@param output_filter The filter to apply to data we write to the underlying
bufferevent
/** Allocate a new filtering bufferevent on top of an existing bufferevent. @param options A bitfield of bufferevent options.
@param free_context A function to use to free the filter context when
this bufferevent is freed.
@param ctx A context pointer to pass to the filter functions.
*/ */
struct bufferevent * struct bufferevent *
bufferevent_filter_new(struct bufferevent *underlying, bufferevent_filter_new(struct bufferevent *underlying,
@ -412,6 +421,9 @@ bufferevent_filter_new(struct bufferevent *underlying,
socketpair(), except that no internal socketpair is allocated. socketpair(), except that no internal socketpair is allocated.
@param base The event base to associate with the socketpair. @param base The event base to associate with the socketpair.
@param options A set of options for this bufferevent
@param pair A pointer to an array to hold the two new bufferevent objects.
@return 0 on success, -1 on failure.
*/ */
int int
bufferevent_pair_new(struct event_base *base, enum bufferevent_options options, bufferevent_pair_new(struct event_base *base, enum bufferevent_options options,

View File

@ -54,6 +54,18 @@ extern "C" {
#endif #endif
#include <stdarg.h> #include <stdarg.h>
/* Integer type definitions for types that are supposed to be defined in the
* C99-specified stdint.h. Shamefully, some platforms do not include
* stdint.h, so we need to replace it. (If you are on a platform like this,
* your C headers are now 10 years out of date. You should bug them to do
* something about this.)
*
* We define:
* ev_uint64_t, ev_uint32_t, ev_uint16_t, ev_uint8_t -- unsigned integer
* types of exactly 64, 32, 16, and 8 bits respectively.
* ev_int64_t, ev_int32_t, ev_int16_t, ev_int8_t -- signed integer
* types of exactly 64, 32, 16, and 8 bits respectively.
*/
#ifdef _EVENT_HAVE_UINT64_T #ifdef _EVENT_HAVE_UINT64_T
#define ev_uint64_t uint64_t #define ev_uint64_t uint64_t
#define ev_int64_t int64_t #define ev_int64_t int64_t
@ -112,6 +124,9 @@ extern "C" {
simply calls socketpair(). On Windows, it uses the loopback network simply calls socketpair(). On Windows, it uses the loopback network
interface on 127.0.0.1, and only AF_INET,SOCK_STREAM are supported. interface on 127.0.0.1, and only AF_INET,SOCK_STREAM are supported.
(This may fail on some Windows hosts where firewall software has cleverly
decided to keep 127.0.0.1 from talking to itself.)
Parameters and return values are as for socketpair() Parameters and return values are as for socketpair()
*/ */
int evutil_socketpair(int d, int type, int protocol, evutil_socket_t sv[2]); int evutil_socketpair(int d, int type, int protocol, evutil_socket_t sv[2]);
@ -166,7 +181,8 @@ const char *evutil_socket_error_to_string(int errcode);
#endif #endif
/* /*
* Manipulation macros for struct timeval * Manipulation macros for struct timeval. We define replacements
* for timeradd, timersub, timerclear, timercmp, and timerisset.
*/ */
#ifdef _EVENT_HAVE_TIMERADD #ifdef _EVENT_HAVE_TIMERADD
#define evutil_timeradd(tvp, uvp, vvp) timeradd((tvp), (uvp), (vvp)) #define evutil_timeradd(tvp, uvp, vvp) timeradd((tvp), (uvp), (vvp))
@ -198,6 +214,8 @@ const char *evutil_socket_error_to_string(int errcode);
#define evutil_timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0 #define evutil_timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0
#endif #endif
/** Return true iff the tvp is related to uvp according to the relational
* operator cmp. Recognized values for cmp are ==, <=, <, >=, and >. */
#define evutil_timercmp(tvp, uvp, cmp) \ #define evutil_timercmp(tvp, uvp, cmp) \
(((tvp)->tv_sec == (uvp)->tv_sec) ? \ (((tvp)->tv_sec == (uvp)->tv_sec) ? \
((tvp)->tv_usec cmp (uvp)->tv_usec) : \ ((tvp)->tv_usec cmp (uvp)->tv_usec) : \
@ -209,6 +227,7 @@ const char *evutil_socket_error_to_string(int errcode);
#define evutil_timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec) #define evutil_timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
#endif #endif
/* Replacement for offsetof on platforms that don't define it. */
#ifdef offsetof #ifdef offsetof
#define evutil_offsetof(type, field) offsetof(type, field) #define evutil_offsetof(type, field) offsetof(type, field)
#else #else

View File

@ -32,28 +32,95 @@ extern "C" {
#endif #endif
struct event_overlapped; struct event_overlapped;
struct event_iocp_port;
struct evbuffer;
typedef void (*iocp_callback)(struct event_overlapped *, uintptr_t, ssize_t); typedef void (*iocp_callback)(struct event_overlapped *, uintptr_t, ssize_t);
/* This whole file is actually win32 only. We wrap the structures in a win32
* ifdef so that we can test-compile code that uses these interfaces on
* non-win32 platforms. */
#ifdef WIN32
/**
Internal use only. Wraps an OVERLAPPED that we're using for libevent
functionality. Whenever an event_iocp_port gets an event for a given
OVERLAPPED*, it upcasts the pointer to an event_overlapped, and calls the
iocp_callback function with the event_overlapped, the iocp key, and the
number of bytes transferred as arguments.
*/
struct event_overlapped { struct event_overlapped {
OVERLAPPED overlapped; OVERLAPPED overlapped;
iocp_callback cb; iocp_callback cb;
}; };
/**
Internal use only. Stores a Windows IO Completion port, along with
related data.
*/
struct event_iocp_port { struct event_iocp_port {
/** The port itself */
HANDLE port; HANDLE port;
/** Number of threads open on the port. */
int n_threads; int n_threads;
/** True iff we're shutting down all the threads on this port */
int shutdown; int shutdown;
/** How often the threads on this port check for shutdown and other
* conditions */
long ms; long ms;
}; };
#endif
struct evbuffer; /** Initialize the fields in an event_overlapped.
@param overlapped The struct event_overlapped to initialize
@param cb The callback that should be invoked once the IO operation has
finished.
*/
void event_overlapped_init(struct event_overlapped *, iocp_callback cb); void event_overlapped_init(struct event_overlapped *, iocp_callback cb);
/** Allocate and return a new evbuffer that supports overlapped IO on a given
socket. The socket must be associated with an IO completion port using
event_iocp_port_associate.
*/
struct evbuffer *evbuffer_overlapped_new(evutil_socket_t fd);
/** Start reading data onto the end of an overlapped evbuffer.
An evbuffer can only have one read pending at a time. While the read
is in progress, no other data may be added to the end of the buffer.
The buffer must be created with event_overlapped_init().
@param buf The buffer to read onto
@param n The number of bytes to try to read.
@return 0 on success, -1 on error.
*/
int evbuffer_launch_read(struct evbuffer *, size_t n); int evbuffer_launch_read(struct evbuffer *, size_t n);
/** Start writing data from the start of an evbuffer.
An evbuffer can only have one write pending at a time. While the write is
in progress, no other data may be removed from the front of the buffer.
The buffer must be created with event_overlapped_init().
@param buf The buffer to read onto
@param n The number of bytes to try to read.
@return 0 on success, -1 on error.
*/
int evbuffer_launch_write(struct evbuffer *, ssize_t n); int evbuffer_launch_write(struct evbuffer *, ssize_t n);
/** Create an IOCP, and launch its worker threads. Internal use only.
This interface is unstable, and will change.
*/
struct event_iocp_port *event_iocp_port_launch(void); struct event_iocp_port *event_iocp_port_launch(void);
/** Associate a file descriptor with an iocp, such that overlapped IO on the
fd will happen on one of the iocp's worker threads.
*/
int event_iocp_port_associate(struct event_iocp_port *port, evutil_socket_t fd, int event_iocp_port_associate(struct event_iocp_port *port, evutil_socket_t fd,
uintptr_t key); uintptr_t key);
/** Shut down all threads serving an iocp. */
void event_iocp_shutdown(struct event_iocp_port *port); void event_iocp_shutdown(struct event_iocp_port *port);
#ifdef __cplusplus #ifdef __cplusplus

View File

@ -38,6 +38,12 @@
extern "C" { extern "C" {
#endif #endif
/** @file ipv6-internal.h
*
* Replacement types and functions for platfoms that don't support ipv6
* properly.
*/
#ifndef _EVENT_HAVE_STRUCT_IN6_ADDR #ifndef _EVENT_HAVE_STRUCT_IN6_ADDR
struct in6_addr { struct in6_addr {
ev_uint8_t s6_addr[16]; ev_uint8_t s6_addr[16];

View File

@ -101,6 +101,21 @@ extern const char EVUTIL_TOLOWER_TABLE[];
#define EVUTIL_TOLOWER(c) (EVUTIL_TOLOWER_TABLE[(ev_uint8_t)c]) #define EVUTIL_TOLOWER(c) (EVUTIL_TOLOWER_TABLE[(ev_uint8_t)c])
#define EVUTIL_TOUPPER(c) (EVUTIL_TOUPPER_TABLE[(ev_uint8_t)c]) #define EVUTIL_TOUPPER(c) (EVUTIL_TOUPPER_TABLE[(ev_uint8_t)c])
/** Helper macro. If we know that a given pointer points to a field in a
structure, return a pointer to the structure itself. Used to implement
our half-baked C OO. Example:
struct subtype {
int x;
struct supertype common;
int y;
};
...
void fn(struct supertype *super) {
struct subtype *sub = EVUTIL_UPCAST(super, struct subtype, common);
...
}
*/
#define EVUTIL_UPCAST(ptr, type, field) \ #define EVUTIL_UPCAST(ptr, type, field) \
((type *)((char*)ptr) - evutil_offsetof(type, field)) ((type *)((char*)ptr) - evutil_offsetof(type, field))