Mirror of https://github.com/libevent/libevent.git
Document many internal functions and pieces of code.
svn:r1181
This commit is contained in:
parent 433e2339ca
commit 838d0a81c3
@@ -81,8 +81,12 @@ BUILT_SOURCES = event-config.h

event-config.h: config.h
	echo '/* event-config.h' > $@
	echo ' * Generated by autoconf; post-processed by libevent.' >> $@
	echo ' * Do not edit this file.' >> $@
	echo ' *' >> $@
	echo ' * This file was generated by autoconf when libevent was built, and post-' >> $@
	echo ' * processed by Libevent so that its macros would have a uniform prefix.' >> $@
	echo ' *' >> $@
	echo ' * DO NOT EDIT THIS FILE.' >> $@
	echo ' *' >> $@
	echo ' * Do not rely on macros in this file existing in later versions.'>> $@
	echo ' */' >> $@
	echo '#ifndef _EVENT_CONFIG_H_' >> $@
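As a rough illustration (not part of this commit), the prefixing that the generated header provides looks like the following; HAVE_SYS_TIME_H is only an example macro, but the _EVENT_ prefix matches the macros used elsewhere in this change:

/* config.h (autoconf output):       #define HAVE_SYS_TIME_H 1
 * event-config.h (post-processed):  #define _EVENT_HAVE_SYS_TIME_H 1
 */
#include "event-config.h"
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif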
@@ -1,3 +1,35 @@
/*
 * Copyright (c) 2009 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
	@file buffer_iocp.c

	This module implements overlapped read and write functions for evbuffer
	objects on Windows.
*/

#include <windows.h>
#include <assert.h>
@@ -15,25 +47,38 @@

#define MAX_WSABUFS 16

/** Wrapper for an OVERLAPPED that holds the necessary info to notice
    when an overlapped read or write is done on an evbuffer.
 **/
struct buffer_overlapped {
	struct event_overlapped event_overlapped;

	/** The first pinned chain in the buffer. */
	struct evbuffer_chain *first_pinned;
	/** The buffer itself. */
	struct evbuffer_overlapped *buf;
	/** How many chains are pinned; how many of the fields in buffers
	 * are we using. */
	int n_buffers;
	WSABUF buffers[MAX_WSABUFS];
};

/** An evbuffer that can handle overlapped IO. */
struct evbuffer_overlapped {
	struct evbuffer buffer;
	/** The socket that we're doing overlapped IO on. */
	evutil_socket_t fd;
	/** True iff we have scheduled a write. */
	unsigned write_in_progress : 1;
	/** True iff we have scheduled a read. */
	unsigned read_in_progress : 1;

	struct buffer_overlapped read_info;
	struct buffer_overlapped write_info;
};

/** Given an evbuffer, return the corresponding evbuffer_overlapped
 * structure, or NULL if the evbuffer isn't overlapped. */
static inline struct evbuffer_overlapped *
upcast_evbuffer(struct evbuffer *buf)
{
@@ -48,6 +93,7 @@ upcast_overlapped(struct event_overlapped *o)
	return EVUTIL_UPCAST(o, struct buffer_overlapped, event_overlapped);
}

/** Unpin all the chains noted as pinned in 'eo'. */
static void
pin_release(struct event_overlapped *eo, unsigned flag)
{
@@ -62,6 +108,7 @@ pin_release(struct event_overlapped *eo, unsigned flag)
	}
}

/** IOCP callback invoked when a read operation is finished. */
static void
read_completed(struct event_overlapped *eo, uintptr_t _, ssize_t nBytes)
{
@@ -95,6 +142,7 @@ read_completed(struct event_overlapped *eo, uintptr_t _, ssize_t nBytes)
	_evbuffer_decref_and_unlock(evbuf);
}

/** IOCP callback invoked when a write operation is finished. */
static void
write_completed(struct event_overlapped *eo, uintptr_t _, ssize_t nBytes)
{
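A minimal sketch (not from this file) of the embed-and-upcast pattern these structures rely on: the completion callback receives the embedded event_overlapped and recovers the enclosing buffer_overlapped with EVUTIL_UPCAST, exactly as upcast_overlapped does above. The callback body here is a placeholder.

static void
example_completed(struct event_overlapped *eo, uintptr_t key, ssize_t nbytes)
{
	/* Recover the wrapper that embeds this OVERLAPPED. */
	struct buffer_overlapped *bo =
	    EVUTIL_UPCAST(eo, struct buffer_overlapped, event_overlapped);
	/* From there, the owning overlapped evbuffer is one pointer away. */
	struct evbuffer_overlapped *buf = bo->buf;

	(void)key;
	(void)nbytes;
	(void)buf;
	/* ... commit the transferred bytes and unpin the chains here ... */
}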
@@ -37,15 +37,34 @@ struct deferred_cb;

typedef void (*deferred_cb_fn)(struct deferred_cb *, void *);

/** A deferred_cb is a callback that can be scheduled to run as part of
 * an event_base's event_loop, rather than running immediately. */
struct deferred_cb {
	/** Links to the adjacent active (pending) deferred_cb objects. */
	TAILQ_ENTRY (deferred_cb) (cb_next);
	/** True iff this deferred_cb is pending in an event_base. */
	unsigned queued : 1;
	/** The function to execute when the callback runs. */
	deferred_cb_fn cb;
	/** The function's second argument. */
	void *arg;
};

/**
   Initialize an empty, non-pending deferred_cb.

   @param deferred The deferred_cb structure to initialize.
   @param cb The function to run when the deferred_cb executes.
   @param arg The function's second argument.
 */
void event_deferred_cb_init(struct deferred_cb *, deferred_cb_fn, void *);
/**
   Cancel a deferred_cb if it is currently scheduled in an event_base.
 */
void event_deferred_cb_cancel(struct event_base *, struct deferred_cb *);
/**
   Activate a deferred_cb if it is not currently scheduled in an event_base.
 */
void event_deferred_cb_schedule(struct event_base *, struct deferred_cb *);

#ifdef __cplusplus
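A hedged usage sketch of the API declared above; the my_work wrapper struct and its callback body are hypothetical, but the function signatures match the declarations in this header.

struct my_work {
	struct deferred_cb deferred;
	int done;
};

static void
my_deferred_fn(struct deferred_cb *cb, void *arg)
{
	struct my_work *w = arg;	/* runs later, from the event loop */
	w->done = 1;
	(void)cb;
}

static void
schedule_work(struct event_base *base, struct my_work *w)
{
	event_deferred_cb_init(&w->deferred, my_deferred_fn, w);
	event_deferred_cb_schedule(base, &w->deferred);
	/* event_deferred_cb_cancel(base, &w->deferred) would un-queue it. */
}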
@@ -39,11 +39,14 @@ extern "C" {
/* minimum allocation for a chain. */
#define MIN_BUFFER_SIZE 256

/** A single evbuffer callback for an evbuffer. */
/** A single evbuffer callback for an evbuffer. This function will be invoked
 * when bytes are added to or removed from the evbuffer. */
struct evbuffer_cb_entry {
	/** Structures to implement a doubly-linked queue of callbacks */
	TAILQ_ENTRY(evbuffer_cb_entry) next;
	/** The callback function to invoke when this callback is called */
	/** The callback function to invoke when this callback is called.
	    If EVBUFFER_CB_OBSOLETE is set in flags, the cb_obsolete field is
	    valid; otherwise, cb_func is valid. */
	union {
		evbuffer_cb_func cb_func;
		evbuffer_cb cb_obsolete;
@@ -61,39 +64,75 @@ struct evbuffer_cb_entry {

struct evbuffer_chain;
struct evbuffer {
	/** The first chain in this buffer's linked list of chains. */
	struct evbuffer_chain *first;
	/** The last chain in this buffer's linked list of chains. */
	struct evbuffer_chain *last;
	/** The next-to-last chain in this buffer's linked list of chains.
	 * NULL if the buffer has 0 or 1 chains.  Used in case there's an
	 * ongoing read that needs to be split across multiple chains: we want
	 * to add a new chain as a read target, but we don't want to lose our
	 * pointer to the next-to-last chain if the read turns out to be
	 * incomplete.
	 */
	struct evbuffer_chain *previous_to_last;

	size_t total_len; /* total length of all buffers */

	evbuffer_cb cb;
	void *cbarg;
	/** Total amount of bytes stored in all chains.*/
	size_t total_len;

	/** Number of bytes we have added to the buffer since we last tried to
	 * invoke callbacks. */
	size_t n_add_for_cb;
	/** Number of bytes we have removed from the buffer since we last
	 * tried to invoke callbacks. */
	size_t n_del_for_cb;

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
	/** A lock used to mediate access to this buffer. */
	void *lock;
#endif
	/** True iff we should free the lock field when we free this
	 * evbuffer. */
	unsigned own_lock : 1;
	/** True iff we should not allow changes to the front of the buffer
	 * (drains or prepends). */
	unsigned freeze_start : 1;
	/** True iff we should not allow changes to the end of the buffer
	 * (appends) */
	unsigned freeze_end : 1;
	/** True iff this evbuffer's callbacks are not invoked immediately
	 * upon a change in the buffer, but instead are deferred to be invoked
	 * from the event_base's loop.  Useful for preventing enormous stack
	 * overflows when we have mutually recursive callbacks, and for
	 * serializing callbacks in a single thread. */
	unsigned deferred_cbs : 1;
#ifdef WIN32
	/** True iff this buffer is set up for overlapped IO. */
	unsigned is_overlapped : 1;
#endif

	/** An event_base associated with this evbuffer.  Used to implement
	 * deferred callbacks. */
	struct event_base *ev_base;

	/** For debugging: how many times have we acquired the lock for this
	 * evbuffer? */
	int lock_count;
	/** A reference count on this evbuffer.  When the reference count
	 * reaches 0, the buffer is destroyed.  Manipulated with
	 * evbuffer_incref and evbuffer_decref_and_unlock and
	 * evbuffer_free. */
	int refcnt;

	/** A deferred_cb handle to make all of this buffer's callbacks
	 * invoked from the event loop. */
	struct deferred_cb deferred;

	/** A doubly-linked-list of callback functions */
	TAILQ_HEAD(evbuffer_cb_queue, evbuffer_cb_entry) callbacks;
};

/** A single item in an evbuffer. */
struct evbuffer_chain {
	/** points to next buffer in the chain */
	struct evbuffer_chain *next;
@@ -150,12 +189,15 @@ struct evbuffer_chain_reference {
};

#define EVBUFFER_CHAIN_SIZE sizeof(struct evbuffer_chain)
/** Return a pointer to extra data allocated along with an evbuffer. */
#define EVBUFFER_CHAIN_EXTRA(t, c) (t *)((struct evbuffer_chain *)(c) + 1)

/** Assert that somebody (hopefully us) is holding the lock on an evbuffer */
#define ASSERT_EVBUFFER_LOCKED(buffer) \
	do { \
		assert((buffer)->lock_count > 0); \
	} while (0)
/** Assert that nobody is holding the lock on an evbuffer */
#define ASSERT_EVBUFFER_UNLOCKED(buffer) \
	do { \
		assert((buffer)->lock_count == 0); \
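A small sketch (not from this header) of how EVBUFFER_CHAIN_EXTRA is meant to be used: a chain is over-allocated, and the extra data lives immediately after the evbuffer_chain itself. The evbuffer_chain_example type is hypothetical.

struct evbuffer_chain_example {
	int some_extra_field;
};

static int
example_chain_extra(struct evbuffer_chain *chain)
{
	/* The extra struct was allocated in the same block, right after
	 * the chain header. */
	struct evbuffer_chain_example *info =
	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_example, chain);
	return info->some_extra_field;
}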
@@ -196,13 +238,27 @@ struct evbuffer_chain_reference {
		EVTHREAD_WRITE, EVTHREAD_WRITE); \
	} while(0)

/** Increase the reference count of buf by one. */
void _evbuffer_incref(struct evbuffer *buf);
/** Pin a single buffer chain using a given flag.  A pinned chunk may not be
 * moved or freed until it is unpinned. */
void _evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag);
/** Unpin a single buffer chain using a given flag. */
void _evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag);
/** As evbuffer_free, but requires that we hold a lock on the buffer, and
 * releases the lock before freeing it and the buffer. */
void _evbuffer_decref_and_unlock(struct evbuffer *buffer);
/** As evbuffer_expand, but does not guarantee that the newly allocated memory
 * is contiguous.  Instead, it may be split across two chunks. */
int _evbuffer_expand_fast(struct evbuffer *, size_t);

#ifdef _EVENT_HAVE_SYS_UIO_H
/** Helper: prepares for a readv/WSARecv call by expanding the buffer to
 * hold enough memory to read 'howmuch' bytes in possibly noncontiguous memory.
 * Sets up the one or two iovecs in 'vecs' to point to the free memory and its
 * extent, and *chainp to point to the first chain that we'll try to read into.
 * Returns the number of vecs used.
 */
int _evbuffer_read_setup_vecs(struct evbuffer *buf, ssize_t howmuch,
    struct iovec *vecs, struct evbuffer_chain **chainp);
#elif defined(WIN32)
@@ -210,10 +266,6 @@ int _evbuffer_read_setup_vecs(struct evbuffer *buf, ssize_t howmuch,
    WSABUF *vecs, struct evbuffer_chain **chainp);
#endif
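A hedged sketch of the intended calling pattern for _evbuffer_expand_fast and _evbuffer_read_setup_vecs on the readv side; error handling and the step that commits the new bytes back into the buffer are deliberately left as a comment.

#include <sys/uio.h>

static int
example_read_into_buffer(struct evbuffer *buf, int fd, ssize_t howmuch)
{
	struct iovec vecs[2];
	struct evbuffer_chain *chain;
	int nvecs;
	ssize_t n;

	if (_evbuffer_expand_fast(buf, howmuch) < 0)
		return -1;
	nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs, &chain);
	n = readv(fd, vecs, nvecs);
	/* ... on success, record the n new bytes starting at 'chain' ... */
	return (int)n;
}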

#ifdef WIN32
struct evbuffer *evbuffer_overlapped_new(evutil_socket_t fd);
#endif

#ifdef __cplusplus
}
#endif
@@ -48,20 +48,38 @@ extern "C" {
#define ev_ncalls _ev.ev_signal.ev_ncalls
#define ev_pncalls _ev.ev_signal.ev_pncalls

/** Structure to define the backend of a given event_base. */
struct eventop {
	/** The name of this backend. */
	const char *name;
	/** Set up an event_base to use this backend.*/
	void *(*init)(struct event_base *);
	/** Enable reading/writing on a given fd. */
	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Disable reading/writing on a given fd. */
	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Function to implement the core of an event loop.  It must see which
	    added events are ready, and cause event_active to be called for each
	    active event (usually via event_io_active or such).
	 */
	int (*dispatch)(struct event_base *, struct timeval *);
	/** Function to clean up and free our data from the event_base. */
	void (*dealloc)(struct event_base *);
	/* set if we need to reinitialize the event base */
	/** Set if we need to reinitialize the event base after we fork. */
	int need_reinit;
	/** Bit-array of supported event_method_features */
	enum event_method_feature features;
	/** Length of extra information we should record for each fd that
	    has one or more active events.
	 */
	size_t fdinfo_len;
};

#ifdef WIN32
/* If we're on win32, then file descriptors are not nice low densely packed
   integers.  Instead, they are pointer-like windows handles, and we want to
   use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif
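As a rough, hedged sketch of how a backend fills in this structure (the example_* functions are placeholders assumed to be defined elsewhere; the feature flag is illustrative):

void *example_init(struct event_base *base);
int example_add(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *fdinfo);
int example_del(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *fdinfo);
int example_dispatch(struct event_base *base, struct timeval *tv);
void example_dealloc(struct event_base *base);

const struct eventop exampleops = {
	"example",
	example_init,
	example_add,
	example_del,
	example_dispatch,
	example_dealloc,
	0,			/* need_reinit */
	EV_FEATURE_FDS,		/* features */
	0			/* fdinfo_len */
};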
@@ -82,45 +100,57 @@ struct event_signal_map {
};

struct event_base {
	/** Function pointers and other data to describe this event_base's
	 * backend. */
	const struct eventop *evsel;
	/** Pointer to backend-specific data. */
	void *evbase;

	/* signal handling info */
	const struct eventop *evsigsel;
	void *evsigbase;

	struct evsig_info sig;

	int event_count;	/* counts number of total events */
	int event_count_active;	/* counts number of active events */
	int event_count;	/**< counts number of total events */
	int event_count_active;	/**< counts number of active events */

	int event_gotterm;	/* Set to terminate loop */
	int event_break;	/* Set to terminate loop immediately */
	int event_gotterm;	/**< Set to terminate loop once done
				 * processing events. */
	int event_break;	/**< Set to exit loop immediately */

	/* active event management */
	/* Active event management. */
	/** An array of nactivequeues queues for active events (ones that
	 * have triggered, and whose callbacks need to be called).  Low
	 * priority numbers are more important, and stall higher ones.
	 */
	struct event_list **activequeues;
	int nactivequeues;

	/* deferred callback management */
	/** Deferred callback management: a list of deferred callbacks to
	 * run after the active events. */
	TAILQ_HEAD (deferred_cb_list, deferred_cb) deferred_cb_list;

	/* for mapping io activity to events */
	/** Mapping from file descriptors to enabled events */
	struct event_io_map io;

	/* for mapping signal activity to events */
	/** Mapping from signal numbers to enabled events. */
	struct event_signal_map sigmap;

	/** All events that have been enabled (added) in this event_base */
	struct event_list eventqueue;

	struct timeval event_tv;

	/** Priority queue of events with timeouts. */
	struct min_heap timeheap;

	struct timeval tv_cache;

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
	/* threading support */
	/** The thread currently running the event_loop for this base */
	unsigned long th_owner_id;
	/** A lock to prevent conflicting accesses to this event_base */
	void *th_base_lock;
#endif

@@ -136,6 +166,8 @@ struct event_config_entry {
	const char *avoid_method;
};

/** Internal structure: describes the configuration we want for an event_base
 * that we're about to allocate. */
struct event_config {
	TAILQ_HEAD(event_configq, event_config_entry) entries;
@@ -27,7 +27,7 @@
#define _EVTHREAD_INTERNAL_H_

#ifdef __cplusplus
//extern "C" {
extern "C" {
#endif

#include "event-config.h"
@@ -36,41 +36,55 @@
struct event_base;

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
/* Global function pointers to lock-related functions.  NULL if locking isn't
   enabled. */
extern void (*_evthread_locking_fn)(int mode, void *lock);
extern unsigned long (*_evthread_id_fn)(void);
extern void *(*_evthread_lock_alloc_fn)(void);
extern void (*_evthread_lock_free_fn)(void *);

/** True iff the given event_base is set up to use locking */
#define EVBASE_USING_LOCKS(base) \
	(base != NULL && (base)->th_base_lock != NULL)

/** Return the ID of the current thread, or 1 if threading isn't enabled. */
#define EVTHREAD_GET_ID() \
	(_evthread_id_fn ? _evthread_id_fn() : 1)

/** Return true iff we're in the thread that is currently running a given
 * event_base's loop. */
#define EVBASE_IN_THREAD(base) \
	(_evthread_id_fn == NULL || \
	 (base)->th_owner_id == _evthread_id_fn())

/** Allocate a new lock, and store it in lockvar, a void*.  Sets lockvar to
    NULL if locking is not enabled. */
#define EVTHREAD_ALLOC_LOCK(lockvar) \
	((lockvar) = _evthread_lock_alloc_fn ? \
	    _evthread_lock_alloc_fn() : NULL)

/** Free a given lock, if it is present and locking is enabled. */
#define EVTHREAD_FREE_LOCK(lockvar) \
	do { \
		if (lockvar && _evthread_lock_free_fn) \
			_evthread_lock_free_fn(lockvar); \
	} while (0)

/** Acquire a lock. */
#define EVLOCK_LOCK(lock,mode) \
	do { \
		if (lock) \
			_evthread_locking_fn(EVTHREAD_LOCK|mode, lock); \
	} while (0)

/** Release a lock */
#define EVLOCK_UNLOCK(lock,mode) \
	do { \
		if (lock) \
			_evthread_locking_fn(EVTHREAD_UNLOCK|mode, lock); \
	} while (0)

/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
#define _EVLOCK_SORTLOCKS(lockvar1, lockvar2) \
	do { \
		if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
@@ -80,6 +94,8 @@ extern void (*_evthread_lock_free_fn)(void *);
		} \
	} while (0)

/** Acquire both lock1 and lock2.  Always acquires locks in the same order,
 * so that two threads locking two locks with LOCK2 will not deadlock. */
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) \
	do { \
		void *_lock1_tmplock = (lock1); \
@@ -90,6 +106,7 @@ extern void (*_evthread_lock_free_fn)(void *);
		EVLOCK_LOCK(_lock2_tmplock,mode2); \
	} while (0)

/** Release both lock1 and lock2. */
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) \
	do { \
		void *_lock1_tmplock = (lock1); \
@@ -101,12 +118,15 @@ extern void (*_evthread_lock_free_fn)(void *);
	} while (0)

/** Lock an event_base, if it is set up for locking.  Acquires the lock
    in the base structure whose field is named 'lock'. */
#define EVBASE_ACQUIRE_LOCK(base, mode, lock) do { \
		if (EVBASE_USING_LOCKS(base)) \
			_evthread_locking_fn(EVTHREAD_LOCK | mode, \
			    (base)->lock); \
	} while (0)

/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, mode, lock) do { \
		if (EVBASE_USING_LOCKS(base)) \
			_evthread_locking_fn(EVTHREAD_UNLOCK | mode, \
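A hedged sketch of how these macros are meant to be combined by internal code; the example_obj structure and its fields are placeholders, and EVTHREAD_WRITE is the mode constant already used elsewhere in this change.

struct example_obj {
	void *lock;
	int counter;
};

static void
example_obj_setup(struct example_obj *obj)
{
	/* Leaves obj->lock NULL if locking has not been enabled. */
	EVTHREAD_ALLOC_LOCK(obj->lock);
}

static void
example_obj_increment(struct example_obj *obj)
{
	EVLOCK_LOCK(obj->lock, EVTHREAD_WRITE);
	obj->counter++;
	EVLOCK_UNLOCK(obj->lock, EVTHREAD_WRITE);
}

static void
example_obj_teardown(struct example_obj *obj)
{
	EVTHREAD_FREE_LOCK(obj->lock);
}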
@@ -427,7 +427,7 @@ ht_string_hash(const char *s)
/*
 * Copyright 2005, Nick Mathewson.  Implementation logic is adapted from code
 * by Cristopher Clark, retrofit to allow drop-in memory management, and to
 * use the same interface as Niels Provos's HT_H.  I'm not sure whether this
 * use the same interface as Niels Provos's tree.h.  I'm not sure whether this
 * is a derived work any more, but whether it is or not, the license below
 * applies.
 *
@@ -119,10 +119,17 @@ void evbuffer_free(struct evbuffer *buf);
 */
int evbuffer_enable_locking(struct evbuffer *buf, void *lock);

/* DOCDOC */
/**
   Acquire the lock on an evbuffer.  Has no effect if locking was not enabled
   with evbuffer_enable_locking.
*/
void evbuffer_lock(struct evbuffer *buf);
void evbuffer_unlock(struct evbuffer *buf);

/**
   Release the lock on an evbuffer.  Has no effect if locking was not enabled
   with evbuffer_enable_locking.
*/
void evbuffer_unlock(struct evbuffer *buf);

/**
  Returns the total number of bytes stored in the event buffer
@@ -493,7 +500,7 @@ int evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg
  @param buffer the evbuffer that the callback is watching.
  @param cb the callback whose status we want to change.
  @param flags EVBUFFER_CB_ENABLED to enable the callback, or
      EVBUFFER_CB_DISABLEDD to disable it.
      EVBUFFER_CB_DISABLED to disable it.
  @return 0 on success, -1 on failure.
 */
int evbuffer_cb_set_flags(struct evbuffer *buffer,
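A hedged usage sketch of the locking functions documented above, assuming a buffer shared between threads; passing NULL to evbuffer_enable_locking asks libevent to allocate the lock itself.

#include <event2/buffer.h>

static void
example_locked_append(struct evbuffer *shared_buf)
{
	evbuffer_lock(shared_buf);
	/* These two additions appear atomic to other threads. */
	evbuffer_add(shared_buf, "key=", 4);
	evbuffer_add(shared_buf, "value\n", 6);
	evbuffer_unlock(shared_buf);
}

/* Somewhere during setup:
 *	struct evbuffer *shared_buf = evbuffer_new();
 *	evbuffer_enable_locking(shared_buf, NULL);
 */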
@@ -115,9 +115,14 @@ typedef void (*evbuffercb)(struct bufferevent *bev, void *ctx);
/* XXXX we should rename this to bufferevent_error_cb; see above. */
typedef void (*everrorcb)(struct bufferevent *bev, short what, void *ctx);

/** Options that can be specified when creating a bufferevent */
enum bufferevent_options {
	/** If set, we close the underlying file
	 * descriptor/bufferevent/whatever when this bufferevent is freed. */
	BEV_OPT_CLOSE_ON_FREE = (1<<0),

	/** If set, and threading is enabled, operations on this bufferevent
	 * are protected by a lock */
	BEV_OPT_THREADSAFE = (1<<1),
};

@@ -389,14 +394,18 @@ typedef enum bufferevent_filter_result (*bufferevent_filter_cb)(
    struct evbuffer *src, struct evbuffer *dst, ssize_t dst_limit,
    enum bufferevent_flush_mode mode, void *ctx);

struct bufferevent_filter;
/**
   Allocate a new filtering bufferevent on top of an existing bufferevent.

enum bufferevent_filter_options {
	BEV_FILT_FREE_UNDERLYING = (1<<0),
};

/** Allocate a new filtering bufferevent on top of an existing bufferevent.
   @param underlying the underlying bufferevent.
   @param input_filter The filter to apply to data we read from the underlying
     bufferevent
   @param output_filter The filter to apply to data we write to the underlying
     bufferevent
   @param options A bitfield of bufferevent options.
   @param free_context A function to use to free the filter context when
     this bufferevent is freed.
   @param ctx A context pointer to pass to the filter functions.
 */
struct bufferevent *
bufferevent_filter_new(struct bufferevent *underlying,
@@ -412,6 +421,9 @@ bufferevent_filter_new(struct bufferevent *underlying,
   socketpair(), except that no internal socketpair is allocated.

   @param base The event base to associate with the socketpair.
   @param options A set of options for this bufferevent
   @param pair A pointer to an array to hold the two new bufferevent objects.
   @return 0 on success, -1 on failure.
 */
int
bufferevent_pair_new(struct event_base *base, enum bufferevent_options options,
@@ -54,6 +54,18 @@ extern "C" {
#endif
#include <stdarg.h>

/* Integer type definitions for types that are supposed to be defined in the
 * C99-specified stdint.h.  Shamefully, some platforms do not include
 * stdint.h, so we need to replace it.  (If you are on a platform like this,
 * your C headers are now 10 years out of date.  You should bug them to do
 * something about this.)
 *
 * We define:
 *    ev_uint64_t, ev_uint32_t, ev_uint16_t, ev_uint8_t -- unsigned integer
 *      types of exactly 64, 32, 16, and 8 bits respectively.
 *    ev_int64_t, ev_int32_t, ev_int16_t, ev_int8_t -- signed integer
 *      types of exactly 64, 32, 16, and 8 bits respectively.
 */
#ifdef _EVENT_HAVE_UINT64_T
#define ev_uint64_t uint64_t
#define ev_int64_t int64_t
@@ -112,6 +124,9 @@ extern "C" {
  simply calls socketpair().  On Windows, it uses the loopback network
  interface on 127.0.0.1, and only AF_INET,SOCK_STREAM are supported.

  (This may fail on some Windows hosts where firewall software has cleverly
  decided to keep 127.0.0.1 from talking to itself.)

  Parameters and return values are as for socketpair()
*/
int evutil_socketpair(int d, int type, int protocol, evutil_socket_t sv[2]);
@@ -166,7 +181,8 @@ const char *evutil_socket_error_to_string(int errcode);
#endif

/*
 * Manipulation macros for struct timeval
 * Manipulation macros for struct timeval.  We define replacements
 * for timeradd, timersub, timerclear, timercmp, and timerisset.
 */
#ifdef _EVENT_HAVE_TIMERADD
#define evutil_timeradd(tvp, uvp, vvp) timeradd((tvp), (uvp), (vvp))
@@ -198,6 +214,8 @@ const char *evutil_socket_error_to_string(int errcode);
#define evutil_timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0
#endif

/** Return true iff the tvp is related to uvp according to the relational
 * operator cmp.  Recognized values for cmp are ==, <=, <, >=, and >. */
#define evutil_timercmp(tvp, uvp, cmp) \
	(((tvp)->tv_sec == (uvp)->tv_sec) ? \
	 ((tvp)->tv_usec cmp (uvp)->tv_usec) : \
@@ -209,6 +227,7 @@ const char *evutil_socket_error_to_string(int errcode);
#define evutil_timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
#endif

/* Replacement for offsetof on platforms that don't define it. */
#ifdef offsetof
#define evutil_offsetof(type, field) offsetof(type, field)
#else
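A hedged example of the timeval helpers documented above, computing a deadline and testing whether it has passed; the function itself is illustrative only.

#include <event2/util.h>

static int
example_deadline_passed(const struct timeval *start,
    const struct timeval *timeout, const struct timeval *now)
{
	struct timeval deadline;

	evutil_timeradd(start, timeout, &deadline);
	/* evutil_timercmp takes a relational operator as its third argument. */
	return evutil_timercmp(now, &deadline, >=);
}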
@@ -32,28 +32,95 @@ extern "C" {
#endif

struct event_overlapped;
struct event_iocp_port;
struct evbuffer;
typedef void (*iocp_callback)(struct event_overlapped *, uintptr_t, ssize_t);

/* This whole file is actually win32 only.  We wrap the structures in a win32
 * ifdef so that we can test-compile code that uses these interfaces on
 * non-win32 platforms. */
#ifdef WIN32

/**
   Internal use only.  Wraps an OVERLAPPED that we're using for libevent
   functionality.  Whenever an event_iocp_port gets an event for a given
   OVERLAPPED*, it upcasts the pointer to an event_overlapped, and calls the
   iocp_callback function with the event_overlapped, the iocp key, and the
   number of bytes transferred as arguments.
 */
struct event_overlapped {
	OVERLAPPED overlapped;
	iocp_callback cb;
};

/**
   Internal use only.  Stores a Windows IO Completion port, along with
   related data.
 */
struct event_iocp_port {
	/** The port itself */
	HANDLE port;
	/** Number of threads open on the port. */
	int n_threads;
	/** True iff we're shutting down all the threads on this port */
	int shutdown;
	/** How often the threads on this port check for shutdown and other
	 * conditions */
	long ms;
};
#endif

struct evbuffer;
/** Initialize the fields in an event_overlapped.

    @param overlapped The struct event_overlapped to initialize
    @param cb The callback that should be invoked once the IO operation has
      finished.
 */
void event_overlapped_init(struct event_overlapped *, iocp_callback cb);

/** Allocate and return a new evbuffer that supports overlapped IO on a given
    socket.  The socket must be associated with an IO completion port using
    event_iocp_port_associate.
 */
struct evbuffer *evbuffer_overlapped_new(evutil_socket_t fd);

/** Start reading data onto the end of an overlapped evbuffer.

    An evbuffer can only have one read pending at a time.  While the read
    is in progress, no other data may be added to the end of the buffer.
    The buffer must be created with evbuffer_overlapped_new().

    @param buf The buffer to read onto
    @param n The number of bytes to try to read.
    @return 0 on success, -1 on error.
 */
int evbuffer_launch_read(struct evbuffer *, size_t n);

/** Start writing data from the start of an evbuffer.

    An evbuffer can only have one write pending at a time.  While the write is
    in progress, no other data may be removed from the front of the buffer.
    The buffer must be created with evbuffer_overlapped_new().

    @param buf The buffer to write from
    @param n The number of bytes to try to write.
    @return 0 on success, -1 on error.
 */
int evbuffer_launch_write(struct evbuffer *, ssize_t n);

/** Create an IOCP, and launch its worker threads.  Internal use only.

    This interface is unstable, and will change.
 */
struct event_iocp_port *event_iocp_port_launch(void);

/** Associate a file descriptor with an iocp, such that overlapped IO on the
    fd will happen on one of the iocp's worker threads.
*/
int event_iocp_port_associate(struct event_iocp_port *port, evutil_socket_t fd,
    uintptr_t key);

/** Shut down all threads serving an iocp. */
void event_iocp_shutdown(struct event_iocp_port *port);

#ifdef __cplusplus
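A hedged, WIN32-only sketch of how these pieces fit together: launch a port, associate the socket, create an overlapped buffer, and start a read. The completion key (1) and the read size (4096) are arbitrary choices here, not values taken from libevent.

static struct evbuffer *
example_start_overlapped_read(evutil_socket_t fd)
{
	struct event_iocp_port *port = event_iocp_port_launch();
	struct evbuffer *buf;

	if (!port || event_iocp_port_associate(port, fd, 1) < 0)
		return NULL;

	buf = evbuffer_overlapped_new(fd);
	if (buf && evbuffer_launch_read(buf, 4096) < 0) {
		evbuffer_free(buf);
		buf = NULL;
	}
	/* Completion callbacks then fire on the port's worker threads. */
	return buf;
}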
@@ -38,6 +38,12 @@
extern "C" {
#endif

/** @file ipv6-internal.h
 *
 * Replacement types and functions for platforms that don't support ipv6
 * properly.
 */

#ifndef _EVENT_HAVE_STRUCT_IN6_ADDR
struct in6_addr {
	ev_uint8_t s6_addr[16];
@@ -101,6 +101,21 @@ extern const char EVUTIL_TOLOWER_TABLE[];
#define EVUTIL_TOLOWER(c) (EVUTIL_TOLOWER_TABLE[(ev_uint8_t)c])
#define EVUTIL_TOUPPER(c) (EVUTIL_TOUPPER_TABLE[(ev_uint8_t)c])

/** Helper macro.  If we know that a given pointer points to a field in a
    structure, return a pointer to the structure itself.  Used to implement
    our half-baked C OO.  Example:

    struct subtype {
	    int x;
	    struct supertype common;
	    int y;
    };
    ...
    void fn(struct supertype *super) {
	    struct subtype *sub = EVUTIL_UPCAST(super, struct subtype, common);
	    ...
    }
 */
#define EVUTIL_UPCAST(ptr, type, field) \
	((type *)(((char*)(ptr)) - evutil_offsetof(type, field)))