/*
 * Copyright (c) 2008-2010 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _BUFFEREVENT_INTERNAL_H_
|
|
|
|
#define _BUFFEREVENT_INTERNAL_H_
|
|
|
|
|
|
|
|
#ifdef __cplusplus
|
|
|
|
extern "C" {
|
|
|
|
#endif
|
|
|
|
|
2010-07-07 16:45:03 -04:00
|
|
|
#include "event2/event-config.h"
|
2010-12-02 10:27:06 -05:00
|
|
|
#include "event2/util.h"
|
2009-04-13 03:08:11 +00:00
|
|
|
#include "defer-internal.h"
|
2009-04-13 03:17:19 +00:00
|
|
|
#include "evthread-internal.h"
|
|
|
|
#include "event2/thread.h"
|
2009-11-27 13:16:54 -05:00
|
|
|
#include "ratelim-internal.h"
|
2011-03-07 21:55:47 -05:00
|
|
|
#include "event2/bufferevent_struct.h"
|
2009-04-13 03:08:11 +00:00
|
|
|
|
2009-11-23 18:34:32 -05:00
|
|
|
/* These flags are reasons that we might be declining to actually enable
|
|
|
|
reading or writing on a bufferevent.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* On a all bufferevents, for reading: used when we have read up to the
|
|
|
|
watermark value.
|
|
|
|
|
2009-11-27 13:16:54 -05:00
|
|
|
On a filtering bufferevent, for writing: used when the underlying
|
2009-11-23 18:34:32 -05:00
|
|
|
bufferevent's write buffer has been filled up to its watermark
|
|
|
|
value.
|
|
|
|
*/
|
|
|
|
#define BEV_SUSPEND_WM 0x01
|
2009-11-27 13:16:54 -05:00
|
|
|
/* On a base bufferevent: when we have emptied a bandwidth buckets */
|
2009-11-23 18:34:32 -05:00
|
|
|
#define BEV_SUSPEND_BW 0x02
|
2009-11-27 13:16:54 -05:00
|
|
|
/* On a base bufferevent: when we have emptied the group's bandwidth bucket. */
|
|
|
|
#define BEV_SUSPEND_BW_GROUP 0x04
|
2010-02-20 12:55:59 -05:00
|
|
|
/* On a socket bufferevent: can't do any operations while we're waiting for
|
|
|
|
* name lookup to finish. */
|
|
|
|
#define BEV_SUSPEND_LOOKUP 0x08
|
Correct logic on disabling underlying bufferevents when disabling a filter
Previously, whenever writing was disabled on a bufferevent_filter (or
a filtering SSL bufferevent), we would stop writing on the underlying
bufferevent. This would make for trouble, though, since if you
implemented common patterns like "stop writing once data X has been
flushed", your bufferevent filter would disable the underlying
bufferevent after the data was flushed to the underlying bufferevent,
but before actually having it written to the network.
Now, we have filters leave their underlying bufferevents enabled for
reading and writing for reading and writing immediately. They are not
disabled, unless the user wants to disable them, which is now allowed.
To handle the case where we want to choke reading on the underlying
bufferevent because the filter no longer wants to read, we use
bufferevent_suspend_read(). This is analogous to the way that we use
bufferevent_suspend_write() to suspend writing on a filtering
bufferevent when the underlying bufferevent's output buffer has hit
its high watermark.
2010-10-08 00:59:02 -04:00
|
|
|
/* On a base bufferevent, for reading: used when a filter has choked this
|
|
|
|
* (underlying) bufferevent because it has stopped reading from it. */
|
|
|
|
#define BEV_SUSPEND_FILT_READ 0x10
|
|
|
|
|
|
|
|
typedef ev_uint16_t bufferevent_suspend_flags;
|
2009-11-27 13:16:54 -05:00
|
|
|
|
|
|
|
struct bufferevent_rate_limit_group {
|
|
|
|
/** List of all members in the group */
|
|
|
|
TAILQ_HEAD(rlim_group_member_list, bufferevent_private) members;
|
|
|
|
/** Current limits for the group. */
|
|
|
|
struct ev_token_bucket rate_limit;
|
|
|
|
struct ev_token_bucket_cfg rate_limit_cfg;
|
|
|
|
|
|
|
|
/** True iff we don't want to read from any member of the group.until
|
|
|
|
* the token bucket refills. */
|
|
|
|
unsigned read_suspended : 1;
|
|
|
|
/** True iff we don't want to write from any member of the group.until
|
|
|
|
* the token bucket refills. */
|
|
|
|
unsigned write_suspended : 1;
|
|
|
|
/** True iff we were unable to suspend one of the bufferevents in the
|
|
|
|
* group for reading the last time we tried, and we should try
|
|
|
|
* again. */
|
|
|
|
unsigned pending_unsuspend_read : 1;
|
|
|
|
/** True iff we were unable to suspend one of the bufferevents in the
|
|
|
|
* group for writing the last time we tried, and we should try
|
|
|
|
* again. */
|
|
|
|
unsigned pending_unsuspend_write : 1;
|
|
|
|
|
2010-03-21 13:16:31 -04:00
|
|
|
/*@{*/
|
|
|
|
/** Total number of bytes read or written in this group since last
|
|
|
|
* reset. */
|
|
|
|
ev_uint64_t total_read;
|
|
|
|
ev_uint64_t total_written;
|
|
|
|
/*@}*/
|
|
|
|
|
2009-11-27 13:16:54 -05:00
|
|
|
/** The number of bufferevents in the group. */
|
|
|
|
int n_members;
|
|
|
|
|
|
|
|
/** The smallest number of bytes that any member of the group should
|
|
|
|
* be limited to read or write at a time. */
|
2010-10-26 10:27:29 -04:00
|
|
|
ev_ssize_t min_share;
|
2009-11-27 13:16:54 -05:00
|
|
|
/** Timeout event that goes off once a tick, when the bucket is ready
|
|
|
|
* to refill. */
|
|
|
|
struct event master_refill_event;
|
|
|
|
/** Lock to protect the members of this group. This lock should nest
|
|
|
|
* within every bufferevent lock: if you are holding this lock, do
|
|
|
|
* not assume you can lock another bufferevent. */
|
|
|
|
void *lock;
|
|
|
|
};
|
2009-11-23 18:34:32 -05:00
|
|
|
|
2009-11-27 13:16:54 -05:00
|
|
|
/** Fields for rate-limiting a single bufferevent. */
|
|
|
|
struct bufferevent_rate_limit {
|
|
|
|
/* Linked-list elements for storing this bufferevent_private in a
|
|
|
|
* group.
|
|
|
|
*
|
|
|
|
* Note that this field is supposed to be protected by the group
|
|
|
|
* lock */
|
|
|
|
TAILQ_ENTRY(bufferevent_private) next_in_group;
|
|
|
|
/** The rate-limiting group for this bufferevent, or NULL if it is
|
|
|
|
* only rate-limited on its own. */
|
|
|
|
struct bufferevent_rate_limit_group *group;
|
|
|
|
|
|
|
|
/* This bufferevent's current limits. */
|
|
|
|
struct ev_token_bucket limit;
|
|
|
|
/* Pointer to the rate-limit configuration for this bufferevent.
|
|
|
|
* Can be shared. XXX reference-count this? */
|
|
|
|
struct ev_token_bucket_cfg *cfg;
|
|
|
|
|
|
|
|
/* Timeout event used when one this bufferevent's buckets are
|
|
|
|
* empty. */
|
|
|
|
struct event refill_bucket_event;
|
2009-11-23 18:34:32 -05:00
|
|
|
};
|
|
|
|
|
2009-05-25 23:10:47 +00:00
|
|
|
/** Parts of the bufferevent structure that are shared among all bufferevent
|
|
|
|
* types, but not exposed in bufferevent_struct.h. */
|
2009-04-13 03:08:11 +00:00
|
|
|
struct bufferevent_private {
|
2009-05-25 23:10:47 +00:00
|
|
|
/** The underlying bufferevent structure. */
|
2009-04-13 03:08:11 +00:00
|
|
|
struct bufferevent bev;
|
|
|
|
|
|
|
|
/** Evbuffer callback to enforce watermarks on input. */
|
|
|
|
struct evbuffer_cb_entry *read_watermarks_cb;
|
|
|
|
|
2009-04-17 23:12:34 +00:00
|
|
|
/** If set, we should free the lock when we free the bufferevent. */
|
2009-04-13 03:17:19 +00:00
|
|
|
unsigned own_lock : 1;
|
2009-04-13 03:08:11 +00:00
|
|
|
|
2009-05-25 23:10:47 +00:00
|
|
|
/** Flag: set if we have deferred callbacks and a read callback is
|
|
|
|
* pending. */
|
2009-04-17 23:12:34 +00:00
|
|
|
unsigned readcb_pending : 1;
|
2009-05-25 23:10:47 +00:00
|
|
|
/** Flag: set if we have deferred callbacks and a write callback is
|
|
|
|
* pending. */
|
2009-04-17 23:12:34 +00:00
|
|
|
unsigned writecb_pending : 1;
|
2009-05-25 23:10:47 +00:00
|
|
|
/** Flag: set if we are currently busy connecting. */
|
2009-05-05 02:59:26 +00:00
|
|
|
unsigned connecting : 1;
|
2010-02-27 18:59:06 -08:00
|
|
|
/** Flag: set if a connect failed prematurely; this is a hack for
|
|
|
|
* getting around the bufferevent abstraction. */
|
|
|
|
unsigned connection_refused : 1;
|
2009-05-25 23:10:47 +00:00
|
|
|
/** Set to the events pending if we have deferred callbacks and
|
|
|
|
* an events callback is pending. */
|
2009-05-25 23:11:20 +00:00
|
|
|
short eventcb_pending;
|
2009-11-23 18:34:32 -05:00
|
|
|
|
|
|
|
/** If set, read is suspended until one or more conditions are over.
|
|
|
|
* The actual value here is a bitfield of those conditions; see the
|
|
|
|
* BEV_SUSPEND_* flags above. */
|
Correct logic on disabling underlying bufferevents when disabling a filter
Previously, whenever writing was disabled on a bufferevent_filter (or
a filtering SSL bufferevent), we would stop writing on the underlying
bufferevent. This would make for trouble, though, since if you
implemented common patterns like "stop writing once data X has been
flushed", your bufferevent filter would disable the underlying
bufferevent after the data was flushed to the underlying bufferevent,
but before actually having it written to the network.
Now, we have filters leave their underlying bufferevents enabled for
reading and writing for reading and writing immediately. They are not
disabled, unless the user wants to disable them, which is now allowed.
To handle the case where we want to choke reading on the underlying
bufferevent because the filter no longer wants to read, we use
bufferevent_suspend_read(). This is analogous to the way that we use
bufferevent_suspend_write() to suspend writing on a filtering
bufferevent when the underlying bufferevent's output buffer has hit
its high watermark.
2010-10-08 00:59:02 -04:00
|
|
|
bufferevent_suspend_flags read_suspended;
|
2009-11-23 18:34:32 -05:00
|
|
|
|
|
|
|
/** If set, writing is suspended until one or more conditions are over.
|
|
|
|
* The actual value here is a bitfield of those conditions; see the
|
|
|
|
* BEV_SUSPEND_* flags above. */
|
Correct logic on disabling underlying bufferevents when disabling a filter
Previously, whenever writing was disabled on a bufferevent_filter (or
a filtering SSL bufferevent), we would stop writing on the underlying
bufferevent. This would make for trouble, though, since if you
implemented common patterns like "stop writing once data X has been
flushed", your bufferevent filter would disable the underlying
bufferevent after the data was flushed to the underlying bufferevent,
but before actually having it written to the network.
Now, we have filters leave their underlying bufferevents enabled for
reading and writing for reading and writing immediately. They are not
disabled, unless the user wants to disable them, which is now allowed.
To handle the case where we want to choke reading on the underlying
bufferevent because the filter no longer wants to read, we use
bufferevent_suspend_read(). This is analogous to the way that we use
bufferevent_suspend_write() to suspend writing on a filtering
bufferevent when the underlying bufferevent's output buffer has hit
its high watermark.
2010-10-08 00:59:02 -04:00
|
|
|
bufferevent_suspend_flags write_suspended;
|
2009-11-23 18:34:32 -05:00
|
|
|
|
2009-05-25 23:11:20 +00:00
|
|
|
/** Set to the current socket errno if we have deferred callbacks and
|
2009-05-25 23:10:47 +00:00
|
|
|
* an events callback is pending. */
|
2009-04-17 23:12:34 +00:00
|
|
|
int errno_pending;
|
2009-11-23 18:34:32 -05:00
|
|
|
|
2010-04-24 00:06:38 -07:00
|
|
|
/** The DNS error code for bufferevent_socket_connect_hostname */
|
|
|
|
int dns_error;
|
|
|
|
|
2009-05-25 23:10:47 +00:00
|
|
|
/** Used to implement deferred callbacks */
|
2009-04-17 23:12:34 +00:00
|
|
|
struct deferred_cb deferred;
|
|
|
|
|
2009-05-25 23:10:47 +00:00
|
|
|
/** The options this bufferevent was constructed with */
|
2009-04-13 03:08:11 +00:00
|
|
|
enum bufferevent_options options;
|
|
|
|
|
2009-05-25 23:10:47 +00:00
|
|
|
/** Current reference count for this bufferevent. */
|
2009-04-13 03:08:11 +00:00
|
|
|
int refcnt;
|
2009-05-25 23:10:47 +00:00
|
|
|
|
|
|
|
/** Lock for this bufferevent. Shared by the inbuf and the outbuf.
|
|
|
|
* If NULL, locking is disabled. */
|
2009-04-13 03:08:11 +00:00
|
|
|
void *lock;
|
2009-11-27 13:16:54 -05:00
|
|
|
|
|
|
|
/** Rate-limiting information for this bufferevent */
|
|
|
|
struct bufferevent_rate_limit *rate_limiting;
|
2009-04-13 03:08:11 +00:00
|
|
|
};
|
2008-04-30 00:09:16 +00:00
|
|
|
|
2009-05-25 23:10:47 +00:00
|
|
|
/** Possible operations for a control callback. */
enum bufferevent_ctrl_op {
	BEV_CTRL_SET_FD,
	BEV_CTRL_GET_FD,
	BEV_CTRL_GET_UNDERLYING
};
|
|
|
|
|
2009-05-25 23:10:47 +00:00
|
|
|
/** Possible data types for a control callback */
|
2009-05-13 20:37:21 +00:00
|
|
|
union bufferevent_ctrl_data {
|
|
|
|
void *ptr;
|
|
|
|
evutil_socket_t fd;
|
|
|
|
};
|
|
|
|
|
2009-02-02 19:22:13 +00:00
|
|
|
/**
|
|
|
|
Implementation table for a bufferevent: holds function pointers and other
|
|
|
|
information to make the various bufferevent types work.
|
|
|
|
*/
|
|
|
|
struct bufferevent_ops {
|
|
|
|
/** The name of the bufferevent's type. */
|
|
|
|
const char *type;
|
|
|
|
/** At what offset into the implementation type will we find a
|
|
|
|
bufferevent structure?
|
2008-04-30 00:09:16 +00:00
|
|
|
|
2009-02-02 19:22:13 +00:00
|
|
|
Example: if the type is implemented as
|
|
|
|
struct bufferevent_x {
|
|
|
|
int extra_data;
|
|
|
|
struct bufferevent bev;
|
|
|
|
}
|
|
|
|
then mem_offset should be offsetof(struct bufferevent_x, bev)
|
|
|
|
*/
|
|
|
|
off_t mem_offset;
|
2008-04-30 00:09:16 +00:00
|
|
|
|
2009-02-02 19:22:13 +00:00
|
|
|
/** Enables one or more of EV_READ|EV_WRITE on a bufferevent. Does
|
|
|
|
not need to adjust the 'enabled' field. Returns 0 on success, -1
|
|
|
|
on failure.
|
|
|
|
*/
|
|
|
|
int (*enable)(struct bufferevent *, short);
|
2008-04-30 00:09:16 +00:00
|
|
|
|
2009-02-02 19:22:13 +00:00
|
|
|
/** Disables one or more of EV_READ|EV_WRITE on a bufferevent. Does
|
|
|
|
not need to adjust the 'enabled' field. Returns 0 on success, -1
|
|
|
|
on failure.
|
|
|
|
*/
|
|
|
|
int (*disable)(struct bufferevent *, short);
|
2008-04-30 00:09:16 +00:00
|
|
|
|
2009-02-02 19:22:13 +00:00
|
|
|
/** Free any storage and deallocate any extra data or structures used
|
|
|
|
in this implementation.
|
|
|
|
*/
|
|
|
|
void (*destruct)(struct bufferevent *);
|
2008-04-30 00:09:16 +00:00
|
|
|
|
2009-02-02 19:22:13 +00:00
|
|
|
/** Called when the timeouts on the bufferevent have changed.*/
|
2010-01-22 16:14:49 -05:00
|
|
|
int (*adj_timeouts)(struct bufferevent *);
|
2009-02-02 19:22:13 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
/** Called to flush data. */
|
|
|
|
int (*flush)(struct bufferevent *, short, enum bufferevent_flush_mode);
|
2009-05-13 20:37:21 +00:00
|
|
|
|
|
|
|
/** Called to access miscellaneous fields. */
|
|
|
|
int (*ctrl)(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
|
2009-11-27 13:16:54 -05:00
|
|
|
|
2008-04-30 00:09:16 +00:00
|
|
|
};
|
|
|
|
|
2009-04-10 15:01:31 +00:00
|
|
|
/* Implementation tables for each built-in bufferevent backend; defined in
 * the corresponding bufferevent_*.c files. */
extern const struct bufferevent_ops bufferevent_ops_socket;
extern const struct bufferevent_ops bufferevent_ops_filter;
extern const struct bufferevent_ops bufferevent_ops_pair;

/* True iff the bufferevent pointed to by 'bevp' uses the given backend. */
#define BEV_IS_SOCKET(bevp) ((bevp)->be_ops == &bufferevent_ops_socket)
#define BEV_IS_FILTER(bevp) ((bevp)->be_ops == &bufferevent_ops_filter)
#define BEV_IS_PAIR(bevp) ((bevp)->be_ops == &bufferevent_ops_pair)

#ifdef WIN32
extern const struct bufferevent_ops bufferevent_ops_async;
#define BEV_IS_ASYNC(bevp) ((bevp)->be_ops == &bufferevent_ops_async)
#else
/* The IOCP-based async backend exists only on Windows. */
#define BEV_IS_ASYNC(bevp) 0
#endif
|
|
|
|
|
2009-02-02 19:22:13 +00:00
|
|
|
/** Initialize the shared parts of a bufferevent. */
|
2009-04-13 03:08:11 +00:00
|
|
|
int bufferevent_init_common(struct bufferevent_private *, struct event_base *, const struct bufferevent_ops *, enum bufferevent_options options);
|
2009-02-02 19:22:13 +00:00
|
|
|
|
2009-11-23 18:34:32 -05:00
|
|
|
/** For internal use: temporarily stop all reads on bufev, until the conditions
|
|
|
|
* in 'what' are over. */
|
Correct logic on disabling underlying bufferevents when disabling a filter
Previously, whenever writing was disabled on a bufferevent_filter (or
a filtering SSL bufferevent), we would stop writing on the underlying
bufferevent. This would make for trouble, though, since if you
implemented common patterns like "stop writing once data X has been
flushed", your bufferevent filter would disable the underlying
bufferevent after the data was flushed to the underlying bufferevent,
but before actually having it written to the network.
Now, we have filters leave their underlying bufferevents enabled for
reading and writing for reading and writing immediately. They are not
disabled, unless the user wants to disable them, which is now allowed.
To handle the case where we want to choke reading on the underlying
bufferevent because the filter no longer wants to read, we use
bufferevent_suspend_read(). This is analogous to the way that we use
bufferevent_suspend_write() to suspend writing on a filtering
bufferevent when the underlying bufferevent's output buffer has hit
its high watermark.
2010-10-08 00:59:02 -04:00
|
|
|
void bufferevent_suspend_read(struct bufferevent *bufev, bufferevent_suspend_flags what);
|
2009-11-23 18:34:32 -05:00
|
|
|
/** For internal use: clear the conditions 'what' on bufev, and re-enable
|
|
|
|
* reading if there are no conditions left. */
|
Correct logic on disabling underlying bufferevents when disabling a filter
Previously, whenever writing was disabled on a bufferevent_filter (or
a filtering SSL bufferevent), we would stop writing on the underlying
bufferevent. This would make for trouble, though, since if you
implemented common patterns like "stop writing once data X has been
flushed", your bufferevent filter would disable the underlying
bufferevent after the data was flushed to the underlying bufferevent,
but before actually having it written to the network.
Now, we have filters leave their underlying bufferevents enabled for
reading and writing for reading and writing immediately. They are not
disabled, unless the user wants to disable them, which is now allowed.
To handle the case where we want to choke reading on the underlying
bufferevent because the filter no longer wants to read, we use
bufferevent_suspend_read(). This is analogous to the way that we use
bufferevent_suspend_write() to suspend writing on a filtering
bufferevent when the underlying bufferevent's output buffer has hit
its high watermark.
2010-10-08 00:59:02 -04:00
|
|
|
void bufferevent_unsuspend_read(struct bufferevent *bufev, bufferevent_suspend_flags what);
|
2009-11-23 18:34:32 -05:00
|
|
|
|
|
|
|
/** For internal use: temporarily stop all writes on bufev, until the conditions
|
|
|
|
* in 'what' are over. */
|
Correct logic on disabling underlying bufferevents when disabling a filter
Previously, whenever writing was disabled on a bufferevent_filter (or
a filtering SSL bufferevent), we would stop writing on the underlying
bufferevent. This would make for trouble, though, since if you
implemented common patterns like "stop writing once data X has been
flushed", your bufferevent filter would disable the underlying
bufferevent after the data was flushed to the underlying bufferevent,
but before actually having it written to the network.
Now, we have filters leave their underlying bufferevents enabled for
reading and writing for reading and writing immediately. They are not
disabled, unless the user wants to disable them, which is now allowed.
To handle the case where we want to choke reading on the underlying
bufferevent because the filter no longer wants to read, we use
bufferevent_suspend_read(). This is analogous to the way that we use
bufferevent_suspend_write() to suspend writing on a filtering
bufferevent when the underlying bufferevent's output buffer has hit
its high watermark.
2010-10-08 00:59:02 -04:00
|
|
|
void bufferevent_suspend_write(struct bufferevent *bufev, bufferevent_suspend_flags what);
|
2009-11-23 18:34:32 -05:00
|
|
|
/** For internal use: clear the conditions 'what' on bufev, and re-enable
|
|
|
|
* writing if there are no conditions left. */
|
Correct logic on disabling underlying bufferevents when disabling a filter
Previously, whenever writing was disabled on a bufferevent_filter (or
a filtering SSL bufferevent), we would stop writing on the underlying
bufferevent. This would make for trouble, though, since if you
implemented common patterns like "stop writing once data X has been
flushed", your bufferevent filter would disable the underlying
bufferevent after the data was flushed to the underlying bufferevent,
but before actually having it written to the network.
Now, we have filters leave their underlying bufferevents enabled for
reading and writing for reading and writing immediately. They are not
disabled, unless the user wants to disable them, which is now allowed.
To handle the case where we want to choke reading on the underlying
bufferevent because the filter no longer wants to read, we use
bufferevent_suspend_read(). This is analogous to the way that we use
bufferevent_suspend_write() to suspend writing on a filtering
bufferevent when the underlying bufferevent's output buffer has hit
its high watermark.
2010-10-08 00:59:02 -04:00
|
|
|
void bufferevent_unsuspend_write(struct bufferevent *bufev, bufferevent_suspend_flags what);
|
2009-11-23 18:34:32 -05:00
|
|
|
|
|
|
|
#define bufferevent_wm_suspend_read(b) \
|
|
|
|
bufferevent_suspend_read((b), BEV_SUSPEND_WM)
|
|
|
|
#define bufferevent_wm_unsuspend_read(b) \
|
|
|
|
bufferevent_unsuspend_read((b), BEV_SUSPEND_WM)
|
2009-02-02 19:22:13 +00:00
|
|
|
|
2011-03-07 21:55:47 -05:00
|
|
|
/*
|
|
|
|
Disable a bufferevent. Equivalent to bufferevent_disable(), but
|
|
|
|
first resets 'connecting' flag to force EV_WRITE down for sure.
|
|
|
|
|
|
|
|
XXXX this method will go away in the future; try not to add new users.
|
|
|
|
See comment in evhttp_connection_reset() for discussion.
|
|
|
|
|
|
|
|
@param bufev the bufferevent to be disabled
|
|
|
|
@param event any combination of EV_READ | EV_WRITE.
|
|
|
|
@return 0 if successful, or -1 if an error occurred
|
|
|
|
@see bufferevent_disable()
|
|
|
|
*/
|
|
|
|
int bufferevent_disable_hard(struct bufferevent *bufev, short event);
|
|
|
|
|
2009-05-25 23:10:47 +00:00
|
|
|
/** Internal: Set up locking on a bufferevent.  If lock is set, use it.
 * Otherwise, use a new lock. */
int bufferevent_enable_locking(struct bufferevent *bufev, void *lock);
/** Internal: Increment the reference count on bufev. */
void bufferevent_incref(struct bufferevent *bufev);
/** Internal: Lock bufev and increase its reference count. */
void _bufferevent_incref_and_lock(struct bufferevent *bufev);
/** Internal: Decrement the reference count on bufev.  Returns 1 if it freed
 * the bufferevent. */
int bufferevent_decref(struct bufferevent *bufev);
/** Internal: Drop the reference count on bufev, freeing as necessary, and
 * unlocking it otherwise.  Returns 1 if it freed the bufferevent. */
int _bufferevent_decref_and_unlock(struct bufferevent *bufev);

/** Internal: If callbacks are deferred and we have a read callback, schedule
 * a readcb.  Otherwise just run the readcb. */
void _bufferevent_run_readcb(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have a write callback, schedule
 * a writecb.  Otherwise just run the writecb. */
void _bufferevent_run_writecb(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have an eventcb, schedule
 * it to run with events "what".  Otherwise just run the eventcb. */
void _bufferevent_run_eventcb(struct bufferevent *bufev, short what);

/** Internal: Add the event 'ev' with timeout tv, unless tv is set to 0, in
 * which case add ev with no timeout. */
int _bufferevent_add_event(struct event *ev, const struct timeval *tv);
|
|
|
|
|
2009-05-25 23:10:23 +00:00
|
|
|
/* =========
 * These next functions implement timeouts for bufferevents that aren't doing
 * anything else with ev_read and ev_write, to handle timeouts.
 * ========= */
/** Internal use: Set up the ev_read and ev_write callbacks so that
 * the other "generic_timeout" functions will work on it.  Call this from
 * the constructor function. */
void _bufferevent_init_generic_timeout_cbs(struct bufferevent *bev);
/** Internal use: Delete the ev_read and ev_write callbacks if they're pending.
 * Call this from the destructor function. */
int _bufferevent_del_generic_timeout_cbs(struct bufferevent *bev);
/** Internal use: Add or delete the generic timeout events as appropriate.
 * (If an event is enabled and a timeout is set, we add the event.  Otherwise
 * we delete it.)  Call this from anything that changes the timeout values,
 * that enabled EV_READ or EV_WRITE, or that disables EV_READ or EV_WRITE. */
int _bufferevent_generic_adj_timeouts(struct bufferevent *bev);

/** Internal use: We have just successfully read data into an inbuf, so
 * reset the read timeout (if any). */
#define BEV_RESET_GENERIC_READ_TIMEOUT(bev)				\
	do {								\
		if (evutil_timerisset(&(bev)->timeout_read))		\
			event_add(&(bev)->ev_read, &(bev)->timeout_read); \
	} while (0)
/** Internal use: We have just successfully written data from an outbuf, so
 * reset the write timeout (if any). */
#define BEV_RESET_GENERIC_WRITE_TIMEOUT(bev)				\
	do {								\
		if (evutil_timerisset(&(bev)->timeout_write))		\
			event_add(&(bev)->ev_write, &(bev)->timeout_write); \
	} while (0)
/* Internal use: Cancel a pending generic read/write timeout. */
#define BEV_DEL_GENERIC_READ_TIMEOUT(bev)	\
	event_del(&(bev)->ev_read)
#define BEV_DEL_GENERIC_WRITE_TIMEOUT(bev)	\
	event_del(&(bev)->ev_write)
|
|
|
|
|
2009-05-25 23:10:23 +00:00
|
|
|
|
2009-05-25 23:10:47 +00:00
|
|
|
/** Internal: Given a bufferevent, return its corresponding
 * bufferevent_private. */
#define BEV_UPCAST(b) EVUTIL_UPCAST((b), struct bufferevent_private, bev)

#ifdef _EVENT_DISABLE_THREAD_SUPPORT
/* With thread support compiled out, locking is a no-op. */
#define BEV_LOCK(b) _EVUTIL_NIL_STMT
#define BEV_UNLOCK(b) _EVUTIL_NIL_STMT
#else
/** Internal: Grab the lock (if any) on a bufferevent */
#define BEV_LOCK(b) do {						\
		struct bufferevent_private *locking =  BEV_UPCAST(b);	\
		EVLOCK_LOCK(locking->lock, 0);				\
	} while (0)

/** Internal: Release the lock (if any) on a bufferevent */
#define BEV_UNLOCK(b) do {						\
		struct bufferevent_private *locking =  BEV_UPCAST(b);	\
		EVLOCK_UNLOCK(locking->lock, 0);			\
	} while (0)
#endif
|
|
|
|
|
2009-04-13 03:17:19 +00:00
|
|
|
|
2009-11-27 13:16:54 -05:00
|
|
|
/* ==== For rate-limiting. */
|
|
|
|
|
|
|
|
int _bufferevent_decrement_write_buckets(struct bufferevent_private *bev,
|
2010-10-27 22:57:53 -04:00
|
|
|
ev_ssize_t bytes);
|
2009-11-27 13:16:54 -05:00
|
|
|
int _bufferevent_decrement_read_buckets(struct bufferevent_private *bev,
|
2010-10-27 22:57:53 -04:00
|
|
|
ev_ssize_t bytes);
|
|
|
|
ev_ssize_t _bufferevent_get_read_max(struct bufferevent_private *bev);
|
|
|
|
ev_ssize_t _bufferevent_get_write_max(struct bufferevent_private *bev);
|
2009-11-27 13:16:54 -05:00
|
|
|
|
2008-04-30 00:09:16 +00:00
|
|
|
#ifdef __cplusplus
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2009-07-28 04:03:57 +00:00
|
|
|
|
2008-04-30 00:09:16 +00:00
|
|
|
#endif /* _BUFFEREVENT_INTERNAL_H_ */
|