/*
 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event-config.h"

#ifdef WIN32
#include <winsock2.h>
#include <windows.h>
#include <io.h>
#endif

#ifdef _EVENT_HAVE_VASPRINTF
/* If we have vasprintf, we need to define this before we include stdio.h. */
#define _GNU_SOURCE
#endif

#include <sys/types.h>

#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef _EVENT_HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif

#ifdef _EVENT_HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif

#ifdef _EVENT_HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif

#ifdef _EVENT_HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef _EVENT_HAVE_SYS_SENDFILE_H
#include <sys/sendfile.h>
#endif

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _EVENT_HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef _EVENT_HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "event2/event.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_compat.h"
#include "event2/bufferevent_struct.h"
#include "event2/thread.h"
#include "event-config.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"
#include "evbuffer-internal.h"
#include "bufferevent-internal.h"

/* some systems do not have MAP_FAILED */
#ifndef MAP_FAILED
#define MAP_FAILED	((void *)-1)
#endif

/* send file support */
#if defined(_EVENT_HAVE_SYS_SENDFILE_H) && defined(_EVENT_HAVE_SENDFILE) && defined(__linux__)
#define USE_SENDFILE		1
#define SENDFILE_IS_LINUX	1
#elif defined(_EVENT_HAVE_SENDFILE) && defined(__FreeBSD__)
#define USE_SENDFILE		1
#define SENDFILE_IS_FREEBSD	1
#elif defined(_EVENT_HAVE_SENDFILE) && defined(__APPLE__)
#define USE_SENDFILE		1
#define SENDFILE_IS_MACOSX	1
#elif defined(_EVENT_HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
#define USE_SENDFILE		1
#define SENDFILE_IS_SOLARIS	1
#endif

#ifdef USE_SENDFILE
static int use_sendfile = 1;
#endif
#ifdef _EVENT_HAVE_MMAP
static int use_mmap = 1;
#endif

/* Mask of user-selectable callback flags. */
#define EVBUFFER_CB_USER_FLAGS	    0xffff
/* Mask of all internal-use-only flags. */
#define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000

/* Flag set if the callback is using the cb_obsolete function pointer */
#define EVBUFFER_CB_OBSOLETE	       0x00040000

/* evbuffer_chain support */
#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))

#define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
#define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)

static void evbuffer_chain_align(struct evbuffer_chain *chain);
static void evbuffer_deferred_callback(struct deferred_cb *cb, void *arg);
static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
    const struct evbuffer_ptr *pos, const char *mem, size_t len);

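/* Allocate a new chain.  The chain header and its payload are carved out of
 * a single mm_malloc() block whose size is rounded up to the next power of
 * two that is at least MIN_BUFFER_SIZE, so small additions do not each cost
 * a separate allocation. */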
static struct evbuffer_chain *
evbuffer_chain_new(size_t size)
{
	struct evbuffer_chain *chain;
	size_t to_alloc;

	size += EVBUFFER_CHAIN_SIZE;

	/* get the next largest memory that can hold the buffer */
	to_alloc = MIN_BUFFER_SIZE;
	while (to_alloc < size)
		to_alloc <<= 1;

	/* we get everything in one chunk */
	if ((chain = mm_malloc(to_alloc)) == NULL)
		return (NULL);

	memset(chain, 0, EVBUFFER_CHAIN_SIZE);

	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;

	/* this way we can manipulate the buffer to different addresses,
	 * which is required for mmap for example.
	 */
	chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);

	return (chain);
}

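/* Release a chain.  A chain that is still pinned (for example, handed to
 * the kernel for a pending write) is only marked EVBUFFER_DANGLING here and
 * is freed later by _evbuffer_chain_unpin(); mmap, sendfile and reference
 * chains release their fd or run their cleanup callback first. */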
static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	if (CHAIN_PINNED(chain)) {
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}
	if (chain->flags & (EVBUFFER_MMAP|EVBUFFER_SENDFILE|
		EVBUFFER_REFERENCE)) {
		if (chain->flags & EVBUFFER_REFERENCE) {
			struct evbuffer_chain_reference *info =
			    EVBUFFER_CHAIN_EXTRA(
				    struct evbuffer_chain_reference,
				    chain);
			if (info->cleanupfn)
				(*info->cleanupfn)(chain->buffer,
				    chain->buffer_len,
				    info->extra);
		}
#ifdef _EVENT_HAVE_MMAP
		if (chain->flags & EVBUFFER_MMAP) {
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				    chain);
			if (munmap(chain->buffer, chain->buffer_len) == -1)
				event_warn("%s: munmap failed", __func__);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
#ifdef USE_SENDFILE
		if (chain->flags & EVBUFFER_SENDFILE) {
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				    chain);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
	}
	mm_free(chain);
}

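/* Append a freshly allocated chain to 'buf', replacing an empty, unpinned
 * last chain if there is one, and keep last_with_data pointing at the last
 * chain that actually holds data.  The caller must hold the buffer lock. */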
static inline void
evbuffer_chain_insert(struct evbuffer *buf, struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (buf->first == NULL) {
		buf->first = buf->last = chain;
		buf->last_with_data = chain;
	} else {
		/* the last chain is empty so we can just drop it */
		if (buf->last->off == 0 && !CHAIN_PINNED(buf->last)) {
			if (buf->last_with_data == buf->last)
				buf->last_with_data = chain;
			evbuffer_chain_free(buf->last);
			buf->last = chain;
		} else {
			buf->last->next = chain;
			buf->last = chain;
		}
		if (chain->off)
			buf->last_with_data = chain;
	}

	buf->total_len += chain->off;
}

void
_evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) == 0);
	chain->flags |= flag;
}

void
_evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) != 0);
	chain->flags &= ~flag;
	if (chain->flags & EVBUFFER_DANGLING)
		evbuffer_chain_free(chain);
}

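/* Illustrative sketch of the public lifecycle implemented below (not part
 * of the original file):
 *
 *	struct evbuffer *buf = evbuffer_new();
 *	char out[5];
 *	evbuffer_add(buf, "hello", 5);		 (append bytes)
 *	evbuffer_remove(buf, out, sizeof(out));	 (read and drain them)
 *	evbuffer_free(buf);			 (drops the last reference)
 */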
struct evbuffer *
evbuffer_new(void)
{
	struct evbuffer *buffer;

	buffer = mm_calloc(1, sizeof(struct evbuffer));
	if (buffer == NULL)
		return (NULL);

	TAILQ_INIT(&buffer->callbacks);
	buffer->refcnt = 1;

	return (buffer);
}

void
_evbuffer_incref(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
	EVBUFFER_UNLOCK(buf);
}

void
_evbuffer_incref_and_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
}

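/* Switch 'buffer' to deferred callbacks: instead of running user callbacks
 * inline while the buffer lock is held, they are queued on the event_base's
 * deferred_cb queue and run from the event loop. */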
int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = event_base_get_deferred_cb_queue(base);
	buffer->deferred_cbs = 1;
	event_deferred_cb_init(&buffer->deferred,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

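/* Make this evbuffer safe for concurrent use.  If 'lock' is NULL a new
 * recursive lock is allocated and owned by the buffer; otherwise the
 * caller-supplied lock is used and left for the caller to free. */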
int
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
{
#ifdef _EVENT_DISABLE_THREAD_SUPPORT
	return -1;
#else
	if (buf->lock)
		return -1;

	if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		buf->lock = lock;
		buf->own_lock = 1;
	} else {
		buf->lock = lock;
		buf->own_lock = 0;
	}

	return 0;
#endif
}

void
evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev)
{
	EVBUFFER_LOCK(buf);
	buf->parent = bev;
	EVBUFFER_UNLOCK(buf);
}

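/* Run the callbacks that are eligible right now.  'mask'/'masked_val'
 * select them: when deferred callbacks are enabled, only entries flagged
 * EVBUFFER_CB_NODEFER run immediately; the rest run later from
 * evbuffer_deferred_callback(), which passes running_deferred=1. */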
static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = TAILQ_FIRST(&buffer->callbacks);
	     cbent != TAILQ_END(&buffer->callbacks);
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = TAILQ_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}

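/* Notify callbacks of a change.  With deferred callbacks enabled, schedule
 * the deferred callback (once) while holding an extra reference on the
 * buffer and its parent bufferevent, then still run any NODEFER callbacks
 * inline via evbuffer_run_callbacks(buffer, 0). */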
static inline void
evbuffer_invoke_callbacks(struct evbuffer *buffer)
{
	if (buffer->deferred_cbs) {
		if (buffer->deferred.queued)
			return;
		_evbuffer_incref_and_lock(buffer);
		if (buffer->parent)
			bufferevent_incref(buffer->parent);
		EVBUFFER_UNLOCK(buffer);
		event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred);
	}

	evbuffer_run_callbacks(buffer, 0);
}

static void
evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	_evbuffer_decref_and_unlock(buffer);
	if (parent)
		bufferevent_decref(parent);
}

static void
evbuffer_remove_all_callbacks(struct evbuffer *buffer)
{
	struct evbuffer_cb_entry *cbent;

	while ((cbent = TAILQ_FIRST(&buffer->callbacks))) {
		TAILQ_REMOVE(&buffer->callbacks, cbent, next);
		mm_free(cbent);
	}
}

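/* Drop one reference; the caller must already hold the buffer lock.  On the
 * last reference, free every chain, remove all callbacks, cancel a pending
 * deferred callback, and release the lock (freeing it if we own it). */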
void
_evbuffer_decref_and_unlock(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred);

	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}

void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	_evbuffer_decref_and_unlock(buffer);
}

void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}

void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}

size_t
evbuffer_get_length(const struct evbuffer *buffer)
{
	size_t result;

	EVBUFFER_LOCK(buffer);

	result = (buffer->total_len);

	EVBUFFER_UNLOCK(buffer);

	return result;
}

size_t
evbuffer_get_contiguous_space(const struct evbuffer *buf)
{
	struct evbuffer_chain *chain;
	size_t result;

	EVBUFFER_LOCK(buf);
	chain = buf->first;
	result = (chain != NULL ? chain->off : 0);
	EVBUFFER_UNLOCK(buf);

	return result;
}

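/* evbuffer_reserve_space() exposes up to n_vecs writable extents;
 * evbuffer_commit_space() later makes the bytes actually written visible.
 * Illustrative sketch (not part of the original file):
 *
 *	struct evbuffer_iovec v[2];
 *	int n = evbuffer_reserve_space(buf, 4096, v, 2);
 *	(write into v[0..n-1].iov_base, then shrink each iov_len to the
 *	 number of bytes really produced)
 *	evbuffer_commit_space(buf, v, n);
 */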
int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		if (evbuffer_expand(buf, size) == -1)
			goto done;
		chain = buf->last;

		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = CHAIN_SPACE_LEN(chain);
		n = 1;
	} else {
		if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
			goto done;
		n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs, &chain, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;

}

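/* Move last_with_data forward past any following chains that now contain
 * data; returns the number of chains it advanced over. */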
static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (!buf->last_with_data)
		return 0;

	while (buf->last_with_data->next && buf->last_with_data->next->off) {
		buf->last_with_data = buf->last_with_data->next;
		++n;
	}
	return n;
}

int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *firstchain, *chain;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it. */
		if (vec[0].iov_len > CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			buf->last_with_data = buf->last;
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchain = buf->last_with_data;
	if (!firstchain)
		goto done;
	if (CHAIN_SPACE_LEN(firstchain) == 0) {
		firstchain = firstchain->next;
	}

	chain = firstchain;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != CHAIN_SPACE_PTR(chain) ||
		    vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chain = firstchain;
	for (i=0; i<n_vecs; ++i) {
		chain->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len)
			buf->last_with_data = chain;
		chain = chain->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

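/* Chain-list manipulation helpers.  They keep first, last, last_with_data
 * and total_len consistent; both evbuffers involved must be locked. */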
#define ZERO_CHAIN(dst) do { \
		ASSERT_EVBUFFER_LOCKED(dst);	\
		(dst)->first = NULL;		\
		(dst)->last = NULL;		\
		(dst)->last_with_data = NULL;	\
		(dst)->total_len = 0;		\
	} while (0)

#define COPY_CHAIN(dst, src) do { \
		ASSERT_EVBUFFER_LOCKED(dst);			   \
		ASSERT_EVBUFFER_LOCKED(src);			   \
		(dst)->first = (src)->first;			   \
		(dst)->last_with_data = (src)->last_with_data;	   \
		(dst)->last = (src)->last;			   \
		(dst)->total_len = (src)->total_len;		   \
	} while (0)

#define APPEND_CHAIN(dst, src) do {					\
		ASSERT_EVBUFFER_LOCKED(dst);				\
		ASSERT_EVBUFFER_LOCKED(src);				\
		(dst)->last->next = (src)->first;			\
		if ((src)->last_with_data)				\
			(dst)->last_with_data = (src)->last_with_data;	\
		(dst)->last = (src)->last;				\
		(dst)->total_len += (src)->total_len;			\
	} while (0)

#define PREPEND_CHAIN(dst, src) do {				\
		ASSERT_EVBUFFER_LOCKED(dst);			\
		ASSERT_EVBUFFER_LOCKED(src);			\
		(src)->last->next = (dst)->first;		\
		(dst)->first = (src)->first;			\
		(dst)->total_len += (src)->total_len;		\
		if ((dst)->last_with_data == NULL)		\
			(dst)->last_with_data = (src)->last_with_data; \
	} while (0)

int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	/* remove everything from inbuf */
	ZERO_CHAIN(inbuf);
	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	/* remove everything from inbuf */
	ZERO_CHAIN(inbuf);
	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

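/* Discard up to 'len' bytes from the front of 'buf'.  Fails if the start
 * of the buffer is frozen; chains pinned by an in-flight operation are not
 * freed out from under it. */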
int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !(buf->last && CHAIN_PINNED_R(buf->last))) {
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;

			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;

		for (chain = buf->first; len >= chain->off; chain = next) {
			next = chain->next;
			len -= chain->off;
			if (chain == buf->last_with_data)
				buf->last_with_data = next;

			if (len == 0 && CHAIN_PINNED_R(chain))
				break;
			evbuffer_chain_free(chain);
		}

		buf->first = chain;
		chain->misalign += len;
		chain->off -= len;
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Reads data from an event buffer and drains the bytes read */

int
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain, *tmp;
	char *data = data_out;
	size_t nread;
	int result = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (datlen >= buf->total_len)
		datlen = buf->total_len;

	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	while (datlen && datlen >= chain->off) {
		memcpy(data, chain->buffer + chain->misalign, chain->off);
		data += chain->off;
		datlen -= chain->off;

		if (chain == buf->last_with_data)
			buf->last_with_data = chain->next;

		tmp = chain;
		chain = chain->next;
		evbuffer_chain_free(tmp);
	}

	buf->first = chain;
	if (chain == NULL)
		buf->last = NULL;

	if (datlen) {
		memcpy(data, chain->buffer + chain->misalign, datlen);
		chain->misalign += datlen;
		chain->off -= datlen;
	}

	buf->total_len -= nread;

	buf->n_del_for_cb += nread;
	if (nread)
		evbuffer_invoke_callbacks(buf);

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Reads data from the src buffer to the dst buffer, avoiding memcpy where
 * possible. */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = datlen;
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != src->last_with_data);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain */
		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			dst->last->next = src->first;
		}
		dst->last = previous;
		dst->last_with_data = dst->last;
		previous->next = NULL;
		src->first = chain;

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks(dst);
		evbuffer_invoke_callbacks(src);
	}
	result = nread;

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}

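/* Linearize the first 'size' bytes of 'buf' (all of it if size is negative)
 * so they are contiguous in memory, copying into the first chain or a newly
 * allocated one as needed; returns NULL if size exceeds the buffered data
 * or a pinned chain prevents the copy. */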
2008-05-12 00:40:04 +00:00
|
|
|
unsigned char *
|
2009-05-22 19:11:48 +00:00
|
|
|
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
|
2008-02-28 02:47:43 +00:00
|
|
|
{
|
2009-04-05 02:44:17 +00:00
|
|
|
struct evbuffer_chain *chain, *next, *tmp;
|
|
|
|
unsigned char *buffer, *result = NULL;
|
2009-05-22 19:11:48 +00:00
|
|
|
ev_ssize_t remaining;
|
Revise evbuffer to add last_with_data
This is the first patch in a series to replace previous_to_last with
last_with_data. Currently, we can only use two partially empty chains
at the end of an evbuffer, so if we have one with 511 bytes free, and
another with 512 bytes free, and we try to do a 1024 byte read, we
can't just stick another chain on the end: we need to reallocate the
last one. That's stupid and inefficient.
Instead, this patch adds a last_with_data pointer to eventually
replace previous_to_last. Instead of pointing to the penultimated
chain (if any) as previous_to_last does, last_with_data points to the
last chain that has any data in it, if any. If all chains are empty,
last_with_data points to the first chain. If there are no chains,
last_with_data is NULL.
The next step is to start using last_with_data everywhere that we
currently use previous_to_last. When that's done, we can remove
previous_to_last and the code that maintains it.
2010-03-10 22:16:14 -05:00
|
|
|
int removed_last_with_data = 0;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
chain = buf->first;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2009-05-01 00:54:14 +00:00
|
|
|
if (size < 0)
|
2008-02-28 02:47:43 +00:00
|
|
|
size = buf->total_len;
|
2008-02-29 05:23:49 +00:00
|
|
|
/* if size > buf->total_len, we cannot guarantee to the user that she
|
|
|
|
* is going to have a long enough buffer afterwards; so we return
|
|
|
|
* NULL */
|
2009-05-01 00:54:14 +00:00
|
|
|
if (size == 0 || (size_t)size > buf->total_len)
|
2010-02-18 17:41:15 -05:00
|
|
|
goto done;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2008-02-29 05:23:49 +00:00
|
|
|
/* No need to pull up anything; the first size bytes are
|
|
|
|
* already here. */
|
2010-02-18 17:41:15 -05:00
|
|
|
if (chain->off >= (size_t)size) {
|
|
|
|
result = chain->buffer + chain->misalign;
|
|
|
|
goto done;
|
|
|
|
}
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2009-04-08 03:03:59 +00:00
|
|
|
/* Make sure that none of the chains we need to copy from is pinned. */
|
|
|
|
remaining = size - chain->off;
|
2009-10-26 20:00:43 +00:00
|
|
|
EVUTIL_ASSERT(remaining >= 0);
|
2009-04-08 03:03:59 +00:00
|
|
|
for (tmp=chain->next; tmp; tmp=tmp->next) {
|
|
|
|
if (CHAIN_PINNED(tmp))
|
|
|
|
goto done;
|
2009-05-01 00:54:14 +00:00
|
|
|
if (tmp->off >= (size_t)remaining)
|
2009-04-08 03:03:59 +00:00
|
|
|
break;
|
|
|
|
remaining -= tmp->off;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (CHAIN_PINNED(chain)) {
|
|
|
|
size_t old_off = chain->off;
|
|
|
|
if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
|
|
|
|
/* not enough room at end of chunk. */
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
buffer = CHAIN_SPACE_PTR(chain);
|
|
|
|
tmp = chain;
|
|
|
|
tmp->off = size;
|
|
|
|
size -= old_off;
|
|
|
|
chain = chain->next;
|
2009-05-01 00:54:14 +00:00
|
|
|
} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
|
2008-02-29 05:23:49 +00:00
|
|
|
/* already have enough space in the first chain */
|
|
|
|
size_t old_off = chain->off;
|
|
|
|
buffer = chain->buffer + chain->misalign + chain->off;
|
|
|
|
tmp = chain;
|
|
|
|
tmp->off = size;
|
|
|
|
size -= old_off;
|
|
|
|
chain = chain->next;
|
|
|
|
} else {
|
|
|
|
if ((tmp = evbuffer_chain_new(size)) == NULL) {
|
2009-10-27 04:04:07 +00:00
|
|
|
event_warn("%s: out of memory", __func__);
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2008-02-29 05:23:49 +00:00
|
|
|
}
|
|
|
|
buffer = tmp->buffer;
|
|
|
|
tmp->off = size;
|
|
|
|
buf->first = tmp;
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
/* TODO(niels): deal with buffers that point to NULL like sendfile */
|
|
|
|
|
2008-02-28 18:36:03 +00:00
|
|
|
/* Copy and free every chunk that will be entirely pulled into tmp */
|
2009-05-01 00:54:14 +00:00
|
|
|
for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
|
2008-02-28 02:47:43 +00:00
|
|
|
next = chain->next;
|
|
|
|
|
|
|
|
memcpy(buffer, chain->buffer + chain->misalign, chain->off);
|
|
|
|
size -= chain->off;
|
|
|
|
buffer += chain->off;
|
Revise evbuffer to add last_with_data
This is the first patch in a series to replace previous_to_last with
last_with_data. Currently, we can only use two partially empty chains
at the end of an evbuffer, so if we have one with 511 bytes free, and
another with 512 bytes free, and we try to do a 1024 byte read, we
can't just stick another chain on the end: we need to reallocate the
last one. That's stupid and inefficient.
Instead, this patch adds a last_with_data pointer to eventually
replace previous_to_last. Instead of pointing to the penultimate
chain (if any) as previous_to_last does, last_with_data points to the
last chain that has any data in it, if any. If all chains are empty,
last_with_data points to the first chain. If there are no chains,
last_with_data is NULL.
The next step is to start using last_with_data everywhere that we
currently use previous_to_last. When that's done, we can remove
previous_to_last and the code that maintains it.
2010-03-10 22:16:14 -05:00
|
|
|
if (chain == buf->last_with_data)
|
|
|
|
removed_last_with_data = 1;
|
2009-01-27 21:10:31 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
evbuffer_chain_free(chain);
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (chain != NULL) {
|
|
|
|
memcpy(buffer, chain->buffer + chain->misalign, size);
|
|
|
|
chain->misalign += size;
|
|
|
|
chain->off -= size;
|
|
|
|
} else {
|
|
|
|
buf->last = tmp;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp->next = chain;
|
|
|
|
|
Revise evbuffer to add last_with_data
2010-03-10 22:16:14 -05:00
|
|
|
if (removed_last_with_data) {
|
|
|
|
int n;
|
|
|
|
buf->last_with_data = buf->first;
|
|
|
|
n = advance_last_with_data(buf);
|
|
|
|
EVUTIL_ASSERT(n == 0);
|
|
|
|
}
|
|
|
|
|
2009-04-05 02:44:17 +00:00
|
|
|
result = (tmp->buffer + tmp->misalign);
|
|
|
|
|
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buf);
|
|
|
|
return result;
|
2008-02-28 02:47:43 +00:00
|
|
|
}
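The function above linearizes the first size bytes of the buffer into one contiguous chain and returns a pointer to them (NULL if that many bytes are not buffered, or if size is 0). A minimal caller-side sketch, assuming the public evbuffer_pullup() prototype from event2/buffer.h; the 4-byte header format and helper name are purely illustrative:

#include <string.h>
#include <event2/buffer.h>

/* Sketch: look at a fixed-size record header without draining it.
 * Returns 0 on success, -1 if fewer than 4 bytes are buffered so far. */
static int
peek_header(struct evbuffer *buf, unsigned char header[4])
{
	unsigned char *data = evbuffer_pullup(buf, 4);
	if (data == NULL)
		return -1;
	memcpy(header, data, 4);	/* the bytes stay owned by the evbuffer */
	return 0;
}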
|
|
|
|
|
2005-04-23 02:53:39 +00:00
|
|
|
/*
|
|
|
|
 * Reads a line terminated by '\r\n', '\n\r', '\r', or '\n'.
|
|
|
|
 * The returned buffer needs to be freed by the caller.
|
|
|
|
*/
|
|
|
|
char *
|
|
|
|
evbuffer_readline(struct evbuffer *buffer)
|
2007-11-25 21:32:26 +00:00
|
|
|
{
|
|
|
|
return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
|
|
|
|
}
|
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
static inline int
|
2009-07-31 17:34:47 +00:00
|
|
|
evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
|
2008-02-28 02:47:43 +00:00
|
|
|
{
|
2009-07-31 17:34:47 +00:00
|
|
|
struct evbuffer_chain *chain = it->_internal.chain;
|
|
|
|
unsigned i = it->_internal.pos_in_chain;
|
2008-02-28 02:47:43 +00:00
|
|
|
while (chain != NULL) {
|
|
|
|
char *buffer = (char *)chain->buffer + chain->misalign;
|
2010-03-02 17:00:06 -05:00
|
|
|
char *cp = memchr(buffer+i, chr, chain->off-i);
|
|
|
|
if (cp) {
|
|
|
|
it->_internal.chain = chain;
|
|
|
|
it->_internal.pos_in_chain = cp - buffer;
|
|
|
|
			it->pos += (cp - buffer) - i;
|
|
|
|
return it->pos;
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
2010-03-02 17:00:06 -05:00
|
|
|
it->pos += chain->off - i;
|
2008-02-28 02:47:43 +00:00
|
|
|
i = 0;
|
|
|
|
chain = chain->next;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
2010-03-02 17:00:06 -05:00
|
|
|
static inline char *
|
|
|
|
find_eol_char(char *s, size_t len)
|
|
|
|
{
|
|
|
|
#define CHUNK_SZ 128
|
|
|
|
/* Lots of benchmarking found this approach to be faster in practice
|
|
|
|
	 * than doing two memchrs over the whole buffer, doing a memchr on each
|
|
|
|
* char of the buffer, or trying to emulate memchr by hand. */
|
|
|
|
char *s_end, *cr, *lf;
|
|
|
|
s_end = s+len;
|
|
|
|
while (s < s_end) {
|
|
|
|
size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
|
|
|
|
cr = memchr(s, '\r', chunk);
|
|
|
|
lf = memchr(s, '\n', chunk);
|
|
|
|
if (cr) {
|
|
|
|
if (lf && lf < cr)
|
|
|
|
return lf;
|
|
|
|
return cr;
|
|
|
|
} else if (lf) {
|
|
|
|
return lf;
|
|
|
|
}
|
|
|
|
s += CHUNK_SZ;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
#undef CHUNK_SZ
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
evbuffer_find_eol_char(struct evbuffer_ptr *it)
|
2008-02-28 02:47:43 +00:00
|
|
|
{
|
2009-07-31 17:34:47 +00:00
|
|
|
struct evbuffer_chain *chain = it->_internal.chain;
|
|
|
|
unsigned i = it->_internal.pos_in_chain;
|
2008-02-28 02:47:43 +00:00
|
|
|
while (chain != NULL) {
|
|
|
|
char *buffer = (char *)chain->buffer + chain->misalign;
|
2010-03-02 17:00:06 -05:00
|
|
|
char *cp = find_eol_char(buffer+i, chain->off-i);
|
|
|
|
if (cp) {
|
|
|
|
it->_internal.chain = chain;
|
|
|
|
it->_internal.pos_in_chain = cp - buffer;
|
|
|
|
it->pos += (cp - buffer) - i;
|
|
|
|
return it->pos;
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
2010-03-02 17:00:06 -05:00
|
|
|
it->pos += chain->off - i;
|
2008-02-28 02:47:43 +00:00
|
|
|
i = 0;
|
|
|
|
chain = chain->next;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
evbuffer_strspn(
|
2009-07-31 17:34:47 +00:00
|
|
|
struct evbuffer_ptr *ptr, const char *chrset)
|
2008-02-28 02:47:43 +00:00
|
|
|
{
|
|
|
|
int count = 0;
|
2009-07-31 17:34:47 +00:00
|
|
|
struct evbuffer_chain *chain = ptr->_internal.chain;
|
|
|
|
unsigned i = ptr->_internal.pos_in_chain;
|
|
|
|
|
|
|
|
if (!chain)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
while (1) {
|
2008-02-28 02:47:43 +00:00
|
|
|
char *buffer = (char *)chain->buffer + chain->misalign;
|
|
|
|
for (; i < chain->off; ++i) {
|
|
|
|
const char *p = chrset;
|
|
|
|
while (*p) {
|
|
|
|
if (buffer[i] == *p++)
|
|
|
|
goto next;
|
|
|
|
}
|
2009-07-31 17:34:47 +00:00
|
|
|
ptr->_internal.chain = chain;
|
|
|
|
ptr->_internal.pos_in_chain = i;
|
|
|
|
ptr->pos += count;
|
2008-02-28 02:47:43 +00:00
|
|
|
return count;
|
|
|
|
next:
|
|
|
|
++count;
|
|
|
|
}
|
|
|
|
i = 0;
|
|
|
|
|
2009-07-31 17:34:47 +00:00
|
|
|
if (! chain->next) {
|
|
|
|
ptr->_internal.chain = chain;
|
|
|
|
ptr->_internal.pos_in_chain = i;
|
|
|
|
ptr->pos += count;
|
|
|
|
return count;
|
|
|
|
}
|
2008-02-28 02:47:43 +00:00
|
|
|
|
|
|
|
chain = chain->next;
|
|
|
|
}
|
2009-07-31 17:34:47 +00:00
|
|
|
}
|
2008-02-28 02:47:43 +00:00
|
|
|
|
|
|
|
|
2009-07-31 17:34:47 +00:00
|
|
|
static inline char
|
|
|
|
evbuffer_getchr(struct evbuffer_ptr *it)
|
|
|
|
{
|
|
|
|
struct evbuffer_chain *chain = it->_internal.chain;
|
|
|
|
int off = it->_internal.pos_in_chain;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2009-07-31 17:34:47 +00:00
|
|
|
return chain->buffer[chain->misalign + off];
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
|
|
|
|
2009-07-31 17:35:42 +00:00
|
|
|
struct evbuffer_ptr
|
|
|
|
evbuffer_search_eol(struct evbuffer *buffer,
|
|
|
|
struct evbuffer_ptr *start, size_t *eol_len_out,
|
|
|
|
enum evbuffer_eol_style eol_style)
|
2005-04-23 02:53:39 +00:00
|
|
|
{
|
2009-07-31 17:35:42 +00:00
|
|
|
struct evbuffer_ptr it, it2;
|
2010-02-18 17:41:15 -05:00
|
|
|
size_t extra_drain = 0;
|
2009-07-31 17:35:42 +00:00
|
|
|
int ok = 0;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2009-07-31 17:35:42 +00:00
|
|
|
if (start) {
|
|
|
|
memcpy(&it, start, sizeof(it));
|
|
|
|
} else {
|
|
|
|
it.pos = 0;
|
|
|
|
it._internal.chain = buffer->first;
|
|
|
|
it._internal.pos_in_chain = 0;
|
2009-04-08 03:04:39 +00:00
|
|
|
}
|
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
/* the eol_style determines our first stop character and how many
|
|
|
|
* characters we are going to drain afterwards. */
|
2007-11-25 21:32:26 +00:00
|
|
|
switch (eol_style) {
|
|
|
|
case EVBUFFER_EOL_ANY:
|
2010-03-02 17:00:06 -05:00
|
|
|
if (evbuffer_find_eol_char(&it) < 0)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2009-07-31 17:35:42 +00:00
|
|
|
memcpy(&it2, &it, sizeof(it));
|
|
|
|
extra_drain = evbuffer_strspn(&it2, "\r\n");
|
2007-11-25 21:32:26 +00:00
|
|
|
break;
|
|
|
|
case EVBUFFER_EOL_CRLF_STRICT: {
|
2009-07-31 17:35:42 +00:00
|
|
|
it = evbuffer_search(buffer, "\r\n", 2, &it);
|
|
|
|
if (it.pos < 0)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2008-02-28 02:47:43 +00:00
|
|
|
extra_drain = 2;
|
2007-11-25 21:32:26 +00:00
|
|
|
break;
|
2005-04-23 02:53:39 +00:00
|
|
|
}
|
2008-02-28 02:47:43 +00:00
|
|
|
case EVBUFFER_EOL_CRLF:
|
2009-07-31 17:35:42 +00:00
|
|
|
while (1) {
|
2010-03-02 17:00:06 -05:00
|
|
|
if (evbuffer_find_eol_char(&it) < 0)
|
2009-07-31 17:35:42 +00:00
|
|
|
goto done;
|
|
|
|
if (evbuffer_getchr(&it) == '\n') {
|
|
|
|
extra_drain = 1;
|
|
|
|
break;
|
|
|
|
} else if (!evbuffer_ptr_memcmp(
|
|
|
|
buffer, &it, "\r\n", 2)) {
|
|
|
|
extra_drain = 2;
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
if (evbuffer_ptr_set(buffer, &it, 1,
|
|
|
|
EVBUFFER_PTR_ADD)<0)
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
2007-11-25 21:32:26 +00:00
|
|
|
case EVBUFFER_EOL_LF:
|
2009-07-31 17:35:42 +00:00
|
|
|
if (evbuffer_strchr(&it, '\n') < 0)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2008-02-28 02:47:43 +00:00
|
|
|
extra_drain = 1;
|
2007-11-25 21:32:26 +00:00
|
|
|
break;
|
|
|
|
default:
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2007-11-25 21:32:26 +00:00
|
|
|
}
|
|
|
|
|
2009-07-31 17:35:42 +00:00
|
|
|
ok = 1;
|
|
|
|
done:
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2009-07-31 17:35:42 +00:00
|
|
|
|
|
|
|
if (!ok) {
|
|
|
|
it.pos = -1;
|
|
|
|
}
|
|
|
|
if (eol_len_out)
|
|
|
|
*eol_len_out = extra_drain;
|
|
|
|
|
|
|
|
return it;
|
|
|
|
}
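A hedged usage sketch for the function above: the returned evbuffer_ptr carries the offset of the line terminator (pos is -1 when no complete line is buffered), and *eol_len_out reports how many terminator bytes would need to be drained. The helper name below is illustrative only.

#include <event2/buffer.h>
#include <event2/util.h>

/* Sketch: report where the next CRLF-style line ends, without copying. */
static ev_ssize_t
next_line_length(struct evbuffer *buf, size_t *eol_len)
{
	struct evbuffer_ptr p =
	    evbuffer_search_eol(buf, NULL, eol_len, EVBUFFER_EOL_CRLF);
	return p.pos;	/* -1 when no complete line is available yet */
}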
|
|
|
|
|
|
|
|
char *
|
|
|
|
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
|
|
|
|
enum evbuffer_eol_style eol_style)
|
|
|
|
{
|
|
|
|
struct evbuffer_ptr it;
|
|
|
|
char *line;
|
|
|
|
size_t n_to_copy=0, extra_drain=0;
|
2010-02-18 17:41:15 -05:00
|
|
|
char *result = NULL;
|
2009-07-31 17:35:42 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-07-31 17:35:42 +00:00
|
|
|
|
|
|
|
if (buffer->freeze_start) {
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
|
|
|
|
if (it.pos < 0)
|
|
|
|
goto done;
|
|
|
|
n_to_copy = it.pos;
|
|
|
|
|
2008-04-25 01:18:08 +00:00
|
|
|
if ((line = mm_malloc(n_to_copy+1)) == NULL) {
|
2009-10-27 04:04:07 +00:00
|
|
|
event_warn("%s: out of memory", __func__);
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2005-04-23 02:53:39 +00:00
|
|
|
}
|
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
evbuffer_remove(buffer, line, n_to_copy);
|
2007-11-25 21:32:26 +00:00
|
|
|
line[n_to_copy] = '\0';
|
2005-04-23 02:53:39 +00:00
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
evbuffer_drain(buffer, extra_drain);
|
2010-02-18 17:41:15 -05:00
|
|
|
result = line;
|
2009-04-05 02:44:17 +00:00
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2009-07-31 17:35:42 +00:00
|
|
|
|
|
|
|
if (n_read_out)
|
|
|
|
*n_read_out = result ? n_to_copy : 0;
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
return result;
|
2005-04-23 02:53:39 +00:00
|
|
|
}
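A short caller-side sketch of evbuffer_readln() above. The line is handed to the caller, so it should be released with free() (under the default allocator); the loop below is only an illustration:

#include <stdio.h>
#include <stdlib.h>
#include <event2/buffer.h>

/* Sketch: drain every complete CRLF-terminated line currently buffered. */
static void
print_complete_lines(struct evbuffer *buf)
{
	size_t n;
	char *line;
	while ((line = evbuffer_readln(buf, &n, EVBUFFER_EOL_CRLF)) != NULL) {
		printf("got %zu bytes: %s\n", n, line);
		free(line);	/* readln hands ownership to the caller */
	}
}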
|
|
|
|
|
2009-01-14 19:39:17 +00:00
|
|
|
#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
|
|
|
|
|
2004-05-24 00:19:52 +00:00
|
|
|
/* Adds data to an event buffer */
|
2004-03-23 03:43:53 +00:00
|
|
|
|
2004-07-13 08:02:45 +00:00
|
|
|
int
|
2008-02-28 17:38:52 +00:00
|
|
|
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
|
2004-07-13 08:02:45 +00:00
|
|
|
{
|
2009-04-05 02:44:17 +00:00
|
|
|
struct evbuffer_chain *chain, *tmp;
|
2008-05-12 00:40:04 +00:00
|
|
|
const unsigned char *data = data_in;
|
2009-04-03 14:27:03 +00:00
|
|
|
size_t remain, to_alloc;
|
2010-02-18 17:41:15 -05:00
|
|
|
int result = -1;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
if (buf->freeze_end) {
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
chain = buf->last;
|
2004-07-13 08:02:45 +00:00
|
|
|
|
2008-02-28 18:36:03 +00:00
|
|
|
/* If there are no chains allocated for this buffer, allocate one
|
|
|
|
* big enough to hold all the data. */
|
2008-02-28 02:47:43 +00:00
|
|
|
if (chain == NULL) {
|
2010-03-26 14:50:45 -04:00
|
|
|
chain = evbuffer_chain_new(datlen);
|
|
|
|
if (!chain)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2010-03-26 14:50:45 -04:00
|
|
|
evbuffer_chain_insert(buf, chain);
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
2004-07-13 08:02:45 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
|
|
|
|
remain = chain->buffer_len - chain->misalign - chain->off;
|
|
|
|
if (remain >= datlen) {
|
|
|
|
/* there's enough space to hold all the data in the
|
|
|
|
* current last chain */
|
|
|
|
memcpy(chain->buffer + chain->misalign + chain->off,
|
|
|
|
data, datlen);
|
|
|
|
chain->off += datlen;
|
|
|
|
buf->total_len += datlen;
|
2010-02-18 17:41:15 -05:00
|
|
|
buf->n_add_for_cb += datlen;
|
2009-01-27 06:05:38 +00:00
|
|
|
goto out;
|
2009-05-01 00:54:14 +00:00
|
|
|
} else if ((size_t)chain->misalign >= datlen && !CHAIN_PINNED(chain)) {
|
2009-01-27 06:05:38 +00:00
|
|
|
/* we can fit the data into the misalignment */
|
2009-04-08 03:03:59 +00:00
|
|
|
evbuffer_chain_align(chain);
|
2009-01-27 21:10:31 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
memcpy(chain->buffer + chain->off, data, datlen);
|
|
|
|
chain->off += datlen;
|
|
|
|
buf->total_len += datlen;
|
2010-02-18 17:41:15 -05:00
|
|
|
buf->n_add_for_cb += datlen;
|
2009-01-27 06:05:38 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* we cannot write any data to the last chain */
|
|
|
|
remain = 0;
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
2004-07-13 08:02:45 +00:00
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
/* we need to add another chain */
|
2009-01-14 19:39:17 +00:00
|
|
|
to_alloc = chain->buffer_len;
|
|
|
|
if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
|
|
|
|
to_alloc <<= 1;
|
2008-02-28 02:47:43 +00:00
|
|
|
if (datlen > to_alloc)
|
|
|
|
to_alloc = datlen;
|
2009-01-27 06:05:38 +00:00
|
|
|
tmp = evbuffer_chain_new(to_alloc);
|
|
|
|
if (tmp == NULL)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2009-01-27 21:10:31 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
if (remain) {
|
|
|
|
memcpy(chain->buffer + chain->misalign + chain->off,
|
|
|
|
data, remain);
|
|
|
|
chain->off += remain;
|
|
|
|
buf->total_len += remain;
|
2010-02-18 17:41:15 -05:00
|
|
|
buf->n_add_for_cb += remain;
|
2009-01-27 06:05:38 +00:00
|
|
|
}
|
2008-02-28 18:36:03 +00:00
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
data += remain;
|
|
|
|
datlen -= remain;
|
2004-07-13 08:02:45 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
memcpy(tmp->buffer, data, datlen);
|
|
|
|
tmp->off = datlen;
|
|
|
|
evbuffer_chain_insert(buf, tmp);
|
2008-02-28 02:47:43 +00:00
|
|
|
|
|
|
|
out:
|
2009-04-03 14:27:03 +00:00
|
|
|
evbuffer_invoke_callbacks(buf);
|
2010-02-18 17:41:15 -05:00
|
|
|
result = 0;
|
2009-04-05 02:44:17 +00:00
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
return result;
|
2004-07-13 08:02:45 +00:00
|
|
|
}
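For reference, a minimal append sketch for the function above. Note from the code that when a fresh chain is needed, its size starts from the previous chain's size, doubles while at or below EVBUFFER_CHAIN_MAX_AUTO_SIZE/2, and is never smaller than the data being added. The helper below is illustrative only:

#include <string.h>
#include <event2/buffer.h>

/* Sketch: append a C string followed by CRLF to an evbuffer. */
static int
add_line(struct evbuffer *buf, const char *s)
{
	if (evbuffer_add(buf, s, strlen(s)) == -1)
		return -1;
	return evbuffer_add(buf, "\r\n", 2);
}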
|
|
|
|
|
2004-05-24 00:19:52 +00:00
|
|
|
int
|
2008-02-28 02:47:43 +00:00
|
|
|
evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
|
2004-05-24 00:19:52 +00:00
|
|
|
{
|
2009-04-05 02:44:17 +00:00
|
|
|
struct evbuffer_chain *chain, *tmp;
|
2010-02-18 17:41:15 -05:00
|
|
|
int result = -1;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
2009-04-08 03:04:39 +00:00
|
|
|
|
|
|
|
if (buf->freeze_start) {
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
chain = buf->first;
|
2004-03-23 03:43:53 +00:00
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
if (chain == NULL) {
|
2010-03-26 14:50:45 -04:00
|
|
|
chain = evbuffer_chain_new(datlen);
|
|
|
|
if (!chain)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2010-03-26 14:50:45 -04:00
|
|
|
evbuffer_chain_insert(buf, chain);
|
2004-02-22 21:17:23 +00:00
|
|
|
}
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
/* we cannot touch immutable buffers */
|
|
|
|
if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
|
2010-03-26 14:51:39 -04:00
|
|
|
/* If this chain is empty, we can treat it as
|
|
|
|
* 'empty at the beginning' rather than 'empty at the end' */
|
|
|
|
if (chain->off == 0)
|
|
|
|
chain->misalign = chain->buffer_len;
|
|
|
|
|
2009-05-01 00:54:14 +00:00
|
|
|
if ((size_t)chain->misalign >= datlen) {
|
2010-03-26 14:51:39 -04:00
|
|
|
/* we have enough space to fit everything */
|
2009-01-27 06:05:38 +00:00
|
|
|
memcpy(chain->buffer + chain->misalign - datlen,
|
|
|
|
data, datlen);
|
|
|
|
chain->off += datlen;
|
|
|
|
chain->misalign -= datlen;
|
|
|
|
buf->total_len += datlen;
|
2010-02-18 17:41:15 -05:00
|
|
|
buf->n_add_for_cb += datlen;
|
2009-01-27 06:05:38 +00:00
|
|
|
goto out;
|
|
|
|
} else if (chain->misalign) {
|
2010-03-26 14:51:39 -04:00
|
|
|
/* we can only fit some of the data. */
|
2009-01-27 06:05:38 +00:00
|
|
|
memcpy(chain->buffer,
|
2009-01-27 13:37:09 +00:00
|
|
|
(char*)data + datlen - chain->misalign,
|
2009-01-27 06:05:38 +00:00
|
|
|
chain->misalign);
|
|
|
|
chain->off += chain->misalign;
|
|
|
|
buf->total_len += chain->misalign;
|
2010-02-18 17:41:15 -05:00
|
|
|
buf->n_add_for_cb += chain->misalign;
|
2009-01-27 06:05:38 +00:00
|
|
|
datlen -= chain->misalign;
|
|
|
|
chain->misalign = 0;
|
|
|
|
}
|
|
|
|
}
|
2008-02-28 18:36:03 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
/* we need to add another chain */
|
|
|
|
if ((tmp = evbuffer_chain_new(datlen)) == NULL)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2009-01-27 06:05:38 +00:00
|
|
|
buf->first = tmp;
|
Revise evbuffer to add last_with_data
2010-03-10 22:16:14 -05:00
|
|
|
if (buf->last_with_data == NULL)
|
|
|
|
buf->last_with_data = tmp;
|
|
|
|
else if (chain && buf->last_with_data == chain && 0==chain->off)
|
|
|
|
buf->last_with_data = tmp;
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
tmp->next = chain;
|
2004-03-23 03:43:53 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
tmp->off = datlen;
|
|
|
|
tmp->misalign = tmp->buffer_len - datlen;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
memcpy(tmp->buffer + tmp->misalign, data, datlen);
|
2008-02-28 02:47:43 +00:00
|
|
|
buf->total_len += datlen;
|
2010-02-18 17:41:15 -05:00
|
|
|
	buf->n_add_for_cb += datlen;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
out:
|
2009-04-03 14:27:03 +00:00
|
|
|
evbuffer_invoke_callbacks(buf);
|
2010-02-18 17:41:15 -05:00
|
|
|
result = 0;
|
2009-04-05 02:44:17 +00:00
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
return result;
|
2004-02-22 21:17:23 +00:00
|
|
|
}
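A sketch of the prepend path above, for example writing a length prefix in front of an already-built payload. The 4-byte big-endian framing is purely illustrative, and evbuffer_get_length() is assumed from the public API:

#include <event2/buffer.h>

/* Sketch: prefix the buffered payload with its 32-bit big-endian length. */
static int
frame_payload(struct evbuffer *buf)
{
	size_t len = evbuffer_get_length(buf);
	unsigned char hdr[4];
	hdr[0] = (unsigned char)((len >> 24) & 0xff);
	hdr[1] = (unsigned char)((len >> 16) & 0xff);
	hdr[2] = (unsigned char)((len >> 8) & 0xff);
	hdr[3] = (unsigned char)(len & 0xff);
	return evbuffer_prepend(buf, hdr, sizeof(hdr));
}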
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
|
2008-02-28 02:47:43 +00:00
|
|
|
static void
|
|
|
|
evbuffer_chain_align(struct evbuffer_chain *chain)
|
|
|
|
{
|
2009-10-26 20:00:43 +00:00
|
|
|
EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
|
|
|
|
EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
|
2008-02-28 02:47:43 +00:00
|
|
|
memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
|
|
|
|
chain->misalign = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Expands the available space in the event buffer to at least datlen */
|
|
|
|
|
|
|
|
int
|
|
|
|
evbuffer_expand(struct evbuffer *buf, size_t datlen)
|
2004-02-22 21:17:23 +00:00
|
|
|
{
|
2009-01-19 21:53:03 +00:00
|
|
|
/* XXX we should either make this function less costly, or call it
|
2010-02-18 17:41:15 -05:00
|
|
|
* less often. */
|
2009-04-05 02:44:17 +00:00
|
|
|
struct evbuffer_chain *chain, *tmp;
|
2008-02-28 02:47:43 +00:00
|
|
|
size_t need, length;
|
2010-02-18 17:41:15 -05:00
|
|
|
int result = -1;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
chain = buf->last;
|
2004-04-04 02:20:21 +00:00
|
|
|
|
2009-04-08 03:03:59 +00:00
|
|
|
if (chain == NULL ||
|
|
|
|
(chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
|
2008-02-28 02:47:43 +00:00
|
|
|
chain = evbuffer_chain_new(datlen);
|
|
|
|
if (chain == NULL)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto err;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
evbuffer_chain_insert(buf, chain);
|
2010-02-18 17:41:15 -05:00
|
|
|
goto ok;
|
2004-02-22 21:17:23 +00:00
|
|
|
}
|
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
need = chain->misalign + chain->off + datlen;
|
2004-05-24 00:19:52 +00:00
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
/* If we can fit all the data, then we don't have to do anything */
|
|
|
|
if (chain->buffer_len >= need)
|
2010-02-18 17:41:15 -05:00
|
|
|
goto ok;
|
2004-04-04 02:20:21 +00:00
|
|
|
|
2009-10-16 13:19:57 +00:00
|
|
|
/* If the misalignment plus the remaining space fulfills our
|
2008-03-31 02:04:34 +00:00
|
|
|
* data needs, we just force an alignment to happen.
|
|
|
|
* Afterwards, we have enough space.
|
2008-02-28 02:47:43 +00:00
|
|
|
*/
|
2008-02-28 18:36:03 +00:00
|
|
|
if (chain->buffer_len - chain->off >= datlen) {
|
2008-02-28 02:47:43 +00:00
|
|
|
evbuffer_chain_align(chain);
|
2009-04-05 02:44:17 +00:00
|
|
|
goto ok;
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
|
|
|
|
2008-03-31 02:04:34 +00:00
|
|
|
/* figure out how much space we need */
|
|
|
|
length = chain->buffer_len - chain->misalign + datlen;
|
2008-02-28 17:38:52 +00:00
|
|
|
tmp = evbuffer_chain_new(length);
|
2008-02-28 02:47:43 +00:00
|
|
|
if (tmp == NULL)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto err;
|
2008-03-31 02:04:34 +00:00
|
|
|
/* copy the data over that we had so far */
|
|
|
|
tmp->off = chain->off;
|
|
|
|
tmp->misalign = 0;
|
|
|
|
memcpy(tmp->buffer, chain->buffer + chain->misalign, chain->off);
|
|
|
|
|
|
|
|
/* fix up the chain */
|
|
|
|
if (buf->first == chain)
|
|
|
|
buf->first = tmp;
|
2008-02-28 02:47:43 +00:00
|
|
|
buf->last = tmp;
|
Revise evbuffer to add last_with_data
2010-03-10 22:16:14 -05:00
|
|
|
if (buf->last->off || buf->last_with_data == chain)
|
|
|
|
buf->last_with_data = tmp;
|
2004-04-04 02:20:21 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
evbuffer_chain_free(chain);
|
2008-03-31 02:04:34 +00:00
|
|
|
|
2009-04-05 02:44:17 +00:00
|
|
|
ok:
|
2010-02-18 17:41:15 -05:00
|
|
|
result = 0;
|
2009-04-05 02:44:17 +00:00
|
|
|
err:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
return result;
|
2004-02-22 21:17:23 +00:00
|
|
|
}
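evbuffer_expand() above only reserves space; it does not change the amount of data in the buffer. A hedged sketch of the usual pattern (the helper name and sizes are illustrative):

#include <event2/buffer.h>

/* Sketch: pre-reserve room before a burst of small appends so the
 * last chain is not reallocated repeatedly. */
static int
reserve_and_fill(struct evbuffer *buf, const char *chunk, size_t chunklen, int n)
{
	int i;
	if (evbuffer_expand(buf, chunklen * n) == -1)
		return -1;
	for (i = 0; i < n; ++i) {
		if (evbuffer_add(buf, chunk, chunklen) == -1)
			return -1;
	}
	return 0;
}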
|
|
|
|
|
2010-03-10 23:24:14 -05:00
|
|
|
/* Make sure that datlen bytes are available for writing in the last n
|
2009-01-19 21:53:03 +00:00
|
|
|
* chains. Never copies or moves data. */
|
2009-04-14 20:11:10 +00:00
|
|
|
int
|
2010-03-10 23:24:14 -05:00
|
|
|
_evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
|
2009-01-19 21:53:03 +00:00
|
|
|
{
|
2010-03-10 23:24:14 -05:00
|
|
|
struct evbuffer_chain *chain = buf->last, *tmp, *next;
|
|
|
|
size_t avail;
|
|
|
|
int used;
|
2009-01-19 21:53:03 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
ASSERT_EVBUFFER_LOCKED(buf);
|
2010-03-10 23:24:14 -05:00
|
|
|
EVUTIL_ASSERT(n >= 2);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
|
2010-03-10 23:24:14 -05:00
|
|
|
/* There is no last chunk, or we can't touch the last chunk.
|
|
|
|
* Just add a new chunk. */
|
2009-01-19 21:53:03 +00:00
|
|
|
chain = evbuffer_chain_new(datlen);
|
|
|
|
if (chain == NULL)
|
|
|
|
return (-1);
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
evbuffer_chain_insert(buf, chain);
|
2009-01-19 21:53:03 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2010-03-10 23:24:14 -05:00
|
|
|
used = 0; /* number of chains we're using space in. */
|
|
|
|
avail = 0; /* how much space they have. */
|
|
|
|
	/* How many bytes can we stick at the end of the buffer as it is? Iterate
|
|
|
|
	 * over the chains at the end of the buffer, trying to see how much
|
|
|
|
* space we have in the first n. */
|
|
|
|
for (chain = buf->last_with_data; chain; chain = chain->next) {
|
|
|
|
if (chain->off) {
|
|
|
|
size_t space = CHAIN_SPACE_LEN(chain);
|
|
|
|
EVUTIL_ASSERT(chain == buf->last_with_data);
|
|
|
|
if (space) {
|
|
|
|
avail += space;
|
|
|
|
++used;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* No data in chain; realign it. */
|
|
|
|
chain->misalign = 0;
|
|
|
|
avail += chain->buffer_len;
|
|
|
|
++used;
|
|
|
|
}
|
|
|
|
if (avail >= datlen) {
|
|
|
|
/* There is already enough space. Just return */
|
|
|
|
return (0);
|
2009-01-19 21:53:03 +00:00
|
|
|
}
|
2010-03-10 23:24:14 -05:00
|
|
|
if (used == n)
|
|
|
|
break;
|
2009-01-19 21:53:03 +00:00
|
|
|
}
|
|
|
|
|
2010-03-10 23:24:14 -05:00
|
|
|
/* There wasn't enough space in the first n chains with space in
|
|
|
|
* them. Either add a new chain with enough space, or replace all
|
|
|
|
* empty chains with one that has enough space, depending on n. */
|
|
|
|
if (used < n) {
|
|
|
|
/* The loop ran off the end of the chains before it hit n
|
|
|
|
* chains; we can add another. */
|
|
|
|
EVUTIL_ASSERT(chain == NULL);
|
2009-01-19 21:53:03 +00:00
|
|
|
|
2010-03-10 23:24:14 -05:00
|
|
|
tmp = evbuffer_chain_new(datlen - avail);
|
2009-01-19 21:53:03 +00:00
|
|
|
if (tmp == NULL)
|
2010-03-10 23:24:14 -05:00
|
|
|
return (-1);
|
|
|
|
|
|
|
|
buf->last->next = tmp;
|
2009-01-19 21:53:03 +00:00
|
|
|
buf->last = tmp;
|
2010-03-10 23:24:14 -05:00
|
|
|
/* (we would only set last_with_data if we added the first
|
|
|
|
* chain. But if the buffer had no chains, we would have
|
|
|
|
* just allocated a new chain earlier) */
|
|
|
|
return (0);
|
2009-01-19 21:53:03 +00:00
|
|
|
} else {
|
2010-03-10 23:24:14 -05:00
|
|
|
/* Nuke _all_ the empty chains. */
|
|
|
|
int rmv_all = 0; /* True iff we removed last_with_data. */
|
|
|
|
chain = buf->last_with_data;
|
|
|
|
if (!chain->off) {
|
|
|
|
EVUTIL_ASSERT(chain == buf->first);
|
|
|
|
rmv_all = 1;
|
|
|
|
avail = 0;
|
|
|
|
} else {
|
|
|
|
avail = CHAIN_SPACE_LEN(chain);
|
|
|
|
chain = chain->next;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
for (; chain; chain = next) {
|
|
|
|
next = chain->next;
|
|
|
|
EVUTIL_ASSERT(chain->off == 0);
|
|
|
|
evbuffer_chain_free(chain);
|
|
|
|
}
|
|
|
|
tmp = evbuffer_chain_new(datlen - avail);
|
|
|
|
if (tmp == NULL) {
|
|
|
|
if (rmv_all) {
|
|
|
|
ZERO_CHAIN(buf);
|
|
|
|
} else {
|
|
|
|
buf->last = buf->last_with_data;
|
|
|
|
buf->last_with_data->next = NULL;
|
|
|
|
}
|
2009-01-19 21:53:03 +00:00
|
|
|
return (-1);
|
2010-03-10 23:24:14 -05:00
|
|
|
}
|
2009-01-19 21:53:03 +00:00
|
|
|
|
2010-03-10 23:24:14 -05:00
|
|
|
if (rmv_all) {
|
|
|
|
buf->first = buf->last = buf->last_with_data = tmp;
|
|
|
|
} else {
|
|
|
|
buf->last_with_data->next = tmp;
|
|
|
|
buf->last = tmp;
|
|
|
|
}
|
|
|
|
return (0);
|
2009-01-19 21:53:03 +00:00
|
|
|
}
|
|
|
|
}
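advance_last_with_data() is called from evbuffer_pullup() earlier in this section but is defined elsewhere in the file. Given the invariant stated in the commit message (last_with_data names the last chain holding data, or the first chain when all chains are empty), a plausible sketch of such a helper is shown below; this is an illustration of the invariant, not the file's actual definition:

/* Sketch only: walk last_with_data forward until it names the last chain
 * that actually holds bytes.  Returns 0 if it did not have to move. */
static int
advance_last_with_data_sketch(struct evbuffer *buf)
{
	int advanced = 0;
	if (!buf->last_with_data)
		return 0;
	while (buf->last_with_data->next && buf->last_with_data->next->off) {
		buf->last_with_data = buf->last_with_data->next;
		advanced = 1;
	}
	return advanced;
}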
|
|
|
|
|
2004-07-13 08:02:45 +00:00
|
|
|
/*
|
|
|
|
* Reads data from a file descriptor into a buffer.
|
|
|
|
*/
|
|
|
|
|
2009-02-03 05:22:57 +00:00
|
|
|
#if defined(_EVENT_HAVE_SYS_UIO_H) || defined(WIN32)
|
2009-01-19 21:53:03 +00:00
|
|
|
#define USE_IOVEC_IMPL
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef USE_IOVEC_IMPL
|
|
|
|
|
2009-01-27 22:30:46 +00:00
|
|
|
#ifdef _EVENT_HAVE_SYS_UIO_H
|
2009-01-19 21:53:03 +00:00
|
|
|
/* number of iovecs we use for writev; fragmentation is going to determine
|
|
|
|
* how much we end up writing */
|
2010-03-10 23:39:30 -05:00
|
|
|
#define NUM_WRITE_IOVEC 128
|
2009-01-19 21:53:03 +00:00
|
|
|
#define IOV_TYPE struct iovec
|
|
|
|
#define IOV_PTR_FIELD iov_base
|
|
|
|
#define IOV_LEN_FIELD iov_len
|
|
|
|
#else
|
2010-03-10 23:39:30 -05:00
|
|
|
#define NUM_WRITE_IOVEC 16
|
2009-01-19 21:53:03 +00:00
|
|
|
#define IOV_TYPE WSABUF
|
|
|
|
#define IOV_PTR_FIELD buf
|
|
|
|
#define IOV_LEN_FIELD len
|
|
|
|
#endif
|
|
|
|
#endif
|
2010-03-10 23:39:30 -05:00
|
|
|
#define NUM_READ_IOVEC 4
|
2009-01-19 21:53:03 +00:00
|
|
|
|
2004-07-13 08:02:45 +00:00
|
|
|
#define EVBUFFER_MAX_READ 4096
|
|
|
|
|
2009-04-13 03:05:46 +00:00
|
|
|
/** Helper function to figure out which space to use for reading data into
|
|
|
|
an evbuffer. Internal use only.
|
|
|
|
|
|
|
|
@param buf The buffer to read into
|
|
|
|
@param howmuch How much we want to read.
|
2010-03-10 23:24:14 -05:00
|
|
|
@param vecs An array of two or more iovecs or WSABUFs.
|
|
|
|
@param n_vecs_avail The length of vecs
|
2009-04-13 03:05:46 +00:00
|
|
|
@param chainp A pointer to a variable to hold the first chain we're
|
|
|
|
reading into.
|
2009-05-21 20:59:00 +00:00
|
|
|
@param exact Boolean: if true, we do not provide more than 'howmuch'
|
|
|
|
space in the vectors, even if more space is available.
|
2009-04-13 03:05:46 +00:00
|
|
|
@return The number of buffers we're using.
|
|
|
|
*/
|
|
|
|
int
|
2009-05-22 19:11:48 +00:00
|
|
|
_evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
|
2010-03-10 23:24:14 -05:00
|
|
|
struct evbuffer_iovec *vecs, int n_vecs_avail,
|
|
|
|
struct evbuffer_chain **chainp, int exact)
|
2009-04-13 03:05:46 +00:00
|
|
|
{
|
2010-03-10 23:24:14 -05:00
|
|
|
struct evbuffer_chain *chain, *firstchain;
|
|
|
|
size_t so_far;
|
|
|
|
int i;
|
|
|
|
ASSERT_EVBUFFER_LOCKED(buf);
|
2009-04-13 03:05:46 +00:00
|
|
|
|
2009-05-01 00:54:14 +00:00
|
|
|
if (howmuch < 0)
|
|
|
|
return -1;
|
|
|
|
|
2010-03-10 23:24:14 -05:00
|
|
|
so_far = 0;
|
|
|
|
/* Let firstchain be the first chain with any space on it */
|
|
|
|
firstchain = buf->last_with_data;
|
|
|
|
if (CHAIN_SPACE_LEN(firstchain) == 0)
|
|
|
|
firstchain = firstchain->next;
|
|
|
|
|
|
|
|
chain = firstchain;
|
|
|
|
for (i = 0; i < n_vecs_avail && so_far < howmuch; ++i) {
|
|
|
|
size_t avail = CHAIN_SPACE_LEN(chain);
|
|
|
|
if (avail > howmuch && exact)
|
|
|
|
avail = howmuch;
|
|
|
|
vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
|
|
|
|
vecs[i].iov_len = avail;
|
|
|
|
so_far += avail;
|
|
|
|
chain = chain->next;
|
2009-04-13 03:05:46 +00:00
|
|
|
}
|
|
|
|
|
2010-03-10 23:24:14 -05:00
|
|
|
*chainp = firstchain;
|
|
|
|
return i;
|
2009-04-13 03:05:46 +00:00
|
|
|
}
|
|
|
|
|
2009-05-22 19:11:48 +00:00
|
|
|
/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
|
2009-01-27 06:05:38 +00:00
|
|
|
* as howmuch? */
|
2004-02-22 21:17:23 +00:00
|
|
|
int
|
2007-11-25 21:53:06 +00:00
|
|
|
evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
|
2004-02-22 21:17:23 +00:00
|
|
|
{
|
2010-02-15 21:03:52 -05:00
|
|
|
struct evbuffer_chain *chain;
|
2004-07-13 08:02:45 +00:00
|
|
|
int n = EVBUFFER_MAX_READ;
|
2010-02-18 17:41:15 -05:00
|
|
|
int result;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-01-19 21:53:03 +00:00
|
|
|
#ifdef USE_IOVEC_IMPL
|
2010-03-10 23:39:30 -05:00
|
|
|
int nvecs, i, remaining;
|
2009-01-19 21:53:03 +00:00
|
|
|
#else
|
|
|
|
unsigned char *p;
|
|
|
|
#endif
|
2009-04-05 02:44:17 +00:00
|
|
|
#if defined(FIONREAD) && defined(WIN32)
|
|
|
|
long lng = n;
|
|
|
|
#endif
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
2004-07-13 08:02:45 +00:00
|
|
|
|
2010-02-15 21:03:52 -05:00
|
|
|
chain = buf->last;
|
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
if (buf->freeze_end) {
|
|
|
|
result = -1;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2007-09-20 19:36:03 +00:00
|
|
|
#if defined(FIONREAD)
|
|
|
|
#ifdef WIN32
|
2009-09-11 18:21:37 +00:00
|
|
|
if (ioctlsocket(fd, FIONREAD, &lng) == -1 || (n=lng) <= 0) {
|
2007-09-20 19:36:03 +00:00
|
|
|
#else
|
2009-09-11 18:21:37 +00:00
|
|
|
if (ioctl(fd, FIONREAD, &n) == -1 || n <= 0) {
|
2007-09-20 19:36:03 +00:00
|
|
|
#endif
|
2004-07-13 08:02:45 +00:00
|
|
|
n = EVBUFFER_MAX_READ;
|
2006-02-13 01:51:58 +00:00
|
|
|
} else if (n > EVBUFFER_MAX_READ && n > howmuch) {
|
|
|
|
/*
|
|
|
|
* It's possible that a lot of data is available for
|
|
|
|
* reading. We do not want to exhaust resources
|
|
|
|
* before the reader has a chance to do something
|
|
|
|
* about it. If the reader does not tell us how much
|
2009-10-16 13:19:57 +00:00
|
|
|
* data we should read, we artificially limit it.
|
2006-02-13 01:51:58 +00:00
|
|
|
*/
|
2008-02-28 02:47:43 +00:00
|
|
|
if (chain == NULL || n < EVBUFFER_MAX_READ)
|
2006-02-13 01:51:58 +00:00
|
|
|
n = EVBUFFER_MAX_READ;
|
2009-05-01 00:54:14 +00:00
|
|
|
else if ((size_t)n > chain->buffer_len << 2)
|
2008-02-28 02:47:43 +00:00
|
|
|
n = chain->buffer_len << 2;
|
2006-02-13 01:51:58 +00:00
|
|
|
}
|
2008-02-28 18:36:03 +00:00
|
|
|
#endif
|
2004-07-13 08:02:45 +00:00
|
|
|
if (howmuch < 0 || howmuch > n)
|
|
|
|
howmuch = n;
|
|
|
|
|
2009-01-19 21:53:03 +00:00
|
|
|
#ifdef USE_IOVEC_IMPL
|
2009-01-27 06:05:38 +00:00
|
|
|
/* Since we can use iovecs, we're willing to use the last
|
2010-03-10 23:39:30 -05:00
|
|
|
* NUM_READ_IOVEC chains. */
|
|
|
|
if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
|
2010-02-18 17:41:15 -05:00
|
|
|
result = -1;
|
|
|
|
goto done;
|
2009-01-19 21:53:03 +00:00
|
|
|
} else {
|
2010-03-10 23:39:30 -05:00
|
|
|
IOV_TYPE vecs[NUM_READ_IOVEC];
|
2009-05-21 20:59:00 +00:00
|
|
|
#ifdef _EVBUFFER_IOVEC_IS_NATIVE
|
2010-03-10 23:39:30 -05:00
|
|
|
nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
|
|
|
|
NUM_READ_IOVEC, &chain, 1);
|
2009-05-21 20:59:00 +00:00
|
|
|
#else
|
|
|
|
/* We aren't using the native struct iovec. Therefore,
|
|
|
|
we are on win32. */
|
2010-03-10 23:39:30 -05:00
|
|
|
struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
|
2010-03-10 23:24:14 -05:00
|
|
|
nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2,
|
2009-05-19 21:39:35 +00:00
|
|
|
&chain, 1);
|
|
|
|
|
2010-03-11 14:23:02 -05:00
|
|
|
for (i=0; i < nvecs; ++i)
|
2010-03-10 23:39:30 -05:00
|
|
|
WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
|
2009-05-21 20:59:00 +00:00
|
|
|
#endif
|
2009-01-19 21:53:03 +00:00
|
|
|
|
|
|
|
#ifdef WIN32
|
|
|
|
{
|
|
|
|
DWORD bytesRead;
|
2009-02-03 05:22:57 +00:00
|
|
|
DWORD flags=0;
|
|
|
|
if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
|
|
|
|
/* The read failed. It might be a close,
|
|
|
|
* or it might be an error. */
|
|
|
|
if (WSAGetLastError() == WSAECONNABORTED)
|
|
|
|
n = 0;
|
|
|
|
else
|
|
|
|
n = -1;
|
|
|
|
} else
|
2009-01-19 21:53:03 +00:00
|
|
|
n = bytesRead;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
n = readv(fd, vecs, nvecs);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
#else /*!USE_IOVEC_IMPL*/
|
2004-07-13 08:02:45 +00:00
|
|
|
/* If we don't have FIONREAD, we might waste some space here */
|
2008-02-28 18:36:03 +00:00
|
|
|
/* XXX we _will_ waste some space here if there is any space left
|
|
|
|
* over on buf->last. */
|
2009-04-05 02:44:17 +00:00
|
|
|
if (evbuffer_expand(buf, howmuch) == -1) {
|
|
|
|
result = -1;
|
2010-02-18 17:41:15 -05:00
|
|
|
goto done;
|
|
|
|
}
|
2004-07-13 08:02:45 +00:00
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
chain = buf->last;
|
|
|
|
|
2004-07-13 08:02:45 +00:00
|
|
|
/* We can append new data at this point */
|
2008-02-28 02:47:43 +00:00
|
|
|
p = chain->buffer + chain->misalign + chain->off;
|
2004-02-22 21:17:23 +00:00
|
|
|
|
2004-05-24 00:19:52 +00:00
|
|
|
#ifndef WIN32
|
2004-07-13 08:02:45 +00:00
|
|
|
n = read(fd, p, howmuch);
|
2007-09-20 19:36:03 +00:00
|
|
|
#else
|
|
|
|
n = recv(fd, p, howmuch, 0);
|
|
|
|
#endif
|
2009-01-19 21:53:03 +00:00
|
|
|
#endif /* USE_IOVEC_IMPL */
|
|
|
|
|
2009-04-05 02:44:17 +00:00
|
|
|
if (n == -1) {
|
|
|
|
result = -1;
|
2010-02-18 17:41:15 -05:00
|
|
|
goto done;
|
|
|
|
}
|
2009-04-05 02:44:17 +00:00
|
|
|
if (n == 0) {
|
|
|
|
result = 0;
|
2010-02-18 17:41:15 -05:00
|
|
|
goto done;
|
|
|
|
}
|
2004-02-22 21:17:23 +00:00
|
|
|
|
2009-01-19 21:53:03 +00:00
|
|
|
#ifdef USE_IOVEC_IMPL
|
2010-03-10 23:39:30 -05:00
|
|
|
remaining = n;
|
|
|
|
for (i=0; i < nvecs; ++i) {
|
2009-05-22 19:11:48 +00:00
|
|
|
ev_ssize_t space = CHAIN_SPACE_LEN(chain);
|
2010-03-10 23:39:30 -05:00
|
|
|
if (space < remaining) {
|
2009-04-23 21:41:53 +00:00
|
|
|
chain->off += space;
|
2010-03-10 23:39:30 -05:00
|
|
|
remaining -= space;
|
2009-01-19 21:53:03 +00:00
|
|
|
} else {
|
2010-03-10 23:39:30 -05:00
|
|
|
chain->off += remaining;
|
2010-03-10 23:24:14 -05:00
|
|
|
buf->last_with_data = chain;
|
2010-03-10 23:39:30 -05:00
|
|
|
break;
|
2009-01-19 21:53:03 +00:00
|
|
|
}
|
2010-03-10 23:39:30 -05:00
|
|
|
chain = chain->next;
|
2009-01-19 21:53:03 +00:00
|
|
|
}
|
|
|
|
#else
|
2008-02-28 02:47:43 +00:00
|
|
|
chain->off += n;
|
2010-03-10 23:24:14 -05:00
|
|
|
buf->last_with_data = chain;
|
2009-01-19 21:53:03 +00:00
|
|
|
#endif
|
2008-02-28 02:47:43 +00:00
|
|
|
buf->total_len += n;
|
2010-02-18 17:41:15 -05:00
|
|
|
buf->n_add_for_cb += n;
|
2004-07-13 08:02:45 +00:00
|
|
|
|
|
|
|
/* Tell someone about changes in this buffer */
|
2009-04-03 14:27:03 +00:00
|
|
|
evbuffer_invoke_callbacks(buf);
|
2010-02-18 17:41:15 -05:00
|
|
|
result = n;
|
2009-04-05 02:44:17 +00:00
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
return result;
|
2004-02-22 21:17:23 +00:00
|
|
|
}
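A caller-side sketch for the read path above, e.g. from inside a readable-event callback. Passing -1 for howmuch lets the FIONREAD/iovec logic above choose the amount; the helper name is illustrative:

#include <stdio.h>
#include <event2/buffer.h>
#include <event2/util.h>

/* Sketch: pull whatever the socket currently has into the buffer.
 * Returns bytes read, 0 on EOF, or -1 on error. */
static int
drain_socket_into(struct evbuffer *buf, evutil_socket_t fd)
{
	int n = evbuffer_read(buf, fd, -1);
	if (n > 0)
		printf("buffered %d more bytes (total %u)\n", n,
		    (unsigned)evbuffer_get_length(buf));
	return n;
}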
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
#ifdef USE_IOVEC_IMPL
|
|
|
|
static inline int
|
|
|
|
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
|
2009-11-02 16:17:06 +00:00
|
|
|
ev_ssize_t howmuch)
|
2009-01-27 06:05:38 +00:00
|
|
|
{
|
2010-03-10 23:39:30 -05:00
|
|
|
IOV_TYPE iov[NUM_WRITE_IOVEC];
|
2009-01-27 06:05:38 +00:00
|
|
|
struct evbuffer_chain *chain = buffer->first;
|
|
|
|
int n, i = 0;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-05-01 00:54:14 +00:00
|
|
|
if (howmuch < 0)
|
|
|
|
return -1;
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
ASSERT_EVBUFFER_LOCKED(buffer);
|
2009-01-27 06:05:38 +00:00
|
|
|
/* XXX make this top out at some maximal data length? if the
|
|
|
|
* buffer has (say) 1MB in it, split over 128 chains, there's
|
|
|
|
* no way it all gets written in one go. */
|
2010-03-10 23:39:30 -05:00
|
|
|
while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
|
2009-01-27 06:05:38 +00:00
|
|
|
#ifdef USE_SENDFILE
|
|
|
|
/* we cannot write the file info via writev */
|
|
|
|
if (chain->flags & EVBUFFER_SENDFILE)
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
iov[i].IOV_PTR_FIELD = chain->buffer + chain->misalign;
|
2009-05-01 00:54:14 +00:00
|
|
|
if ((size_t)howmuch >= chain->off) {
|
2009-01-27 06:05:38 +00:00
|
|
|
iov[i++].IOV_LEN_FIELD = chain->off;
|
|
|
|
howmuch -= chain->off;
|
|
|
|
} else {
|
|
|
|
iov[i++].IOV_LEN_FIELD = howmuch;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
chain = chain->next;
|
|
|
|
}
|
|
|
|
#ifdef WIN32
|
|
|
|
{
|
2009-02-03 05:22:57 +00:00
|
|
|
DWORD bytesSent;
|
|
|
|
if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
|
2009-01-27 06:05:38 +00:00
|
|
|
n = -1;
|
|
|
|
else
|
|
|
|
n = bytesSent;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
n = writev(fd, iov, i);
|
|
|
|
#endif
|
|
|
|
return (n);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef USE_SENDFILE
|
|
|
|
static inline int
|
|
|
|
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t fd,
|
2009-05-22 19:11:48 +00:00
|
|
|
ev_ssize_t howmuch)
|
2009-01-27 06:05:38 +00:00
|
|
|
{
|
|
|
|
struct evbuffer_chain *chain = buffer->first;
|
|
|
|
struct evbuffer_chain_fd *info =
|
|
|
|
EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
|
2009-04-24 03:24:22 +00:00
|
|
|
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
|
2009-01-27 06:05:38 +00:00
|
|
|
int res;
|
|
|
|
off_t len = chain->off;
|
2009-08-16 16:40:42 +00:00
|
|
|
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
|
2009-05-22 19:11:48 +00:00
|
|
|
ev_ssize_t res;
|
2009-04-05 02:44:17 +00:00
|
|
|
off_t offset = chain->misalign;
|
|
|
|
#endif
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
ASSERT_EVBUFFER_LOCKED(buffer);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-04-28 19:08:07 +00:00
|
|
|
#if defined(SENDFILE_IS_MACOSX)
|
2009-01-27 06:05:38 +00:00
|
|
|
res = sendfile(info->fd, fd, chain->misalign, &len, NULL, 0);
|
|
|
|
if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
|
|
|
|
return (-1);
|
|
|
|
|
2009-04-24 03:24:22 +00:00
|
|
|
return (len);
|
2009-04-28 19:08:07 +00:00
|
|
|
#elif defined(SENDFILE_IS_FREEBSD)
|
2009-06-25 15:22:36 +00:00
|
|
|
res = sendfile(info->fd, fd, chain->misalign, chain->off, NULL, &len, 0);
|
2009-04-24 03:24:22 +00:00
|
|
|
if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
|
|
|
|
return (-1);
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
return (len);
|
2009-04-28 19:08:07 +00:00
|
|
|
#elif defined(SENDFILE_IS_LINUX)
|
2009-01-27 06:18:45 +00:00
|
|
|
/* TODO(niels): implement splice */
|
|
|
|
res = sendfile(fd, info->fd, &offset, chain->off);
|
2009-01-27 06:05:38 +00:00
|
|
|
if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
|
2009-10-16 13:19:57 +00:00
|
|
|
/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
|
2009-08-16 16:40:42 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
return (res);
|
|
|
|
#elif defined(SENDFILE_IS_SOLARIS)
|
|
|
|
res = sendfile(fd, info->fd, &offset, chain->off);
|
|
|
|
if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
|
|
|
|
/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
|
2009-01-27 21:10:31 +00:00
|
|
|
return (0);
|
2009-01-27 06:05:38 +00:00
|
|
|
}
|
|
|
|
return (res);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2004-02-22 21:17:23 +00:00
|
|
|
int
|
2009-01-12 20:42:19 +00:00
|
|
|
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
|
2009-05-22 19:11:48 +00:00
|
|
|
ev_ssize_t howmuch)
|
2004-02-22 21:17:23 +00:00
|
|
|
{
|
2009-04-08 03:04:39 +00:00
|
|
|
int n = -1;
|
2004-02-22 21:17:23 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
if (buffer->freeze_start) {
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2009-01-12 20:42:19 +00:00
|
|
|
if (howmuch < 0)
|
|
|
|
howmuch = buffer->total_len;
|
|
|
|
|
|
|
|
{
|
2009-01-27 06:05:38 +00:00
|
|
|
#ifdef USE_SENDFILE
|
2009-01-14 14:58:48 +00:00
|
|
|
struct evbuffer_chain *chain = buffer->first;
|
2009-01-27 06:05:38 +00:00
|
|
|
if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
|
|
|
|
n = evbuffer_write_sendfile(buffer, fd, howmuch);
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
#ifdef USE_IOVEC_IMPL
|
|
|
|
n = evbuffer_write_iovec(buffer, fd, howmuch);
|
2009-01-14 14:58:48 +00:00
|
|
|
#elif defined(WIN32)
|
2009-01-27 06:05:38 +00:00
|
|
|
/* XXX(nickm) Don't disable this code until we know if
|
|
|
|
* the WSARecv code above works. */
|
2009-01-14 14:58:48 +00:00
|
|
|
void *p = evbuffer_pullup(buffer, howmuch);
|
|
|
|
n = send(fd, p, howmuch, 0);
|
2007-09-20 19:36:03 +00:00
|
|
|
#else
|
2009-01-14 14:58:48 +00:00
|
|
|
void *p = evbuffer_pullup(buffer, howmuch);
|
|
|
|
n = write(fd, p, howmuch);
|
2007-09-20 19:36:03 +00:00
|
|
|
#endif
|
2009-01-12 20:42:19 +00:00
|
|
|
}
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
if (n > 0)
|
|
|
|
evbuffer_drain(buffer, n);
|
2004-02-22 21:17:23 +00:00
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2004-02-22 21:17:23 +00:00
|
|
|
return (n);
|
|
|
|
}
|
|
|
|
|
2009-01-12 20:42:19 +00:00
|
|
|
int
|
|
|
|
evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
|
|
|
|
{
|
|
|
|
return evbuffer_write_atmost(buffer, fd, -1);
|
|
|
|
}
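And the matching write-side sketch: drain at most a bounded amount per call so one connection cannot monopolize the event loop. As the code above shows, evbuffer_write_atmost() already drains whatever was successfully written. The 16 KB cap is illustrative:

#include <event2/buffer.h>
#include <event2/util.h>

/* Sketch: flush up to 16 KB of pending output to a socket. */
static int
flush_some(struct evbuffer *out, evutil_socket_t fd)
{
	if (evbuffer_get_length(out) == 0)
		return 0;	/* nothing pending */
	return evbuffer_write_atmost(out, fd, 16384);
}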
|
|
|
|
|
2008-05-12 00:40:04 +00:00
|
|
|
unsigned char *
|
|
|
|
evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
|
2004-02-22 21:17:23 +00:00
|
|
|
{
|
2010-02-18 17:41:15 -05:00
|
|
|
unsigned char *search;
|
|
|
|
struct evbuffer_ptr ptr;
|
2004-02-22 21:17:23 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-04-03 01:21:36 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
|
|
|
|
if (ptr.pos < 0) {
|
|
|
|
search = NULL;
|
|
|
|
} else {
|
|
|
|
search = evbuffer_pullup(buffer, ptr.pos + len);
|
2009-04-08 03:04:39 +00:00
|
|
|
if (search)
|
|
|
|
search += ptr.pos;
|
2010-02-18 17:41:15 -05:00
|
|
|
}
|
|
|
|
EVBUFFER_UNLOCK(buffer);
|
|
|
|
return search;
|
2009-04-03 01:21:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
|
|
|
|
size_t position, enum evbuffer_ptr_how how)
|
|
|
|
{
|
2010-02-18 17:41:15 -05:00
|
|
|
size_t left = position;
|
2009-04-03 01:21:36 +00:00
|
|
|
struct evbuffer_chain *chain = NULL;
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-04-03 01:21:36 +00:00
|
|
|
switch (how) {
|
|
|
|
case EVBUFFER_PTR_SET:
|
|
|
|
chain = buf->first;
|
|
|
|
pos->pos = position;
|
|
|
|
position = 0;
|
|
|
|
break;
|
|
|
|
case EVBUFFER_PTR_ADD:
|
|
|
|
/* this avoids iterating over all previous chains if
|
|
|
|
we just want to advance the position */
|
|
|
|
chain = pos->_internal.chain;
|
|
|
|
pos->pos += position;
|
|
|
|
position = pos->_internal.pos_in_chain;
|
|
|
|
break;
|
2004-02-22 21:17:23 +00:00
|
|
|
}
|
|
|
|
|
2009-04-03 01:21:36 +00:00
|
|
|
while (chain && position + left >= chain->off) {
|
|
|
|
left -= chain->off - position;
|
|
|
|
chain = chain->next;
|
|
|
|
position = 0;
|
|
|
|
}
|
|
|
|
if (chain) {
|
|
|
|
pos->_internal.chain = chain;
|
|
|
|
pos->_internal.pos_in_chain = position + left;
|
|
|
|
} else {
|
|
|
|
pos->_internal.chain = NULL;
|
|
|
|
pos->pos = -1;
|
|
|
|
}
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-04-03 01:21:36 +00:00
|
|
|
return chain != NULL ? 0 : -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
Compare the bytes in buf at position pos to the len bytes in mem. Return
|
|
|
|
  less than 0, 0, or greater than 0, as memcmp does.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
|
|
|
|
const char *mem, size_t len)
|
|
|
|
{
|
2010-02-18 17:41:15 -05:00
|
|
|
struct evbuffer_chain *chain;
|
|
|
|
size_t position;
|
|
|
|
int r;
|
|
|
|
|
|
|
|
ASSERT_EVBUFFER_LOCKED(buf);
|
|
|
|
|
|
|
|
if (pos->pos + len > buf->total_len)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
chain = pos->_internal.chain;
|
|
|
|
position = pos->_internal.pos_in_chain;
|
|
|
|
while (len && chain) {
|
|
|
|
size_t n_comparable;
|
|
|
|
if (len + position > chain->off)
|
|
|
|
n_comparable = chain->off - position;
|
|
|
|
else
|
|
|
|
n_comparable = len;
|
|
|
|
r = memcmp(chain->buffer + chain->misalign + position, mem,
|
|
|
|
n_comparable);
|
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
mem += n_comparable;
|
|
|
|
len -= n_comparable;
|
|
|
|
position = 0;
|
|
|
|
chain = chain->next;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2009-04-03 01:21:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
struct evbuffer_ptr
|
|
|
|
evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
|
2009-08-07 17:16:52 +00:00
|
|
|
{
|
|
|
|
return evbuffer_search_range(buffer, what, len, start, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct evbuffer_ptr
|
|
|
|
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
|
2009-04-03 01:21:36 +00:00
|
|
|
{
|
2010-02-18 17:41:15 -05:00
|
|
|
struct evbuffer_ptr pos;
|
|
|
|
struct evbuffer_chain *chain, *last_chain = NULL;
|
2009-04-03 01:21:36 +00:00
|
|
|
const unsigned char *p;
|
2010-02-18 17:41:15 -05:00
|
|
|
char first;
|
2009-04-03 01:21:36 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
if (start) {
|
|
|
|
memcpy(&pos, start, sizeof(pos));
|
|
|
|
chain = pos._internal.chain;
|
|
|
|
} else {
|
|
|
|
pos.pos = 0;
|
|
|
|
chain = pos._internal.chain = buffer->first;
|
|
|
|
pos._internal.pos_in_chain = 0;
|
|
|
|
}
|
2009-04-03 01:21:36 +00:00
|
|
|
|
2009-08-07 17:16:52 +00:00
|
|
|
if (end)
|
|
|
|
last_chain = end->_internal.chain;
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
if (!len)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
first = what[0];
|
|
|
|
|
|
|
|
while (chain) {
|
|
|
|
const unsigned char *start_at =
|
|
|
|
chain->buffer + chain->misalign +
|
|
|
|
pos._internal.pos_in_chain;
|
|
|
|
p = memchr(start_at, first,
|
|
|
|
chain->off - pos._internal.pos_in_chain);
|
|
|
|
if (p) {
|
|
|
|
pos.pos += p - start_at;
|
|
|
|
pos._internal.pos_in_chain += p - start_at;
|
|
|
|
if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
|
2009-08-07 17:16:52 +00:00
|
|
|
if (end && pos.pos + len > end->pos)
|
|
|
|
goto not_found;
|
|
|
|
else
|
|
|
|
goto done;
|
|
|
|
}
|
2010-02-18 17:41:15 -05:00
|
|
|
++pos.pos;
|
|
|
|
++pos._internal.pos_in_chain;
|
|
|
|
if (pos._internal.pos_in_chain == chain->off) {
|
|
|
|
chain = pos._internal.chain = chain->next;
|
|
|
|
pos._internal.pos_in_chain = 0;
|
|
|
|
}
|
|
|
|
} else {
|
2009-08-07 17:16:52 +00:00
|
|
|
if (chain == last_chain)
|
|
|
|
goto not_found;
|
2010-02-18 17:41:15 -05:00
|
|
|
pos.pos += chain->off - pos._internal.pos_in_chain;
|
|
|
|
chain = pos._internal.chain = chain->next;
|
|
|
|
pos._internal.pos_in_chain = 0;
|
|
|
|
}
|
|
|
|
}
|
2009-04-03 01:21:36 +00:00
|
|
|
|
2009-08-07 17:16:52 +00:00
|
|
|
not_found:
|
2010-02-18 17:41:15 -05:00
|
|
|
pos.pos = -1;
|
|
|
|
pos._internal.chain = NULL;
|
2009-04-05 02:44:17 +00:00
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
|
|
|
return pos;
|
2004-02-22 21:17:23 +00:00
|
|
|
}
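A sketch of iterating over every match with the two search functions above, using evbuffer_ptr_set() with EVBUFFER_PTR_ADD to step past each hit. The helper name is illustrative:

#include <string.h>
#include <event2/buffer.h>

/* Sketch: count non-overlapping occurrences of a pattern in the buffer. */
static int
count_occurrences(struct evbuffer *buf, const char *pat)
{
	int count = 0;
	size_t patlen = strlen(pat);
	struct evbuffer_ptr p;
	if (patlen == 0)
		return 0;
	p = evbuffer_search(buf, pat, patlen, NULL);
	while (p.pos != -1) {
		++count;
		/* advance past this match before searching again */
		if (evbuffer_ptr_set(buf, &p, patlen, EVBUFFER_PTR_ADD) < 0)
			break;
		p = evbuffer_search(buf, pat, patlen, &p);
	}
	return count;
}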
|
2004-04-04 02:20:21 +00:00
|
|
|
|
2009-05-19 21:39:35 +00:00
|
|
|
int
|
|
|
|
evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
|
|
|
|
struct evbuffer_ptr *start_at,
|
|
|
|
struct evbuffer_iovec *vec, int n_vec)
|
|
|
|
{
|
|
|
|
struct evbuffer_chain *chain;
|
|
|
|
int idx = 0;
|
2009-11-05 21:22:23 +00:00
|
|
|
ev_ssize_t len_so_far = 0;
|
2009-05-19 21:39:35 +00:00
|
|
|
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-05-19 21:39:35 +00:00
|
|
|
|
|
|
|
if (start_at) {
|
|
|
|
chain = start_at->_internal.chain;
|
|
|
|
len_so_far = chain->off
|
|
|
|
- start_at->_internal.pos_in_chain;
|
|
|
|
idx = 1;
|
|
|
|
if (n_vec > 0) {
|
|
|
|
vec[0].iov_base = chain->buffer + chain->misalign
|
|
|
|
+ start_at->_internal.pos_in_chain;
|
|
|
|
vec[0].iov_len = len_so_far;
|
|
|
|
}
|
|
|
|
chain = chain->next;
|
|
|
|
} else {
|
|
|
|
chain = buffer->first;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (chain) {
|
|
|
|
if (len >= 0 && len_so_far >= len)
|
|
|
|
break;
|
|
|
|
if (idx<n_vec) {
|
|
|
|
vec[idx].iov_base = chain->buffer + chain->misalign;
|
|
|
|
vec[idx].iov_len = chain->off;
|
|
|
|
} else if (len<0)
|
|
|
|
break;
|
|
|
|
++idx;
|
|
|
|
len_so_far += chain->off;
|
|
|
|
chain = chain->next;
|
|
|
|
}
|
|
|
|
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2009-05-19 21:39:35 +00:00
|
|
|
|
|
|
|
return idx;
|
|
|
|
}
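A sketch for the peek API above: it fills caller-provided iovecs with pointers into the chains, without copying or draining anything, and its return value is the number of extents it would need, which can exceed n_vec. The sizes below are illustrative:

#include <stdio.h>
#include <event2/buffer.h>

/* Sketch: inspect up to the first 512 bytes in place, across at most
 * four extents; anything beyond that simply is not visited here. */
static void
peek_head(struct evbuffer *buf)
{
	struct evbuffer_iovec v[4];
	int i, n = evbuffer_peek(buf, 512, NULL, v, 4);
	if (n > 4)
		n = 4;	/* peek reports how many extents it would need */
	for (i = 0; i < n; ++i)
		printf("extent %d: %p, %zu bytes\n", i, v[i].iov_base,
		    v[i].iov_len);
}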
|
|
|
|
|
2009-04-03 01:21:36 +00:00
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
int
|
|
|
|
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
|
|
|
|
{
|
|
|
|
char *buffer;
|
|
|
|
size_t space;
|
2009-04-05 02:44:17 +00:00
|
|
|
int sz, result = -1;
|
2008-02-28 02:47:43 +00:00
|
|
|
va_list aq;
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
if (buf->freeze_end) {
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
/* make sure that at least some space is available */
|
|
|
|
if (evbuffer_expand(buf, 64) == -1)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
|
|
|
for (;;) {
|
2008-03-31 02:04:34 +00:00
|
|
|
struct evbuffer_chain *chain = buf->last;
|
2008-02-28 02:47:43 +00:00
|
|
|
size_t used = chain->misalign + chain->off;
|
|
|
|
buffer = (char *)chain->buffer + chain->misalign + chain->off;
|
2009-10-26 20:00:43 +00:00
|
|
|
EVUTIL_ASSERT(chain->buffer_len >= used);
|
2008-02-28 02:47:43 +00:00
|
|
|
space = chain->buffer_len - used;
|
|
|
|
|
|
|
|
#ifndef va_copy
|
|
|
|
#define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list))
|
|
|
|
#endif
|
|
|
|
va_copy(aq, ap);
|
|
|
|
|
2008-05-12 16:44:24 +00:00
|
|
|
sz = evutil_vsnprintf(buffer, space, fmt, aq);
|
2008-02-28 02:47:43 +00:00
|
|
|
|
|
|
|
va_end(aq);
|
|
|
|
|
|
|
|
if (sz < 0)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2009-05-01 00:54:14 +00:00
|
|
|
if ((size_t)sz < space) {
|
2008-02-28 02:47:43 +00:00
|
|
|
chain->off += sz;
|
|
|
|
buf->total_len += sz;
|
2010-02-18 17:41:15 -05:00
|
|
|
buf->n_add_for_cb += sz;
|
2009-01-23 01:11:13 +00:00
|
|
|
|
2009-04-03 14:27:03 +00:00
|
|
|
evbuffer_invoke_callbacks(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
result = sz;
|
2010-02-18 17:41:15 -05:00
|
|
|
goto done;
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
|
|
|
if (evbuffer_expand(buf, sz + 1) == -1)
|
2010-02-18 17:41:15 -05:00
|
|
|
goto done;
|
|
|
|
}
|
2008-02-28 02:47:43 +00:00
|
|
|
/* NOTREACHED */
|
2009-04-05 02:44:17 +00:00
|
|
|
|
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buf);
|
|
|
|
return result;
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
|
|
|
|
{
|
|
|
|
int res = -1;
|
|
|
|
va_list ap;
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
|
|
|
res = evbuffer_add_vprintf(buf, fmt, ap);
|
|
|
|
va_end(ap);
|
|
|
|
|
|
|
|
return (res);
|
|
|
|
}
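A short sketch using the printf-style helpers above, e.g. to emit an HTTP-like status line; the format and helper name are illustrative only:

#include <event2/buffer.h>

/* Sketch: format a status line straight into the output buffer.
 * Returns the number of bytes added, or -1 on error. */
static int
add_status_line(struct evbuffer *out, int code, const char *reason)
{
	return evbuffer_add_printf(out, "HTTP/1.1 %d %s\r\n", code, reason);
}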
|
|
|
|
|
int
evbuffer_add_reference(struct evbuffer *outbuf,
    const void *data, size_t datlen,
    evbuffer_ref_cleanup_cb cleanupfn, void *extra)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain_reference *info;
	int result = -1;

	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
	if (!chain)
		return (-1);
	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
	chain->buffer = (u_char *)data;
	chain->buffer_len = datlen;
	chain->off = datlen;

	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
	info->cleanupfn = cleanupfn;
	info->extra = extra;

	EVBUFFER_LOCK(outbuf);
	if (outbuf->freeze_end) {
		/* don't call chain_free; we do not want to actually invoke
		 * the cleanup function */
		mm_free(chain);
		goto done;
	}
	evbuffer_chain_insert(outbuf, chain);
	outbuf->n_add_for_cb += datlen;

	evbuffer_invoke_callbacks(outbuf);

	result = 0;
done:
	EVBUFFER_UNLOCK(outbuf);

	return result;
}

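/* Illustrative usage sketch (not part of the original source): adds a
 * heap-allocated block by reference and frees it from the cleanup
 * callback once the evbuffer no longer needs it.  The helper names are
 * hypothetical, and the evbuffer_ref_cleanup_cb argument list is assumed
 * to match the declaration in event2/buffer.h.  Kept under #if 0. */
#if 0
static void
example_ref_cleanup(const void *data, size_t datalen, void *extra)
{
	(void)datalen;
	(void)extra;
	free((void *)data);
}

static int
example_add_owned_block(struct evbuffer *out, void *block, size_t len)
{
	/* On failure the caller still owns 'block' and must free it;
	 * on success the cleanup callback runs when the chain is freed. */
	return evbuffer_add_reference(out, block, len,
	    example_ref_cleanup, NULL);
}
#endif
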
/* TODO(niels): maybe we don't want to own the fd, however, in that
 * case, we should dup it - dup is cheap.  Perhaps, we should use a
 * callback instead?
 */
/* TODO(niels): we may want to automagically convert to mmap, in
 * case evbuffer_remove() or evbuffer_pullup() are being used.
 */
int
evbuffer_add_file(struct evbuffer *outbuf, int fd,
    off_t offset, off_t length)
{
#if defined(USE_SENDFILE) || defined(_EVENT_HAVE_MMAP)
	struct evbuffer_chain *chain;
	struct evbuffer_chain_fd *info;
#endif
	int ok = 1;

#if defined(USE_SENDFILE)
	if (use_sendfile) {
		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
		if (chain == NULL) {
			event_warn("%s: out of memory", __func__);
			return (-1);
		}

		chain->flags |= EVBUFFER_SENDFILE | EVBUFFER_IMMUTABLE;
		chain->buffer = NULL;	/* no reading possible */
		chain->buffer_len = length + offset;
		chain->off = length;
		chain->misalign = offset;

		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
		info->fd = fd;

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			mm_free(chain);
			ok = 0;
		} else {
			outbuf->n_add_for_cb += length;
			evbuffer_chain_insert(outbuf, chain);
		}
	} else
#endif
#if defined(_EVENT_HAVE_MMAP)
	if (use_mmap) {
		void *mapped = mmap(NULL, length + offset, PROT_READ,
#ifdef MAP_NOCACHE
		    MAP_NOCACHE |
#endif
#ifdef MAP_FILE
		    MAP_FILE |
#endif
		    MAP_PRIVATE,
		    fd, 0);
		/* some mmap implementations require offset to be a multiple
		 * of the page size.  most users of this api are likely to
		 * use 0, so mapping everything is not likely to be a problem.
		 * TODO(niels): determine page size and round offset to that
		 * page size to avoid mapping too much memory.
		 */
		if (mapped == MAP_FAILED) {
			event_warn("%s: mmap(%d, %d, %zu) failed",
			    __func__, fd, 0, (size_t)(offset + length));
			return (-1);
		}
		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
		if (chain == NULL) {
			event_warn("%s: out of memory", __func__);
			munmap(mapped, length);
			return (-1);
		}

		chain->flags |= EVBUFFER_MMAP | EVBUFFER_IMMUTABLE;
		chain->buffer = mapped;
		chain->buffer_len = length + offset;
		chain->off = length + offset;

		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
		info->fd = fd;

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			info->fd = -1;
			evbuffer_chain_free(chain);
			ok = 0;
		} else {
			outbuf->n_add_for_cb += length;

			evbuffer_chain_insert(outbuf, chain);

			/* we need to subtract whatever we don't need */
			evbuffer_drain(outbuf, offset);
		}
	} else
#endif
	{
		/* the default implementation */
		struct evbuffer *tmp = evbuffer_new();
		ev_ssize_t read;

		if (tmp == NULL)
			return (-1);

#ifdef WIN32
#define lseek _lseek
#endif
		if (lseek(fd, offset, SEEK_SET) == -1) {
			evbuffer_free(tmp);
			return (-1);
		}

		/* we add everything to a temporary buffer, so that we
		 * can abort without side effects if the read fails.
		 */
		while (length) {
			read = evbuffer_read(tmp, fd, length);
			if (read == -1) {
				evbuffer_free(tmp);
				return (-1);
			}

			length -= read;
		}

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			evbuffer_free(tmp);
			ok = 0;
		} else {
			evbuffer_add_buffer(outbuf, tmp);
			evbuffer_free(tmp);

#ifdef WIN32
#define close _close
#endif
			close(fd);
		}
	}

	if (ok)
		evbuffer_invoke_callbacks(outbuf);
	EVBUFFER_UNLOCK(outbuf);

	return ok ? 0 : -1;
}

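/* Illustrative usage sketch (not part of the original source): hands a
 * whole file to an evbuffer.  On success evbuffer_add_file() takes
 * ownership of the descriptor (see the TODO above), so the caller must
 * not close it; on failure, as the code above shows, the descriptor is
 * still the caller's.  The helper name is hypothetical and open(2) would
 * require <fcntl.h>.  Kept under #if 0. */
#if 0
static int
example_queue_file(struct evbuffer *out, const char *path, off_t filesize)
{
	int fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	if (evbuffer_add_file(out, fd, 0, filesize) < 0) {
		close(fd);	/* still ours on failure */
		return -1;
	}
	return 0;
}
#endif
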
void
evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
{
	EVBUFFER_LOCK(buffer);

	if (!TAILQ_EMPTY(&buffer->callbacks))
		evbuffer_remove_all_callbacks(buffer);

	if (cb) {
		struct evbuffer_cb_entry *ent =
		    evbuffer_add_cb(buffer, NULL, cbarg);
		ent->cb.cb_obsolete = cb;
		ent->flags |= EVBUFFER_CB_OBSOLETE;
	}
	EVBUFFER_UNLOCK(buffer);
}

struct evbuffer_cb_entry *
evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
{
	struct evbuffer_cb_entry *e;
	if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
		return NULL;
	EVBUFFER_LOCK(buffer);
	e->cb.cb_func = cb;
	e->cbarg = cbarg;
	e->flags = EVBUFFER_CB_ENABLED;
	TAILQ_INSERT_HEAD(&buffer->callbacks, e, next);
	EVBUFFER_UNLOCK(buffer);
	return e;
}

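/* Illustrative usage sketch (not part of the original source): registers a
 * callback that reports how many bytes were added to or drained from the
 * buffer.  The callback argument list is assumed to be the evbuffer_cb_func
 * shape declared in event2/buffer.h (taking a struct evbuffer_cb_info);
 * the helper names are hypothetical.  Kept under #if 0. */
#if 0
static void
example_size_cb(struct evbuffer *buffer, const struct evbuffer_cb_info *info,
    void *arg)
{
	(void)buffer;
	(void)arg;
	fprintf(stderr, "evbuffer: +%zu -%zu bytes\n",
	    info->n_added, info->n_deleted);
}

static struct evbuffer_cb_entry *
example_watch_buffer(struct evbuffer *buf)
{
	return evbuffer_add_cb(buf, example_size_cb, NULL);
}
#endif
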
int
evbuffer_remove_cb_entry(struct evbuffer *buffer,
			 struct evbuffer_cb_entry *ent)
{
	EVBUFFER_LOCK(buffer);
	TAILQ_REMOVE(&buffer->callbacks, ent, next);
	EVBUFFER_UNLOCK(buffer);
	mm_free(ent);
	return 0;
}

int
evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
{
	struct evbuffer_cb_entry *cbent;
	int result = -1;
	EVBUFFER_LOCK(buffer);
	TAILQ_FOREACH(cbent, &buffer->callbacks, next) {
		if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
			result = evbuffer_remove_cb_entry(buffer, cbent);
			goto done;
		}
	}
done:
	EVBUFFER_UNLOCK(buffer);
	return result;
}

int
evbuffer_cb_set_flags(struct evbuffer *buffer,
		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
{
	/* the user isn't allowed to mess with these. */
	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
	EVBUFFER_LOCK(buffer);
	cb->flags |= flags;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_cb_clear_flags(struct evbuffer *buffer,
		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
{
	/* the user isn't allowed to mess with these. */
	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
	EVBUFFER_LOCK(buffer);
	cb->flags &= ~flags;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

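/* Illustrative usage sketch (not part of the original source): temporarily
 * disables a registered callback and re-enables it later, assuming the
 * EVBUFFER_CB_ENABLED flag is exported to users via event2/buffer.h.  The
 * helper names are hypothetical.  Kept under #if 0. */
#if 0
static void
example_pause_cb(struct evbuffer *buf, struct evbuffer_cb_entry *ent)
{
	evbuffer_cb_clear_flags(buf, ent, EVBUFFER_CB_ENABLED);
}

static void
example_resume_cb(struct evbuffer *buf, struct evbuffer_cb_entry *ent)
{
	evbuffer_cb_set_flags(buf, ent, EVBUFFER_CB_ENABLED);
}
#endif
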
int
evbuffer_freeze(struct evbuffer *buffer, int start)
{
	EVBUFFER_LOCK(buffer);
	if (start)
		buffer->freeze_start = 1;
	else
		buffer->freeze_end = 1;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_unfreeze(struct evbuffer *buffer, int start)
{
	EVBUFFER_LOCK(buffer);
	if (start)
		buffer->freeze_start = 0;
	else
		buffer->freeze_end = 0;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

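/* Illustrative usage sketch (not part of the original source): freezes the
 * end of a buffer so nothing new can be appended while its current
 * contents are discarded, then unfreezes it.  The helper name is
 * hypothetical.  Kept under #if 0. */
#if 0
static void
example_drain_without_growth(struct evbuffer *buf)
{
	evbuffer_freeze(buf, 0);	/* start==0 freezes the append end */
	evbuffer_drain(buf, evbuffer_get_length(buf));
	evbuffer_unfreeze(buf, 0);
}
#endif
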
#if 0
void
evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
		cb->size_before_suspend = evbuffer_get_length(buffer);
		cb->flags |= EVBUFFER_CB_SUSPENDED;
	}
}

void
evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
		unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
		size_t sz = cb->size_before_suspend;
		cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
			       EVBUFFER_CB_CALL_ON_UNSUSPEND);
		cb->size_before_suspend = 0;
		if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
			cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
		}
	}
}
#endif