2004-02-22 21:17:23 +00:00
|
|
|
/*
|
2009-01-27 22:34:36 +00:00
|
|
|
* Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
|
2012-02-10 17:29:53 -05:00
|
|
|
* Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
|
2004-02-22 21:17:23 +00:00
|
|
|
*
|
2004-04-13 06:22:48 +00:00
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. The name of the author may not be used to endorse or promote products
|
|
|
|
* derived from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
2004-02-22 21:17:23 +00:00
|
|
|
*/
|
|
|
|
|
2010-07-07 16:45:03 -04:00
|
|
|
#include "event2/event-config.h"
|
2011-01-02 08:04:12 -07:00
|
|
|
#include "evconfig-private.h"
|
2004-02-22 21:17:23 +00:00
|
|
|
|
2011-05-25 19:50:56 -04:00
|
|
|
#ifdef _WIN32
|
2007-09-20 19:36:03 +00:00
|
|
|
#include <winsock2.h>
|
2007-11-07 21:01:26 +00:00
|
|
|
#include <windows.h>
|
2009-05-01 00:54:14 +00:00
|
|
|
#include <io.h>
|
2007-09-20 19:36:03 +00:00
|
|
|
#endif
|
|
|
|
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifdef EVENT__HAVE_VASPRINTF
|
2011-12-08 14:05:47 -05:00
|
|
|
/* If we have vasprintf, we need to define _GNU_SOURCE before we include
|
2011-01-07 00:34:22 -07:00
|
|
|
* stdio.h. This comes from evconfig-private.h.
|
2010-12-22 23:24:01 -07:00
|
|
|
*/
|
2005-04-01 04:20:39 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#include <sys/types.h>
|
|
|
|
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifdef EVENT__HAVE_SYS_TIME_H
|
2004-02-22 21:17:23 +00:00
|
|
|
#include <sys/time.h>
|
|
|
|
#endif
|
|
|
|
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifdef EVENT__HAVE_SYS_SOCKET_H
|
2009-01-27 06:05:38 +00:00
|
|
|
#include <sys/socket.h>
|
|
|
|
#endif
|
|
|
|
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifdef EVENT__HAVE_SYS_UIO_H
|
2008-02-28 02:47:43 +00:00
|
|
|
#include <sys/uio.h>
|
|
|
|
#endif
|
|
|
|
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifdef EVENT__HAVE_SYS_IOCTL_H
|
2004-07-13 08:02:45 +00:00
|
|
|
#include <sys/ioctl.h>
|
|
|
|
#endif
|
|
|
|
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifdef EVENT__HAVE_SYS_MMAN_H
|
2009-01-27 06:05:38 +00:00
|
|
|
#include <sys/mman.h>
|
|
|
|
#endif
|
|
|
|
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifdef EVENT__HAVE_SYS_SENDFILE_H
|
2009-01-27 06:05:38 +00:00
|
|
|
#include <sys/sendfile.h>
|
|
|
|
#endif
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifdef EVENT__HAVE_SYS_STAT_H
|
2010-10-21 19:45:49 -04:00
|
|
|
#include <sys/stat.h>
|
|
|
|
#endif
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
|
2004-02-22 21:17:23 +00:00
|
|
|
#include <errno.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifdef EVENT__HAVE_STDARG_H
|
2004-02-22 21:17:23 +00:00
|
|
|
#include <stdarg.h>
|
|
|
|
#endif
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifdef EVENT__HAVE_UNISTD_H
|
2004-02-22 21:17:23 +00:00
|
|
|
#include <unistd.h>
|
2004-04-13 06:22:48 +00:00
|
|
|
#endif
|
2010-10-05 21:34:07 -04:00
|
|
|
#include <limits.h>
|
2004-02-22 21:17:23 +00:00
|
|
|
|
2008-04-16 20:01:51 +00:00
|
|
|
#include "event2/event.h"
|
|
|
|
#include "event2/buffer.h"
|
2009-01-23 01:35:57 +00:00
|
|
|
#include "event2/buffer_compat.h"
|
2009-11-17 20:31:09 +00:00
|
|
|
#include "event2/bufferevent.h"
|
|
|
|
#include "event2/bufferevent_compat.h"
|
|
|
|
#include "event2/bufferevent_struct.h"
|
2009-04-05 02:44:17 +00:00
|
|
|
#include "event2/thread.h"
|
2009-01-13 20:26:37 +00:00
|
|
|
#include "log-internal.h"
|
2007-11-25 17:14:19 +00:00
|
|
|
#include "mm-internal.h"
|
2009-01-27 06:05:38 +00:00
|
|
|
#include "util-internal.h"
|
2009-04-05 02:44:17 +00:00
|
|
|
#include "evthread-internal.h"
|
2008-02-28 02:47:43 +00:00
|
|
|
#include "evbuffer-internal.h"
|
2009-11-17 20:31:09 +00:00
|
|
|
#include "bufferevent-internal.h"
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
/* some systems do not have MAP_FAILED */
|
|
|
|
#ifndef MAP_FAILED
|
|
|
|
#define MAP_FAILED ((void *)-1)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* send file support */
|
2012-02-29 15:07:31 -05:00
|
|
|
#if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__)
|
2009-01-27 06:05:38 +00:00
|
|
|
#define USE_SENDFILE 1
|
|
|
|
#define SENDFILE_IS_LINUX 1
|
2012-02-29 15:07:31 -05:00
|
|
|
#elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__)
|
2009-01-27 06:05:38 +00:00
|
|
|
#define USE_SENDFILE 1
|
|
|
|
#define SENDFILE_IS_FREEBSD 1
|
2012-02-29 15:07:31 -05:00
|
|
|
#elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__)
|
2009-04-24 03:24:22 +00:00
|
|
|
#define USE_SENDFILE 1
|
|
|
|
#define SENDFILE_IS_MACOSX 1
|
2012-02-29 15:07:31 -05:00
|
|
|
#elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
|
2009-08-16 16:40:42 +00:00
|
|
|
#define USE_SENDFILE 1
|
|
|
|
#define SENDFILE_IS_SOLARIS 1
|
2009-01-27 06:05:38 +00:00
|
|
|
#endif
|
|
|
|
|
2009-02-01 01:43:58 +00:00
|
|
|
/* Mask of user-selectable callback flags. */
|
2010-02-18 17:41:15 -05:00
|
|
|
#define EVBUFFER_CB_USER_FLAGS 0xffff
|
2009-02-01 01:43:58 +00:00
|
|
|
/* Mask of all internal-use-only flags. */
|
|
|
|
#define EVBUFFER_CB_INTERNAL_FLAGS 0xffff0000
|
2009-04-03 14:27:03 +00:00
|
|
|
|
|
|
|
/* Flag set if the callback is using the cb_obsolete function pointer */
|
2010-02-18 17:41:15 -05:00
|
|
|
#define EVBUFFER_CB_OBSOLETE 0x00040000
|
2009-02-01 01:43:58 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
/* evbuffer_chain support */
|
2009-01-19 21:53:03 +00:00
|
|
|
#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
|
2009-01-27 06:05:38 +00:00
|
|
|
#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
|
|
|
|
0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
|
2009-01-19 21:53:03 +00:00
|
|
|
|
2009-04-08 03:03:59 +00:00
|
|
|
#define CHAIN_PINNED(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
|
2009-04-13 03:06:59 +00:00
|
|
|
#define CHAIN_PINNED_R(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
|
2009-04-08 03:03:59 +00:00
|
|
|
|
2011-06-14 01:58:30 +03:00
|
|
|
/* evbuffer_ptr support */
|
|
|
|
#define PTR_NOT_FOUND(ptr) do { \
|
|
|
|
(ptr)->pos = -1; \
|
2012-02-29 15:07:32 -05:00
|
|
|
(ptr)->internal_.chain = NULL; \
|
|
|
|
(ptr)->internal_.pos_in_chain = 0; \
|
2011-06-14 01:58:30 +03:00
|
|
|
} while (0)
|
|
|
|
|
2009-04-08 03:03:59 +00:00
|
|
|
static void evbuffer_chain_align(struct evbuffer_chain *chain);
|
2010-10-25 22:36:23 -04:00
|
|
|
static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
|
|
|
|
size_t datalen);
|
2012-04-06 11:05:35 -04:00
|
|
|
static void evbuffer_deferred_callback(struct event_callback *cb, void *arg);
|
2009-07-31 17:35:42 +00:00
|
|
|
static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
|
|
|
|
const struct evbuffer_ptr *pos, const char *mem, size_t len);
|
2010-03-30 16:47:37 -04:00
|
|
|
static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
|
|
|
|
size_t datlen);
|
2011-06-01 14:19:13 -04:00
|
|
|
static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
|
|
|
|
size_t howfar);
|
2011-10-06 18:02:22 -04:00
|
|
|
static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg);
|
2011-06-09 23:33:58 +02:00
|
|
|
static inline void evbuffer_chain_incref(struct evbuffer_chain *chain);
|
2009-04-08 03:03:59 +00:00
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
static struct evbuffer_chain *
|
|
|
|
evbuffer_chain_new(size_t size)
|
|
|
|
{
|
|
|
|
struct evbuffer_chain *chain;
|
|
|
|
size_t to_alloc;
|
|
|
|
|
|
|
|
size += EVBUFFER_CHAIN_SIZE;
|
2009-01-27 21:10:31 +00:00
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
/* get the next largest memory that can hold the buffer */
|
|
|
|
to_alloc = MIN_BUFFER_SIZE;
|
|
|
|
while (to_alloc < size)
|
|
|
|
to_alloc <<= 1;
|
|
|
|
|
|
|
|
/* we get everything in one chunk */
|
2008-04-25 01:18:08 +00:00
|
|
|
if ((chain = mm_malloc(to_alloc)) == NULL)
|
2008-02-28 02:47:43 +00:00
|
|
|
return (NULL);
|
|
|
|
|
|
|
|
memset(chain, 0, EVBUFFER_CHAIN_SIZE);
|
|
|
|
|
|
|
|
chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
/* this way we can manipulate the buffer to different addresses,
|
|
|
|
* which is required for mmap for example.
|
|
|
|
*/
|
|
|
|
chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);
|
|
|
|
|
2011-12-07 21:06:10 +01:00
|
|
|
chain->refcnt = 1;
|
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
return (chain);
|
|
|
|
}
|
2004-02-22 21:17:23 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
/* Release one reference to 'chain' and free it once no references
 * remain.  Pinned chains are not freed immediately: they are marked
 * EVBUFFER_DANGLING (with an extra reference) and released later by
 * evbuffer_chain_unpin_(). */
static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(chain->refcnt > 0);
	if (--chain->refcnt > 0) {
		/* chain is still referenced by other chains */
		return;
	}

	if (CHAIN_PINNED(chain)) {
		/* will get freed once no longer dangling */
		chain->refcnt++;
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}

	/* safe to release chain, it's either a referencing
	 * chain or all references to it have been freed */
	if (chain->flags & EVBUFFER_REFERENCE) {
		/* Caller-owned memory: run the user's cleanup callback
		 * instead of freeing the bytes ourselves. */
		struct evbuffer_chain_reference *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_reference,
			    chain);
		if (info->cleanupfn)
			(*info->cleanupfn)(chain->buffer,
			    chain->buffer_len,
			    info->extra);
	}
	if (chain->flags & EVBUFFER_FILESEGMENT) {
		struct evbuffer_chain_file_segment *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_file_segment,
			    chain);
		if (info->segment) {
#ifdef _WIN32
			/* On Windows the buffer may be a file-mapping
			 * view that must be unmapped before the segment
			 * is released. */
			if (info->segment->is_mapping)
				UnmapViewOfFile(chain->buffer);
#endif
			evbuffer_file_segment_free(info->segment);
		}
	}
	if (chain->flags & EVBUFFER_MULTICAST) {
		struct evbuffer_multicast_parent *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_multicast_parent,
			    chain);
		/* referencing chain is being freed, decrease
		 * refcounts of source chain and associated
		 * evbuffer (which get freed once both reach
		 * zero) */
		EVUTIL_ASSERT(info->source != NULL);
		EVUTIL_ASSERT(info->parent != NULL);
		EVBUFFER_LOCK(info->source);
		evbuffer_chain_free(info->parent);
		evbuffer_decref_and_unlock_(info->source);
	}

	mm_free(chain);
}
|
|
|
|
|
2010-03-30 16:47:37 -04:00
|
|
|
static void
|
|
|
|
evbuffer_free_all_chains(struct evbuffer_chain *chain)
|
|
|
|
{
|
|
|
|
struct evbuffer_chain *next;
|
|
|
|
for (; chain; chain = next) {
|
|
|
|
next = chain->next;
|
|
|
|
evbuffer_chain_free(chain);
|
|
|
|
}
|
|
|
|
}
|
2010-03-26 23:18:40 -04:00
|
|
|
|
2010-08-23 11:48:46 -04:00
|
|
|
#ifndef NDEBUG
|
2010-03-30 16:47:37 -04:00
|
|
|
static int
|
|
|
|
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
|
|
|
|
{
|
|
|
|
for (; chain; chain = chain->next) {
|
|
|
|
if (chain->off)
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return 1;
|
|
|
|
}
|
2010-12-06 14:17:44 -05:00
|
|
|
#else
|
|
|
|
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
|
|
|
|
"unused variable" warnings. */
|
|
|
|
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
|
|
|
|
return 1;
|
|
|
|
}
|
2010-08-23 11:48:46 -04:00
|
|
|
#endif
|
2010-03-30 16:47:37 -04:00
|
|
|
|
2011-11-02 16:09:15 -04:00
|
|
|
/* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
|
|
|
|
* to replacing them all with a new chain. Return a pointer to the place
|
|
|
|
* where the new chain will go.
|
|
|
|
*
|
|
|
|
* Internal; requires lock. The caller must fix up buf->last and buf->first
|
|
|
|
* as needed; they might have been freed.
|
|
|
|
*/
|
|
|
|
static struct evbuffer_chain **
|
|
|
|
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
|
|
|
|
{
|
|
|
|
struct evbuffer_chain **ch = buf->last_with_datap;
|
|
|
|
/* Find the first victim chain. It might be *last_with_datap */
|
|
|
|
while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
|
|
|
|
ch = &(*ch)->next;
|
|
|
|
if (*ch) {
|
|
|
|
EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
|
|
|
|
evbuffer_free_all_chains(*ch);
|
|
|
|
*ch = NULL;
|
|
|
|
}
|
|
|
|
return ch;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
|
|
|
|
* chains as necessary. Requires lock. Does not schedule callbacks.
|
|
|
|
*/
|
2010-03-30 16:47:37 -04:00
|
|
|
static void
|
|
|
|
evbuffer_chain_insert(struct evbuffer *buf,
|
|
|
|
struct evbuffer_chain *chain)
|
2009-01-27 06:05:38 +00:00
|
|
|
{
|
2010-02-18 17:41:15 -05:00
|
|
|
ASSERT_EVBUFFER_LOCKED(buf);
|
2010-03-30 16:47:37 -04:00
|
|
|
if (*buf->last_with_datap == NULL) {
|
|
|
|
/* There are no chains data on the buffer at all. */
|
|
|
|
EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
|
|
|
|
EVUTIL_ASSERT(buf->first == NULL);
|
2009-01-27 06:05:38 +00:00
|
|
|
buf->first = buf->last = chain;
|
|
|
|
} else {
|
2011-11-02 22:50:47 -04:00
|
|
|
struct evbuffer_chain **chp;
|
|
|
|
chp = evbuffer_free_trailing_empty_chains(buf);
|
|
|
|
*chp = chain;
|
|
|
|
if (chain->off)
|
|
|
|
buf->last_with_datap = chp;
|
2010-03-30 16:47:37 -04:00
|
|
|
buf->last = chain;
|
2009-01-27 06:05:38 +00:00
|
|
|
}
|
|
|
|
buf->total_len += chain->off;
|
|
|
|
}
|
|
|
|
|
2010-03-30 16:47:37 -04:00
|
|
|
static inline struct evbuffer_chain *
|
|
|
|
evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
|
|
|
|
{
|
|
|
|
struct evbuffer_chain *chain;
|
|
|
|
if ((chain = evbuffer_chain_new(datlen)) == NULL)
|
|
|
|
return NULL;
|
|
|
|
evbuffer_chain_insert(buf, chain);
|
|
|
|
return chain;
|
|
|
|
}
|
|
|
|
|
2009-04-13 03:06:47 +00:00
|
|
|
void
|
2012-02-29 15:07:32 -05:00
|
|
|
evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
|
2009-04-08 03:03:59 +00:00
|
|
|
{
|
2009-10-26 20:00:43 +00:00
|
|
|
EVUTIL_ASSERT((chain->flags & flag) == 0);
|
2009-04-08 03:03:59 +00:00
|
|
|
chain->flags |= flag;
|
|
|
|
}
|
|
|
|
|
2009-04-13 03:06:47 +00:00
|
|
|
void
|
2012-02-29 15:07:32 -05:00
|
|
|
evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
|
2009-04-08 03:03:59 +00:00
|
|
|
{
|
2009-10-26 20:00:43 +00:00
|
|
|
EVUTIL_ASSERT((chain->flags & flag) != 0);
|
2009-04-08 03:03:59 +00:00
|
|
|
chain->flags &= ~flag;
|
|
|
|
if (chain->flags & EVBUFFER_DANGLING)
|
|
|
|
evbuffer_chain_free(chain);
|
|
|
|
}
|
|
|
|
|
2011-06-09 23:33:58 +02:00
|
|
|
static inline void
|
|
|
|
evbuffer_chain_incref(struct evbuffer_chain *chain)
|
|
|
|
{
|
|
|
|
++chain->refcnt;
|
|
|
|
}
|
|
|
|
|
2004-02-22 21:17:23 +00:00
|
|
|
struct evbuffer *
|
|
|
|
evbuffer_new(void)
|
|
|
|
{
|
|
|
|
struct evbuffer *buffer;
|
2009-01-23 01:11:13 +00:00
|
|
|
|
2008-04-25 01:18:08 +00:00
|
|
|
buffer = mm_calloc(1, sizeof(struct evbuffer));
|
2009-11-19 23:08:50 +00:00
|
|
|
if (buffer == NULL)
|
|
|
|
return (NULL);
|
2004-02-22 21:17:23 +00:00
|
|
|
|
2010-04-09 20:04:24 -04:00
|
|
|
LIST_INIT(&buffer->callbacks);
|
2009-04-13 03:06:27 +00:00
|
|
|
buffer->refcnt = 1;
|
2010-03-26 23:18:40 -04:00
|
|
|
buffer->last_with_datap = &buffer->first;
|
2009-01-23 01:11:13 +00:00
|
|
|
|
2004-02-22 21:17:23 +00:00
|
|
|
return (buffer);
|
|
|
|
}
|
|
|
|
|
Prefer mmap to sendfile unless a DRAINS_TO_FD flag is set. Allows add_file to work with SSL.
The sendfile() implementation for evbuffer_add_file is potentially more
efficient, but it has a problem: you can only use it to send bytes over
a socket using sendfile(). If you are writing bytes via SSL_send() or
via a filter, or if you need to be able to inspect your buffer, it
doesn't work.
As an easy fix, this patch disables the sendfile-based implementation of
evbuffer_add_file on an evbuffer unless the user sets a new
EVBUFFER_FLAG_DRAINS_TO_FD flag on that evbuffer, indicating that the
evbuffer will not be inspected, but only written out via
evbuffer_write(), evbuffer_write_atmost(), or drained with stuff like
evbuffer_drain() or evbuffer_add_buffer(). This flag is off by
default, except for evbuffers used for output on bufferevent_socket.
In the future, it could be interesting to make a best-effort file
segment implementation that tries to send via sendfile, but mmaps on
demand. That's too much complexity for a stable release series, though.
2011-09-29 09:30:04 -04:00
|
|
|
int
|
|
|
|
evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
|
|
|
|
{
|
|
|
|
EVBUFFER_LOCK(buf);
|
|
|
|
buf->flags |= (ev_uint32_t)flags;
|
|
|
|
EVBUFFER_UNLOCK(buf);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
|
|
|
|
{
|
|
|
|
EVBUFFER_LOCK(buf);
|
|
|
|
buf->flags &= ~(ev_uint32_t)flags;
|
|
|
|
EVBUFFER_UNLOCK(buf);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-04-13 03:06:27 +00:00
|
|
|
void
|
2012-02-29 15:07:32 -05:00
|
|
|
evbuffer_incref_(struct evbuffer *buf)
|
2009-04-13 03:06:27 +00:00
|
|
|
{
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
2009-04-13 03:06:27 +00:00
|
|
|
++buf->refcnt;
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_UNLOCK(buf);
|
2009-04-13 03:06:27 +00:00
|
|
|
}
|
|
|
|
|
2009-11-17 20:31:09 +00:00
|
|
|
void
|
2012-02-29 15:07:32 -05:00
|
|
|
evbuffer_incref_and_lock_(struct evbuffer *buf)
|
2009-11-17 20:31:09 +00:00
|
|
|
{
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
2009-11-17 20:31:09 +00:00
|
|
|
++buf->refcnt;
|
|
|
|
}
|
|
|
|
|
2009-04-10 20:43:08 +00:00
|
|
|
/* Make 'buffer' run its user callbacks as deferred callbacks on the
 * event_base 'base' rather than invoking them immediately.  Returns 0. */
int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = base;
	buffer->deferred_cbs = 1;
	/* Run the deferred callback at a middle priority so it neither
	 * starves nor is starved by ordinary events. */
	event_deferred_cb_init_(&buffer->deferred,
	    event_base_get_npriorities(base) / 2,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}
|
|
|
|
|
2009-04-05 02:44:17 +00:00
|
|
|
int
|
|
|
|
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
|
|
|
|
{
|
2012-02-29 15:07:31 -05:00
|
|
|
#ifdef EVENT__DISABLE_THREAD_SUPPORT
|
2010-02-18 17:41:15 -05:00
|
|
|
return -1;
|
2009-04-05 02:44:17 +00:00
|
|
|
#else
|
2010-02-18 17:41:15 -05:00
|
|
|
if (buf->lock)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!lock) {
|
|
|
|
EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
|
|
|
|
if (!lock)
|
|
|
|
return -1;
|
|
|
|
buf->lock = lock;
|
|
|
|
buf->own_lock = 1;
|
|
|
|
} else {
|
|
|
|
buf->lock = lock;
|
|
|
|
buf->own_lock = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2009-04-05 02:44:17 +00:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2009-11-17 20:31:09 +00:00
|
|
|
void
|
2012-02-29 15:07:33 -05:00
|
|
|
evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev)
|
2009-11-17 20:31:09 +00:00
|
|
|
{
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
2009-11-17 20:31:09 +00:00
|
|
|
buf->parent = bev;
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_UNLOCK(buf);
|
2009-11-17 20:31:09 +00:00
|
|
|
}
|
|
|
|
|
2009-04-10 20:43:08 +00:00
|
|
|
/* Run the callbacks registered on 'buffer' that match the current mode.
 * When 'running_deferred' is set we are inside the deferred-callback
 * handler and every enabled callback runs.  When deferral is enabled but
 * we are not inside the handler, only callbacks flagged NODEFER run now,
 * and the n_add/n_del counters are preserved for the deferred pass.
 * Requires the buffer lock. */
static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	/* A callback runs iff (flags & mask) == masked_val. */
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	/* Nothing changed since the last run; nothing to report. */
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	new_size = buffer->total_len;
	/* Reconstruct the length the buffer had before the adds/deletes. */
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = LIST_FIRST(&buffer->callbacks);
	    cbent != LIST_END(&buffer->callbacks);
	    cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = LIST_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		/* Old-style callbacks get (orig, new) sizes; new-style
		 * callbacks get the full cb_info record. */
		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}
|
|
|
|
|
2011-08-18 12:35:27 -04:00
|
|
|
/* Trigger the callbacks on 'buffer' after its contents changed.  If
 * deferral is enabled, schedule the deferred pass (taking references on
 * the buffer and its parent bufferevent that the deferred handler will
 * release) and release the lock; the immediate pass below then covers
 * only NODEFER callbacks.  NOTE(review): in the deferred case the lock
 * is released before evbuffer_run_callbacks() -- confirm against the
 * locking contract of callers. */
void
evbuffer_invoke_callbacks_(struct evbuffer *buffer)
{
	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		/* schedule_ returns nonzero only when it newly queued the
		 * deferred callback; take the references it will consume. */
		if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) {
			evbuffer_incref_and_lock_(buffer);
			if (buffer->parent)
				bufferevent_incref_(buffer->parent);
		}
		EVBUFFER_UNLOCK(buffer);
	}

	evbuffer_run_callbacks(buffer, 0);
}
|
|
|
|
|
|
|
|
/* Deferred-callback trampoline: invoked from the event loop to run the
 * buffer's user callbacks.  Releases the references taken on the buffer
 * (and its parent bufferevent) when the deferred run was scheduled. */
static void
evbuffer_deferred_callback(struct event_callback *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	/* Grab the parent before decref can free the buffer. */
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	evbuffer_decref_and_unlock_(buffer);
	if (parent)
		bufferevent_decref_(parent);
}
|
|
|
|
|
2009-01-23 01:11:13 +00:00
|
|
|
static void
|
|
|
|
evbuffer_remove_all_callbacks(struct evbuffer *buffer)
|
|
|
|
{
|
2009-01-23 01:35:57 +00:00
|
|
|
struct evbuffer_cb_entry *cbent;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-04-09 20:04:24 -04:00
|
|
|
while ((cbent = LIST_FIRST(&buffer->callbacks))) {
|
|
|
|
LIST_REMOVE(cbent, next);
|
|
|
|
mm_free(cbent);
|
2009-01-23 01:11:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2004-02-22 21:17:23 +00:00
|
|
|
/* Drop one reference to 'buffer' (lock must be held on entry) and
 * unlock.  When the last reference goes away: free all chains, remove
 * all callbacks, cancel any pending deferred callback, and free the
 * buffer -- releasing the lock (and freeing it, if owned) last. */
void
evbuffer_decref_and_unlock_(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred);

	/* Unlock before freeing the lock itself; nobody else can hold a
	 * reference at this point. */
	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}
|
|
|
|
|
2009-04-13 03:06:47 +00:00
|
|
|
/* Public destructor: release the caller's reference to 'buffer'.
 * The buffer is actually freed only when the last reference drops. */
void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	evbuffer_decref_and_unlock_(buffer);
}
|
|
|
|
|
2009-04-05 02:44:17 +00:00
|
|
|
/* Acquire 'buf''s lock (no-op unless locking was enabled). */
void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}
|
|
|
|
|
|
|
|
/* Release 'buf''s lock (no-op unless locking was enabled). */
void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}
|
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
size_t
|
2009-01-14 22:17:31 +00:00
|
|
|
evbuffer_get_length(const struct evbuffer *buffer)
|
2008-02-28 02:47:43 +00:00
|
|
|
{
|
2010-02-18 17:41:15 -05:00
|
|
|
size_t result;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
|
|
|
result = (buffer->total_len);
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
return result;
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
2004-05-24 00:19:52 +00:00
|
|
|
|
2008-05-03 02:37:18 +00:00
|
|
|
size_t
|
2009-01-14 22:17:31 +00:00
|
|
|
evbuffer_get_contiguous_space(const struct evbuffer *buf)
|
2008-05-03 02:37:18 +00:00
|
|
|
{
|
2009-04-05 02:44:17 +00:00
|
|
|
struct evbuffer_chain *chain;
|
2010-02-18 17:41:15 -05:00
|
|
|
size_t result;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
|
|
|
chain = buf->first;
|
2009-04-05 02:44:17 +00:00
|
|
|
result = (chain != NULL ? chain->off : 0);
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buf);
|
2008-05-03 02:37:18 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
return result;
|
2008-05-03 02:37:18 +00:00
|
|
|
}
|
|
|
|
|
2011-10-25 09:13:15 -04:00
|
|
|
/* Append the memory regions described by the 'n_vec' entries of 'vec'
 * to 'buf'.  Returns the number of bytes successfully added; on failure
 * part-way through, some data may already have been appended and the
 * partial count is returned. */
size_t
evbuffer_add_iovec(struct evbuffer * buf, struct evbuffer_iovec * vec, int n_vec) {
	int n;
	size_t res;
	size_t to_alloc;

	EVBUFFER_LOCK(buf);

	res = to_alloc = 0;

	/* Sum the incoming lengths so we can grow the buffer once up front. */
	for (n = 0; n < n_vec; n++) {
		to_alloc += vec[n].iov_len;
	}

	if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
		goto done;
	}

	for (n = 0; n < n_vec; n++) {
		/* XXX each 'add' call here does a bunch of setup that's
		 * obviated by evbuffer_expand_fast_, and some cleanup that we
		 * would like to do only once.  Instead we should just extract
		 * the part of the code that's needed. */

		if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) {
			goto done;
		}

		res += vec[n].iov_len;
	}

done:
	EVBUFFER_UNLOCK(buf);
	return res;
}
|
|
|
|
|
2009-05-19 21:39:35 +00:00
|
|
|
/* Reserve at least 'size' bytes of writable space in 'buf' and describe
 * it in up to 'n_vecs' iovec entries.  No data is committed; pair with
 * evbuffer_commit_space().  Returns the number of vectors filled, or -1
 * on failure or when the end of the buffer is frozen. */
int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		/* Single vector: the reserved space must be contiguous,
		 * so expand into one chain. */
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		/* Multiple vectors: the space may span several chains. */
		if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
			goto done;
		n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
		    &chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;
}
|
|
|
|
|
Revise evbuffer to add last_with_data
This is the first patch in a series to replace previous_to_last with
last_with_data. Currently, we can only use two partially empty chains
at the end of an evbuffer, so if we have one with 511 bytes free, and
another with 512 bytes free, and we try to do a 1024 byte read, we
can't just stick another chain on the end: we need to reallocate the
last one. That's stupid and inefficient.
Instead, this patch adds a last_with_data pointer to eventually
replace previous_to_last. Instead of pointing to the penultimated
chain (if any) as previous_to_last does, last_with_data points to the
last chain that has any data in it, if any. If all chains are empty,
last_with_data points to the first chain. If there are no chains,
last_with_data is NULL.
The next step is to start using last_with_data everywhere that we
currently use previous_to_last. When that's done, we can remove
previous_to_last and the code that maintains it.
2010-03-10 22:16:14 -05:00
|
|
|
static int
|
|
|
|
advance_last_with_data(struct evbuffer *buf)
|
|
|
|
{
|
|
|
|
int n = 0;
|
|
|
|
ASSERT_EVBUFFER_LOCKED(buf);
|
|
|
|
|
2010-03-26 23:18:40 -04:00
|
|
|
if (!*buf->last_with_datap)
|
Revise evbuffer to add last_with_data
This is the first patch in a series to replace previous_to_last with
last_with_data. Currently, we can only use two partially empty chains
at the end of an evbuffer, so if we have one with 511 bytes free, and
another with 512 bytes free, and we try to do a 1024 byte read, we
can't just stick another chain on the end: we need to reallocate the
last one. That's stupid and inefficient.
Instead, this patch adds a last_with_data pointer to eventually
replace previous_to_last. Instead of pointing to the penultimated
chain (if any) as previous_to_last does, last_with_data points to the
last chain that has any data in it, if any. If all chains are empty,
last_with_data points to the first chain. If there are no chains,
last_with_data is NULL.
The next step is to start using last_with_data everywhere that we
currently use previous_to_last. When that's done, we can remove
previous_to_last and the code that maintains it.
2010-03-10 22:16:14 -05:00
|
|
|
return 0;
|
|
|
|
|
2010-03-26 23:18:40 -04:00
|
|
|
while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
|
|
|
|
buf->last_with_datap = &(*buf->last_with_datap)->next;
|
Revise evbuffer to add last_with_data
This is the first patch in a series to replace previous_to_last with
last_with_data. Currently, we can only use two partially empty chains
at the end of an evbuffer, so if we have one with 511 bytes free, and
another with 512 bytes free, and we try to do a 1024 byte read, we
can't just stick another chain on the end: we need to reallocate the
last one. That's stupid and inefficient.
Instead, this patch adds a last_with_data pointer to eventually
replace previous_to_last. Instead of pointing to the penultimated
chain (if any) as previous_to_last does, last_with_data points to the
last chain that has any data in it, if any. If all chains are empty,
last_with_data points to the first chain. If there are no chains,
last_with_data is NULL.
The next step is to start using last_with_data everywhere that we
currently use previous_to_last. When that's done, we can remove
previous_to_last and the code that maintains it.
2010-03-10 22:16:14 -05:00
|
|
|
++n;
|
|
|
|
}
|
|
|
|
return n;
|
|
|
|
}
|
|
|
|
|
2008-05-03 03:05:28 +00:00
|
|
|
/* Commit bytes previously reserved with evbuffer_reserve_space(): the
 * vec[] entries must describe (prefixes of) the regions that reserve
 * handed out, in order.  Returns 0 on success, -1 on failure; on failure
 * nothing is committed. */
int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	/* Committing extends the readable end of the buffer. */
	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it. */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		/* Each vec entry must start exactly at its chain's free
		 * space and must fit within it. */
		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			/* This chain now holds the buffer's last data. */
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	/* Notify registered callbacks about the newly added bytes. */
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
|
|
|
|
|
2010-08-16 01:23:57 -07:00
|
|
|
static inline int
|
|
|
|
HAS_PINNED_R(struct evbuffer *buf)
|
|
|
|
{
|
|
|
|
return (buf->last && CHAIN_PINNED_R(buf->last));
|
|
|
|
}
|
|
|
|
|
2010-03-30 12:48:56 -04:00
|
|
|
static inline void
|
|
|
|
ZERO_CHAIN(struct evbuffer *dst)
|
|
|
|
{
|
|
|
|
ASSERT_EVBUFFER_LOCKED(dst);
|
|
|
|
dst->first = NULL;
|
|
|
|
dst->last = NULL;
|
|
|
|
dst->last_with_datap = &(dst)->first;
|
|
|
|
dst->total_len = 0;
|
|
|
|
}
|
|
|
|
|
2010-08-16 01:23:57 -07:00
|
|
|
/* Prepares the contents of src to be moved to another buffer by removing
 * read-pinned chains. The first pinned chain is saved in first, and the
 * last in last. If src has no read-pinned chains, first and last are set
 * to NULL. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
    struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	/* The first pinned chain is either the last chain with data, or
	 * the one just after it. */
	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over. */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
			chain->off);
		tmp->off = chain->off;
		/* The copy replaces the pinned chain at the tail of src;
		 * the pinned chain itself is emptied (data consumed via
		 * misalign) and detached by the caller's bookkeeping. */
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		/* No data in the pinned chain: simply unlink the pinned
		 * run from src's chain list. */
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
|
|
|
|
struct evbuffer_chain *last)
|
|
|
|
{
|
|
|
|
ASSERT_EVBUFFER_LOCKED(src);
|
|
|
|
|
|
|
|
if (!pinned) {
|
|
|
|
ZERO_CHAIN(src);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
src->first = pinned;
|
|
|
|
src->last = last;
|
|
|
|
src->last_with_datap = &src->first;
|
|
|
|
src->total_len = 0;
|
|
|
|
}
|
|
|
|
|
2010-03-30 12:48:56 -04:00
|
|
|
static inline void
|
|
|
|
COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
|
|
|
|
{
|
|
|
|
ASSERT_EVBUFFER_LOCKED(dst);
|
|
|
|
ASSERT_EVBUFFER_LOCKED(src);
|
|
|
|
dst->first = src->first;
|
|
|
|
if (src->last_with_datap == &src->first)
|
|
|
|
dst->last_with_datap = &dst->first;
|
|
|
|
else
|
|
|
|
dst->last_with_datap = src->last_with_datap;
|
|
|
|
dst->last = src->last;
|
|
|
|
dst->total_len = src->total_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
|
|
|
|
{
|
|
|
|
ASSERT_EVBUFFER_LOCKED(dst);
|
|
|
|
ASSERT_EVBUFFER_LOCKED(src);
|
|
|
|
dst->last->next = src->first;
|
|
|
|
if (src->last_with_datap == &src->first)
|
|
|
|
dst->last_with_datap = &dst->last->next;
|
|
|
|
else
|
|
|
|
dst->last_with_datap = src->last_with_datap;
|
|
|
|
dst->last = src->last;
|
|
|
|
dst->total_len += src->total_len;
|
|
|
|
}
|
|
|
|
|
2011-06-09 23:33:58 +02:00
|
|
|
/* Append read-only references to every nonempty chain of 'src' onto
 * 'dst'.  Each appended chain shares src's underlying storage; both the
 * source chain and the source evbuffer are reference-counted so they
 * outlive the references.  On allocation failure, stops early after
 * warning (chains appended so far remain). */
static inline void
APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)
{
	struct evbuffer_chain *tmp;
	struct evbuffer_chain *chain = src->first;
	struct evbuffer_multicast_parent *extra;

	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);

	for (; chain; chain = chain->next) {
		if (!chain->off || chain->flags & EVBUFFER_DANGLING) {
			/* skip empty chains */
			continue;
		}

		/* The new chain's "extra" area records which evbuffer and
		 * chain it mirrors. */
		tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent));
		if (!tmp) {
			event_warn("%s: out of memory", __func__);
			return;
		}
		extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp);
		/* reference evbuffer containing source chain so it
		 * doesn't get released while the chain is still
		 * being referenced to */
		evbuffer_incref_(src);
		extra->source = src;
		/* reference source chain which now becomes immutable */
		evbuffer_chain_incref(chain);
		extra->parent = chain;
		chain->flags |= EVBUFFER_IMMUTABLE;
		/* Mirror the source chain's geometry; share its storage. */
		tmp->buffer_len = chain->buffer_len;
		tmp->misalign = chain->misalign;
		tmp->off = chain->off;
		tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE;
		tmp->buffer = chain->buffer;
		evbuffer_chain_insert(dst, tmp);
	}
}
|
|
|
|
|
2010-03-30 12:48:56 -04:00
|
|
|
/* Link all of 'src''s chains in front of 'dst''s chains.  Both buffers
 * must be locked; src's own fields are left for the caller to reset
 * (the visible caller uses RESTORE_PINNED for that). */
static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	/* Splice: src's chains first, then dst's old chains. */
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		/* dst had no data at all: adopt src's last-with-data slot,
		 * translating "src's first chain" into "dst's first chain". */
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		/* dst's last data was in its (old) first chain, which is now
		 * reached through src's former last chain's next pointer. */
		dst->last_with_datap = &src->last->next;
	}
}
|
2009-01-27 21:10:31 +00:00
|
|
|
|
2004-03-23 03:43:53 +00:00
|
|
|
int
|
2004-02-22 21:17:23 +00:00
|
|
|
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
|
|
|
|
{
|
2010-08-16 01:23:57 -07:00
|
|
|
struct evbuffer_chain *pinned, *last;
|
2009-04-05 02:44:17 +00:00
|
|
|
size_t in_total_len, out_total_len;
|
2009-04-08 03:04:39 +00:00
|
|
|
int result = 0;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK2(inbuf, outbuf);
|
|
|
|
in_total_len = inbuf->total_len;
|
2009-04-05 02:44:17 +00:00
|
|
|
out_total_len = outbuf->total_len;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2009-02-10 19:39:22 +00:00
|
|
|
if (in_total_len == 0 || outbuf == inbuf)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2008-06-01 16:21:24 +00:00
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
if (outbuf->freeze_end || inbuf->freeze_start) {
|
|
|
|
result = -1;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2010-08-16 01:23:57 -07:00
|
|
|
if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
|
|
|
|
result = -1;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
if (out_total_len == 0) {
|
2010-03-31 12:03:43 -04:00
|
|
|
/* There might be an empty chain at the start of outbuf; free
|
|
|
|
* it. */
|
|
|
|
evbuffer_free_all_chains(outbuf->first);
|
2008-03-31 02:04:34 +00:00
|
|
|
COPY_CHAIN(outbuf, inbuf);
|
2008-02-28 02:47:43 +00:00
|
|
|
} else {
|
2008-03-31 02:04:34 +00:00
|
|
|
APPEND_CHAIN(outbuf, inbuf);
|
2004-05-24 00:19:52 +00:00
|
|
|
}
|
|
|
|
|
2010-08-16 01:23:57 -07:00
|
|
|
RESTORE_PINNED(inbuf, pinned, last);
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
inbuf->n_del_for_cb += in_total_len;
|
|
|
|
outbuf->n_add_for_cb += in_total_len;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2012-02-29 15:07:33 -05:00
|
|
|
evbuffer_invoke_callbacks_(inbuf);
|
|
|
|
evbuffer_invoke_callbacks_(outbuf);
|
2009-01-23 01:11:13 +00:00
|
|
|
|
2009-04-05 02:44:17 +00:00
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK2(inbuf, outbuf);
|
2009-04-08 03:04:39 +00:00
|
|
|
return result;
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
|
|
|
|
2011-06-09 23:33:58 +02:00
|
|
|
int
|
|
|
|
evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf)
|
|
|
|
{
|
|
|
|
size_t in_total_len, out_total_len;
|
|
|
|
struct evbuffer_chain *chain;
|
|
|
|
int result = 0;
|
|
|
|
|
|
|
|
EVBUFFER_LOCK2(inbuf, outbuf);
|
|
|
|
in_total_len = inbuf->total_len;
|
|
|
|
out_total_len = outbuf->total_len;
|
|
|
|
chain = inbuf->first;
|
|
|
|
|
|
|
|
if (in_total_len == 0)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
if (outbuf->freeze_end || outbuf == inbuf) {
|
|
|
|
result = -1;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (; chain; chain = chain->next) {
|
2011-08-04 23:39:15 +02:00
|
|
|
if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) {
|
2012-02-02 11:45:23 -05:00
|
|
|
/* chain type can not be referenced */
|
2011-06-09 23:33:58 +02:00
|
|
|
result = -1;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (out_total_len == 0) {
|
|
|
|
/* There might be an empty chain at the start of outbuf; free
|
|
|
|
* it. */
|
|
|
|
evbuffer_free_all_chains(outbuf->first);
|
|
|
|
}
|
|
|
|
APPEND_CHAIN_MULTICAST(outbuf, inbuf);
|
|
|
|
|
|
|
|
outbuf->n_add_for_cb += in_total_len;
|
2012-02-29 15:07:33 -05:00
|
|
|
evbuffer_invoke_callbacks_(outbuf);
|
2011-06-09 23:33:58 +02:00
|
|
|
|
|
|
|
done:
|
|
|
|
EVBUFFER_UNLOCK2(inbuf, outbuf);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
int
|
2008-02-28 02:47:43 +00:00
|
|
|
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
|
|
|
|
{
|
2010-08-16 01:23:57 -07:00
|
|
|
struct evbuffer_chain *pinned, *last;
|
2009-04-05 02:44:17 +00:00
|
|
|
size_t in_total_len, out_total_len;
|
2009-04-08 03:04:39 +00:00
|
|
|
int result = 0;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK2(inbuf, outbuf);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
in_total_len = inbuf->total_len;
|
2009-04-05 02:44:17 +00:00
|
|
|
out_total_len = outbuf->total_len;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2009-02-10 19:39:22 +00:00
|
|
|
if (!in_total_len || inbuf == outbuf)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
if (outbuf->freeze_start || inbuf->freeze_start) {
|
|
|
|
result = -1;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2010-08-16 01:23:57 -07:00
|
|
|
if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
|
|
|
|
result = -1;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
if (out_total_len == 0) {
|
2010-03-31 12:03:43 -04:00
|
|
|
/* There might be an empty chain at the start of outbuf; free
|
|
|
|
* it. */
|
|
|
|
evbuffer_free_all_chains(outbuf->first);
|
2008-03-31 02:04:34 +00:00
|
|
|
COPY_CHAIN(outbuf, inbuf);
|
2008-02-28 02:47:43 +00:00
|
|
|
} else {
|
2008-03-31 02:04:34 +00:00
|
|
|
PREPEND_CHAIN(outbuf, inbuf);
|
2006-02-13 02:22:48 +00:00
|
|
|
}
|
2004-03-23 03:43:53 +00:00
|
|
|
|
2010-08-16 01:23:57 -07:00
|
|
|
RESTORE_PINNED(inbuf, pinned, last);
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
inbuf->n_del_for_cb += in_total_len;
|
|
|
|
outbuf->n_add_for_cb += in_total_len;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2012-02-29 15:07:33 -05:00
|
|
|
evbuffer_invoke_callbacks_(inbuf);
|
|
|
|
evbuffer_invoke_callbacks_(outbuf);
|
2009-04-05 02:44:17 +00:00
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK2(inbuf, outbuf);
|
2009-04-08 03:04:39 +00:00
|
|
|
return result;
|
2004-02-22 21:17:23 +00:00
|
|
|
}
|
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
/* Discard the first 'len' bytes of 'buf' (all of them if len exceeds the
 * buffer's size).  Read-pinned chains are emptied but kept in the list.
 * Returns 0 on success, -1 if the front of the buffer is frozen. */
int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		/* Fast path: drop everything and free every chain. */
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		/* Free whole chains that are entirely consumed; stop at the
		 * first chain that still has bytes left. */
		for (chain = buf->first;
		     remaining >= chain->off;
		     chain = next) {
			next = chain->next;
			remaining -= chain->off;

			/* Keep last_with_datap valid: if it referred to this
			 * chain (or to this chain's next slot), reset it. */
			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				/* Pinned chains cannot be freed: empty them
				 * in place and stop.  A pinned chain can only
				 * be reached once all bytes are accounted. */
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		/* Partially consume the first surviving chain. */
		chain->misalign += remaining;
		chain->off -= remaining;
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
|
2004-02-22 21:17:23 +00:00
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
/* Reads data from an event buffer and drains the bytes read */
|
2005-12-06 03:26:28 +00:00
|
|
|
int
|
2008-02-28 17:38:52 +00:00
|
|
|
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
|
2010-04-12 22:24:54 -04:00
|
|
|
{
|
|
|
|
ev_ssize_t n;
|
|
|
|
EVBUFFER_LOCK(buf);
|
2011-12-07 13:04:35 -05:00
|
|
|
n = evbuffer_copyout_from(buf, NULL, data_out, datlen);
|
2010-04-12 22:24:54 -04:00
|
|
|
if (n > 0) {
|
|
|
|
if (evbuffer_drain(buf, n)<0)
|
|
|
|
n = -1;
|
|
|
|
}
|
|
|
|
EVBUFFER_UNLOCK(buf);
|
|
|
|
return (int)n;
|
|
|
|
}
|
|
|
|
|
|
|
|
ev_ssize_t
|
|
|
|
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
|
2011-12-07 13:04:35 -05:00
|
|
|
{
|
|
|
|
return evbuffer_copyout_from(buf, NULL, data_out, datlen);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Copy up to 'datlen' bytes out of 'buf' into 'data_out', starting at
 * position 'pos' (or at the front when pos is NULL), without draining.
 * Returns the number of bytes copied, or -1 if the front of the buffer
 * is frozen. */
ev_ssize_t
evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,
    void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;
	size_t pos_in_chain;

	EVBUFFER_LOCK(buf);

	/* Establish the starting chain and the offset within it, and clamp
	 * datlen so the copy cannot run past the end of the buffer. */
	if (pos) {
		chain = pos->internal_.chain;
		pos_in_chain = pos->internal_.pos_in_chain;
		if (datlen + pos->pos > buf->total_len)
			datlen = buf->total_len - pos->pos;
	} else {
		chain = buf->first;
		pos_in_chain = 0;
		if (datlen > buf->total_len)
			datlen = buf->total_len;
	}

	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	/* Copy whole chain tails while the request spans this chain. */
	while (datlen && datlen >= chain->off - pos_in_chain) {
		size_t copylen = chain->off - pos_in_chain;
		memcpy(data,
		    chain->buffer + chain->misalign + pos_in_chain,
		    copylen);
		data += copylen;
		datlen -= copylen;

		chain = chain->next;
		pos_in_chain = 0;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	/* Copy the final partial chain, if any bytes remain. */
	if (datlen) {
		EVUTIL_ASSERT(chain);
		memcpy(data, chain->buffer + chain->misalign + pos_in_chain,
		    datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
|
|
|
|
|
|
|
|
/* reads data from the src buffer to the dst buffer, avoids memcpy as
 * possible. */
/* XXXX should return ev_ssize_t */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		/* Keep src's last_with_datap valid as chains move away. */
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		/* Splice the fully-consumed chains [src->first, previous]
		 * onto the tail of dst. */
		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks_(dst);
		evbuffer_invoke_callbacks_(src);
	}
	result = (int)nread;/*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}
|
|
|
|
|
2008-05-12 00:40:04 +00:00
|
|
|
/* Linearize the first 'size' bytes of 'buf' into one contiguous chain and
 * return a pointer to them, or NULL on failure (size too large, OOM, or a
 * pinned chain in the way).  size < 0 means "the whole buffer".  Note that
 * within this function, after the setup phase, 'size' is reused as "bytes
 * still to be copied into the destination chain". */
unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	/* Track whether the copy loop frees the chain that last_with_datap
	 * referred to, so the pointer can be repaired afterwards. */
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	/* Pick (or allocate) the destination chain 'tmp' that will hold all
	 * 'size' bytes, and set 'buffer' to where the copying starts. */
	if (CHAIN_PINNED(chain)) {
		/* The first chain is pinned: we may not move its memory, so
		 * we can only append into its free tail space. */
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		/* Allocate a fresh chain big enough for everything and make
		 * it the new head of the buffer. */
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		/* Copy only the part of this chain we need and shrink it in
		 * place by advancing misalign. */
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		/* Everything was consumed; tmp is now the last chain. */
		buf->last = tmp;
	}

	tmp->next = chain;

	/* Repair last_with_datap if the chain it pointed at/through was
	 * freed in the copy loop above. */
	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
|
|
|
|
|
2005-04-23 02:53:39 +00:00
|
|
|
/*
 * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
 * The returned buffer needs to be freed by the caller.
 */
char *
evbuffer_readline(struct evbuffer *buffer)
{
	/* Deprecated-style wrapper: delegate to evbuffer_readln with the
	 * most permissive EOL style and no length report. */
	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
}
|
|
|
|
|
2010-11-01 13:59:04 -04:00
|
|
|
static inline ev_ssize_t
|
2009-07-31 17:34:47 +00:00
|
|
|
evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
|
2008-02-28 02:47:43 +00:00
|
|
|
{
|
2012-02-29 15:07:32 -05:00
|
|
|
struct evbuffer_chain *chain = it->internal_.chain;
|
|
|
|
size_t i = it->internal_.pos_in_chain;
|
2008-02-28 02:47:43 +00:00
|
|
|
while (chain != NULL) {
|
|
|
|
char *buffer = (char *)chain->buffer + chain->misalign;
|
2010-03-02 17:00:06 -05:00
|
|
|
char *cp = memchr(buffer+i, chr, chain->off-i);
|
|
|
|
if (cp) {
|
2012-02-29 15:07:32 -05:00
|
|
|
it->internal_.chain = chain;
|
|
|
|
it->internal_.pos_in_chain = cp - buffer;
|
2011-06-06 15:11:28 -04:00
|
|
|
it->pos += (cp - buffer - i);
|
2010-03-02 17:00:06 -05:00
|
|
|
return it->pos;
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
2010-03-02 17:00:06 -05:00
|
|
|
it->pos += chain->off - i;
|
2008-02-28 02:47:43 +00:00
|
|
|
i = 0;
|
|
|
|
chain = chain->next;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
2010-03-02 17:00:06 -05:00
|
|
|
/* Return a pointer to the first '\r' or '\n' in the 'len' bytes at 's',
 * whichever comes first, or NULL if neither occurs. */
static inline char *
find_eol_char(char *s, size_t len)
{
#define CHUNK_SZ 128
	/* Lots of benchmarking found this approach to be faster in practice
	 * than doing two memchrs over the whole buffer, doing a memchr on
	 * each char of the buffer, or trying to emulate memchr by hand. */
	char *s_end, *cr, *lf;
	s_end = s+len;
	while (s < s_end) {
		/* Clamp the chunk size without ever forming a pointer past
		 * one-past-the-end: computing 's + CHUNK_SZ' when it would
		 * land beyond s_end is undefined behavior, so compare and
		 * advance using the (always valid) remaining byte count. */
		size_t chunk = (size_t)(s_end - s);
		if (chunk > CHUNK_SZ)
			chunk = CHUNK_SZ;
		cr = memchr(s, '\r', chunk);
		lf = memchr(s, '\n', chunk);
		if (cr) {
			/* Both found in this chunk: return the earlier one. */
			if (lf && lf < cr)
				return lf;
			return cr;
		} else if (lf) {
			return lf;
		}
		s += chunk;
	}

	return NULL;
#undef CHUNK_SZ
}
|
|
|
|
|
2010-11-01 13:59:04 -04:00
|
|
|
/* Advance '*it' to the first '\r' or '\n' at or after its current position.
 * On success, updates the iterator to point at the match and returns the
 * absolute position within the buffer; returns -1 if no EOL character
 * occurs. */
static ev_ssize_t
evbuffer_find_eol_char(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t i = it->internal_.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = find_eol_char(buffer+i, chain->off-i);
		if (cp) {
			it->internal_.chain = chain;
			it->internal_.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer) - i;
			return it->pos;
		}
		/* Not in this chain; account for the bytes scanned and move
		 * to the next one (scanned from its start: i = 0). */
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}
|
|
|
|
|
|
|
|
/* Count how many consecutive bytes starting at '*ptr' are members of the
 * NUL-terminated set 'chrset', and advance '*ptr' past them (to the first
 * non-member byte, or to the end of the buffer).  Returns the count. */
static inline int
evbuffer_strspn(
	struct evbuffer_ptr *ptr, const char *chrset)
{
	int count = 0;
	struct evbuffer_chain *chain = ptr->internal_.chain;
	size_t i = ptr->internal_.pos_in_chain;

	if (!chain)
		return 0;

	while (1) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		for (; i < chain->off; ++i) {
			const char *p = chrset;
			/* Linear scan of the set for each byte. */
			while (*p) {
				if (buffer[i] == *p++)
					goto next;
			}
			/* buffer[i] is not in the set: stop here. */
			ptr->internal_.chain = chain;
			ptr->internal_.pos_in_chain = i;
			ptr->pos += count;
			return count;
		next:
			++count;
		}
		i = 0;

		/* Ran off the end of the buffer: leave *ptr at the end of
		 * the last chain. */
		if (! chain->next) {
			ptr->internal_.chain = chain;
			ptr->internal_.pos_in_chain = i;
			ptr->pos += count;
			return count;
		}

		chain = chain->next;
	}
}
|
2008-02-28 02:47:43 +00:00
|
|
|
|
|
|
|
|
2011-06-13 16:47:43 -04:00
|
|
|
static inline int
|
2009-07-31 17:34:47 +00:00
|
|
|
evbuffer_getchr(struct evbuffer_ptr *it)
|
|
|
|
{
|
2012-02-29 15:07:32 -05:00
|
|
|
struct evbuffer_chain *chain = it->internal_.chain;
|
|
|
|
size_t off = it->internal_.pos_in_chain;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2011-06-13 16:35:28 -04:00
|
|
|
if (chain == NULL)
|
2011-06-13 16:47:43 -04:00
|
|
|
return -1;
|
2011-06-13 16:35:28 -04:00
|
|
|
|
2011-06-13 16:47:43 -04:00
|
|
|
return (unsigned char)chain->buffer[chain->misalign + off];
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
|
|
|
|
2009-07-31 17:35:42 +00:00
|
|
|
/* Find the next end-of-line marker in 'buffer' at or after '*start' (or the
 * beginning of the buffer when start is NULL), according to 'eol_style'.
 * Returns an evbuffer_ptr at the start of the EOL sequence (pos == -1 when
 * not found) and stores the length of the EOL sequence in *eol_len_out. */
struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	/* Avoid locking in trivial edge cases */
	if (start && start->internal_.chain == NULL) {
		PTR_NOT_FOUND(&it);
		if (eol_len_out)
			*eol_len_out = extra_drain;
		return it;
	}

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it.internal_.chain = buffer->first;
		it.internal_.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		/* Any run of '\r' and '\n' characters counts as one EOL. */
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF: {
		ev_ssize_t start_pos = it.pos;
		/* Look for a LF ... */
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		/* ... optionally preceded by a CR. */
		if (it.pos == start_pos)
			break; /* If the first character is \n, don't back up */
		/* This potentially does an extra linear walk over the first
		 * few chains.  Probably, that's not too expensive unless you
		 * have a really pathological setup. */
		memcpy(&it2, &it, sizeof(it));
		if (evbuffer_ptr_subtract(buffer, &it2, 1)<0)
			break;
		if (evbuffer_getchr(&it2) == '\r') {
			/* The LF is preceded by a CR: report the CR as the
			 * start of a two-byte EOL. */
			memcpy(&it, &it2, sizeof(it));
			extra_drain = 2;
		}
		break;
	}
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	case EVBUFFER_EOL_NUL:
		if (evbuffer_strchr(&it, '\0') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	if (!ok)
		PTR_NOT_FOUND(&it);
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}
|
|
|
|
|
|
|
|
/* Remove and return the next line from 'buffer', using 'eol_style' to decide
 * what terminates a line.  The EOL sequence itself is drained but not
 * included in the result.  Returns a newly allocated NUL-terminated string
 * (caller frees with the matching allocator), or NULL if no complete line is
 * present, the buffer's start is frozen, or allocation fails.  If
 * 'n_read_out' is non-NULL it receives the line length (0 on failure). */
char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	/* Everything before the EOL marker is the line. */
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	/* Also consume the EOL sequence itself. */
	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;

	return result;
}
|
|
|
|
|
2009-01-14 19:39:17 +00:00
|
|
|
/* Cap on the automatic chain-size doubling in evbuffer_add(): a new chain
 * gets twice the previous chain's size only while that stays at or below
 * this many bytes. */
#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
|
|
|
|
|
2004-05-24 00:19:52 +00:00
|
|
|
/* Adds data to an event buffer */

/* Append 'datlen' bytes from 'data_in' to the end of 'buf'.
 * Returns 0 on success, -1 on failure (end frozen or out of memory).
 * Invokes the buffer's callbacks on success. */
int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	chain = buf->last;

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Free tail space in the last chain. */
		remain = (size_t)(chain->buffer_len - chain->misalign - chain->off);
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	to_alloc = chain->buffer_len;
	/* Double the previous chain's size, but only up to the auto-size
	 * cap; and always allocate at least enough for datlen. */
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	/* First fill whatever tail space the old last chain had ... */
	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	/* ... then put the rest in the new chain. */
	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks_(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
|
|
|
|
|
2004-05-24 00:19:52 +00:00
|
|
|
int
|
2008-02-28 02:47:43 +00:00
|
|
|
evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
|
2004-05-24 00:19:52 +00:00
|
|
|
{
|
2009-04-05 02:44:17 +00:00
|
|
|
struct evbuffer_chain *chain, *tmp;
|
2010-02-18 17:41:15 -05:00
|
|
|
int result = -1;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
2009-04-08 03:04:39 +00:00
|
|
|
|
|
|
|
if (buf->freeze_start) {
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
chain = buf->first;
|
2004-03-23 03:43:53 +00:00
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
if (chain == NULL) {
|
2010-03-26 14:50:45 -04:00
|
|
|
chain = evbuffer_chain_new(datlen);
|
|
|
|
if (!chain)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2010-03-26 14:50:45 -04:00
|
|
|
evbuffer_chain_insert(buf, chain);
|
2004-02-22 21:17:23 +00:00
|
|
|
}
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
/* we cannot touch immutable buffers */
|
|
|
|
if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
|
2010-03-26 14:51:39 -04:00
|
|
|
/* If this chain is empty, we can treat it as
|
|
|
|
* 'empty at the beginning' rather than 'empty at the end' */
|
|
|
|
if (chain->off == 0)
|
|
|
|
chain->misalign = chain->buffer_len;
|
|
|
|
|
2009-05-01 00:54:14 +00:00
|
|
|
if ((size_t)chain->misalign >= datlen) {
|
2010-03-26 14:51:39 -04:00
|
|
|
/* we have enough space to fit everything */
|
2009-01-27 06:05:38 +00:00
|
|
|
memcpy(chain->buffer + chain->misalign - datlen,
|
|
|
|
data, datlen);
|
|
|
|
chain->off += datlen;
|
|
|
|
chain->misalign -= datlen;
|
|
|
|
buf->total_len += datlen;
|
2010-02-18 17:41:15 -05:00
|
|
|
buf->n_add_for_cb += datlen;
|
2009-01-27 06:05:38 +00:00
|
|
|
goto out;
|
|
|
|
} else if (chain->misalign) {
|
2010-03-26 14:51:39 -04:00
|
|
|
/* we can only fit some of the data. */
|
2009-01-27 06:05:38 +00:00
|
|
|
memcpy(chain->buffer,
|
2009-01-27 13:37:09 +00:00
|
|
|
(char*)data + datlen - chain->misalign,
|
2010-11-01 14:23:33 -04:00
|
|
|
(size_t)chain->misalign);
|
|
|
|
chain->off += (size_t)chain->misalign;
|
|
|
|
buf->total_len += (size_t)chain->misalign;
|
|
|
|
buf->n_add_for_cb += (size_t)chain->misalign;
|
|
|
|
datlen -= (size_t)chain->misalign;
|
2009-01-27 06:05:38 +00:00
|
|
|
chain->misalign = 0;
|
|
|
|
}
|
|
|
|
}
|
2008-02-28 18:36:03 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
/* we need to add another chain */
|
|
|
|
if ((tmp = evbuffer_chain_new(datlen)) == NULL)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2009-01-27 06:05:38 +00:00
|
|
|
buf->first = tmp;
|
2010-03-26 23:18:40 -04:00
|
|
|
if (buf->last_with_datap == &buf->first)
|
|
|
|
buf->last_with_datap = &tmp->next;
|
Revise evbuffer to add last_with_data
This is the first patch in a series to replace previous_to_last with
last_with_data. Currently, we can only use two partially empty chains
at the end of an evbuffer, so if we have one with 511 bytes free, and
another with 512 bytes free, and we try to do a 1024 byte read, we
can't just stick another chain on the end: we need to reallocate the
last one. That's stupid and inefficient.
Instead, this patch adds a last_with_data pointer to eventually
replace previous_to_last. Instead of pointing to the penultimated
chain (if any) as previous_to_last does, last_with_data points to the
last chain that has any data in it, if any. If all chains are empty,
last_with_data points to the first chain. If there are no chains,
last_with_data is NULL.
The next step is to start using last_with_data everywhere that we
currently use previous_to_last. When that's done, we can remove
previous_to_last and the code that maintains it.
2010-03-10 22:16:14 -05:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
tmp->next = chain;
|
2004-03-23 03:43:53 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
tmp->off = datlen;
|
|
|
|
tmp->misalign = tmp->buffer_len - datlen;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
memcpy(tmp->buffer + tmp->misalign, data, datlen);
|
2008-02-28 02:47:43 +00:00
|
|
|
buf->total_len += datlen;
|
2010-11-01 14:23:33 -04:00
|
|
|
buf->n_add_for_cb += (size_t)chain->misalign;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
out:
|
2012-02-29 15:07:33 -05:00
|
|
|
evbuffer_invoke_callbacks_(buf);
|
2010-02-18 17:41:15 -05:00
|
|
|
result = 0;
|
2009-04-05 02:44:17 +00:00
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
return result;
|
2004-02-22 21:17:23 +00:00
|
|
|
}
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
static void
evbuffer_chain_align(struct evbuffer_chain *chain)
{
	/* Callers must never realign memory they are not allowed to move. */
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
	/* memmove, not memcpy: source and destination may overlap. */
	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
	chain->misalign = 0;
}
|
|
|
|
|
2010-03-30 16:47:37 -04:00
|
|
|
/* When expanding a buffer, never copy more than this many bytes to resize
 * an existing chain ... */
#define MAX_TO_COPY_IN_EXPAND 4096
/* ... and never realign a chain that already holds more than this many
 * bytes (see evbuffer_chain_should_realign). */
#define MAX_TO_REALIGN_IN_EXPAND 2048
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2010-10-25 22:36:23 -04:00
|
|
|
/** Helper: return true iff we should realign chain to fit datalen bytes of
|
|
|
|
data in it. */
|
|
|
|
static int
|
|
|
|
evbuffer_chain_should_realign(struct evbuffer_chain *chain,
|
|
|
|
size_t datlen)
|
|
|
|
{
|
|
|
|
return chain->buffer_len - chain->off >= datlen &&
|
|
|
|
(chain->off < chain->buffer_len / 2) &&
|
|
|
|
(chain->off <= MAX_TO_REALIGN_IN_EXPAND);
|
|
|
|
}
|
|
|
|
|
2010-03-30 16:47:37 -04:00
|
|
|
/* Expands the available space in the event buffer to at least datlen, all in
 * a single chunk. Return that chunk. */
static struct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain, **chainp;
	struct evbuffer_chain *result = NULL;
	ASSERT_EVBUFFER_LOCKED(buf);

	chainp = buf->last_with_datap;

	/* XXX If *chainp is no longer writeable, but has enough space in its
	 * misalign, this might be a bad idea: we could still use *chainp, not
	 * (*chainp)->next. */
	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
		chainp = &(*chainp)->next;

	/* 'chain' now points to the first chain with writable space (if any)
	 * We will either use it, realign it, replace it, or resize it. */
	chain = *chainp;

	if (chain == NULL ||
	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
		/* We can't use the last_with_data chain at all.  Just add a
		 * new one that's big enough. */
		goto insert_new;
	}

	/* If we can fit all the data, then we don't have to do anything */
	if (CHAIN_SPACE_LEN(chain) >= datlen) {
		result = chain;
		goto ok;
	}

	/* If the chain is completely empty, just replace it by adding a new
	 * empty chain. */
	if (chain->off == 0) {
		goto insert_new;
	}

	/* If the misalignment plus the remaining space fulfills our data
	 * needs, we could just force an alignment to happen.  Afterwards, we
	 * have enough space.  But only do this if we're saving a lot of space
	 * and not moving too much data.  Otherwise the space savings are
	 * probably offset by the time lost in copying.
	 */
	if (evbuffer_chain_should_realign(chain, datlen)) {
		evbuffer_chain_align(chain);
		result = chain;
		goto ok;
	}

	/* At this point, we can either resize the last chunk with space in
	 * it, use the next chunk after it, or   If we add a new chunk, we waste
	 * CHAIN_SPACE_LEN(chain) bytes in the former last chunk.  If we
	 * resize, we have to copy chain->off bytes.
	 */

	/* Would expanding this chunk be affordable and worthwhile? */
	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
	    chain->off > MAX_TO_COPY_IN_EXPAND) {
		/* It's not worth resizing this chain. Can the next one be
		 * used? */
		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
			/* Yes, we can just use the next chain (which should
			 * be empty. */
			result = chain->next;
			goto ok;
		} else {
			/* No; append a new chain (which will free all
			 * terminal empty chains.) */
			goto insert_new;
		}
	} else {
		/* Okay, we're going to try to resize this chain: Not doing so
		 * would waste at least 1/8 of its current allocation, and we
		 * can do so without having to copy more than
		 * MAX_TO_COPY_IN_EXPAND bytes. */
		/* figure out how much space we need */
		size_t length = chain->off + datlen;
		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
		if (tmp == NULL)
			goto err;

		/* copy the data over that we had so far */
		tmp->off = chain->off;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		/* fix up the list */
		EVUTIL_ASSERT(*chainp == chain);
		result = *chainp = tmp;

		if (buf->last == chain)
			buf->last = tmp;

		tmp->next = chain->next;
		evbuffer_chain_free(chain);
		goto ok;
	}

insert_new:
	result = evbuffer_chain_insert_new(buf, datlen);
	if (!result)
		goto err;
ok:
	/* On every success path the chosen chain really has room. */
	EVUTIL_ASSERT(result);
	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
err:
	return result;
}
|
|
|
|
|
2010-03-10 23:24:14 -05:00
|
|
|
/* Make sure that datlen bytes are available for writing in the last n
|
2009-01-19 21:53:03 +00:00
|
|
|
* chains. Never copies or moves data. */
|
2009-04-14 20:11:10 +00:00
|
|
|
int
|
2012-02-29 15:07:32 -05:00
|
|
|
evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)
|
2009-01-19 21:53:03 +00:00
|
|
|
{
|
2010-03-10 23:24:14 -05:00
|
|
|
struct evbuffer_chain *chain = buf->last, *tmp, *next;
|
|
|
|
size_t avail;
|
|
|
|
int used;
|
2009-01-19 21:53:03 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
ASSERT_EVBUFFER_LOCKED(buf);
|
2010-03-10 23:24:14 -05:00
|
|
|
EVUTIL_ASSERT(n >= 2);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
|
2010-03-10 23:24:14 -05:00
|
|
|
/* There is no last chunk, or we can't touch the last chunk.
|
|
|
|
* Just add a new chunk. */
|
2009-01-19 21:53:03 +00:00
|
|
|
chain = evbuffer_chain_new(datlen);
|
|
|
|
if (chain == NULL)
|
|
|
|
return (-1);
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
evbuffer_chain_insert(buf, chain);
|
2009-01-19 21:53:03 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2010-03-10 23:24:14 -05:00
|
|
|
used = 0; /* number of chains we're using space in. */
|
|
|
|
avail = 0; /* how much space they have. */
|
|
|
|
/* How many bytes can we stick at the end of buffer as it is? Iterate
|
|
|
|
* over the chains at the end of the buffer, tring to see how much
|
|
|
|
* space we have in the first n. */
|
2010-03-26 23:18:40 -04:00
|
|
|
for (chain = *buf->last_with_datap; chain; chain = chain->next) {
|
2010-03-10 23:24:14 -05:00
|
|
|
if (chain->off) {
|
2010-11-01 14:23:33 -04:00
|
|
|
size_t space = (size_t) CHAIN_SPACE_LEN(chain);
|
2010-03-26 23:18:40 -04:00
|
|
|
EVUTIL_ASSERT(chain == *buf->last_with_datap);
|
2010-03-10 23:24:14 -05:00
|
|
|
if (space) {
|
|
|
|
avail += space;
|
|
|
|
++used;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* No data in chain; realign it. */
|
|
|
|
chain->misalign = 0;
|
|
|
|
avail += chain->buffer_len;
|
|
|
|
++used;
|
|
|
|
}
|
|
|
|
if (avail >= datlen) {
|
|
|
|
/* There is already enough space. Just return */
|
|
|
|
return (0);
|
2009-01-19 21:53:03 +00:00
|
|
|
}
|
2010-03-10 23:24:14 -05:00
|
|
|
if (used == n)
|
|
|
|
break;
|
2009-01-19 21:53:03 +00:00
|
|
|
}
|
|
|
|
|
2010-03-10 23:24:14 -05:00
|
|
|
/* There wasn't enough space in the first n chains with space in
|
|
|
|
* them. Either add a new chain with enough space, or replace all
|
|
|
|
* empty chains with one that has enough space, depending on n. */
|
|
|
|
if (used < n) {
|
|
|
|
/* The loop ran off the end of the chains before it hit n
|
|
|
|
* chains; we can add another. */
|
|
|
|
EVUTIL_ASSERT(chain == NULL);
|
2009-01-19 21:53:03 +00:00
|
|
|
|
2010-03-10 23:24:14 -05:00
|
|
|
tmp = evbuffer_chain_new(datlen - avail);
|
2009-01-19 21:53:03 +00:00
|
|
|
if (tmp == NULL)
|
2010-03-10 23:24:14 -05:00
|
|
|
return (-1);
|
|
|
|
|
|
|
|
buf->last->next = tmp;
|
2009-01-19 21:53:03 +00:00
|
|
|
buf->last = tmp;
|
2010-03-10 23:24:14 -05:00
|
|
|
/* (we would only set last_with_data if we added the first
|
|
|
|
* chain. But if the buffer had no chains, we would have
|
|
|
|
* just allocated a new chain earlier) */
|
|
|
|
return (0);
|
2009-01-19 21:53:03 +00:00
|
|
|
} else {
|
2010-03-10 23:24:14 -05:00
|
|
|
/* Nuke _all_ the empty chains. */
|
|
|
|
int rmv_all = 0; /* True iff we removed last_with_data. */
|
2010-03-26 23:18:40 -04:00
|
|
|
chain = *buf->last_with_datap;
|
2010-03-10 23:24:14 -05:00
|
|
|
if (!chain->off) {
|
|
|
|
EVUTIL_ASSERT(chain == buf->first);
|
|
|
|
rmv_all = 1;
|
|
|
|
avail = 0;
|
|
|
|
} else {
|
2010-11-01 14:23:33 -04:00
|
|
|
avail = (size_t) CHAIN_SPACE_LEN(chain);
|
2010-03-10 23:24:14 -05:00
|
|
|
chain = chain->next;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
for (; chain; chain = next) {
|
|
|
|
next = chain->next;
|
|
|
|
EVUTIL_ASSERT(chain->off == 0);
|
|
|
|
evbuffer_chain_free(chain);
|
|
|
|
}
|
|
|
|
tmp = evbuffer_chain_new(datlen - avail);
|
|
|
|
if (tmp == NULL) {
|
|
|
|
if (rmv_all) {
|
|
|
|
ZERO_CHAIN(buf);
|
|
|
|
} else {
|
2010-03-26 23:18:40 -04:00
|
|
|
buf->last = *buf->last_with_datap;
|
|
|
|
(*buf->last_with_datap)->next = NULL;
|
2010-03-10 23:24:14 -05:00
|
|
|
}
|
2009-01-19 21:53:03 +00:00
|
|
|
return (-1);
|
2010-03-10 23:24:14 -05:00
|
|
|
}
|
2009-01-19 21:53:03 +00:00
|
|
|
|
2010-03-10 23:24:14 -05:00
|
|
|
if (rmv_all) {
|
2010-03-26 23:18:40 -04:00
|
|
|
buf->first = buf->last = tmp;
|
|
|
|
buf->last_with_datap = &buf->first;
|
2010-03-10 23:24:14 -05:00
|
|
|
} else {
|
2010-03-26 23:18:40 -04:00
|
|
|
(*buf->last_with_datap)->next = tmp;
|
2010-03-10 23:24:14 -05:00
|
|
|
buf->last = tmp;
|
|
|
|
}
|
|
|
|
return (0);
|
2009-01-19 21:53:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-03-30 16:47:37 -04:00
|
|
|
int
|
|
|
|
evbuffer_expand(struct evbuffer *buf, size_t datlen)
|
|
|
|
{
|
|
|
|
struct evbuffer_chain *chain;
|
|
|
|
|
|
|
|
EVBUFFER_LOCK(buf);
|
|
|
|
chain = evbuffer_expand_singlechain(buf, datlen);
|
|
|
|
EVBUFFER_UNLOCK(buf);
|
|
|
|
return chain ? 0 : -1;
|
|
|
|
}
|
|
|
|
|
2004-07-13 08:02:45 +00:00
|
|
|
/*
 * Reads data from a file descriptor into a buffer.
 */

/* Vectored I/O is available via sys/uio.h on POSIX and WSABUF on Windows. */
#if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32)
#define USE_IOVEC_IMPL
#endif

#ifdef USE_IOVEC_IMPL

#ifdef EVENT__HAVE_SYS_UIO_H
/* number of iovec we use for writev, fragmentation is going to determine
 * how much we end up writing */

#define DEFAULT_WRITE_IOVEC 128

/* Clamp to whatever limit the platform advertises, if it is smaller. */
#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC UIO_MAXIOV
#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC IOV_MAX
#else
#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
#endif

/* Field/type aliases so the same code can fill either struct iovec... */
#define IOV_TYPE struct iovec
#define IOV_PTR_FIELD iov_base
#define IOV_LEN_FIELD iov_len
#define IOV_LEN_TYPE size_t
#else
/* ...or a Windows WSABUF. */
#define NUM_WRITE_IOVEC 16
#define IOV_TYPE WSABUF
#define IOV_PTR_FIELD buf
#define IOV_LEN_FIELD len
#define IOV_LEN_TYPE unsigned long
#endif
#endif
/* How many chains we are willing to scatter a single read across. */
#define NUM_READ_IOVEC 4

/* Upper bound on the number of bytes one evbuffer_read() call will take. */
#define EVBUFFER_MAX_READ	4096
|
|
|
|
|
2009-04-13 03:05:46 +00:00
|
|
|
/** Helper function to figure out which space to use for reading data into
    an evbuffer.  Internal use only.

    @param buf The buffer to read into
    @param howmuch How much we want to read.
    @param vecs An array of two or more iovecs or WSABUFs.
    @param n_vecs_avail The length of vecs
    @param chainp A pointer to a variable to hold the first chain we're
      reading into.
    @param exact Boolean: if true, we do not provide more than 'howmuch'
      space in the vectors, even if more space is available.
    @return The number of buffers we're using.
 */
int
evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
    struct evbuffer_iovec *vecs, int n_vecs_avail,
    struct evbuffer_chain ***chainp, int exact)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain **firstchainp;
	size_t so_far;  /* space gathered across the vectors filled so far */
	int i;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (howmuch < 0)
		return -1;

	so_far = 0;
	/* Let firstchain be the first chain with any space on it */
	firstchainp = buf->last_with_datap;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		/* The last chain with data is full; start at its successor. */
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* Fill vectors until we run out of them or have enough space. */
	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
		size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
		if (avail > (howmuch - so_far) && exact)
			avail = howmuch - so_far;
		vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
		vecs[i].iov_len = avail;
		so_far += avail;
		chain = chain->next;
	}

	*chainp = firstchainp;
	return i;
}
|
|
|
|
|
2010-05-28 15:05:32 -04:00
|
|
|
/* Return the number of bytes that can be read from 'fd' right now, as
 * reported by FIONREAD, or EVBUFFER_MAX_READ when FIONREAD is
 * unavailable.  Returns -1 if the ioctl itself fails. */
static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
{
#if defined(FIONREAD) && defined(_WIN32)
	/* Windows: FIONREAD is queried through ioctlsocket with an
	 * unsigned long out-parameter. */
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	return (int)lng;
#elif defined(FIONREAD)
	/* POSIX: plain ioctl with an int out-parameter. */
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	/* No way to ask; fall back to the fixed maximum. */
	return EVBUFFER_MAX_READ;
#endif
}
|
|
|
|
|
2009-05-22 19:11:48 +00:00
|
|
|
/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
|
2009-01-27 06:05:38 +00:00
|
|
|
* as howmuch? */
|
2004-02-22 21:17:23 +00:00
|
|
|
int
|
2007-11-25 21:53:06 +00:00
|
|
|
evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
|
2004-02-22 21:17:23 +00:00
|
|
|
{
|
2010-11-22 21:02:34 -05:00
|
|
|
struct evbuffer_chain **chainp;
|
2010-05-28 15:05:32 -04:00
|
|
|
int n;
|
2010-02-18 17:41:15 -05:00
|
|
|
int result;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-01-19 21:53:03 +00:00
|
|
|
#ifdef USE_IOVEC_IMPL
|
2010-03-10 23:39:30 -05:00
|
|
|
int nvecs, i, remaining;
|
2009-01-19 21:53:03 +00:00
|
|
|
#else
|
2010-11-22 21:02:34 -05:00
|
|
|
struct evbuffer_chain *chain;
|
2009-01-19 21:53:03 +00:00
|
|
|
unsigned char *p;
|
|
|
|
#endif
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
2004-07-13 08:02:45 +00:00
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
if (buf->freeze_end) {
|
|
|
|
result = -1;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2010-05-28 15:05:32 -04:00
|
|
|
n = get_n_bytes_readable_on_socket(fd);
|
|
|
|
if (n <= 0 || n > EVBUFFER_MAX_READ)
|
2004-07-13 08:02:45 +00:00
|
|
|
n = EVBUFFER_MAX_READ;
|
|
|
|
if (howmuch < 0 || howmuch > n)
|
|
|
|
howmuch = n;
|
|
|
|
|
2009-01-19 21:53:03 +00:00
|
|
|
#ifdef USE_IOVEC_IMPL
|
2009-01-27 06:05:38 +00:00
|
|
|
/* Since we can use iovecs, we're willing to use the last
|
2010-03-10 23:39:30 -05:00
|
|
|
* NUM_READ_IOVEC chains. */
|
2012-02-29 15:07:32 -05:00
|
|
|
if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) {
|
2010-02-18 17:41:15 -05:00
|
|
|
result = -1;
|
|
|
|
goto done;
|
2009-01-19 21:53:03 +00:00
|
|
|
} else {
|
2010-03-10 23:39:30 -05:00
|
|
|
IOV_TYPE vecs[NUM_READ_IOVEC];
|
2012-02-29 15:07:32 -05:00
|
|
|
#ifdef EVBUFFER_IOVEC_IS_NATIVE_
|
|
|
|
nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs,
|
2010-03-26 23:18:40 -04:00
|
|
|
NUM_READ_IOVEC, &chainp, 1);
|
2009-05-21 20:59:00 +00:00
|
|
|
#else
|
|
|
|
/* We aren't using the native struct iovec. Therefore,
|
|
|
|
we are on win32. */
|
2010-03-10 23:39:30 -05:00
|
|
|
struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
|
2012-02-29 15:07:32 -05:00
|
|
|
nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2,
|
2010-03-26 23:18:40 -04:00
|
|
|
&chainp, 1);
|
2009-05-19 21:39:35 +00:00
|
|
|
|
2010-03-11 14:23:02 -05:00
|
|
|
for (i=0; i < nvecs; ++i)
|
2010-03-10 23:39:30 -05:00
|
|
|
WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
|
2009-05-21 20:59:00 +00:00
|
|
|
#endif
|
2009-01-19 21:53:03 +00:00
|
|
|
|
2011-05-25 19:50:56 -04:00
|
|
|
#ifdef _WIN32
|
2009-01-19 21:53:03 +00:00
|
|
|
{
|
|
|
|
DWORD bytesRead;
|
2009-02-03 05:22:57 +00:00
|
|
|
DWORD flags=0;
|
|
|
|
if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
|
|
|
|
/* The read failed. It might be a close,
|
|
|
|
* or it might be an error. */
|
|
|
|
if (WSAGetLastError() == WSAECONNABORTED)
|
|
|
|
n = 0;
|
|
|
|
else
|
|
|
|
n = -1;
|
|
|
|
} else
|
2009-01-19 21:53:03 +00:00
|
|
|
n = bytesRead;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
n = readv(fd, vecs, nvecs);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
#else /*!USE_IOVEC_IMPL*/
|
2004-07-13 08:02:45 +00:00
|
|
|
/* If we don't have FIONREAD, we might waste some space here */
|
2008-02-28 18:36:03 +00:00
|
|
|
/* XXX we _will_ waste some space here if there is any space left
|
|
|
|
* over on buf->last. */
|
2010-03-30 16:47:37 -04:00
|
|
|
if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
|
2009-04-05 02:44:17 +00:00
|
|
|
result = -1;
|
2010-02-18 17:41:15 -05:00
|
|
|
goto done;
|
|
|
|
}
|
2004-07-13 08:02:45 +00:00
|
|
|
|
|
|
|
/* We can append new data at this point */
|
2008-02-28 02:47:43 +00:00
|
|
|
p = chain->buffer + chain->misalign + chain->off;
|
2004-02-22 21:17:23 +00:00
|
|
|
|
2011-05-25 19:50:56 -04:00
|
|
|
#ifndef _WIN32
|
2004-07-13 08:02:45 +00:00
|
|
|
n = read(fd, p, howmuch);
|
2007-09-20 19:36:03 +00:00
|
|
|
#else
|
|
|
|
n = recv(fd, p, howmuch, 0);
|
|
|
|
#endif
|
2009-01-19 21:53:03 +00:00
|
|
|
#endif /* USE_IOVEC_IMPL */
|
|
|
|
|
2009-04-05 02:44:17 +00:00
|
|
|
if (n == -1) {
|
|
|
|
result = -1;
|
2010-02-18 17:41:15 -05:00
|
|
|
goto done;
|
|
|
|
}
|
2009-04-05 02:44:17 +00:00
|
|
|
if (n == 0) {
|
|
|
|
result = 0;
|
2010-02-18 17:41:15 -05:00
|
|
|
goto done;
|
|
|
|
}
|
2004-02-22 21:17:23 +00:00
|
|
|
|
2009-01-19 21:53:03 +00:00
|
|
|
#ifdef USE_IOVEC_IMPL
|
2010-03-10 23:39:30 -05:00
|
|
|
remaining = n;
|
|
|
|
for (i=0; i < nvecs; ++i) {
|
2010-11-01 14:23:33 -04:00
|
|
|
ev_ssize_t space = (ev_ssize_t) CHAIN_SPACE_LEN(*chainp);
|
2010-03-10 23:39:30 -05:00
|
|
|
if (space < remaining) {
|
2010-03-26 23:18:40 -04:00
|
|
|
(*chainp)->off += space;
|
2010-11-01 13:59:04 -04:00
|
|
|
remaining -= (int)space;
|
2009-01-19 21:53:03 +00:00
|
|
|
} else {
|
2010-03-26 23:18:40 -04:00
|
|
|
(*chainp)->off += remaining;
|
|
|
|
buf->last_with_datap = chainp;
|
2010-03-10 23:39:30 -05:00
|
|
|
break;
|
2009-01-19 21:53:03 +00:00
|
|
|
}
|
2010-03-26 23:18:40 -04:00
|
|
|
chainp = &(*chainp)->next;
|
2009-01-19 21:53:03 +00:00
|
|
|
}
|
|
|
|
#else
|
2008-02-28 02:47:43 +00:00
|
|
|
chain->off += n;
|
2010-03-26 23:18:40 -04:00
|
|
|
advance_last_with_data(buf);
|
2009-01-19 21:53:03 +00:00
|
|
|
#endif
|
2008-02-28 02:47:43 +00:00
|
|
|
buf->total_len += n;
|
2010-02-18 17:41:15 -05:00
|
|
|
buf->n_add_for_cb += n;
|
2004-07-13 08:02:45 +00:00
|
|
|
|
|
|
|
/* Tell someone about changes in this buffer */
|
2012-02-29 15:07:33 -05:00
|
|
|
evbuffer_invoke_callbacks_(buf);
|
2010-02-18 17:41:15 -05:00
|
|
|
result = n;
|
2009-04-05 02:44:17 +00:00
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
return result;
|
2004-02-22 21:17:23 +00:00
|
|
|
}
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
#ifdef USE_IOVEC_IMPL
|
|
|
|
static inline int
|
|
|
|
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
|
2009-11-02 16:17:06 +00:00
|
|
|
ev_ssize_t howmuch)
|
2009-01-27 06:05:38 +00:00
|
|
|
{
|
2010-03-10 23:39:30 -05:00
|
|
|
IOV_TYPE iov[NUM_WRITE_IOVEC];
|
2009-01-27 06:05:38 +00:00
|
|
|
struct evbuffer_chain *chain = buffer->first;
|
|
|
|
int n, i = 0;
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-05-01 00:54:14 +00:00
|
|
|
if (howmuch < 0)
|
|
|
|
return -1;
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
ASSERT_EVBUFFER_LOCKED(buffer);
|
2009-01-27 06:05:38 +00:00
|
|
|
/* XXX make this top out at some maximal data length? if the
|
|
|
|
* buffer has (say) 1MB in it, split over 128 chains, there's
|
|
|
|
* no way it all gets written in one go. */
|
2010-03-10 23:39:30 -05:00
|
|
|
while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
|
2009-01-27 06:05:38 +00:00
|
|
|
#ifdef USE_SENDFILE
|
|
|
|
/* we cannot write the file info via writev */
|
|
|
|
if (chain->flags & EVBUFFER_SENDFILE)
|
|
|
|
break;
|
|
|
|
#endif
|
2010-04-12 12:18:57 +02:00
|
|
|
iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
|
2009-05-01 00:54:14 +00:00
|
|
|
if ((size_t)howmuch >= chain->off) {
|
2010-11-01 13:59:04 -04:00
|
|
|
/* XXXcould be problematic when windows supports mmap*/
|
|
|
|
iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
|
2009-01-27 06:05:38 +00:00
|
|
|
howmuch -= chain->off;
|
|
|
|
} else {
|
2010-11-01 13:59:04 -04:00
|
|
|
/* XXXcould be problematic when windows supports mmap*/
|
|
|
|
iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
|
2009-01-27 06:05:38 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
chain = chain->next;
|
|
|
|
}
|
2012-07-26 10:34:06 -04:00
|
|
|
if (! i)
|
|
|
|
return 0;
|
2012-07-26 10:43:13 -04:00
|
|
|
|
2011-05-25 19:50:56 -04:00
|
|
|
#ifdef _WIN32
|
2009-01-27 06:05:38 +00:00
|
|
|
{
|
2009-02-03 05:22:57 +00:00
|
|
|
DWORD bytesSent;
|
|
|
|
if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
|
2009-01-27 06:05:38 +00:00
|
|
|
n = -1;
|
|
|
|
else
|
|
|
|
n = bytesSent;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
n = writev(fd, iov, i);
|
|
|
|
#endif
|
|
|
|
return (n);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef USE_SENDFILE
|
|
|
|
static inline int
|
2010-10-21 19:45:49 -04:00
|
|
|
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd,
|
2009-05-22 19:11:48 +00:00
|
|
|
ev_ssize_t howmuch)
|
2009-01-27 06:05:38 +00:00
|
|
|
{
|
|
|
|
struct evbuffer_chain *chain = buffer->first;
|
2010-10-21 19:45:49 -04:00
|
|
|
struct evbuffer_chain_file_segment *info =
|
|
|
|
EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment,
|
|
|
|
chain);
|
|
|
|
const int source_fd = info->segment->fd;
|
2009-04-24 03:24:22 +00:00
|
|
|
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
|
2009-01-27 06:05:38 +00:00
|
|
|
int res;
|
2010-10-21 19:45:49 -04:00
|
|
|
ev_off_t len = chain->off;
|
2009-08-16 16:40:42 +00:00
|
|
|
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
|
2009-05-22 19:11:48 +00:00
|
|
|
ev_ssize_t res;
|
2010-10-21 19:45:49 -04:00
|
|
|
ev_off_t offset = chain->misalign;
|
2009-04-05 02:44:17 +00:00
|
|
|
#endif
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
ASSERT_EVBUFFER_LOCKED(buffer);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-04-28 19:08:07 +00:00
|
|
|
#if defined(SENDFILE_IS_MACOSX)
|
2010-10-21 19:45:49 -04:00
|
|
|
res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0);
|
2009-01-27 06:05:38 +00:00
|
|
|
if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
|
|
|
|
return (-1);
|
|
|
|
|
2009-04-24 03:24:22 +00:00
|
|
|
return (len);
|
2009-04-28 19:08:07 +00:00
|
|
|
#elif defined(SENDFILE_IS_FREEBSD)
|
2010-10-21 19:45:49 -04:00
|
|
|
res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0);
|
2009-04-24 03:24:22 +00:00
|
|
|
if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
|
|
|
|
return (-1);
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
return (len);
|
2009-04-28 19:08:07 +00:00
|
|
|
#elif defined(SENDFILE_IS_LINUX)
|
2009-01-27 06:18:45 +00:00
|
|
|
/* TODO(niels): implement splice */
|
2010-10-21 19:45:49 -04:00
|
|
|
res = sendfile(dest_fd, source_fd, &offset, chain->off);
|
2009-01-27 06:05:38 +00:00
|
|
|
if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
|
2009-10-16 13:19:57 +00:00
|
|
|
/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
|
2009-08-16 16:40:42 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
return (res);
|
|
|
|
#elif defined(SENDFILE_IS_SOLARIS)
|
2011-08-15 13:39:10 -04:00
|
|
|
{
|
|
|
|
const off_t offset_orig = offset;
|
2011-08-17 22:08:06 -04:00
|
|
|
res = sendfile(dest_fd, source_fd, &offset, chain->off);
|
2011-08-15 13:39:10 -04:00
|
|
|
if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
|
|
|
|
if (offset - offset_orig)
|
|
|
|
return offset - offset_orig;
|
|
|
|
/* if this is EAGAIN or EINTR and no bytes were
|
|
|
|
* written, return 0 */
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
return (res);
|
2009-01-27 06:05:38 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2004-02-22 21:17:23 +00:00
|
|
|
/* Write at most 'howmuch' bytes (all of the buffer if howmuch < 0) from
 * 'buffer' to 'fd', draining what was written.  Returns bytes written or
 * -1 on error / frozen front. */
int
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	int n = -1;

	EVBUFFER_LOCK(buffer);

	/* Removing data from a buffer whose front is frozen is forbidden. */
	if (buffer->freeze_start) {
		goto done;
	}

	/* Negative or oversized request means "everything we have". */
	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
		howmuch = buffer->total_len;

	if (howmuch > 0) {
#ifdef USE_SENDFILE
		/* A file-segment chain at the front must go out via
		 * sendfile; writev cannot touch it. */
		struct evbuffer_chain *chain = buffer->first;
		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
			n = evbuffer_write_sendfile(buffer, fd, howmuch);
		else {
#endif
#ifdef USE_IOVEC_IMPL
		n = evbuffer_write_iovec(buffer, fd, howmuch);
#elif defined(_WIN32)
		/* XXX(nickm) Don't disable this code until we know if
		 * the WSARecv code above works. */
		void *p = evbuffer_pullup(buffer, howmuch);
		n = send(fd, p, howmuch, 0);
#else
		/* No vectored I/O: linearize then write in one call. */
		void *p = evbuffer_pullup(buffer, howmuch);
		n = write(fd, p, howmuch);
#endif
#ifdef USE_SENDFILE
		}
#endif
	}

	/* Remove whatever actually went out on the wire. */
	if (n > 0)
		evbuffer_drain(buffer, n);

done:
	EVBUFFER_UNLOCK(buffer);
	return (n);
}
|
|
|
|
|
2009-01-12 20:42:19 +00:00
|
|
|
/* Write the entire buffer to 'fd'; convenience wrapper around
 * evbuffer_write_atmost with an unlimited byte count. */
int
evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
{
	return evbuffer_write_atmost(buffer, fd, -1);
}
|
|
|
|
|
2008-05-12 00:40:04 +00:00
|
|
|
unsigned char *
|
|
|
|
evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
|
2004-02-22 21:17:23 +00:00
|
|
|
{
|
2010-02-18 17:41:15 -05:00
|
|
|
unsigned char *search;
|
|
|
|
struct evbuffer_ptr ptr;
|
2004-02-22 21:17:23 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-04-03 01:21:36 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
|
|
|
|
if (ptr.pos < 0) {
|
|
|
|
search = NULL;
|
|
|
|
} else {
|
|
|
|
search = evbuffer_pullup(buffer, ptr.pos + len);
|
2009-04-08 03:04:39 +00:00
|
|
|
if (search)
|
|
|
|
search += ptr.pos;
|
2010-02-18 17:41:15 -05:00
|
|
|
}
|
|
|
|
EVBUFFER_UNLOCK(buffer);
|
|
|
|
return search;
|
2009-04-03 01:21:36 +00:00
|
|
|
}
|
|
|
|
|
2011-06-01 14:19:13 -04:00
|
|
|
/* Subract <b>howfar</b> from the position of <b>pos</b> within
|
|
|
|
* <b>buf</b>. Returns 0 on success, -1 on failure.
|
|
|
|
*
|
|
|
|
* This isn't exposed yet, because of potential inefficiency issues.
|
|
|
|
* Maybe it should be. */
|
|
|
|
static int
|
|
|
|
evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
|
|
|
|
size_t howfar)
|
|
|
|
{
|
|
|
|
if (howfar > (size_t)pos->pos)
|
|
|
|
return -1;
|
2012-02-29 15:07:32 -05:00
|
|
|
if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) {
|
|
|
|
pos->internal_.pos_in_chain -= howfar;
|
2011-06-01 14:19:13 -04:00
|
|
|
pos->pos -= howfar;
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
const size_t newpos = pos->pos - howfar;
|
|
|
|
/* Here's the inefficient part: it walks over the
|
|
|
|
* chains until we hit newpos. */
|
|
|
|
return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-04-03 01:21:36 +00:00
|
|
|
/* Position 'pos' at absolute offset 'position' within 'buf'
 * (EVBUFFER_PTR_SET), or advance it by 'position' bytes
 * (EVBUFFER_PTR_ADD).  Returns 0 on success, -1 if the target lies past
 * the end of the buffer. */
int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;  /* bytes still to advance past 'chain' start */
	struct evbuffer_chain *chain = NULL;
	int result = 0;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		/* Absolute seek: walk from the first chain. */
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		chain = pos->internal_.chain;
		pos->pos += position;
		/* Reuse 'position' as the starting offset inside 'chain'. */
		position = pos->internal_.pos_in_chain;
		break;
	}

	/* Skip whole chains until the target offset falls inside one. */
	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		chain = chain->next;
		position = 0;
	}
	if (chain) {
		pos->internal_.chain = chain;
		pos->internal_.pos_in_chain = position + left;
	} else if (left == 0) {
		/* The first byte in the (nonexistent) chain after the last chain */
		pos->internal_.chain = NULL;
		pos->internal_.pos_in_chain = 0;
	} else {
		/* Ran out of chains with bytes still left: out of range. */
		PTR_NOT_FOUND(pos);
		result = -1;
	}

	EVBUFFER_UNLOCK(buf);

	return result;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
Compare the bytes in buf at position pos to the len bytes in mem. Return
|
|
|
|
less than 0, 0, or greater than 0 as memcmp.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
|
|
|
|
const char *mem, size_t len)
|
|
|
|
{
|
2010-02-18 17:41:15 -05:00
|
|
|
struct evbuffer_chain *chain;
|
|
|
|
size_t position;
|
|
|
|
int r;
|
|
|
|
|
|
|
|
ASSERT_EVBUFFER_LOCKED(buf);
|
|
|
|
|
|
|
|
if (pos->pos + len > buf->total_len)
|
|
|
|
return -1;
|
|
|
|
|
2012-02-29 15:07:32 -05:00
|
|
|
chain = pos->internal_.chain;
|
|
|
|
position = pos->internal_.pos_in_chain;
|
2010-02-18 17:41:15 -05:00
|
|
|
while (len && chain) {
|
|
|
|
size_t n_comparable;
|
|
|
|
if (len + position > chain->off)
|
|
|
|
n_comparable = chain->off - position;
|
|
|
|
else
|
|
|
|
n_comparable = len;
|
|
|
|
r = memcmp(chain->buffer + chain->misalign + position, mem,
|
|
|
|
n_comparable);
|
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
mem += n_comparable;
|
|
|
|
len -= n_comparable;
|
|
|
|
position = 0;
|
|
|
|
chain = chain->next;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2009-04-03 01:21:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Search 'buffer' for the first occurrence of 'what' at or after 'start';
 * convenience wrapper around evbuffer_search_range with no end bound. */
struct evbuffer_ptr
evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
{
	return evbuffer_search_range(buffer, what, len, start, NULL);
}
|
|
|
|
|
|
|
|
struct evbuffer_ptr
|
|
|
|
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
|
2009-04-03 01:21:36 +00:00
|
|
|
{
|
2010-02-18 17:41:15 -05:00
|
|
|
struct evbuffer_ptr pos;
|
|
|
|
struct evbuffer_chain *chain, *last_chain = NULL;
|
2009-04-03 01:21:36 +00:00
|
|
|
const unsigned char *p;
|
2010-02-18 17:41:15 -05:00
|
|
|
char first;
|
2009-04-03 01:21:36 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
if (start) {
|
|
|
|
memcpy(&pos, start, sizeof(pos));
|
2012-02-29 15:07:32 -05:00
|
|
|
chain = pos.internal_.chain;
|
2010-02-18 17:41:15 -05:00
|
|
|
} else {
|
|
|
|
pos.pos = 0;
|
2012-02-29 15:07:32 -05:00
|
|
|
chain = pos.internal_.chain = buffer->first;
|
|
|
|
pos.internal_.pos_in_chain = 0;
|
2010-02-18 17:41:15 -05:00
|
|
|
}
|
2009-04-03 01:21:36 +00:00
|
|
|
|
2009-08-07 17:16:52 +00:00
|
|
|
if (end)
|
2012-02-29 15:07:32 -05:00
|
|
|
last_chain = end->internal_.chain;
|
2009-08-07 17:16:52 +00:00
|
|
|
|
2010-09-23 22:45:55 -04:00
|
|
|
if (!len || len > EV_SSIZE_MAX)
|
2010-02-18 17:41:15 -05:00
|
|
|
goto done;
|
|
|
|
|
|
|
|
first = what[0];
|
|
|
|
|
|
|
|
while (chain) {
|
|
|
|
const unsigned char *start_at =
|
|
|
|
chain->buffer + chain->misalign +
|
2012-02-29 15:07:32 -05:00
|
|
|
pos.internal_.pos_in_chain;
|
2010-02-18 17:41:15 -05:00
|
|
|
p = memchr(start_at, first,
|
2012-02-29 15:07:32 -05:00
|
|
|
chain->off - pos.internal_.pos_in_chain);
|
2010-02-18 17:41:15 -05:00
|
|
|
if (p) {
|
|
|
|
pos.pos += p - start_at;
|
2012-02-29 15:07:32 -05:00
|
|
|
pos.internal_.pos_in_chain += p - start_at;
|
2010-02-18 17:41:15 -05:00
|
|
|
if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
|
2010-09-23 22:45:55 -04:00
|
|
|
if (end && pos.pos + (ev_ssize_t)len > end->pos)
|
2009-08-07 17:16:52 +00:00
|
|
|
goto not_found;
|
|
|
|
else
|
|
|
|
goto done;
|
|
|
|
}
|
2010-02-18 17:41:15 -05:00
|
|
|
++pos.pos;
|
2012-02-29 15:07:32 -05:00
|
|
|
++pos.internal_.pos_in_chain;
|
|
|
|
if (pos.internal_.pos_in_chain == chain->off) {
|
|
|
|
chain = pos.internal_.chain = chain->next;
|
|
|
|
pos.internal_.pos_in_chain = 0;
|
2010-02-18 17:41:15 -05:00
|
|
|
}
|
|
|
|
} else {
|
2009-08-07 17:16:52 +00:00
|
|
|
if (chain == last_chain)
|
|
|
|
goto not_found;
|
2012-02-29 15:07:32 -05:00
|
|
|
pos.pos += chain->off - pos.internal_.pos_in_chain;
|
|
|
|
chain = pos.internal_.chain = chain->next;
|
|
|
|
pos.internal_.pos_in_chain = 0;
|
2010-02-18 17:41:15 -05:00
|
|
|
}
|
|
|
|
}
|
2009-04-03 01:21:36 +00:00
|
|
|
|
2009-08-07 17:16:52 +00:00
|
|
|
not_found:
|
2011-06-14 01:58:30 +03:00
|
|
|
PTR_NOT_FOUND(&pos);
|
2009-04-05 02:44:17 +00:00
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
|
|
|
return pos;
|
2004-02-22 21:17:23 +00:00
|
|
|
}
|
2004-04-04 02:20:21 +00:00
|
|
|
|
2009-05-19 21:39:35 +00:00
|
|
|
int
|
|
|
|
evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
|
|
|
|
struct evbuffer_ptr *start_at,
|
|
|
|
struct evbuffer_iovec *vec, int n_vec)
|
|
|
|
{
|
|
|
|
struct evbuffer_chain *chain;
|
|
|
|
int idx = 0;
|
2009-11-05 21:22:23 +00:00
|
|
|
ev_ssize_t len_so_far = 0;
|
2009-05-19 21:39:35 +00:00
|
|
|
|
2011-06-13 16:35:28 -04:00
|
|
|
/* Avoid locking in trivial edge cases */
|
2012-02-29 15:07:32 -05:00
|
|
|
if (start_at && start_at->internal_.chain == NULL)
|
2011-06-13 16:35:28 -04:00
|
|
|
return 0;
|
|
|
|
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-05-19 21:39:35 +00:00
|
|
|
|
|
|
|
if (start_at) {
|
2012-02-29 15:07:32 -05:00
|
|
|
chain = start_at->internal_.chain;
|
2009-05-19 21:39:35 +00:00
|
|
|
len_so_far = chain->off
|
2012-02-29 15:07:32 -05:00
|
|
|
- start_at->internal_.pos_in_chain;
|
2009-05-19 21:39:35 +00:00
|
|
|
idx = 1;
|
|
|
|
if (n_vec > 0) {
|
|
|
|
vec[0].iov_base = chain->buffer + chain->misalign
|
2012-02-29 15:07:32 -05:00
|
|
|
+ start_at->internal_.pos_in_chain;
|
2009-05-19 21:39:35 +00:00
|
|
|
vec[0].iov_len = len_so_far;
|
|
|
|
}
|
|
|
|
chain = chain->next;
|
|
|
|
} else {
|
|
|
|
chain = buffer->first;
|
|
|
|
}
|
|
|
|
|
2011-12-08 14:30:20 -05:00
|
|
|
if (n_vec == 0 && len < 0) {
|
|
|
|
/* If no vectors are provided and they asked for "everything",
|
|
|
|
* pretend they asked for the actual available amount. */
|
|
|
|
len = buffer->total_len - len_so_far;
|
|
|
|
}
|
|
|
|
|
2009-05-19 21:39:35 +00:00
|
|
|
while (chain) {
|
|
|
|
if (len >= 0 && len_so_far >= len)
|
|
|
|
break;
|
|
|
|
if (idx<n_vec) {
|
|
|
|
vec[idx].iov_base = chain->buffer + chain->misalign;
|
|
|
|
vec[idx].iov_len = chain->off;
|
2011-12-08 14:30:20 -05:00
|
|
|
} else if (len<0) {
|
2009-05-19 21:39:35 +00:00
|
|
|
break;
|
2011-12-08 14:30:20 -05:00
|
|
|
}
|
2009-05-19 21:39:35 +00:00
|
|
|
++idx;
|
|
|
|
len_so_far += chain->off;
|
|
|
|
chain = chain->next;
|
|
|
|
}
|
|
|
|
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2009-05-19 21:39:35 +00:00
|
|
|
|
|
|
|
return idx;
|
|
|
|
}
|
|
|
|
|
2009-04-03 01:21:36 +00:00
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
int
|
|
|
|
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
|
|
|
|
{
|
|
|
|
char *buffer;
|
|
|
|
size_t space;
|
2009-04-05 02:44:17 +00:00
|
|
|
int sz, result = -1;
|
2008-02-28 02:47:43 +00:00
|
|
|
va_list aq;
|
2010-03-30 16:47:37 -04:00
|
|
|
struct evbuffer_chain *chain;
|
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
if (buf->freeze_end) {
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2008-02-28 02:47:43 +00:00
|
|
|
/* make sure that at least some space is available */
|
2010-03-30 16:47:37 -04:00
|
|
|
if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2008-02-28 02:47:43 +00:00
|
|
|
|
|
|
|
for (;;) {
|
2010-03-30 16:47:37 -04:00
|
|
|
#if 0
|
2008-02-28 02:47:43 +00:00
|
|
|
size_t used = chain->misalign + chain->off;
|
|
|
|
buffer = (char *)chain->buffer + chain->misalign + chain->off;
|
2009-10-26 20:00:43 +00:00
|
|
|
EVUTIL_ASSERT(chain->buffer_len >= used);
|
2008-02-28 02:47:43 +00:00
|
|
|
space = chain->buffer_len - used;
|
2010-03-30 16:47:37 -04:00
|
|
|
#endif
|
|
|
|
buffer = (char*) CHAIN_SPACE_PTR(chain);
|
2010-11-01 14:23:33 -04:00
|
|
|
space = (size_t) CHAIN_SPACE_LEN(chain);
|
2008-02-28 02:47:43 +00:00
|
|
|
|
|
|
|
#ifndef va_copy
|
|
|
|
#define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list))
|
|
|
|
#endif
|
|
|
|
va_copy(aq, ap);
|
|
|
|
|
2008-05-12 16:44:24 +00:00
|
|
|
sz = evutil_vsnprintf(buffer, space, fmt, aq);
|
2008-02-28 02:47:43 +00:00
|
|
|
|
|
|
|
va_end(aq);
|
|
|
|
|
|
|
|
if (sz < 0)
|
2009-04-05 02:44:17 +00:00
|
|
|
goto done;
|
2009-05-01 00:54:14 +00:00
|
|
|
if ((size_t)sz < space) {
|
2008-02-28 02:47:43 +00:00
|
|
|
chain->off += sz;
|
|
|
|
buf->total_len += sz;
|
2010-02-18 17:41:15 -05:00
|
|
|
buf->n_add_for_cb += sz;
|
2009-01-23 01:11:13 +00:00
|
|
|
|
2010-03-30 16:47:37 -04:00
|
|
|
advance_last_with_data(buf);
|
2012-02-29 15:07:33 -05:00
|
|
|
evbuffer_invoke_callbacks_(buf);
|
2009-04-05 02:44:17 +00:00
|
|
|
result = sz;
|
2010-02-18 17:41:15 -05:00
|
|
|
goto done;
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
2010-03-30 16:47:37 -04:00
|
|
|
if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
|
2010-02-18 17:41:15 -05:00
|
|
|
goto done;
|
|
|
|
}
|
2008-02-28 02:47:43 +00:00
|
|
|
/* NOTREACHED */
|
2009-04-05 02:44:17 +00:00
|
|
|
|
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buf);
|
|
|
|
return result;
|
2008-02-28 02:47:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* printf-style wrapper around evbuffer_add_vprintf().  Returns the number
 * of bytes appended, or -1 on failure. */
int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
{
	va_list ap;
	int res;

	va_start(ap, fmt);
	res = evbuffer_add_vprintf(buf, fmt, ap);
	va_end(ap);

	return (res);
}
|
|
|
|
|
2009-01-27 06:05:38 +00:00
|
|
|
int
|
2009-01-29 03:20:40 +00:00
|
|
|
evbuffer_add_reference(struct evbuffer *outbuf,
|
|
|
|
const void *data, size_t datlen,
|
2009-05-15 22:44:18 +00:00
|
|
|
evbuffer_ref_cleanup_cb cleanupfn, void *extra)
|
2009-01-27 06:05:38 +00:00
|
|
|
{
|
2009-04-08 03:04:39 +00:00
|
|
|
struct evbuffer_chain *chain;
|
2009-01-27 06:05:38 +00:00
|
|
|
struct evbuffer_chain_reference *info;
|
2009-04-08 03:04:39 +00:00
|
|
|
int result = -1;
|
2009-01-27 06:05:38 +00:00
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
|
|
|
|
if (!chain)
|
|
|
|
return (-1);
|
2009-01-27 06:05:38 +00:00
|
|
|
chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
|
2009-01-29 03:20:40 +00:00
|
|
|
chain->buffer = (u_char *)data;
|
2009-01-27 06:05:38 +00:00
|
|
|
chain->buffer_len = datlen;
|
|
|
|
chain->off = datlen;
|
|
|
|
|
|
|
|
info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
|
|
|
|
info->cleanupfn = cleanupfn;
|
|
|
|
info->extra = extra;
|
|
|
|
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(outbuf);
|
2009-04-08 03:04:39 +00:00
|
|
|
if (outbuf->freeze_end) {
|
|
|
|
/* don't call chain_free; we do not want to actually invoke
|
|
|
|
* the cleanup function */
|
|
|
|
mm_free(chain);
|
|
|
|
goto done;
|
|
|
|
}
|
2009-01-27 06:05:38 +00:00
|
|
|
evbuffer_chain_insert(outbuf, chain);
|
2010-02-18 17:41:15 -05:00
|
|
|
outbuf->n_add_for_cb += datlen;
|
2009-01-27 06:05:38 +00:00
|
|
|
|
2012-02-29 15:07:33 -05:00
|
|
|
evbuffer_invoke_callbacks_(outbuf);
|
2009-04-08 03:04:39 +00:00
|
|
|
|
|
|
|
result = 0;
|
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(outbuf);
|
2009-01-27 06:05:38 +00:00
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
return result;
|
2009-01-27 06:05:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* TODO(niels): we may want to add to automagically convert to mmap, in
|
|
|
|
* case evbuffer_remove() or evbuffer_pullup() are being used.
|
|
|
|
*/
|
2010-10-21 19:45:49 -04:00
|
|
|
struct evbuffer_file_segment *
|
|
|
|
evbuffer_file_segment_new(
|
|
|
|
int fd, ev_off_t offset, ev_off_t length, unsigned flags)
|
2009-01-27 06:05:38 +00:00
|
|
|
{
|
2010-10-21 19:45:49 -04:00
|
|
|
struct evbuffer_file_segment *seg =
|
|
|
|
mm_calloc(sizeof(struct evbuffer_file_segment), 1);
|
|
|
|
if (!seg)
|
|
|
|
return NULL;
|
|
|
|
seg->refcnt = 1;
|
|
|
|
seg->fd = fd;
|
|
|
|
seg->flags = flags;
|
2011-10-06 18:02:22 -04:00
|
|
|
seg->file_offset = offset;
|
2010-10-21 19:45:49 -04:00
|
|
|
|
2011-05-25 19:50:56 -04:00
|
|
|
#ifdef _WIN32
|
2010-10-21 19:45:49 -04:00
|
|
|
#define lseek _lseeki64
|
|
|
|
#define fstat _fstat
|
|
|
|
#define stat _stat
|
2009-01-27 06:05:38 +00:00
|
|
|
#endif
|
2010-10-21 19:45:49 -04:00
|
|
|
if (length == -1) {
|
|
|
|
struct stat st;
|
|
|
|
if (fstat(fd, &st) < 0)
|
|
|
|
goto err;
|
|
|
|
length = st.st_size;
|
|
|
|
}
|
|
|
|
seg->length = length;
|
2009-01-27 06:05:38 +00:00
|
|
|
|
|
|
|
#if defined(USE_SENDFILE)
|
2010-10-21 19:45:49 -04:00
|
|
|
if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) {
|
2011-10-06 18:02:22 -04:00
|
|
|
seg->can_sendfile = 1;
|
2010-10-21 19:45:49 -04:00
|
|
|
goto done;
|
|
|
|
}
|
2009-01-27 06:05:38 +00:00
|
|
|
#endif
|
2011-10-06 18:02:22 -04:00
|
|
|
|
|
|
|
if (evbuffer_file_segment_materialize(seg)<0)
|
|
|
|
goto err;
|
|
|
|
|
2011-12-02 01:48:14 -05:00
|
|
|
#if defined(USE_SENDFILE)
|
2011-10-06 18:02:22 -04:00
|
|
|
done:
|
2011-12-02 01:48:14 -05:00
|
|
|
#endif
|
2011-10-06 18:02:22 -04:00
|
|
|
if (!(flags & EVBUF_FS_DISABLE_LOCKING)) {
|
|
|
|
EVTHREAD_ALLOC_LOCK(seg->lock, 0);
|
|
|
|
}
|
|
|
|
return seg;
|
|
|
|
err:
|
|
|
|
mm_free(seg);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* DOCDOC */
|
|
|
|
/* Requires lock */
|
|
|
|
static int
|
|
|
|
evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg)
|
|
|
|
{
|
|
|
|
const unsigned flags = seg->flags;
|
|
|
|
const int fd = seg->fd;
|
|
|
|
const ev_off_t length = seg->length;
|
|
|
|
const ev_off_t offset = seg->file_offset;
|
|
|
|
|
|
|
|
if (seg->contents)
|
|
|
|
return 0; /* already materialized */
|
|
|
|
|
2012-02-29 15:07:31 -05:00
|
|
|
#if defined(EVENT__HAVE_MMAP)
|
2010-10-21 19:45:49 -04:00
|
|
|
if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
|
2010-10-25 12:10:10 -04:00
|
|
|
off_t offset_rounded = 0, offset_leftover = 0;
|
|
|
|
void *mapped;
|
|
|
|
if (offset) {
|
|
|
|
/* mmap implementations don't generally like us
|
|
|
|
* to have an offset that isn't a round */
|
|
|
|
#ifdef SC_PAGE_SIZE
|
|
|
|
long page_size = sysconf(SC_PAGE_SIZE);
|
|
|
|
#elif defined(_SC_PAGE_SIZE)
|
|
|
|
long page_size = sysconf(_SC_PAGE_SIZE);
|
|
|
|
#else
|
|
|
|
long page_size = 1;
|
|
|
|
#endif
|
|
|
|
if (page_size == -1)
|
|
|
|
goto err;
|
|
|
|
offset_leftover = offset % page_size;
|
|
|
|
offset_rounded = offset - offset_leftover;
|
|
|
|
}
|
|
|
|
mapped = mmap(NULL, length + offset_leftover,
|
|
|
|
PROT_READ,
|
2009-01-27 06:18:45 +00:00
|
|
|
#ifdef MAP_NOCACHE
|
2010-10-21 19:45:49 -04:00
|
|
|
MAP_NOCACHE | /* ??? */
|
2009-01-27 06:18:45 +00:00
|
|
|
#endif
|
2009-07-23 14:48:24 +00:00
|
|
|
#ifdef MAP_FILE
|
|
|
|
MAP_FILE |
|
|
|
|
#endif
|
|
|
|
MAP_PRIVATE,
|
2010-10-25 12:10:10 -04:00
|
|
|
fd, offset_rounded);
|
2009-01-27 06:05:38 +00:00
|
|
|
if (mapped == MAP_FAILED) {
|
2009-01-27 06:21:12 +00:00
|
|
|
event_warn("%s: mmap(%d, %d, %zu) failed",
|
2009-01-27 13:37:09 +00:00
|
|
|
__func__, fd, 0, (size_t)(offset + length));
|
2010-10-21 19:45:49 -04:00
|
|
|
} else {
|
|
|
|
seg->mapping = mapped;
|
2010-10-25 12:10:10 -04:00
|
|
|
seg->contents = (char*)mapped+offset_leftover;
|
2011-10-06 18:02:22 -04:00
|
|
|
seg->mmap_offset = 0;
|
|
|
|
seg->is_mapping = 1;
|
2010-10-21 19:45:49 -04:00
|
|
|
goto done;
|
2009-01-27 06:05:38 +00:00
|
|
|
}
|
2010-10-21 19:45:49 -04:00
|
|
|
}
|
|
|
|
#endif
|
2011-05-25 19:50:56 -04:00
|
|
|
#ifdef _WIN32
|
2010-10-25 12:29:39 -04:00
|
|
|
if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
|
|
|
|
long h = (long)_get_osfhandle(fd);
|
|
|
|
HANDLE m;
|
|
|
|
ev_uint64_t total_size = length+offset;
|
|
|
|
if (h == (long)INVALID_HANDLE_VALUE)
|
2011-12-02 01:48:14 -05:00
|
|
|
goto err;
|
2010-10-25 12:29:39 -04:00
|
|
|
m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY,
|
|
|
|
(total_size >> 32), total_size & 0xfffffffful,
|
|
|
|
NULL);
|
|
|
|
if (m != INVALID_HANDLE_VALUE) { /* Does h leak? */
|
|
|
|
seg->mapping_handle = m;
|
2011-10-06 18:02:22 -04:00
|
|
|
seg->mmap_offset = offset;
|
|
|
|
seg->is_mapping = 1;
|
2010-10-25 12:29:39 -04:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2010-10-21 19:45:49 -04:00
|
|
|
{
|
|
|
|
ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos;
|
|
|
|
ev_off_t read_so_far = 0;
|
|
|
|
char *mem;
|
|
|
|
int e;
|
|
|
|
ev_ssize_t n = 0;
|
|
|
|
if (!(mem = mm_malloc(length)))
|
|
|
|
goto err;
|
|
|
|
if (start_pos < 0) {
|
|
|
|
mm_free(mem);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
if (lseek(fd, offset, SEEK_SET) < 0) {
|
|
|
|
mm_free(mem);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
while (read_so_far < length) {
|
|
|
|
n = read(fd, mem+read_so_far, length-read_so_far);
|
|
|
|
if (n <= 0)
|
|
|
|
break;
|
|
|
|
read_so_far += n;
|
2009-01-27 06:05:38 +00:00
|
|
|
}
|
|
|
|
|
2010-10-21 19:45:49 -04:00
|
|
|
e = errno;
|
|
|
|
pos = lseek(fd, start_pos, SEEK_SET);
|
|
|
|
if (n < 0 || (n == 0 && length > read_so_far)) {
|
|
|
|
mm_free(mem);
|
|
|
|
errno = e;
|
|
|
|
goto err;
|
|
|
|
} else if (pos < 0) {
|
|
|
|
mm_free(mem);
|
|
|
|
goto err;
|
|
|
|
}
|
2009-01-27 21:10:31 +00:00
|
|
|
|
2010-10-21 19:45:49 -04:00
|
|
|
seg->contents = mem;
|
|
|
|
}
|
2009-01-27 21:10:31 +00:00
|
|
|
|
2010-10-21 19:45:49 -04:00
|
|
|
done:
|
2011-10-06 18:02:22 -04:00
|
|
|
return 0;
|
2010-10-21 19:45:49 -04:00
|
|
|
err:
|
2011-10-06 18:02:22 -04:00
|
|
|
return -1;
|
2010-10-21 19:45:49 -04:00
|
|
|
}
|
2009-04-03 14:27:03 +00:00
|
|
|
|
2010-10-21 19:45:49 -04:00
|
|
|
void
|
|
|
|
evbuffer_file_segment_free(struct evbuffer_file_segment *seg)
|
|
|
|
{
|
|
|
|
int refcnt;
|
|
|
|
EVLOCK_LOCK(seg->lock, 0);
|
|
|
|
refcnt = --seg->refcnt;
|
|
|
|
EVLOCK_UNLOCK(seg->lock, 0);
|
|
|
|
if (refcnt > 0)
|
|
|
|
return;
|
|
|
|
EVUTIL_ASSERT(refcnt == 0);
|
2009-01-27 06:05:38 +00:00
|
|
|
|
2011-10-06 18:02:22 -04:00
|
|
|
if (seg->is_mapping) {
|
2011-05-25 19:50:56 -04:00
|
|
|
#ifdef _WIN32
|
2010-10-25 12:29:39 -04:00
|
|
|
CloseHandle(seg->mapping_handle);
|
2012-02-29 15:07:31 -05:00
|
|
|
#elif defined (EVENT__HAVE_MMAP)
|
2010-10-21 19:45:49 -04:00
|
|
|
if (munmap(seg->mapping, seg->length) == -1)
|
|
|
|
event_warn("%s: munmap failed", __func__);
|
2010-10-25 12:29:39 -04:00
|
|
|
#endif
|
2011-10-06 18:02:22 -04:00
|
|
|
} else if (seg->contents) {
|
2010-10-21 19:45:49 -04:00
|
|
|
mm_free(seg->contents);
|
|
|
|
}
|
2009-01-27 06:05:38 +00:00
|
|
|
|
2010-10-21 19:45:49 -04:00
|
|
|
if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) {
|
|
|
|
close(seg->fd);
|
|
|
|
}
|
2009-01-29 03:22:47 +00:00
|
|
|
|
2010-10-21 19:45:49 -04:00
|
|
|
EVTHREAD_FREE_LOCK(seg->lock, 0);
|
|
|
|
mm_free(seg);
|
|
|
|
}
|
2009-01-27 06:05:38 +00:00
|
|
|
|
2010-10-21 19:45:49 -04:00
|
|
|
int
|
|
|
|
evbuffer_add_file_segment(struct evbuffer *buf,
|
|
|
|
struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length)
|
|
|
|
{
|
|
|
|
struct evbuffer_chain *chain;
|
|
|
|
struct evbuffer_chain_file_segment *extra;
|
2011-10-06 18:02:22 -04:00
|
|
|
int can_use_sendfile = 0;
|
2009-01-27 06:05:38 +00:00
|
|
|
|
2011-10-06 18:02:22 -04:00
|
|
|
EVBUFFER_LOCK(buf);
|
2010-10-21 19:45:49 -04:00
|
|
|
EVLOCK_LOCK(seg->lock, 0);
|
2011-10-06 18:02:22 -04:00
|
|
|
if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) {
|
|
|
|
can_use_sendfile = 1;
|
|
|
|
} else {
|
|
|
|
if (!seg->contents) {
|
|
|
|
if (evbuffer_file_segment_materialize(seg)<0) {
|
|
|
|
EVLOCK_UNLOCK(seg->lock, 0);
|
|
|
|
EVBUFFER_UNLOCK(buf);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2010-10-21 19:45:49 -04:00
|
|
|
++seg->refcnt;
|
|
|
|
EVLOCK_UNLOCK(seg->lock, 0);
|
2009-01-27 21:10:31 +00:00
|
|
|
|
2010-10-21 19:45:49 -04:00
|
|
|
if (buf->freeze_end)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
if (length < 0) {
|
|
|
|
if (offset > seg->length)
|
|
|
|
goto err;
|
|
|
|
length = seg->length - offset;
|
2009-01-27 06:05:38 +00:00
|
|
|
}
|
|
|
|
|
2010-10-21 19:45:49 -04:00
|
|
|
/* Can we actually add this? */
|
|
|
|
if (offset+length > seg->length)
|
|
|
|
goto err;
|
2009-01-27 06:05:38 +00:00
|
|
|
|
2010-10-21 19:45:49 -04:00
|
|
|
chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment));
|
|
|
|
if (!chain)
|
|
|
|
goto err;
|
|
|
|
extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain);
|
|
|
|
|
|
|
|
chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT;
|
2011-10-06 18:02:22 -04:00
|
|
|
if (can_use_sendfile && seg->can_sendfile) {
|
2010-10-21 19:45:49 -04:00
|
|
|
chain->flags |= EVBUFFER_SENDFILE;
|
2011-10-06 18:02:22 -04:00
|
|
|
chain->misalign = seg->file_offset + offset;
|
2010-10-21 19:45:49 -04:00
|
|
|
chain->off = length;
|
|
|
|
chain->buffer_len = chain->misalign + length;
|
2011-10-06 18:02:22 -04:00
|
|
|
} else if (seg->is_mapping) {
|
2011-05-25 19:50:56 -04:00
|
|
|
#ifdef _WIN32
|
2011-10-06 18:02:22 -04:00
|
|
|
ev_uint64_t total_offset = seg->mmap_offset+offset;
|
2010-10-25 12:29:39 -04:00
|
|
|
ev_uint64_t offset_rounded=0, offset_remaining=0;
|
|
|
|
LPVOID data;
|
|
|
|
if (total_offset) {
|
|
|
|
SYSTEM_INFO si;
|
|
|
|
memset(&si, 0, sizeof(si)); /* cargo cult */
|
|
|
|
GetSystemInfo(&si);
|
|
|
|
offset_remaining = total_offset % si.dwAllocationGranularity;
|
|
|
|
offset_rounded = total_offset - offset_remaining;
|
|
|
|
}
|
|
|
|
data = MapViewOfFile(
|
|
|
|
seg->mapping_handle,
|
|
|
|
FILE_MAP_READ,
|
|
|
|
offset_rounded >> 32,
|
|
|
|
offset_rounded & 0xfffffffful,
|
2011-06-02 17:07:40 -04:00
|
|
|
length + offset_remaining);
|
2010-10-25 12:29:39 -04:00
|
|
|
if (data == NULL) {
|
|
|
|
mm_free(chain);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
chain->buffer = (unsigned char*) data;
|
|
|
|
chain->buffer_len = length+offset_remaining;
|
|
|
|
chain->misalign = offset_remaining;
|
|
|
|
chain->off = length;
|
|
|
|
#else
|
2010-10-21 19:45:49 -04:00
|
|
|
chain->buffer = (unsigned char*)(seg->contents + offset);
|
|
|
|
chain->buffer_len = length;
|
|
|
|
chain->off = length;
|
2010-10-25 12:29:39 -04:00
|
|
|
#endif
|
2010-10-21 19:45:49 -04:00
|
|
|
} else {
|
|
|
|
chain->buffer = (unsigned char*)(seg->contents + offset);
|
|
|
|
chain->buffer_len = length;
|
|
|
|
chain->off = length;
|
|
|
|
}
|
|
|
|
|
|
|
|
extra->segment = seg;
|
|
|
|
buf->n_add_for_cb += length;
|
|
|
|
evbuffer_chain_insert(buf, chain);
|
|
|
|
|
2012-02-29 15:07:33 -05:00
|
|
|
evbuffer_invoke_callbacks_(buf);
|
2010-10-21 19:45:49 -04:00
|
|
|
|
|
|
|
EVBUFFER_UNLOCK(buf);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
err:
|
|
|
|
EVBUFFER_UNLOCK(buf);
|
|
|
|
evbuffer_file_segment_free(seg);
|
|
|
|
return -1;
|
2009-01-27 06:05:38 +00:00
|
|
|
}
|
|
|
|
|
2010-10-21 19:45:49 -04:00
|
|
|
int
|
|
|
|
evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length)
|
|
|
|
{
|
|
|
|
struct evbuffer_file_segment *seg;
|
|
|
|
unsigned flags = EVBUF_FS_CLOSE_ON_FREE;
|
|
|
|
int r;
|
|
|
|
|
|
|
|
seg = evbuffer_file_segment_new(fd, offset, length, flags);
|
|
|
|
if (!seg)
|
|
|
|
return -1;
|
|
|
|
r = evbuffer_add_file_segment(buf, seg, 0, length);
|
|
|
|
evbuffer_file_segment_free(seg);
|
|
|
|
return r;
|
|
|
|
}
|
2009-01-27 06:05:38 +00:00
|
|
|
|
2008-02-28 18:36:03 +00:00
|
|
|
void
|
2009-01-23 01:11:13 +00:00
|
|
|
evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
|
|
|
|
{
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-04-05 02:44:17 +00:00
|
|
|
|
2010-04-09 20:04:24 -04:00
|
|
|
if (!LIST_EMPTY(&buffer->callbacks))
|
2009-01-23 01:11:13 +00:00
|
|
|
evbuffer_remove_all_callbacks(buffer);
|
|
|
|
|
2009-04-03 14:27:03 +00:00
|
|
|
if (cb) {
|
2010-02-18 17:41:15 -05:00
|
|
|
struct evbuffer_cb_entry *ent =
|
|
|
|
evbuffer_add_cb(buffer, NULL, cbarg);
|
|
|
|
ent->cb.cb_obsolete = cb;
|
|
|
|
ent->flags |= EVBUFFER_CB_OBSOLETE;
|
|
|
|
}
|
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2009-01-23 01:11:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
struct evbuffer_cb_entry *
|
2009-04-03 14:27:03 +00:00
|
|
|
evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
|
2009-01-23 01:11:13 +00:00
|
|
|
{
|
|
|
|
struct evbuffer_cb_entry *e;
|
2009-02-01 05:26:47 +00:00
|
|
|
if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
|
2009-01-23 01:11:13 +00:00
|
|
|
return NULL;
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-04-03 14:27:03 +00:00
|
|
|
e->cb.cb_func = cb;
|
2009-01-23 01:11:13 +00:00
|
|
|
e->cbarg = cbarg;
|
2009-01-23 18:04:34 +00:00
|
|
|
e->flags = EVBUFFER_CB_ENABLED;
|
2010-04-09 20:04:24 -04:00
|
|
|
LIST_INSERT_HEAD(&buffer->callbacks, e, next);
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2009-01-23 01:11:13 +00:00
|
|
|
return e;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
evbuffer_remove_cb_entry(struct evbuffer *buffer,
|
|
|
|
struct evbuffer_cb_entry *ent)
|
|
|
|
{
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2010-04-09 20:04:24 -04:00
|
|
|
LIST_REMOVE(ent, next);
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2009-01-23 01:11:13 +00:00
|
|
|
mm_free(ent);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2009-04-03 14:27:03 +00:00
|
|
|
evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
|
2004-04-04 02:20:21 +00:00
|
|
|
{
|
2009-01-23 01:11:13 +00:00
|
|
|
struct evbuffer_cb_entry *cbent;
|
2010-02-18 17:41:15 -05:00
|
|
|
int result = -1;
|
|
|
|
EVBUFFER_LOCK(buffer);
|
2010-04-09 20:04:24 -04:00
|
|
|
LIST_FOREACH(cbent, &buffer->callbacks, next) {
|
2009-04-03 14:27:03 +00:00
|
|
|
if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
|
2009-04-05 02:44:17 +00:00
|
|
|
result = evbuffer_remove_cb_entry(buffer, cbent);
|
2010-02-18 17:41:15 -05:00
|
|
|
goto done;
|
2009-01-23 01:11:13 +00:00
|
|
|
}
|
|
|
|
}
|
2009-04-05 02:44:17 +00:00
|
|
|
done:
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2009-04-05 02:44:17 +00:00
|
|
|
return result;
|
2004-04-04 02:20:21 +00:00
|
|
|
}
|
2009-01-23 18:04:34 +00:00
|
|
|
|
|
|
|
int
|
|
|
|
evbuffer_cb_set_flags(struct evbuffer *buffer,
|
2009-02-01 01:43:58 +00:00
|
|
|
struct evbuffer_cb_entry *cb, ev_uint32_t flags)
|
2009-01-23 18:04:34 +00:00
|
|
|
{
|
2009-05-15 20:23:59 +00:00
|
|
|
/* the user isn't allowed to mess with these. */
|
|
|
|
flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-05-15 20:23:59 +00:00
|
|
|
cb->flags |= flags;
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2009-05-15 20:23:59 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
evbuffer_cb_clear_flags(struct evbuffer *buffer,
|
|
|
|
struct evbuffer_cb_entry *cb, ev_uint32_t flags)
|
|
|
|
{
|
|
|
|
/* the user isn't allowed to mess with these. */
|
|
|
|
flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-05-15 20:23:59 +00:00
|
|
|
cb->flags &= ~flags;
|
2010-02-18 17:41:15 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2009-01-23 18:04:34 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2009-02-01 01:43:58 +00:00
|
|
|
|
2009-04-08 03:04:39 +00:00
|
|
|
int
|
|
|
|
evbuffer_freeze(struct evbuffer *buffer, int start)
|
|
|
|
{
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-04-08 03:04:39 +00:00
|
|
|
if (start)
|
|
|
|
buffer->freeze_start = 1;
|
|
|
|
else
|
|
|
|
buffer->freeze_end = 1;
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2009-04-08 03:04:39 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
evbuffer_unfreeze(struct evbuffer *buffer, int start)
|
|
|
|
{
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_LOCK(buffer);
|
2009-04-08 03:04:39 +00:00
|
|
|
if (start)
|
|
|
|
buffer->freeze_start = 0;
|
|
|
|
else
|
|
|
|
buffer->freeze_end = 0;
|
2009-11-27 16:44:47 -05:00
|
|
|
EVBUFFER_UNLOCK(buffer);
|
2009-04-08 03:04:39 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-04-03 14:27:03 +00:00
|
|
|
#if 0
/* Compiled-out suspend/resume helpers for obsolete-style callbacks. */

/* Mark 'cb' suspended, remembering the buffer length at suspend time so a
 * later unsuspend can report the correct "old size". */
void
evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
		cb->size_before_suspend = evbuffer_get_length(buffer);
		cb->flags |= EVBUFFER_CB_SUSPENDED;
	}
}

/* Clear the suspended state on 'cb'; if a call was requested while it was
 * suspended (and the callback is enabled), invoke it now with the size it
 * had at suspend time as the "old" length. */
void
evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
		unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
		size_t sz = cb->size_before_suspend;
		cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
		    EVBUFFER_CB_CALL_ON_UNSUSPEND);
		cb->size_before_suspend = 0;
		if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
			cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
		}
	}
}
#endif
|
2010-04-09 15:28:26 -04:00
|
|
|
|