Fix all identifiers with names beginning with underscore.

These are reserved in C: identifiers that begin with an underscore are
reserved at file scope, and those that begin with two underscores or an
underscore plus a capital letter are reserved everywhere (C99 §7.1.3).
We'd been erroneously using them to indicate internal use.

Instead, we now use a trailing underscore whenever we'd been using a leading
underscore.
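
For illustration (hypothetical declarations, not names from this tree):

 static int _parse_chunk(struct evbuffer *buf);  /* old style: reserved name */
 static int parse_chunk_(struct evbuffer *buf);  /* new style: safe */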

This is an automatic conversion.  The script that performed it was
generated by running the following script over the output of

 git ls-tree -r --name-only HEAD | grep '\.[ch]$' | \
   xargs ctags --c-kinds=defglmpstuvx -o - | grep '^_' | \
   cut -f 1 | sort | uniq

(GNU ctags was required.)
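
Each line of that output is one offending identifier; the first few
(they reappear in the generated script below) were:

 _ARC4_LOCK
 _ARC4_UNLOCK
 _bev_group_random_element
 _bev_group_refill_callback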

=====
#!/usr/bin/perl -w -n

use strict;

# Print the header of the script we are generating: it will rewrite
# its input files in place (-i), filtering every line (-p).
BEGIN { print "#!/usr/bin/perl -w -i -p\n\n"; }

chomp;

# Skip identifiers that legitimately live in the reserved namespace:
# compiler keywords, feature-test macros, and system-header guards.
next if (/^__func__/ or
         /^_FILE_OFFSET_BITS/ or
         /^_FORTIFY_SOURCE/ or
         /^_GNU_SOURCE/ or
         /^_WIN32/ or
         /^_DARWIN_UNLIMITED/ or
         /^_LARGEFILE64_SOURCE/ or
         /^_LFS64_LARGEFILE/ or
         /^__cdecl/ or
         /^__attribute__/ or
         /^_SYS_TREE_H_/);

my $ident = $_;

my $better = $ident;
$better =~ s/^_//;

# The _EVENT_LOG_* macros only lose their leading underscore; every
# other identifier also gains a trailing one.
if ($ident !~ /EVENT_LOG_/) {
    $better = "${better}_";
}

# Emit one whole-token substitution per identifier.
print "s/(?<![A-Za-z0-9_])$ident(?![A-Za-z0-9_])/$better/g;\n";

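The lookbehind/lookahead in the emitted substitutions act as identifier
boundaries, so short names such as _ev match only as whole tokens.  A
quick sketch of the generated `_ev' rule at work:

 my $s = "f(_ev, _evbuffer_chain_pin)";
 $s =~ s/(?<![A-Za-z0-9_])_ev(?![A-Za-z0-9_])/ev_/g;
 print $s;   # prints "f(ev_, _evbuffer_chain_pin)"
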
=== And then running the generated script below over all
=== the .c and .h files again
#!/usr/bin/perl -w -i -p

s/(?<![A-Za-z0-9_])_ARC4_LOCK(?![A-Za-z0-9_])/ARC4_LOCK_/g;
s/(?<![A-Za-z0-9_])_ARC4_UNLOCK(?![A-Za-z0-9_])/ARC4_UNLOCK_/g;
s/(?<![A-Za-z0-9_])_bev_group_random_element(?![A-Za-z0-9_])/bev_group_random_element_/g;
s/(?<![A-Za-z0-9_])_bev_group_refill_callback(?![A-Za-z0-9_])/bev_group_refill_callback_/g;
s/(?<![A-Za-z0-9_])_bev_group_suspend_reading(?![A-Za-z0-9_])/bev_group_suspend_reading_/g;
s/(?<![A-Za-z0-9_])_bev_group_suspend_writing(?![A-Za-z0-9_])/bev_group_suspend_writing_/g;
s/(?<![A-Za-z0-9_])_bev_group_unsuspend_reading(?![A-Za-z0-9_])/bev_group_unsuspend_reading_/g;
s/(?<![A-Za-z0-9_])_bev_group_unsuspend_writing(?![A-Za-z0-9_])/bev_group_unsuspend_writing_/g;
s/(?<![A-Za-z0-9_])_bev_refill_callback(?![A-Za-z0-9_])/bev_refill_callback_/g;
s/(?<![A-Za-z0-9_])_bufferevent_add_event(?![A-Za-z0-9_])/bufferevent_add_event_/g;
s/(?<![A-Za-z0-9_])_bufferevent_cancel_all(?![A-Za-z0-9_])/bufferevent_cancel_all_/g;
s/(?<![A-Za-z0-9_])_bufferevent_decref_and_unlock(?![A-Za-z0-9_])/bufferevent_decref_and_unlock_/g;
s/(?<![A-Za-z0-9_])_bufferevent_decrement_read_buckets(?![A-Za-z0-9_])/bufferevent_decrement_read_buckets_/g;
s/(?<![A-Za-z0-9_])_bufferevent_decrement_write_buckets(?![A-Za-z0-9_])/bufferevent_decrement_write_buckets_/g;
s/(?<![A-Za-z0-9_])_bufferevent_del_generic_timeout_cbs(?![A-Za-z0-9_])/bufferevent_del_generic_timeout_cbs_/g;
s/(?<![A-Za-z0-9_])_bufferevent_generic_adj_timeouts(?![A-Za-z0-9_])/bufferevent_generic_adj_timeouts_/g;
s/(?<![A-Za-z0-9_])_bufferevent_get_read_max(?![A-Za-z0-9_])/bufferevent_get_read_max_/g;
s/(?<![A-Za-z0-9_])_bufferevent_get_rlim_max(?![A-Za-z0-9_])/bufferevent_get_rlim_max_/g;
s/(?<![A-Za-z0-9_])_bufferevent_get_write_max(?![A-Za-z0-9_])/bufferevent_get_write_max_/g;
s/(?<![A-Za-z0-9_])_bufferevent_incref_and_lock(?![A-Za-z0-9_])/bufferevent_incref_and_lock_/g;
s/(?<![A-Za-z0-9_])_bufferevent_init_generic_timeout_cbs(?![A-Za-z0-9_])/bufferevent_init_generic_timeout_cbs_/g;
s/(?<![A-Za-z0-9_])_bufferevent_ratelim_init(?![A-Za-z0-9_])/bufferevent_ratelim_init_/g;
s/(?<![A-Za-z0-9_])_bufferevent_run_eventcb(?![A-Za-z0-9_])/bufferevent_run_eventcb_/g;
s/(?<![A-Za-z0-9_])_bufferevent_run_readcb(?![A-Za-z0-9_])/bufferevent_run_readcb_/g;
s/(?<![A-Za-z0-9_])_bufferevent_run_writecb(?![A-Za-z0-9_])/bufferevent_run_writecb_/g;
s/(?<![A-Za-z0-9_])_ev(?![A-Za-z0-9_])/ev_/g;
s/(?<![A-Za-z0-9_])_evbuffer_chain_pin(?![A-Za-z0-9_])/evbuffer_chain_pin_/g;
s/(?<![A-Za-z0-9_])_evbuffer_chain_unpin(?![A-Za-z0-9_])/evbuffer_chain_unpin_/g;
s/(?<![A-Za-z0-9_])_evbuffer_decref_and_unlock(?![A-Za-z0-9_])/evbuffer_decref_and_unlock_/g;
s/(?<![A-Za-z0-9_])_evbuffer_expand_fast(?![A-Za-z0-9_])/evbuffer_expand_fast_/g;
s/(?<![A-Za-z0-9_])_evbuffer_incref(?![A-Za-z0-9_])/evbuffer_incref_/g;
s/(?<![A-Za-z0-9_])_evbuffer_incref_and_lock(?![A-Za-z0-9_])/evbuffer_incref_and_lock_/g;
s/(?<![A-Za-z0-9_])_EVBUFFER_IOVEC_IS_NATIVE(?![A-Za-z0-9_])/EVBUFFER_IOVEC_IS_NATIVE_/g;
s/(?<![A-Za-z0-9_])_evbuffer_overlapped_get_fd(?![A-Za-z0-9_])/evbuffer_overlapped_get_fd_/g;
s/(?<![A-Za-z0-9_])_evbuffer_overlapped_set_fd(?![A-Za-z0-9_])/evbuffer_overlapped_set_fd_/g;
s/(?<![A-Za-z0-9_])_evbuffer_read_setup_vecs(?![A-Za-z0-9_])/evbuffer_read_setup_vecs_/g;
s/(?<![A-Za-z0-9_])_evbuffer_validate(?![A-Za-z0-9_])/evbuffer_validate_/g;
s/(?<![A-Za-z0-9_])_evdns_log(?![A-Za-z0-9_])/evdns_log_/g;
s/(?<![A-Za-z0-9_])_evdns_nameserver_add_impl(?![A-Za-z0-9_])/evdns_nameserver_add_impl_/g;
s/(?<![A-Za-z0-9_])_EVENT_CONFIG_H_(?![A-Za-z0-9_])/EVENT_CONFIG_H__/g;
s/(?<![A-Za-z0-9_])_event_debug_assert_is_setup(?![A-Za-z0-9_])/event_debug_assert_is_setup_/g;
s/(?<![A-Za-z0-9_])_event_debug_assert_not_added(?![A-Za-z0-9_])/event_debug_assert_not_added_/g;
s/(?<![A-Za-z0-9_])_event_debug_get_logging_mask(?![A-Za-z0-9_])/event_debug_get_logging_mask_/g;
s/(?<![A-Za-z0-9_])_event_debug_logging_mask(?![A-Za-z0-9_])/event_debug_logging_mask_/g;
s/(?<![A-Za-z0-9_])_event_debug_map_lock(?![A-Za-z0-9_])/event_debug_map_lock_/g;
s/(?<![A-Za-z0-9_])_event_debug_mode_on(?![A-Za-z0-9_])/event_debug_mode_on_/g;
s/(?<![A-Za-z0-9_])_event_debug_note_add(?![A-Za-z0-9_])/event_debug_note_add_/g;
s/(?<![A-Za-z0-9_])_event_debug_note_del(?![A-Za-z0-9_])/event_debug_note_del_/g;
s/(?<![A-Za-z0-9_])_event_debug_note_setup(?![A-Za-z0-9_])/event_debug_note_setup_/g;
s/(?<![A-Za-z0-9_])_event_debug_note_teardown(?![A-Za-z0-9_])/event_debug_note_teardown_/g;
s/(?<![A-Za-z0-9_])_event_debugx(?![A-Za-z0-9_])/event_debugx_/g;
s/(?<![A-Za-z0-9_])_EVENT_DEFINED_LISTENTRY(?![A-Za-z0-9_])/EVENT_DEFINED_LISTENTRY_/g;
s/(?<![A-Za-z0-9_])_EVENT_DEFINED_TQENTRY(?![A-Za-z0-9_])/EVENT_DEFINED_TQENTRY_/g;
s/(?<![A-Za-z0-9_])_EVENT_DEFINED_TQHEAD(?![A-Za-z0-9_])/EVENT_DEFINED_TQHEAD_/g;
s/(?<![A-Za-z0-9_])_EVENT_DNS_USE_FTIME_FOR_ID(?![A-Za-z0-9_])/EVENT_DNS_USE_FTIME_FOR_ID_/g;
s/(?<![A-Za-z0-9_])_EVENT_ERR_ABORT(?![A-Za-z0-9_])/EVENT_ERR_ABORT_/g;
s/(?<![A-Za-z0-9_])_EVENT_EVCONFIG__PRIVATE_H(?![A-Za-z0-9_])/EVENT_EVCONFIG__PRIVATE_H_/g;
s/(?<![A-Za-z0-9_])_event_iocp_port_unlock_and_free(?![A-Za-z0-9_])/event_iocp_port_unlock_and_free_/g;
s/(?<![A-Za-z0-9_])_EVENT_LOG_DEBUG(?![A-Za-z0-9_])/EVENT_LOG_DEBUG/g;
s/(?<![A-Za-z0-9_])_EVENT_LOG_ERR(?![A-Za-z0-9_])/EVENT_LOG_ERR/g;
s/(?<![A-Za-z0-9_])_EVENT_LOG_MSG(?![A-Za-z0-9_])/EVENT_LOG_MSG/g;
s/(?<![A-Za-z0-9_])_EVENT_LOG_WARN(?![A-Za-z0-9_])/EVENT_LOG_WARN/g;
s/(?<![A-Za-z0-9_])_event_strlcpy(?![A-Za-z0-9_])/event_strlcpy_/g;
s/(?<![A-Za-z0-9_])_EVHTTP_REQ_UNKNOWN(?![A-Za-z0-9_])/EVHTTP_REQ_UNKNOWN_/g;
s/(?<![A-Za-z0-9_])_EVLOCK_SORTLOCKS(?![A-Za-z0-9_])/EVLOCK_SORTLOCKS_/g;
s/(?<![A-Za-z0-9_])_evrpc_hooks(?![A-Za-z0-9_])/evrpc_hooks_/g;
s/(?<![A-Za-z0-9_])_evsig_restore_handler(?![A-Za-z0-9_])/evsig_restore_handler_/g;
s/(?<![A-Za-z0-9_])_evsig_set_handler(?![A-Za-z0-9_])/evsig_set_handler_/g;
s/(?<![A-Za-z0-9_])_evthread_cond_fns(?![A-Za-z0-9_])/evthread_cond_fns_/g;
s/(?<![A-Za-z0-9_])_evthread_debug_get_real_lock(?![A-Za-z0-9_])/evthread_debug_get_real_lock_/g;
s/(?<![A-Za-z0-9_])_evthread_id_fn(?![A-Za-z0-9_])/evthread_id_fn_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_cond_alloc(?![A-Za-z0-9_])/evthreadimpl_cond_alloc_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_cond_free(?![A-Za-z0-9_])/evthreadimpl_cond_free_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_cond_signal(?![A-Za-z0-9_])/evthreadimpl_cond_signal_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_cond_wait(?![A-Za-z0-9_])/evthreadimpl_cond_wait_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_get_id(?![A-Za-z0-9_])/evthreadimpl_get_id_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_is_lock_debugging_enabled(?![A-Za-z0-9_])/evthreadimpl_is_lock_debugging_enabled_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_lock_alloc(?![A-Za-z0-9_])/evthreadimpl_lock_alloc_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_lock_free(?![A-Za-z0-9_])/evthreadimpl_lock_free_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_locking_enabled(?![A-Za-z0-9_])/evthreadimpl_locking_enabled_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_lock_lock(?![A-Za-z0-9_])/evthreadimpl_lock_lock_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_lock_unlock(?![A-Za-z0-9_])/evthreadimpl_lock_unlock_/g;
s/(?<![A-Za-z0-9_])_evthread_is_debug_lock_held(?![A-Za-z0-9_])/evthread_is_debug_lock_held_/g;
s/(?<![A-Za-z0-9_])_evthread_lock_debugging_enabled(?![A-Za-z0-9_])/evthread_lock_debugging_enabled_/g;
s/(?<![A-Za-z0-9_])_evthread_lock_fns(?![A-Za-z0-9_])/evthread_lock_fns_/g;
s/(?<![A-Za-z0-9_])_EVUTIL_NIL_CONDITION(?![A-Za-z0-9_])/EVUTIL_NIL_CONDITION_/g;
s/(?<![A-Za-z0-9_])_EVUTIL_NIL_STMT(?![A-Za-z0-9_])/EVUTIL_NIL_STMT_/g;
s/(?<![A-Za-z0-9_])_evutil_weakrand(?![A-Za-z0-9_])/evutil_weakrand_/g;
s/(?<![A-Za-z0-9_])_http_close_detection(?![A-Za-z0-9_])/http_close_detection_/g;
s/(?<![A-Za-z0-9_])_http_connection_test(?![A-Za-z0-9_])/http_connection_test_/g;
s/(?<![A-Za-z0-9_])_http_incomplete_test(?![A-Za-z0-9_])/http_incomplete_test_/g;
s/(?<![A-Za-z0-9_])_http_stream_in_test(?![A-Za-z0-9_])/http_stream_in_test_/g;
s/(?<![A-Za-z0-9_])_internal(?![A-Za-z0-9_])/internal_/g;
s/(?<![A-Za-z0-9_])_mm_free_fn(?![A-Za-z0-9_])/mm_free_fn_/g;
s/(?<![A-Za-z0-9_])_mm_malloc_fn(?![A-Za-z0-9_])/mm_malloc_fn_/g;
s/(?<![A-Za-z0-9_])_mm_realloc_fn(?![A-Za-z0-9_])/mm_realloc_fn_/g;
s/(?<![A-Za-z0-9_])_original_cond_fns(?![A-Za-z0-9_])/original_cond_fns_/g;
s/(?<![A-Za-z0-9_])_original_lock_fns(?![A-Za-z0-9_])/original_lock_fns_/g;
s/(?<![A-Za-z0-9_])_rpc_hook_ctx(?![A-Za-z0-9_])/rpc_hook_ctx_/g;
s/(?<![A-Za-z0-9_])_SYS_QUEUE_H_(?![A-Za-z0-9_])/SYS_QUEUE_H__/g;
s/(?<![A-Za-z0-9_])_t(?![A-Za-z0-9_])/t_/g;
s/(?<![A-Za-z0-9_])_t32(?![A-Za-z0-9_])/t32_/g;
s/(?<![A-Za-z0-9_])_test_ai_eq(?![A-Za-z0-9_])/test_ai_eq_/g;
s/(?<![A-Za-z0-9_])_URI_ADD(?![A-Za-z0-9_])/URI_ADD_/g;
s/(?<![A-Za-z0-9_])_URI_FREE_STR(?![A-Za-z0-9_])/URI_FREE_STR_/g;
s/(?<![A-Za-z0-9_])_URI_SET_STR(?![A-Za-z0-9_])/URI_SET_STR_/g;
s/(?<![A-Za-z0-9_])_warn_helper(?![A-Za-z0-9_])/warn_helper_/g;
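
A few of the resulting rules deviate from the simple pattern: the
_EVENT_LOG_* macros only lose their leading underscore (the generator
special-cases them), and identifiers that already ended in an
underscore (_EVENT_CONFIG_H_, _SYS_QUEUE_H_) come out with a doubled
one.

Invocation sketch (the name rewrite.pl is ours, for illustration; Perl
honors the -w -i -p switches on the generated shebang line when run
this way):

 git ls-tree -r --name-only HEAD | grep '\.[ch]$' | xargs perl rewrite.pl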
commit cb9da0bf38 (parent 639383a454)
Author: Nick Mathewson
Date:   2012-02-29 15:07:32 -05:00

45 changed files with 708 additions and 708 deletions


@ -1,5 +1,5 @@
#if !defined(_EVENT_EVCONFIG__PRIVATE_H) && !defined(__MINGW32__)
#define _EVENT_EVCONFIG__PRIVATE_H
#if !defined(EVENT_EVCONFIG__PRIVATE_H_) && !defined(__MINGW32__)
#define EVENT_EVCONFIG__PRIVATE_H_
/* Nothing to see here. Move along. */


@ -7,8 +7,8 @@
*
* Do not rely on macros in this file existing in later versions.
*/
#ifndef _EVENT_CONFIG_H_
#define _EVENT_CONFIG_H_
#ifndef EVENT_CONFIG_H__
#define EVENT_CONFIG_H__
/* config.h. Generated by configure. */
/* config.h.in. Generated from configure.in by autoheader. */
@ -23,7 +23,7 @@
/* Define is no secure id variant is available */
/* #define _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID 1 */
#define _EVENT_DNS_USE_FTIME_FOR_ID 1
#define EVENT_DNS_USE_FTIME_FOR_ID_ 1
/* Define to 1 if you have the <arpa/inet.h> header file. */
/* #undef EVENT__HAVE_ARPA_INET_H */


@ -438,9 +438,9 @@ ARC4RANDOM_EXPORT int
arc4random_stir(void)
{
int val;
_ARC4_LOCK();
ARC4_LOCK_();
val = arc4_stir();
_ARC4_UNLOCK();
ARC4_UNLOCK_();
return val;
}
#endif
@ -450,7 +450,7 @@ ARC4RANDOM_EXPORT void
arc4random_addrandom(const unsigned char *dat, int datlen)
{
int j;
_ARC4_LOCK();
ARC4_LOCK_();
if (!rs_initialized)
arc4_stir();
for (j = 0; j < datlen; j += 256) {
@ -460,7 +460,7 @@ arc4random_addrandom(const unsigned char *dat, int datlen)
* crazy like passing us all the files in /var/log. */
arc4_addrandom(dat + j, datlen - j);
}
_ARC4_UNLOCK();
ARC4_UNLOCK_();
}
#endif
@ -469,11 +469,11 @@ ARC4RANDOM_EXPORT ARC4RANDOM_UINT32
arc4random(void)
{
ARC4RANDOM_UINT32 val;
_ARC4_LOCK();
ARC4_LOCK_();
arc4_count -= 4;
arc4_stir_if_needed();
val = arc4_getword();
_ARC4_UNLOCK();
ARC4_UNLOCK_();
return val;
}
#endif
@ -482,14 +482,14 @@ ARC4RANDOM_EXPORT void
arc4random_buf(void *_buf, size_t n)
{
unsigned char *buf = _buf;
_ARC4_LOCK();
ARC4_LOCK_();
arc4_stir_if_needed();
while (n--) {
if (--arc4_count <= 0)
arc4_stir();
buf[n] = arc4_getbyte();
}
_ARC4_UNLOCK();
ARC4_UNLOCK_();
}
#ifndef ARC4RANDOM_NOUNIFORM

buffer.c

@ -135,8 +135,8 @@
/* evbuffer_ptr support */
#define PTR_NOT_FOUND(ptr) do { \
(ptr)->pos = -1; \
(ptr)->_internal.chain = NULL; \
(ptr)->_internal.pos_in_chain = 0; \
(ptr)->internal_.chain = NULL; \
(ptr)->internal_.pos_in_chain = 0; \
} while (0)
static void evbuffer_chain_align(struct evbuffer_chain *chain);
@ -237,7 +237,7 @@ evbuffer_chain_free(struct evbuffer_chain *chain)
EVUTIL_ASSERT(info->parent != NULL);
EVBUFFER_LOCK(info->source);
evbuffer_chain_free(info->parent);
_evbuffer_decref_and_unlock(info->source);
evbuffer_decref_and_unlock_(info->source);
}
mm_free(chain);
@ -328,14 +328,14 @@ evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
}
void
_evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
{
EVUTIL_ASSERT((chain->flags & flag) == 0);
chain->flags |= flag;
}
void
_evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
{
EVUTIL_ASSERT((chain->flags & flag) != 0);
chain->flags &= ~flag;
@ -384,7 +384,7 @@ evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
}
void
_evbuffer_incref(struct evbuffer *buf)
evbuffer_incref_(struct evbuffer *buf)
{
EVBUFFER_LOCK(buf);
++buf->refcnt;
@ -392,7 +392,7 @@ _evbuffer_incref(struct evbuffer *buf)
}
void
_evbuffer_incref_and_lock(struct evbuffer *buf)
evbuffer_incref_and_lock_(struct evbuffer *buf)
{
EVBUFFER_LOCK(buf);
++buf->refcnt;
@ -511,7 +511,7 @@ evbuffer_invoke_callbacks(struct evbuffer *buffer)
if (buffer->deferred_cbs) {
if (buffer->deferred.queued)
return;
_evbuffer_incref_and_lock(buffer);
evbuffer_incref_and_lock_(buffer);
if (buffer->parent)
bufferevent_incref(buffer->parent);
EVBUFFER_UNLOCK(buffer);
@ -532,7 +532,7 @@ evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
EVBUFFER_LOCK(buffer);
parent = buffer->parent;
evbuffer_run_callbacks(buffer, 1);
_evbuffer_decref_and_unlock(buffer);
evbuffer_decref_and_unlock_(buffer);
if (parent)
bufferevent_decref(parent);
}
@ -549,7 +549,7 @@ evbuffer_remove_all_callbacks(struct evbuffer *buffer)
}
void
_evbuffer_decref_and_unlock(struct evbuffer *buffer)
evbuffer_decref_and_unlock_(struct evbuffer *buffer)
{
struct evbuffer_chain *chain, *next;
ASSERT_EVBUFFER_LOCKED(buffer);
@ -579,7 +579,7 @@ void
evbuffer_free(struct evbuffer *buffer)
{
EVBUFFER_LOCK(buffer);
_evbuffer_decref_and_unlock(buffer);
evbuffer_decref_and_unlock_(buffer);
}
void
@ -636,13 +636,13 @@ evbuffer_add_iovec(struct evbuffer * buf, struct evbuffer_iovec * vec, int n_vec
to_alloc += vec[n].iov_len;
}
if (_evbuffer_expand_fast(buf, to_alloc, 2) < 0) {
if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
goto done;
}
for (n = 0; n < n_vec; n++) {
/* XXX each 'add' call here does a bunch of setup that's
* obviated by _evbuffer_expand_fast, and some cleanup that we
* obviated by evbuffer_expand_fast_, and some cleanup that we
* would like to do only once. Instead we should just extract
* the part of the code that's needed. */
@ -679,9 +679,9 @@ evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
n = 1;
} else {
if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
goto done;
n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs,
n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
&chainp, 0);
}
@ -911,7 +911,7 @@ APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)
/* reference evbuffer containing source chain so it
* doesn't get released while the chain is still
* being referenced to */
_evbuffer_incref(src);
evbuffer_incref_(src);
extra->source = src;
/* reference source chain which now becomes immutable */
evbuffer_chain_incref(chain);
@ -1181,8 +1181,8 @@ evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,
EVBUFFER_LOCK(buf);
if (pos) {
chain = pos->_internal.chain;
pos_in_chain = pos->_internal.pos_in_chain;
chain = pos->internal_.chain;
pos_in_chain = pos->internal_.pos_in_chain;
if (datlen + pos->pos > buf->total_len)
datlen = buf->total_len - pos->pos;
} else {
@ -1446,14 +1446,14 @@ evbuffer_readline(struct evbuffer *buffer)
static inline ev_ssize_t
evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
{
struct evbuffer_chain *chain = it->_internal.chain;
size_t i = it->_internal.pos_in_chain;
struct evbuffer_chain *chain = it->internal_.chain;
size_t i = it->internal_.pos_in_chain;
while (chain != NULL) {
char *buffer = (char *)chain->buffer + chain->misalign;
char *cp = memchr(buffer+i, chr, chain->off-i);
if (cp) {
it->_internal.chain = chain;
it->_internal.pos_in_chain = cp - buffer;
it->internal_.chain = chain;
it->internal_.pos_in_chain = cp - buffer;
it->pos += (cp - buffer - i);
return it->pos;
}
@ -1495,14 +1495,14 @@ find_eol_char(char *s, size_t len)
static ev_ssize_t
evbuffer_find_eol_char(struct evbuffer_ptr *it)
{
struct evbuffer_chain *chain = it->_internal.chain;
size_t i = it->_internal.pos_in_chain;
struct evbuffer_chain *chain = it->internal_.chain;
size_t i = it->internal_.pos_in_chain;
while (chain != NULL) {
char *buffer = (char *)chain->buffer + chain->misalign;
char *cp = find_eol_char(buffer+i, chain->off-i);
if (cp) {
it->_internal.chain = chain;
it->_internal.pos_in_chain = cp - buffer;
it->internal_.chain = chain;
it->internal_.pos_in_chain = cp - buffer;
it->pos += (cp - buffer) - i;
return it->pos;
}
@ -1519,8 +1519,8 @@ evbuffer_strspn(
struct evbuffer_ptr *ptr, const char *chrset)
{
int count = 0;
struct evbuffer_chain *chain = ptr->_internal.chain;
size_t i = ptr->_internal.pos_in_chain;
struct evbuffer_chain *chain = ptr->internal_.chain;
size_t i = ptr->internal_.pos_in_chain;
if (!chain)
return 0;
@ -1533,8 +1533,8 @@ evbuffer_strspn(
if (buffer[i] == *p++)
goto next;
}
ptr->_internal.chain = chain;
ptr->_internal.pos_in_chain = i;
ptr->internal_.chain = chain;
ptr->internal_.pos_in_chain = i;
ptr->pos += count;
return count;
next:
@ -1543,8 +1543,8 @@ evbuffer_strspn(
i = 0;
if (! chain->next) {
ptr->_internal.chain = chain;
ptr->_internal.pos_in_chain = i;
ptr->internal_.chain = chain;
ptr->internal_.pos_in_chain = i;
ptr->pos += count;
return count;
}
@ -1557,8 +1557,8 @@ evbuffer_strspn(
static inline int
evbuffer_getchr(struct evbuffer_ptr *it)
{
struct evbuffer_chain *chain = it->_internal.chain;
size_t off = it->_internal.pos_in_chain;
struct evbuffer_chain *chain = it->internal_.chain;
size_t off = it->internal_.pos_in_chain;
if (chain == NULL)
return -1;
@ -1576,7 +1576,7 @@ evbuffer_search_eol(struct evbuffer *buffer,
int ok = 0;
/* Avoid locking in trivial edge cases */
if (start && start->_internal.chain == NULL) {
if (start && start->internal_.chain == NULL) {
PTR_NOT_FOUND(&it);
if (eol_len_out)
*eol_len_out = extra_drain;
@ -1589,8 +1589,8 @@ evbuffer_search_eol(struct evbuffer *buffer,
memcpy(&it, start, sizeof(it));
} else {
it.pos = 0;
it._internal.chain = buffer->first;
it._internal.pos_in_chain = 0;
it.internal_.chain = buffer->first;
it.internal_.pos_in_chain = 0;
}
/* the eol_style determines our first stop character and how many
@ -1997,7 +1997,7 @@ err:
/* Make sure that datlen bytes are available for writing in the last n
* chains. Never copies or moves data. */
int
_evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)
{
struct evbuffer_chain *chain = buf->last, *tmp, *next;
size_t avail;
@ -2168,7 +2168,7 @@ evbuffer_expand(struct evbuffer *buf, size_t datlen)
@return The number of buffers we're using.
*/
int
_evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
struct evbuffer_iovec *vecs, int n_vecs_avail,
struct evbuffer_chain ***chainp, int exact)
{
@ -2253,19 +2253,19 @@ evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
#ifdef USE_IOVEC_IMPL
/* Since we can use iovecs, we're willing to use the last
* NUM_READ_IOVEC chains. */
if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) {
result = -1;
goto done;
} else {
IOV_TYPE vecs[NUM_READ_IOVEC];
#ifdef _EVBUFFER_IOVEC_IS_NATIVE
nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
#ifdef EVBUFFER_IOVEC_IS_NATIVE_
nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs,
NUM_READ_IOVEC, &chainp, 1);
#else
/* We aren't using the native struct iovec. Therefore,
we are on win32. */
struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2,
nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2,
&chainp, 1);
for (i=0; i < nvecs; ++i)
@ -2537,8 +2537,8 @@ evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
{
if (howfar > (size_t)pos->pos)
return -1;
if (pos->_internal.chain && howfar <= pos->_internal.pos_in_chain) {
pos->_internal.pos_in_chain -= howfar;
if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) {
pos->internal_.pos_in_chain -= howfar;
pos->pos -= howfar;
return 0;
} else {
@ -2568,9 +2568,9 @@ evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
case EVBUFFER_PTR_ADD:
/* this avoids iterating over all previous chains if
we just want to advance the position */
chain = pos->_internal.chain;
chain = pos->internal_.chain;
pos->pos += position;
position = pos->_internal.pos_in_chain;
position = pos->internal_.pos_in_chain;
break;
}
@ -2580,12 +2580,12 @@ evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
position = 0;
}
if (chain) {
pos->_internal.chain = chain;
pos->_internal.pos_in_chain = position + left;
pos->internal_.chain = chain;
pos->internal_.pos_in_chain = position + left;
} else if (left == 0) {
/* The first byte in the (nonexistent) chain after the last chain */
pos->_internal.chain = NULL;
pos->_internal.pos_in_chain = 0;
pos->internal_.chain = NULL;
pos->internal_.pos_in_chain = 0;
} else {
PTR_NOT_FOUND(pos);
result = -1;
@ -2613,8 +2613,8 @@ evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
if (pos->pos + len > buf->total_len)
return -1;
chain = pos->_internal.chain;
position = pos->_internal.pos_in_chain;
chain = pos->internal_.chain;
position = pos->internal_.pos_in_chain;
while (len && chain) {
size_t n_comparable;
if (len + position > chain->off)
@ -2652,15 +2652,15 @@ evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, con
if (start) {
memcpy(&pos, start, sizeof(pos));
chain = pos._internal.chain;
chain = pos.internal_.chain;
} else {
pos.pos = 0;
chain = pos._internal.chain = buffer->first;
pos._internal.pos_in_chain = 0;
chain = pos.internal_.chain = buffer->first;
pos.internal_.pos_in_chain = 0;
}
if (end)
last_chain = end->_internal.chain;
last_chain = end->internal_.chain;
if (!len || len > EV_SSIZE_MAX)
goto done;
@ -2670,12 +2670,12 @@ evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, con
while (chain) {
const unsigned char *start_at =
chain->buffer + chain->misalign +
pos._internal.pos_in_chain;
pos.internal_.pos_in_chain;
p = memchr(start_at, first,
chain->off - pos._internal.pos_in_chain);
chain->off - pos.internal_.pos_in_chain);
if (p) {
pos.pos += p - start_at;
pos._internal.pos_in_chain += p - start_at;
pos.internal_.pos_in_chain += p - start_at;
if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
if (end && pos.pos + (ev_ssize_t)len > end->pos)
goto not_found;
@ -2683,17 +2683,17 @@ evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, con
goto done;
}
++pos.pos;
++pos._internal.pos_in_chain;
if (pos._internal.pos_in_chain == chain->off) {
chain = pos._internal.chain = chain->next;
pos._internal.pos_in_chain = 0;
++pos.internal_.pos_in_chain;
if (pos.internal_.pos_in_chain == chain->off) {
chain = pos.internal_.chain = chain->next;
pos.internal_.pos_in_chain = 0;
}
} else {
if (chain == last_chain)
goto not_found;
pos.pos += chain->off - pos._internal.pos_in_chain;
chain = pos._internal.chain = chain->next;
pos._internal.pos_in_chain = 0;
pos.pos += chain->off - pos.internal_.pos_in_chain;
chain = pos.internal_.chain = chain->next;
pos.internal_.pos_in_chain = 0;
}
}
@ -2714,19 +2714,19 @@ evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
ev_ssize_t len_so_far = 0;
/* Avoid locking in trivial edge cases */
if (start_at && start_at->_internal.chain == NULL)
if (start_at && start_at->internal_.chain == NULL)
return 0;
EVBUFFER_LOCK(buffer);
if (start_at) {
chain = start_at->_internal.chain;
chain = start_at->internal_.chain;
len_so_far = chain->off
- start_at->_internal.pos_in_chain;
- start_at->internal_.pos_in_chain;
idx = 1;
if (n_vec > 0) {
vec[0].iov_base = chain->buffer + chain->misalign
+ start_at->_internal.pos_in_chain;
+ start_at->internal_.pos_in_chain;
vec[0].iov_len = len_so_far;
}
chain = chain->next;


@ -88,7 +88,7 @@ pin_release(struct evbuffer_overlapped *eo, unsigned flag)
for (i = 0; i < eo->n_buffers; ++i) {
EVUTIL_ASSERT(chain);
next = chain->next;
_evbuffer_chain_unpin(chain, flag);
evbuffer_chain_unpin_(chain, flag);
chain = next;
}
}
@ -131,7 +131,7 @@ evbuffer_commit_read(struct evbuffer *evbuf, ev_ssize_t nBytes)
evbuffer_invoke_callbacks(evbuf);
_evbuffer_decref_and_unlock(evbuf);
evbuffer_decref_and_unlock_(evbuf);
}
void
@ -145,7 +145,7 @@ evbuffer_commit_write(struct evbuffer *evbuf, ev_ssize_t nBytes)
evbuffer_drain(evbuf, nBytes);
pin_release(buf,EVBUFFER_MEM_PINNED_W);
buf->write_in_progress = 0;
_evbuffer_decref_and_unlock(evbuf);
evbuffer_decref_and_unlock_(evbuf);
}
struct evbuffer *
@ -204,7 +204,7 @@ evbuffer_launch_write(struct evbuffer *buf, ev_ssize_t at_most,
for (i=0; i < MAX_WSABUFS && chain; ++i, chain=chain->next) {
WSABUF *b = &buf_o->buffers[i];
b->buf = (char*)( chain->buffer + chain->misalign );
_evbuffer_chain_pin(chain, EVBUFFER_MEM_PINNED_W);
evbuffer_chain_pin_(chain, EVBUFFER_MEM_PINNED_W);
if ((size_t)at_most > chain->off) {
/* XXXX Cast is safe for now, since win32 has no
@ -221,7 +221,7 @@ evbuffer_launch_write(struct evbuffer *buf, ev_ssize_t at_most,
}
buf_o->n_buffers = i;
_evbuffer_incref(buf);
evbuffer_incref_(buf);
if (WSASend(buf_o->fd, buf_o->buffers, i, &bytesSent, 0,
&ol->overlapped, NULL)) {
int error = WSAGetLastError();
@ -265,11 +265,11 @@ evbuffer_launch_read(struct evbuffer *buf, size_t at_most,
buf_o->n_buffers = 0;
memset(buf_o->buffers, 0, sizeof(buf_o->buffers));
if (_evbuffer_expand_fast(buf, at_most, MAX_WSABUFS) == -1)
if (evbuffer_expand_fast_(buf, at_most, MAX_WSABUFS) == -1)
goto done;
evbuffer_freeze(buf, 0);
nvecs = _evbuffer_read_setup_vecs(buf, at_most,
nvecs = evbuffer_read_setup_vecs_(buf, at_most,
vecs, MAX_WSABUFS, &chainp, 1);
for (i=0;i<nvecs;++i) {
WSABUF_FROM_EVBUFFER_IOV(
@ -282,12 +282,12 @@ evbuffer_launch_read(struct evbuffer *buf, size_t at_most,
npin=0;
for ( ; chain; chain = chain->next) {
_evbuffer_chain_pin(chain, EVBUFFER_MEM_PINNED_R);
evbuffer_chain_pin_(chain, EVBUFFER_MEM_PINNED_R);
++npin;
}
EVUTIL_ASSERT(npin == nvecs);
_evbuffer_incref(buf);
evbuffer_incref_(buf);
if (WSARecv(buf_o->fd, buf_o->buffers, nvecs, &bytesRead, &flags,
&ol->overlapped, NULL)) {
int error = WSAGetLastError();
@ -308,14 +308,14 @@ done:
}
evutil_socket_t
_evbuffer_overlapped_get_fd(struct evbuffer *buf)
evbuffer_overlapped_get_fd_(struct evbuffer *buf)
{
struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
return buf_o ? buf_o->fd : -1;
}
void
_evbuffer_overlapped_set_fd(struct evbuffer *buf, evutil_socket_t fd)
evbuffer_overlapped_set_fd_(struct evbuffer *buf, evutil_socket_t fd)
{
struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
EVBUFFER_LOCK(buf);


@ -321,27 +321,27 @@ int bufferevent_enable_locking(struct bufferevent *bufev, void *lock);
void bufferevent_incref(struct bufferevent *bufev);
/** Internal: Lock bufev and increase its reference count.
* unlocking it otherwise. */
void _bufferevent_incref_and_lock(struct bufferevent *bufev);
void bufferevent_incref_and_lock_(struct bufferevent *bufev);
/** Internal: Decrement the reference count on bufev. Returns 1 if it freed
* the bufferevent.*/
int bufferevent_decref(struct bufferevent *bufev);
/** Internal: Drop the reference count on bufev, freeing as necessary, and
* unlocking it otherwise. Returns 1 if it freed the bufferevent. */
int _bufferevent_decref_and_unlock(struct bufferevent *bufev);
int bufferevent_decref_and_unlock_(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have a read callback, schedule
* a readcb. Otherwise just run the readcb. */
void _bufferevent_run_readcb(struct bufferevent *bufev);
void bufferevent_run_readcb_(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have a write callback, schedule
* a writecb. Otherwise just run the writecb. */
void _bufferevent_run_writecb(struct bufferevent *bufev);
void bufferevent_run_writecb_(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have an eventcb, schedule
* it to run with events "what". Otherwise just run the eventcb. */
void _bufferevent_run_eventcb(struct bufferevent *bufev, short what);
void bufferevent_run_eventcb_(struct bufferevent *bufev, short what);
/** Internal: Add the event 'ev' with timeout tv, unless tv is set to 0, in
* which case add ev with no timeout. */
int _bufferevent_add_event(struct event *ev, const struct timeval *tv);
int bufferevent_add_event_(struct event *ev, const struct timeval *tv);
/* =========
* These next functions implement timeouts for bufferevents that aren't doing
@ -350,15 +350,15 @@ int _bufferevent_add_event(struct event *ev, const struct timeval *tv);
/** Internal use: Set up the ev_read and ev_write callbacks so that
* the other "generic_timeout" functions will work on it. Call this from
* the constructor function. */
void _bufferevent_init_generic_timeout_cbs(struct bufferevent *bev);
void bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev);
/** Internal use: Delete the ev_read and ev_write callbacks if they're pending.
* Call this from the destructor function. */
int _bufferevent_del_generic_timeout_cbs(struct bufferevent *bev);
int bufferevent_del_generic_timeout_cbs_(struct bufferevent *bev);
/** Internal use: Add or delete the generic timeout events as appropriate.
* (If an event is enabled and a timeout is set, we add the event. Otherwise
* we delete it.) Call this from anything that changes the timeout values,
* that enabled EV_READ or EV_WRITE, or that disables EV_READ or EV_WRITE. */
int _bufferevent_generic_adj_timeouts(struct bufferevent *bev);
int bufferevent_generic_adj_timeouts_(struct bufferevent *bev);
/** Internal use: We have just successfully read data into an inbuf, so
* reset the read timeout (if any). */
@ -385,8 +385,8 @@ int _bufferevent_generic_adj_timeouts(struct bufferevent *bev);
#define BEV_UPCAST(b) EVUTIL_UPCAST((b), struct bufferevent_private, bev)
#ifdef EVENT__DISABLE_THREAD_SUPPORT
#define BEV_LOCK(b) _EVUTIL_NIL_STMT
#define BEV_UNLOCK(b) _EVUTIL_NIL_STMT
#define BEV_LOCK(b) EVUTIL_NIL_STMT_
#define BEV_UNLOCK(b) EVUTIL_NIL_STMT_
#else
/** Internal: Grab the lock (if any) on a bufferevent */
#define BEV_LOCK(b) do { \
@ -404,14 +404,14 @@ int _bufferevent_generic_adj_timeouts(struct bufferevent *bev);
/* ==== For rate-limiting. */
int _bufferevent_decrement_write_buckets(struct bufferevent_private *bev,
int bufferevent_decrement_write_buckets_(struct bufferevent_private *bev,
ev_ssize_t bytes);
int _bufferevent_decrement_read_buckets(struct bufferevent_private *bev,
int bufferevent_decrement_read_buckets_(struct bufferevent_private *bev,
ev_ssize_t bytes);
ev_ssize_t _bufferevent_get_read_max(struct bufferevent_private *bev);
ev_ssize_t _bufferevent_get_write_max(struct bufferevent_private *bev);
ev_ssize_t bufferevent_get_read_max_(struct bufferevent_private *bev);
ev_ssize_t bufferevent_get_write_max_(struct bufferevent_private *bev);
int _bufferevent_ratelim_init(struct bufferevent_private *bev);
int bufferevent_ratelim_init_(struct bufferevent_private *bev);
#ifdef __cplusplus
}


@ -60,7 +60,7 @@
#include "evbuffer-internal.h"
#include "util-internal.h"
static void _bufferevent_cancel_all(struct bufferevent *bev);
static void bufferevent_cancel_all_(struct bufferevent *bev);
void
@ -160,7 +160,7 @@ bufferevent_run_deferred_callbacks_locked(struct deferred_cb *_, void *arg)
EVUTIL_SET_SOCKET_ERROR(err);
bufev->errorcb(bufev, what, bufev->cbarg);
}
_bufferevent_decref_and_unlock(bufev);
bufferevent_decref_and_unlock_(bufev);
}
static void
@ -204,7 +204,7 @@ bufferevent_run_deferred_callbacks_unlocked(struct deferred_cb *_, void *arg)
EVUTIL_SET_SOCKET_ERROR(err);
UNLOCKED(errorcb(bufev,what,cbarg));
}
_bufferevent_decref_and_unlock(bufev);
bufferevent_decref_and_unlock_(bufev);
#undef UNLOCKED
}
@ -218,7 +218,7 @@ bufferevent_run_deferred_callbacks_unlocked(struct deferred_cb *_, void *arg)
void
_bufferevent_run_readcb(struct bufferevent *bufev)
bufferevent_run_readcb_(struct bufferevent *bufev)
{
/* Requires that we hold the lock and a reference */
struct bufferevent_private *p =
@ -235,7 +235,7 @@ _bufferevent_run_readcb(struct bufferevent *bufev)
}
void
_bufferevent_run_writecb(struct bufferevent *bufev)
bufferevent_run_writecb_(struct bufferevent *bufev)
{
/* Requires that we hold the lock and a reference */
struct bufferevent_private *p =
@ -252,7 +252,7 @@ _bufferevent_run_writecb(struct bufferevent *bufev)
}
void
_bufferevent_run_eventcb(struct bufferevent *bufev, short what)
bufferevent_run_eventcb_(struct bufferevent *bufev, short what)
{
/* Requires that we hold the lock and a reference */
struct bufferevent_private *p =
@ -298,7 +298,7 @@ bufferevent_init_common(struct bufferevent_private *bufev_private,
bufev->be_ops = ops;
_bufferevent_ratelim_init(bufev_private);
bufferevent_ratelim_init_(bufev_private);
/*
* Set to EV_WRITE so that using bufferevent_write is going to
@ -434,7 +434,7 @@ bufferevent_enable(struct bufferevent *bufev, short event)
short impl_events = event;
int r = 0;
_bufferevent_incref_and_lock(bufev);
bufferevent_incref_and_lock_(bufev);
if (bufev_private->read_suspended)
impl_events &= ~EV_READ;
if (bufev_private->write_suspended)
@ -445,7 +445,7 @@ bufferevent_enable(struct bufferevent *bufev, short event)
if (impl_events && bufev->be_ops->enable(bufev, impl_events) < 0)
r = -1;
_bufferevent_decref_and_unlock(bufev);
bufferevent_decref_and_unlock_(bufev);
return r;
}
@ -598,7 +598,7 @@ bufferevent_flush(struct bufferevent *bufev,
}
void
_bufferevent_incref_and_lock(struct bufferevent *bufev)
bufferevent_incref_and_lock_(struct bufferevent *bufev)
{
struct bufferevent_private *bufev_private =
BEV_UPCAST(bufev);
@ -625,7 +625,7 @@ _bufferevent_transfer_lock_ownership(struct bufferevent *donor,
#endif
int
_bufferevent_decref_and_unlock(struct bufferevent *bufev)
bufferevent_decref_and_unlock_(struct bufferevent *bufev)
{
struct bufferevent_private *bufev_private =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
@ -692,7 +692,7 @@ int
bufferevent_decref(struct bufferevent *bufev)
{
BEV_LOCK(bufev);
return _bufferevent_decref_and_unlock(bufev);
return bufferevent_decref_and_unlock_(bufev);
}
void
@ -700,8 +700,8 @@ bufferevent_free(struct bufferevent *bufev)
{
BEV_LOCK(bufev);
bufferevent_setcb(bufev, NULL, NULL, NULL, NULL);
_bufferevent_cancel_all(bufev);
_bufferevent_decref_and_unlock(bufev);
bufferevent_cancel_all_(bufev);
bufferevent_decref_and_unlock_(bufev);
}
void
@ -778,7 +778,7 @@ bufferevent_getfd(struct bufferevent *bev)
}
static void
_bufferevent_cancel_all(struct bufferevent *bev)
bufferevent_cancel_all_(struct bufferevent *bev)
{
union bufferevent_ctrl_data d;
memset(&d, 0, sizeof(d));
@ -815,23 +815,23 @@ static void
bufferevent_generic_read_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
struct bufferevent *bev = ctx;
_bufferevent_incref_and_lock(bev);
bufferevent_incref_and_lock_(bev);
bufferevent_disable(bev, EV_READ);
_bufferevent_run_eventcb(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING);
_bufferevent_decref_and_unlock(bev);
bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING);
bufferevent_decref_and_unlock_(bev);
}
static void
bufferevent_generic_write_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
struct bufferevent *bev = ctx;
_bufferevent_incref_and_lock(bev);
bufferevent_incref_and_lock_(bev);
bufferevent_disable(bev, EV_WRITE);
_bufferevent_run_eventcb(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING);
_bufferevent_decref_and_unlock(bev);
bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING);
bufferevent_decref_and_unlock_(bev);
}
void
_bufferevent_init_generic_timeout_cbs(struct bufferevent *bev)
bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev)
{
evtimer_assign(&bev->ev_read, bev->ev_base,
bufferevent_generic_read_timeout_cb, bev);
@ -840,7 +840,7 @@ _bufferevent_init_generic_timeout_cbs(struct bufferevent *bev)
}
int
_bufferevent_del_generic_timeout_cbs(struct bufferevent *bev)
bufferevent_del_generic_timeout_cbs_(struct bufferevent *bev)
{
int r1,r2;
r1 = event_del(&bev->ev_read);
@ -851,7 +851,7 @@ _bufferevent_del_generic_timeout_cbs(struct bufferevent *bev)
}
int
_bufferevent_generic_adj_timeouts(struct bufferevent *bev)
bufferevent_generic_adj_timeouts_(struct bufferevent *bev)
{
const short enabled = bev->enabled;
struct bufferevent_private *bev_p =
@ -875,7 +875,7 @@ _bufferevent_generic_adj_timeouts(struct bufferevent *bev)
}
int
_bufferevent_add_event(struct event *ev, const struct timeval *tv)
bufferevent_add_event_(struct event *ev, const struct timeval *tv)
{
if (tv->tv_sec == 0 && tv->tv_usec == 0)
return event_add(ev, NULL);
@ -884,15 +884,15 @@ _bufferevent_add_event(struct event *ev, const struct timeval *tv)
}
/* For use by user programs only; internally, we should be calling
either _bufferevent_incref_and_lock(), or BEV_LOCK. */
either bufferevent_incref_and_lock_(), or BEV_LOCK. */
void
bufferevent_lock(struct bufferevent *bev)
{
_bufferevent_incref_and_lock(bev);
bufferevent_incref_and_lock_(bev);
}
void
bufferevent_unlock(struct bufferevent *bev)
{
_bufferevent_decref_and_unlock(bev);
bufferevent_decref_and_unlock_(bev);
}


@ -94,7 +94,7 @@ const struct bufferevent_ops bufferevent_ops_async = {
be_async_enable,
be_async_disable,
be_async_destruct,
_bufferevent_generic_adj_timeouts,
bufferevent_generic_adj_timeouts_,
be_async_flush,
be_async_ctrl,
};
@ -201,7 +201,7 @@ bev_async_consider_writing(struct bufferevent_async *beva)
/* This is safe so long as bufferevent_get_write_max never returns
* more than INT_MAX. That's true for now. XXXX */
limit = (int)_bufferevent_get_write_max(&beva->bev);
limit = (int)bufferevent_get_write_max_(&beva->bev);
if (at_most >= (size_t)limit && limit >= 0)
at_most = limit;
@ -216,10 +216,10 @@ bev_async_consider_writing(struct bufferevent_async *beva)
&beva->write_overlapped)) {
bufferevent_decref(bev);
beva->ok = 0;
_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR);
} else {
beva->write_in_progress = at_most;
_bufferevent_decrement_write_buckets(&beva->bev, at_most);
bufferevent_decrement_write_buckets_(&beva->bev, at_most);
bev_async_add_write(beva);
}
}
@ -256,8 +256,8 @@ bev_async_consider_reading(struct bufferevent_async *beva)
}
/* XXXX This over-commits. */
/* XXXX see also not above on cast on _bufferevent_get_write_max() */
limit = (int)_bufferevent_get_read_max(&beva->bev);
/* XXXX see also not above on cast on bufferevent_get_write_max_() */
limit = (int)bufferevent_get_read_max_(&beva->bev);
if (at_most >= (size_t)limit && limit >= 0)
at_most = limit;
@ -269,11 +269,11 @@ bev_async_consider_reading(struct bufferevent_async *beva)
bufferevent_incref(bev);
if (evbuffer_launch_read(bev->input, at_most, &beva->read_overlapped)) {
beva->ok = 0;
_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR);
bufferevent_decref(bev);
} else {
beva->read_in_progress = at_most;
_bufferevent_decrement_read_buckets(&beva->bev, at_most);
bufferevent_decrement_read_buckets_(&beva->bev, at_most);
bev_async_add_read(beva);
}
@ -291,12 +291,12 @@ be_async_outbuf_callback(struct evbuffer *buf,
/* If we added data to the outbuf and were not writing before,
* we may want to write now. */
_bufferevent_incref_and_lock(bev);
bufferevent_incref_and_lock_(bev);
if (cbinfo->n_added)
bev_async_consider_writing(bev_async);
_bufferevent_decref_and_unlock(bev);
bufferevent_decref_and_unlock_(bev);
}
static void
@ -310,12 +310,12 @@ be_async_inbuf_callback(struct evbuffer *buf,
/* If we drained data from the inbuf and were not reading before,
* we may want to read now */
_bufferevent_incref_and_lock(bev);
bufferevent_incref_and_lock_(bev);
if (cbinfo->n_deleted)
bev_async_consider_reading(bev_async);
_bufferevent_decref_and_unlock(bev);
bufferevent_decref_and_unlock_(bev);
}
static int
@ -379,7 +379,7 @@ be_async_destruct(struct bufferevent *bev)
bev_async_del_read(bev_async);
bev_async_del_write(bev_async);
fd = _evbuffer_overlapped_get_fd(bev->input);
fd = evbuffer_overlapped_get_fd_(bev->input);
if (bev_p->options & BEV_OPT_CLOSE_ON_FREE) {
/* XXXX possible double-close */
evutil_closesocket(fd);
@ -387,7 +387,7 @@ be_async_destruct(struct bufferevent *bev)
/* delete this in case non-blocking connect was used */
if (event_initialized(&bev->ev_write)) {
event_del(&bev->ev_write);
_bufferevent_del_generic_timeout_cbs(bev);
bufferevent_del_generic_timeout_cbs_(bev);
}
}
@ -399,7 +399,7 @@ bev_async_set_wsa_error(struct bufferevent *bev, struct event_overlapped *eo)
DWORD bytes, flags;
evutil_socket_t fd;
fd = _evbuffer_overlapped_get_fd(bev->input);
fd = evbuffer_overlapped_get_fd_(bev->input);
WSAGetOverlappedResult(fd, &eo->overlapped, &bytes, FALSE, &flags);
}
@ -422,7 +422,7 @@ connect_complete(struct event_overlapped *eo, ev_uintptr_t key,
EVUTIL_ASSERT(bev_a->bev.connecting);
bev_a->bev.connecting = 0;
sock = _evbuffer_overlapped_get_fd(bev_a->bev.bev.input);
sock = evbuffer_overlapped_get_fd_(bev_a->bev.bev.input);
/* XXXX Handle error? */
setsockopt(sock, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0);
@ -431,12 +431,12 @@ connect_complete(struct event_overlapped *eo, ev_uintptr_t key,
else
bev_async_set_wsa_error(bev, eo);
_bufferevent_run_eventcb(bev,
bufferevent_run_eventcb_(bev,
ok? BEV_EVENT_CONNECTED : BEV_EVENT_ERROR);
event_base_del_virtual(bev->ev_base);
_bufferevent_decref_and_unlock(bev);
bufferevent_decref_and_unlock_(bev);
}
static void
@ -454,7 +454,7 @@ read_complete(struct event_overlapped *eo, ev_uintptr_t key,
evbuffer_commit_read(bev->input, nbytes);
bev_a->read_in_progress = 0;
if (amount_unread)
_bufferevent_decrement_read_buckets(&bev_a->bev, -amount_unread);
bufferevent_decrement_read_buckets_(&bev_a->bev, -amount_unread);
if (!ok)
bev_async_set_wsa_error(bev, eo);
@ -463,20 +463,20 @@ read_complete(struct event_overlapped *eo, ev_uintptr_t key,
if (ok && nbytes) {
BEV_RESET_GENERIC_READ_TIMEOUT(bev);
if (evbuffer_get_length(bev->input) >= bev->wm_read.low)
_bufferevent_run_readcb(bev);
bufferevent_run_readcb_(bev);
bev_async_consider_reading(bev_a);
} else if (!ok) {
what |= BEV_EVENT_ERROR;
bev_a->ok = 0;
_bufferevent_run_eventcb(bev, what);
bufferevent_run_eventcb_(bev, what);
} else if (!nbytes) {
what |= BEV_EVENT_EOF;
bev_a->ok = 0;
_bufferevent_run_eventcb(bev, what);
bufferevent_run_eventcb_(bev, what);
}
}
_bufferevent_decref_and_unlock(bev);
bufferevent_decref_and_unlock_(bev);
}
static void
@ -496,7 +496,7 @@ write_complete(struct event_overlapped *eo, ev_uintptr_t key,
bev_a->write_in_progress = 0;
if (amount_unwritten)
_bufferevent_decrement_write_buckets(&bev_a->bev,
bufferevent_decrement_write_buckets_(&bev_a->bev,
-amount_unwritten);
@ -508,20 +508,20 @@ write_complete(struct event_overlapped *eo, ev_uintptr_t key,
BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
if (evbuffer_get_length(bev->output) <=
bev->wm_write.low)
_bufferevent_run_writecb(bev);
bufferevent_run_writecb_(bev);
bev_async_consider_writing(bev_a);
} else if (!ok) {
what |= BEV_EVENT_ERROR;
bev_a->ok = 0;
_bufferevent_run_eventcb(bev, what);
bufferevent_run_eventcb_(bev, what);
} else if (!nbytes) {
what |= BEV_EVENT_EOF;
bev_a->ok = 0;
_bufferevent_run_eventcb(bev, what);
bufferevent_run_eventcb_(bev, what);
}
}
_bufferevent_decref_and_unlock(bev);
bufferevent_decref_and_unlock_(bev);
}
struct bufferevent *
@ -573,7 +573,7 @@ bufferevent_async_new(struct event_base *base,
bev_a->ok = fd >= 0;
if (bev_a->ok)
_bufferevent_init_generic_timeout_cbs(bev);
bufferevent_init_generic_timeout_cbs_(bev);
return bev;
err:
@ -586,7 +586,7 @@ bufferevent_async_set_connected(struct bufferevent *bev)
{
struct bufferevent_async *bev_async = upcast(bev);
bev_async->ok = 1;
_bufferevent_init_generic_timeout_cbs(bev);
bufferevent_init_generic_timeout_cbs_(bev);
/* Now's a good time to consider reading/writing */
be_async_enable(bev, bev->enabled);
}
@ -657,24 +657,24 @@ be_async_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
{
switch (op) {
case BEV_CTRL_GET_FD:
data->fd = _evbuffer_overlapped_get_fd(bev->input);
data->fd = evbuffer_overlapped_get_fd_(bev->input);
return 0;
case BEV_CTRL_SET_FD: {
struct event_iocp_port *iocp;
if (data->fd == _evbuffer_overlapped_get_fd(bev->input))
if (data->fd == evbuffer_overlapped_get_fd_(bev->input))
return 0;
if (!(iocp = event_base_get_iocp(bev->ev_base)))
return -1;
if (event_iocp_port_associate(iocp, data->fd, 1) < 0)
return -1;
_evbuffer_overlapped_set_fd(bev->input, data->fd);
_evbuffer_overlapped_set_fd(bev->output, data->fd);
evbuffer_overlapped_set_fd_(bev->input, data->fd);
evbuffer_overlapped_set_fd_(bev->output, data->fd);
return 0;
}
case BEV_CTRL_CANCEL_ALL: {
struct bufferevent_async *bev_a = upcast(bev);
evutil_socket_t fd = _evbuffer_overlapped_get_fd(bev->input);
evutil_socket_t fd = evbuffer_overlapped_get_fd_(bev->input);
if (fd != (evutil_socket_t)INVALID_SOCKET &&
(bev_a->bev.options & BEV_OPT_CLOSE_ON_FREE)) {
closesocket(fd);


@ -100,7 +100,7 @@ const struct bufferevent_ops bufferevent_ops_filter = {
be_filter_enable,
be_filter_disable,
be_filter_destruct,
_bufferevent_generic_adj_timeouts,
bufferevent_generic_adj_timeouts_,
be_filter_flush,
be_filter_ctrl,
};
@ -204,7 +204,7 @@ bufferevent_filter_new(struct bufferevent *underlying,
bufev_f->outbuf_cb = evbuffer_add_cb(downcast(bufev_f)->output,
bufferevent_filtered_outbuf_cb, bufev_f);
_bufferevent_init_generic_timeout_cbs(downcast(bufev_f));
bufferevent_init_generic_timeout_cbs_(downcast(bufev_f));
bufferevent_incref(underlying);
bufferevent_enable(underlying, EV_READ|EV_WRITE);
@ -243,7 +243,7 @@ be_filter_destruct(struct bufferevent *bev)
}
}
_bufferevent_del_generic_timeout_cbs(bev);
bufferevent_del_generic_timeout_cbs_(bev);
}
static int
@ -372,7 +372,7 @@ be_filter_process_output(struct bufferevent_filtered *bevf,
if (processed &&
evbuffer_get_length(bufev->output) <= bufev->wm_write.low) {
/* call the write callback.*/
_bufferevent_run_writecb(bufev);
bufferevent_run_writecb_(bufev);
if (res == BEV_OK &&
(bufev->enabled & EV_WRITE) &&
@ -405,9 +405,9 @@ bufferevent_filtered_outbuf_cb(struct evbuffer *buf,
int processed_any = 0;
/* Somebody added more data to the output buffer. Try to
* process it, if we should. */
_bufferevent_incref_and_lock(bev);
bufferevent_incref_and_lock_(bev);
be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
_bufferevent_decref_and_unlock(bev);
bufferevent_decref_and_unlock_(bev);
}
}
@ -421,7 +421,7 @@ be_filter_readcb(struct bufferevent *underlying, void *_me)
struct bufferevent *bufev = downcast(bevf);
int processed_any = 0;
_bufferevent_incref_and_lock(bufev);
bufferevent_incref_and_lock_(bufev);
if (bevf->got_eof)
state = BEV_FINISHED;
@ -437,9 +437,9 @@ be_filter_readcb(struct bufferevent *underlying, void *_me)
* force readcb calls as needed. */
if (processed_any &&
evbuffer_get_length(bufev->input) >= bufev->wm_read.low)
_bufferevent_run_readcb(bufev);
bufferevent_run_readcb_(bufev);
_bufferevent_decref_and_unlock(bufev);
bufferevent_decref_and_unlock_(bufev);
}
/* Called when the underlying socket has drained enough that we can write to
@ -451,9 +451,9 @@ be_filter_writecb(struct bufferevent *underlying, void *_me)
struct bufferevent *bev = downcast(bevf);
int processed_any = 0;
_bufferevent_incref_and_lock(bev);
bufferevent_incref_and_lock_(bev);
be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
_bufferevent_decref_and_unlock(bev);
bufferevent_decref_and_unlock_(bev);
}
/* Called when the underlying socket has given us an error */
@ -463,10 +463,10 @@ be_filter_eventcb(struct bufferevent *underlying, short what, void *_me)
struct bufferevent_filtered *bevf = _me;
struct bufferevent *bev = downcast(bevf);
_bufferevent_incref_and_lock(bev);
bufferevent_incref_and_lock_(bev);
/* All we can really to is tell our own eventcb. */
_bufferevent_run_eventcb(bev, what);
_bufferevent_decref_and_unlock(bev);
bufferevent_run_eventcb_(bev, what);
bufferevent_decref_and_unlock_(bev);
}
static int
@ -477,7 +477,7 @@ be_filter_flush(struct bufferevent *bufev,
int processed_any = 0;
EVUTIL_ASSERT(bevf);
_bufferevent_incref_and_lock(bufev);
bufferevent_incref_and_lock_(bufev);
if (iotype & EV_READ) {
be_filter_process_input(bevf, mode, &processed_any);
@ -489,7 +489,7 @@ be_filter_flush(struct bufferevent *bufev,
/* XXX does this want to recursively call lower-level flushes? */
bufferevent_flush(bevf->underlying, iotype, mode);
_bufferevent_decref_and_unlock(bufev);
bufferevent_decref_and_unlock_(bufev);
return processed_any;
}


@ -383,9 +383,9 @@ start_reading(struct bufferevent_openssl *bev_ssl)
} else {
struct bufferevent *bev = &bev_ssl->bev.bev;
int r;
r = _bufferevent_add_event(&bev->ev_read, &bev->timeout_read);
r = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
if (r == 0 && bev_ssl->read_blocked_on_write)
r = _bufferevent_add_event(&bev->ev_write,
r = bufferevent_add_event_(&bev->ev_write,
&bev->timeout_write);
return r;
}
@ -402,9 +402,9 @@ start_writing(struct bufferevent_openssl *bev_ssl)
;
} else {
struct bufferevent *bev = &bev_ssl->bev.bev;
r = _bufferevent_add_event(&bev->ev_write, &bev->timeout_write);
r = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
if (!r && bev_ssl->write_blocked_on_read)
r = _bufferevent_add_event(&bev->ev_read,
r = bufferevent_add_event_(&bev->ev_read,
&bev->timeout_read);
}
return r;
@ -531,7 +531,7 @@ conn_closed(struct bufferevent_openssl *bev_ssl, int when, int errcode, int ret)
/* when is BEV_EVENT_{READING|WRITING} */
event = when | event;
_bufferevent_run_eventcb(&bev_ssl->bev.bev, event);
bufferevent_run_eventcb_(&bev_ssl->bev.bev, event);
}
static void
@ -552,9 +552,9 @@ decrement_buckets(struct bufferevent_openssl *bev_ssl)
unsigned long w = num_w - bev_ssl->counts.n_written;
unsigned long r = num_r - bev_ssl->counts.n_read;
if (w)
_bufferevent_decrement_write_buckets(&bev_ssl->bev, w);
bufferevent_decrement_write_buckets_(&bev_ssl->bev, w);
if (r)
_bufferevent_decrement_read_buckets(&bev_ssl->bev, r);
bufferevent_decrement_read_buckets_(&bev_ssl->bev, r);
bev_ssl->counts.n_written = num_w;
bev_ssl->counts.n_read = num_r;
}
@ -569,7 +569,7 @@ do_read(struct bufferevent_openssl *bev_ssl, int n_to_read)
int r, n, i, n_used = 0, blocked = 0, atmost;
struct evbuffer_iovec space[2];
atmost = _bufferevent_get_read_max(&bev_ssl->bev);
atmost = bufferevent_get_read_max_(&bev_ssl->bev);
if (n_to_read > atmost)
n_to_read = atmost;
@ -620,7 +620,7 @@ do_read(struct bufferevent_openssl *bev_ssl, int n_to_read)
BEV_RESET_GENERIC_READ_TIMEOUT(bev);
if (evbuffer_get_length(input) >= bev->wm_read.low)
_bufferevent_run_readcb(bev);
bufferevent_run_readcb_(bev);
}
return blocked ? 0 : 1;
@ -637,7 +637,7 @@ do_write(struct bufferevent_openssl *bev_ssl, int atmost)
if (bev_ssl->last_write > 0)
atmost = bev_ssl->last_write;
else
atmost = _bufferevent_get_write_max(&bev_ssl->bev);
atmost = bufferevent_get_write_max_(&bev_ssl->bev);
n = evbuffer_peek(output, atmost, NULL, space, 8);
if (n < 0)
@ -698,7 +698,7 @@ do_write(struct bufferevent_openssl *bev_ssl, int atmost)
BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
if (evbuffer_get_length(output) <= bev->wm_write.low)
_bufferevent_run_writecb(bev);
bufferevent_run_writecb_(bev);
}
return blocked ? 0 : 1;
}
@ -742,7 +742,7 @@ bytes_to_read(struct bufferevent_openssl *bev)
}
/* Respect the rate limit */
limit = _bufferevent_get_read_max(&bev->bev);
limit = bufferevent_get_read_max_(&bev->bev);
if (result > limit) {
result = limit;
}
@ -892,33 +892,33 @@ be_openssl_eventcb(struct bufferevent *bev_base, short what, void *ctx)
eat it. */
}
if (event)
_bufferevent_run_eventcb(&bev_ssl->bev.bev, event);
bufferevent_run_eventcb_(&bev_ssl->bev.bev, event);
}
static void
be_openssl_readeventcb(evutil_socket_t fd, short what, void *ptr)
{
struct bufferevent_openssl *bev_ssl = ptr;
_bufferevent_incref_and_lock(&bev_ssl->bev.bev);
bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
if (what & EV_TIMEOUT) {
_bufferevent_run_eventcb(&bev_ssl->bev.bev,
bufferevent_run_eventcb_(&bev_ssl->bev.bev,
BEV_EVENT_TIMEOUT|BEV_EVENT_READING);
} else
consider_reading(bev_ssl);
_bufferevent_decref_and_unlock(&bev_ssl->bev.bev);
bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
}
static void
be_openssl_writeeventcb(evutil_socket_t fd, short what, void *ptr)
{
struct bufferevent_openssl *bev_ssl = ptr;
_bufferevent_incref_and_lock(&bev_ssl->bev.bev);
bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
if (what & EV_TIMEOUT) {
_bufferevent_run_eventcb(&bev_ssl->bev.bev,
bufferevent_run_eventcb_(&bev_ssl->bev.bev,
BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING);
}
consider_writing(bev_ssl);
_bufferevent_decref_and_unlock(&bev_ssl->bev.bev);
bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
}
static int
@ -945,9 +945,9 @@ set_open_callbacks(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
event_assign(&bev->ev_write, bev->ev_base, fd,
EV_WRITE|EV_PERSIST, be_openssl_writeeventcb, bev_ssl);
if (rpending)
r1 = _bufferevent_add_event(&bev->ev_read, &bev->timeout_read);
r1 = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
if (wpending)
r2 = _bufferevent_add_event(&bev->ev_write, &bev->timeout_write);
r2 = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
if (fd >= 0) {
bev_ssl->fd_is_set = 1;
}
@ -978,7 +978,7 @@ do_handshake(struct bufferevent_openssl *bev_ssl)
set_open_callbacks(bev_ssl, -1); /* XXXX handle failure */
/* Call do_read and do_write as needed */
bufferevent_enable(&bev_ssl->bev.bev, bev_ssl->bev.bev.enabled);
_bufferevent_run_eventcb(&bev_ssl->bev.bev,
bufferevent_run_eventcb_(&bev_ssl->bev.bev,
BEV_EVENT_CONNECTED);
return 1;
} else {
@ -1016,12 +1016,12 @@ be_openssl_handshakeeventcb(evutil_socket_t fd, short what, void *ptr)
{
struct bufferevent_openssl *bev_ssl = ptr;
_bufferevent_incref_and_lock(&bev_ssl->bev.bev);
bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
if (what & EV_TIMEOUT) {
_bufferevent_run_eventcb(&bev_ssl->bev.bev, BEV_EVENT_TIMEOUT);
bufferevent_run_eventcb_(&bev_ssl->bev.bev, BEV_EVENT_TIMEOUT);
} else
do_handshake(bev_ssl);/* XXX handle failure */
_bufferevent_decref_and_unlock(&bev_ssl->bev.bev);
bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
}
static int
@ -1047,8 +1047,8 @@ set_handshake_callbacks(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
event_assign(&bev->ev_write, bev->ev_base, fd,
EV_WRITE|EV_PERSIST, be_openssl_handshakeeventcb, bev_ssl);
if (fd >= 0) {
r1 = _bufferevent_add_event(&bev->ev_read, &bev->timeout_read);
r2 = _bufferevent_add_event(&bev->ev_write, &bev->timeout_write);
r1 = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
r2 = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
bev_ssl->fd_is_set = 1;
}
return (r1<0 || r2<0) ? -1 : 0;
@ -1081,7 +1081,7 @@ be_openssl_outbuf_cb(struct evbuffer *buf,
if (cbinfo->n_added && bev_ssl->state == BUFFEREVENT_SSL_OPEN) {
if (cbinfo->orig_size == 0)
r = _bufferevent_add_event(&bev_ssl->bev.bev.ev_write,
r = bufferevent_add_event_(&bev_ssl->bev.bev.ev_write,
&bev_ssl->bev.bev.timeout_write);
consider_writing(bev_ssl);
}
@ -1145,7 +1145,7 @@ be_openssl_destruct(struct bufferevent *bev)
struct bufferevent_openssl *bev_ssl = upcast(bev);
if (bev_ssl->underlying) {
_bufferevent_del_generic_timeout_cbs(bev);
bufferevent_del_generic_timeout_cbs_(bev);
} else {
event_del(&bev->ev_read);
event_del(&bev->ev_write);
@ -1186,13 +1186,13 @@ be_openssl_adj_timeouts(struct bufferevent *bev)
struct bufferevent_openssl *bev_ssl = upcast(bev);
if (bev_ssl->underlying)
return _bufferevent_generic_adj_timeouts(bev);
return bufferevent_generic_adj_timeouts_(bev);
else {
int r1=0, r2=0;
if (event_pending(&bev->ev_read, EV_READ, NULL))
r1 = _bufferevent_add_event(&bev->ev_read, &bev->timeout_read);
r1 = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
if (event_pending(&bev->ev_write, EV_WRITE, NULL))
r2 = _bufferevent_add_event(&bev->ev_write, &bev->timeout_write);
r2 = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
return (r1<0 || r2<0) ? -1 : 0;
}
}
@ -1290,7 +1290,7 @@ bufferevent_openssl_new_impl(struct event_base *base,
bufferevent_enable_locking(&bev_ssl->bev.bev, NULL);
if (underlying) {
_bufferevent_init_generic_timeout_cbs(&bev_ssl->bev.bev);
bufferevent_init_generic_timeout_cbs_(&bev_ssl->bev.bev);
bufferevent_incref(underlying);
}
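
All of the callback hunks above share one shape: take a reference and the lock on entry, drop both on exit, so the bufferevent cannot be torn down mid-callback. A minimal sketch of that pattern with the renamed internal helpers (my_eventcb is hypothetical; the helpers are libevent-internal):

static void
my_eventcb(evutil_socket_t fd, short what, void *ptr)
{
	struct bufferevent_openssl *bev_ssl = ptr;

	/* Hold a reference and the lock for the callback's lifetime. */
	bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
	if (what & EV_TIMEOUT)
		bufferevent_run_eventcb_(&bev_ssl->bev.bev, BEV_EVENT_TIMEOUT);
	/* Release both; if this was the last reference, the object dies here. */
	bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
}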


@ -67,10 +67,10 @@ static inline void
incref_and_lock(struct bufferevent *b)
{
struct bufferevent_pair *bevp;
_bufferevent_incref_and_lock(b);
bufferevent_incref_and_lock_(b);
bevp = upcast(b);
if (bevp->partner)
_bufferevent_incref_and_lock(downcast(bevp->partner));
bufferevent_incref_and_lock_(downcast(bevp->partner));
}
static inline void
@ -78,8 +78,8 @@ decref_and_unlock(struct bufferevent *b)
{
struct bufferevent_pair *bevp = upcast(b);
if (bevp->partner)
_bufferevent_decref_and_unlock(downcast(bevp->partner));
_bufferevent_decref_and_unlock(b);
bufferevent_decref_and_unlock_(downcast(bevp->partner));
bufferevent_decref_and_unlock_(b);
}
/* XXX Handle close */
@ -104,7 +104,7 @@ bufferevent_pair_elt_new(struct event_base *base,
return NULL;
}
_bufferevent_init_generic_timeout_cbs(&bufev->bev.bev);
bufferevent_init_generic_timeout_cbs_(&bufev->bev.bev);
return bufev;
}
@ -186,10 +186,10 @@ be_pair_transfer(struct bufferevent *src, struct bufferevent *dst,
dst_size = evbuffer_get_length(dst->input);
if (dst_size >= dst->wm_read.low) {
_bufferevent_run_readcb(dst);
bufferevent_run_readcb_(dst);
}
if (src_size <= src->wm_write.low) {
_bufferevent_run_writecb(src);
bufferevent_run_writecb_(src);
}
done:
evbuffer_freeze(src->output, 1);
@ -275,7 +275,7 @@ be_pair_destruct(struct bufferevent *bev)
bev_p->partner = NULL;
}
_bufferevent_del_generic_timeout_cbs(bev);
bufferevent_del_generic_timeout_cbs_(bev);
}
static int
@ -300,7 +300,7 @@ be_pair_flush(struct bufferevent *bev, short iotype,
be_pair_transfer(bev, partner, 1);
if (mode == BEV_FINISHED) {
_bufferevent_run_eventcb(partner, iotype|BEV_EVENT_EOF);
bufferevent_run_eventcb_(partner, iotype|BEV_EVENT_EOF);
}
decref_and_unlock(bev);
return 0;
@ -327,7 +327,7 @@ const struct bufferevent_ops bufferevent_ops_pair = {
be_pair_enable,
be_pair_disable,
be_pair_destruct,
_bufferevent_generic_adj_timeouts,
bufferevent_generic_adj_timeouts_,
be_pair_flush,
NULL, /* ctrl */
};
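
With the leading underscore gone, the shared timeout helper is an ordinary identifier that can sit directly in a backend's vtable, as bufferevent_ops_pair does above. A hypothetical backend's table in the same style (the leading identification fields are elided, since the hunk above does not show them):

static const struct bufferevent_ops bufferevent_ops_mine = {
	/* ... name/offset fields elided ... */
	be_mine_enable,
	be_mine_disable,
	be_mine_destruct,
	bufferevent_generic_adj_timeouts_, /* shared helper, renamed */
	be_mine_flush,
	NULL, /* ctrl */
};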


@ -185,17 +185,17 @@ ev_token_bucket_cfg_free(struct ev_token_bucket_cfg *cfg)
#define LOCK_GROUP(g) EVLOCK_LOCK((g)->lock, 0)
#define UNLOCK_GROUP(g) EVLOCK_UNLOCK((g)->lock, 0)
static int _bev_group_suspend_reading(struct bufferevent_rate_limit_group *g);
static int _bev_group_suspend_writing(struct bufferevent_rate_limit_group *g);
static void _bev_group_unsuspend_reading(struct bufferevent_rate_limit_group *g);
static void _bev_group_unsuspend_writing(struct bufferevent_rate_limit_group *g);
static int bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g);
static int bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g);
static void bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g);
static void bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g);
/** Helper: figure out the maximum amount we should write if is_write, or
the maximum amount we should read if it is not. Return that maximum, or
0 if our bucket is wholly exhausted.
*/
static inline ev_ssize_t
_bufferevent_get_rlim_max(struct bufferevent_private *bev, int is_write)
bufferevent_get_rlim_max_(struct bufferevent_private *bev, int is_write)
{
/* needs lock on bev. */
ev_ssize_t max_so_far = is_write?bev->max_single_write:bev->max_single_read;
@ -258,19 +258,19 @@ _bufferevent_get_rlim_max(struct bufferevent_private *bev, int is_write)
}
ev_ssize_t
_bufferevent_get_read_max(struct bufferevent_private *bev)
bufferevent_get_read_max_(struct bufferevent_private *bev)
{
return _bufferevent_get_rlim_max(bev, 0);
return bufferevent_get_rlim_max_(bev, 0);
}
ev_ssize_t
_bufferevent_get_write_max(struct bufferevent_private *bev)
bufferevent_get_write_max_(struct bufferevent_private *bev)
{
return _bufferevent_get_rlim_max(bev, 1);
return bufferevent_get_rlim_max_(bev, 1);
}
int
_bufferevent_decrement_read_buckets(struct bufferevent_private *bev, ev_ssize_t bytes)
bufferevent_decrement_read_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes)
{
/* XXXXX Make sure all users of this function check its return value */
int r = 0;
@ -297,9 +297,9 @@ _bufferevent_decrement_read_buckets(struct bufferevent_private *bev, ev_ssize_t
bev->rate_limiting->group->rate_limit.read_limit -= bytes;
bev->rate_limiting->group->total_read += bytes;
if (bev->rate_limiting->group->rate_limit.read_limit <= 0) {
_bev_group_suspend_reading(bev->rate_limiting->group);
bev_group_suspend_reading_(bev->rate_limiting->group);
} else if (bev->rate_limiting->group->read_suspended) {
_bev_group_unsuspend_reading(bev->rate_limiting->group);
bev_group_unsuspend_reading_(bev->rate_limiting->group);
}
UNLOCK_GROUP(bev->rate_limiting->group);
}
@ -308,7 +308,7 @@ _bufferevent_decrement_read_buckets(struct bufferevent_private *bev, ev_ssize_t
}
int
_bufferevent_decrement_write_buckets(struct bufferevent_private *bev, ev_ssize_t bytes)
bufferevent_decrement_write_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes)
{
/* XXXXX Make sure all users of this function check its return value */
int r = 0;
@ -335,9 +335,9 @@ _bufferevent_decrement_write_buckets(struct bufferevent_private *bev, ev_ssize_t
bev->rate_limiting->group->rate_limit.write_limit -= bytes;
bev->rate_limiting->group->total_written += bytes;
if (bev->rate_limiting->group->rate_limit.write_limit <= 0) {
_bev_group_suspend_writing(bev->rate_limiting->group);
bev_group_suspend_writing_(bev->rate_limiting->group);
} else if (bev->rate_limiting->group->write_suspended) {
_bev_group_unsuspend_writing(bev->rate_limiting->group);
bev_group_unsuspend_writing_(bev->rate_limiting->group);
}
UNLOCK_GROUP(bev->rate_limiting->group);
}
@ -347,7 +347,7 @@ _bufferevent_decrement_write_buckets(struct bufferevent_private *bev, ev_ssize_t
/** Stop reading on every bufferevent in <b>g</b> */
static int
_bev_group_suspend_reading(struct bufferevent_rate_limit_group *g)
bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g)
{
/* Needs group lock */
struct bufferevent_private *bev;
@ -372,7 +372,7 @@ _bev_group_suspend_reading(struct bufferevent_rate_limit_group *g)
/** Stop writing on every bufferevent in <b>g</b> */
static int
_bev_group_suspend_writing(struct bufferevent_rate_limit_group *g)
bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g)
{
/* Needs group lock */
struct bufferevent_private *bev;
@ -391,7 +391,7 @@ _bev_group_suspend_writing(struct bufferevent_rate_limit_group *g)
/** Timer callback invoked on a single bufferevent with one or more exhausted
buckets when they are ready to refill. */
static void
_bev_refill_callback(evutil_socket_t fd, short what, void *arg)
bev_refill_callback_(evutil_socket_t fd, short what, void *arg)
{
unsigned tick;
struct timeval now;
@ -440,7 +440,7 @@ _bev_refill_callback(evutil_socket_t fd, short what, void *arg)
/** Helper: grab a random element from a bufferevent group. */
static struct bufferevent_private *
_bev_group_random_element(struct bufferevent_rate_limit_group *group)
bev_group_random_element_(struct bufferevent_rate_limit_group *group)
{
int which;
struct bufferevent_private *bev;
@ -452,7 +452,7 @@ _bev_group_random_element(struct bufferevent_rate_limit_group *group)
EVUTIL_ASSERT(! LIST_EMPTY(&group->members));
which = _evutil_weakrand() % group->n_members;
which = evutil_weakrand_() % group->n_members;
bev = LIST_FIRST(&group->members);
while (which--)
@ -470,7 +470,7 @@ _bev_group_random_element(struct bufferevent_rate_limit_group *group)
*/
#define FOREACH_RANDOM_ORDER(block) \
do { \
first = _bev_group_random_element(g); \
first = bev_group_random_element_(g); \
for (bev = first; bev != LIST_END(&g->members); \
bev = LIST_NEXT(bev, rate_limiting->next_in_group)) { \
block ; \
@ -482,7 +482,7 @@ _bev_group_random_element(struct bufferevent_rate_limit_group *group)
} while (0)
static void
_bev_group_unsuspend_reading(struct bufferevent_rate_limit_group *g)
bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g)
{
int again = 0;
struct bufferevent_private *bev, *first;
@ -501,7 +501,7 @@ _bev_group_unsuspend_reading(struct bufferevent_rate_limit_group *g)
}
static void
_bev_group_unsuspend_writing(struct bufferevent_rate_limit_group *g)
bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g)
{
int again = 0;
struct bufferevent_private *bev, *first;
@ -523,7 +523,7 @@ _bev_group_unsuspend_writing(struct bufferevent_rate_limit_group *g)
and unsuspend group members as needed.
*/
static void
_bev_group_refill_callback(evutil_socket_t fd, short what, void *arg)
bev_group_refill_callback_(evutil_socket_t fd, short what, void *arg)
{
struct bufferevent_rate_limit_group *g = arg;
unsigned tick;
@ -538,11 +538,11 @@ _bev_group_refill_callback(evutil_socket_t fd, short what, void *arg)
if (g->pending_unsuspend_read ||
(g->read_suspended && (g->rate_limit.read_limit >= g->min_share))) {
_bev_group_unsuspend_reading(g);
bev_group_unsuspend_reading_(g);
}
if (g->pending_unsuspend_write ||
(g->write_suspended && (g->rate_limit.write_limit >= g->min_share))){
_bev_group_unsuspend_writing(g);
bev_group_unsuspend_writing_(g);
}
/* XXXX Rather than waiting to the next tick to unsuspend stuff
@ -607,7 +607,7 @@ bufferevent_set_rate_limit(struct bufferevent *bev,
event_del(&rlim->refill_bucket_event);
}
evtimer_assign(&rlim->refill_bucket_event, bev->ev_base,
_bev_refill_callback, bevp);
bev_refill_callback_, bevp);
if (rlim->limit.read_limit > 0) {
bufferevent_unsuspend_read(bev, BEV_SUSPEND_BW);
@ -652,7 +652,7 @@ bufferevent_rate_limit_group_new(struct event_base *base,
ev_token_bucket_init(&g->rate_limit, cfg, tick, 0);
event_assign(&g->master_refill_event, base, -1, EV_PERSIST,
_bev_group_refill_callback, g);
bev_group_refill_callback_, g);
/*XXXX handle event_add failure */
event_add(&g->master_refill_event, &cfg->tick_timeout);
@ -743,7 +743,7 @@ bufferevent_add_to_rate_limit_group(struct bufferevent *bev,
return -1;
}
evtimer_assign(&rlim->refill_bucket_event, bev->ev_base,
_bev_refill_callback, bevp);
bev_refill_callback_, bevp);
bevp->rate_limiting = rlim;
}
@ -811,7 +811,7 @@ bufferevent_remove_from_rate_limit_group_internal(struct bufferevent *bev,
* === */
/* Mostly you don't want to use this function from inside libevent;
* _bufferevent_get_read_max() is more likely what you want*/
* bufferevent_get_read_max_() is more likely what you want*/
ev_ssize_t
bufferevent_get_read_limit(struct bufferevent *bev)
{
@ -830,7 +830,7 @@ bufferevent_get_read_limit(struct bufferevent *bev)
}
/* Mostly you don't want to use this function from inside libevent;
* _bufferevent_get_write_max() is more likely what you want*/
* bufferevent_get_write_max_() is more likely what you want*/
ev_ssize_t
bufferevent_get_write_limit(struct bufferevent *bev)
{
@ -903,7 +903,7 @@ bufferevent_get_max_to_read(struct bufferevent *bev)
{
ev_ssize_t r;
BEV_LOCK(bev);
r = _bufferevent_get_read_max(BEV_UPCAST(bev));
r = bufferevent_get_read_max_(BEV_UPCAST(bev));
BEV_UNLOCK(bev);
return r;
}
@ -913,14 +913,14 @@ bufferevent_get_max_to_write(struct bufferevent *bev)
{
ev_ssize_t r;
BEV_LOCK(bev);
r = _bufferevent_get_write_max(BEV_UPCAST(bev));
r = bufferevent_get_write_max_(BEV_UPCAST(bev));
BEV_UNLOCK(bev);
return r;
}
/* Mostly you don't want to use this function from inside libevent;
* _bufferevent_get_read_max() is more likely what you want*/
* bufferevent_get_read_max_() is more likely what you want*/
ev_ssize_t
bufferevent_rate_limit_group_get_read_limit(
struct bufferevent_rate_limit_group *grp)
@ -933,7 +933,7 @@ bufferevent_rate_limit_group_get_read_limit(
}
/* Mostly you don't want to use this function from inside libevent;
* _bufferevent_get_write_max() is more likely what you want. */
* bufferevent_get_write_max_() is more likely what you want. */
ev_ssize_t
bufferevent_rate_limit_group_get_write_limit(
struct bufferevent_rate_limit_group *grp)
@ -1012,9 +1012,9 @@ bufferevent_rate_limit_group_decrement_read(
new_limit = (grp->rate_limit.read_limit -= decr);
if (old_limit > 0 && new_limit <= 0) {
_bev_group_suspend_reading(grp);
bev_group_suspend_reading_(grp);
} else if (old_limit <= 0 && new_limit > 0) {
_bev_group_unsuspend_reading(grp);
bev_group_unsuspend_reading_(grp);
}
UNLOCK_GROUP(grp);
@ -1032,9 +1032,9 @@ bufferevent_rate_limit_group_decrement_write(
new_limit = (grp->rate_limit.write_limit -= decr);
if (old_limit > 0 && new_limit <= 0) {
_bev_group_suspend_writing(grp);
bev_group_suspend_writing_(grp);
} else if (old_limit <= 0 && new_limit > 0) {
_bev_group_unsuspend_writing(grp);
bev_group_unsuspend_writing_(grp);
}
UNLOCK_GROUP(grp);
@ -1059,7 +1059,7 @@ bufferevent_rate_limit_group_reset_totals(struct bufferevent_rate_limit_group *g
}
int
_bufferevent_ratelim_init(struct bufferevent_private *bev)
bufferevent_ratelim_init_(struct bufferevent_private *bev)
{
bev->rate_limiting = NULL;
bev->max_single_read = MAX_SINGLE_READ_DEFAULT;
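
For orientation, a sketch of the public rate-limiting API that sits on top of these renamed internals, given an existing event_base *base and bufferevent *bev (the numbers are arbitrary: 4096 bytes per tick sustained, 16384-byte burst, one-second tick):

struct timeval tick = { 1, 0 };
struct ev_token_bucket_cfg *cfg =
	ev_token_bucket_cfg_new(4096, 16384, 4096, 16384, &tick);

bufferevent_set_rate_limit(bev, cfg);		/* per-bufferevent bucket */

struct bufferevent_rate_limit_group *grp =
	bufferevent_rate_limit_group_new(base, cfg);
bufferevent_add_to_rate_limit_group(bev, grp);	/* shared group bucket */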


@ -97,7 +97,7 @@ const struct bufferevent_ops bufferevent_ops_socket = {
};
#define be_socket_add(ev, t) \
_bufferevent_add_event((ev), (t))
bufferevent_add_event_((ev), (t))
static void
bufferevent_socket_outbuf_cb(struct evbuffer *buf,
@ -131,7 +131,7 @@ bufferevent_readcb(evutil_socket_t fd, short event, void *arg)
short what = BEV_EVENT_READING;
ev_ssize_t howmuch = -1, readmax=-1;
_bufferevent_incref_and_lock(bufev);
bufferevent_incref_and_lock_(bufev);
if (event == EV_TIMEOUT) {
what |= BEV_EVENT_TIMEOUT;
@ -152,7 +152,7 @@ bufferevent_readcb(evutil_socket_t fd, short event, void *arg)
goto done;
}
}
readmax = _bufferevent_get_read_max(bufev_p);
readmax = bufferevent_get_read_max_(bufev_p);
if (howmuch < 0 || howmuch > readmax) /* The use of -1 for "unlimited"
* uglifies this code. XXXX */
howmuch = readmax;
@ -177,11 +177,11 @@ bufferevent_readcb(evutil_socket_t fd, short event, void *arg)
if (res <= 0)
goto error;
_bufferevent_decrement_read_buckets(bufev_p, res);
bufferevent_decrement_read_buckets_(bufev_p, res);
/* Invoke the user callback - must always be called last */
if (evbuffer_get_length(input) >= bufev->wm_read.low)
_bufferevent_run_readcb(bufev);
bufferevent_run_readcb_(bufev);
goto done;
@ -190,10 +190,10 @@ bufferevent_readcb(evutil_socket_t fd, short event, void *arg)
error:
bufferevent_disable(bufev, EV_READ);
_bufferevent_run_eventcb(bufev, what);
bufferevent_run_eventcb_(bufev, what);
done:
_bufferevent_decref_and_unlock(bufev);
bufferevent_decref_and_unlock_(bufev);
}
static void
@ -207,7 +207,7 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
int connected = 0;
ev_ssize_t atmost = -1;
_bufferevent_incref_and_lock(bufev);
bufferevent_incref_and_lock_(bufev);
if (event == EV_TIMEOUT) {
what |= BEV_EVENT_TIMEOUT;
@ -229,7 +229,7 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
if (c < 0) {
event_del(&bufev->ev_write);
event_del(&bufev->ev_read);
_bufferevent_run_eventcb(bufev, BEV_EVENT_ERROR);
bufferevent_run_eventcb_(bufev, BEV_EVENT_ERROR);
goto done;
} else {
connected = 1;
@ -237,12 +237,12 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
if (BEV_IS_ASYNC(bufev)) {
event_del(&bufev->ev_write);
bufferevent_async_set_connected(bufev);
_bufferevent_run_eventcb(bufev,
bufferevent_run_eventcb_(bufev,
BEV_EVENT_CONNECTED);
goto done;
}
#endif
_bufferevent_run_eventcb(bufev,
bufferevent_run_eventcb_(bufev,
BEV_EVENT_CONNECTED);
if (!(bufev->enabled & EV_WRITE) ||
bufev_p->write_suspended) {
@ -252,7 +252,7 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
}
}
atmost = _bufferevent_get_write_max(bufev_p);
atmost = bufferevent_get_write_max_(bufev_p);
if (bufev_p->write_suspended)
goto done;
@ -276,7 +276,7 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
if (res <= 0)
goto error;
_bufferevent_decrement_write_buckets(bufev_p, res);
bufferevent_decrement_write_buckets_(bufev_p, res);
}
if (evbuffer_get_length(bufev->output) == 0) {
@ -289,7 +289,7 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
*/
if ((res || !connected) &&
evbuffer_get_length(bufev->output) <= bufev->wm_write.low) {
_bufferevent_run_writecb(bufev);
bufferevent_run_writecb_(bufev);
}
goto done;
@ -302,10 +302,10 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
error:
bufferevent_disable(bufev, EV_WRITE);
_bufferevent_run_eventcb(bufev, what);
bufferevent_run_eventcb_(bufev, what);
done:
_bufferevent_decref_and_unlock(bufev);
bufferevent_decref_and_unlock_(bufev);
}
struct bufferevent *
@ -356,7 +356,7 @@ bufferevent_socket_connect(struct bufferevent *bev,
int result=-1;
int ownfd = 0;
_bufferevent_incref_and_lock(bev);
bufferevent_incref_and_lock_(bev);
if (!bufev_p)
goto done;
@ -419,12 +419,12 @@ bufferevent_socket_connect(struct bufferevent *bev,
goto done;
freesock:
_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR);
if (ownfd)
evutil_closesocket(fd);
/* do something about the error? */
done:
_bufferevent_decref_and_unlock(bev);
bufferevent_decref_and_unlock_(bev);
return result;
}
@ -443,8 +443,8 @@ bufferevent_connect_getaddrinfo_cb(int result, struct evutil_addrinfo *ai,
if (result != 0) {
bev_p->dns_error = result;
_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
_bufferevent_decref_and_unlock(bev);
bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR);
bufferevent_decref_and_unlock_(bev);
if (ai)
evutil_freeaddrinfo(ai);
return;
@ -454,7 +454,7 @@ bufferevent_connect_getaddrinfo_cb(int result, struct evutil_addrinfo *ai,
/* XXX use this return value */
r = bufferevent_socket_connect(bev, ai->ai_addr, (int)ai->ai_addrlen);
(void)r;
_bufferevent_decref_and_unlock(bev);
bufferevent_decref_and_unlock_(bev);
evutil_freeaddrinfo(ai);
}
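
The renamed run/decref helpers above all sit under the public connect path; a minimal sketch of that path from the caller's side, given an event_base *base (address and port are placeholders):

struct sockaddr_in sin;
struct bufferevent *bev;

memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = htonl(0x7f000001);	/* 127.0.0.1 */
sin.sin_port = htons(8080);

bev = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
if (bufferevent_socket_connect(bev,
	(struct sockaddr *)&sin, sizeof(sin)) < 0)
	bufferevent_free(bev);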


@ -32,8 +32,8 @@
* @(#)queue.h 8.5 (Berkeley) 8/20/94
*/
#ifndef _SYS_QUEUE_H_
#define _SYS_QUEUE_H_
#ifndef SYS_QUEUE_H__
#define SYS_QUEUE_H__
/*
* This file defines five types of data structures: singly-linked lists,
@ -485,4 +485,4 @@ struct { \
(elm2)->field.cqe_prev->field.cqe_next = (elm2); \
} while (0)
#endif /* !_SYS_QUEUE_H_ */
#endif /* !SYS_QUEUE_H__ */
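
This guard is the clearest illustration of why the conversion was needed: C reserves every identifier beginning with an underscore followed by an uppercase letter (or a second underscore) for the implementation (C99 7.1.3), and _SYS_QUEUE_H_ falls squarely in that range. The conforming pattern after this change:

#ifndef SYS_QUEUE_H__	/* trailing underscores: ours to use */
#define SYS_QUEUE_H__

/* ... declarations ... */

#endif /* !SYS_QUEUE_H__ */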


@ -283,21 +283,21 @@ struct evbuffer_multicast_parent {
} while (0)
/** Increase the reference count of buf by one. */
void _evbuffer_incref(struct evbuffer *buf);
void evbuffer_incref_(struct evbuffer *buf);
/** Increase the reference count of buf by one and acquire the lock. */
void _evbuffer_incref_and_lock(struct evbuffer *buf);
void evbuffer_incref_and_lock_(struct evbuffer *buf);
/** Pin a single buffer chain using a given flag. A pinned chunk may not be
* moved or freed until it is unpinned. */
void _evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag);
void evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag);
/** Unpin a single buffer chain using a given flag. */
void _evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag);
void evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag);
/** As evbuffer_free, but requires that we hold a lock on the buffer, and
* releases the lock before freeing it and the buffer. */
void _evbuffer_decref_and_unlock(struct evbuffer *buffer);
void evbuffer_decref_and_unlock_(struct evbuffer *buffer);
/** As evbuffer_expand, but does not guarantee that the newly allocated memory
* is contiguous. Instead, it may be split across two or more chunks. */
int _evbuffer_expand_fast(struct evbuffer *, size_t, int);
int evbuffer_expand_fast_(struct evbuffer *, size_t, int);
/** Helper: prepares for a readv/WSARecv call by expanding the buffer to
* hold enough memory to read 'howmuch' bytes in possibly noncontiguous memory.
@ -305,7 +305,7 @@ int _evbuffer_expand_fast(struct evbuffer *, size_t, int);
* extent, and *chainp to point to the first chain that we'll try to read into.
* Returns the number of vecs used.
*/
int _evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
int evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
struct evbuffer_iovec *vecs, int n_vecs, struct evbuffer_chain ***chainp,
int exact);
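
A rough sketch of a caller of this helper, given an evbuffer *buf and a readable fd, under the assumption that struct evbuffer_iovec matches the native struct iovec layout (libevent's own read path checks this before casting); the step that commits the bytes actually read back into the buffer is elided:

struct evbuffer_iovec vecs[2];
struct evbuffer_chain **chainp;
int n;
ssize_t r;

n = evbuffer_read_setup_vecs_(buf, 4096, vecs, 2, &chainp, 0);
r = readv(fd, (struct iovec *)vecs, n);
/* ... record r bytes as written into the chains ... */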

evdns.c

@ -414,9 +414,9 @@ static void evdns_base_free_and_unlock(struct evdns_base *base, int fail_request
static int strtoint(const char *const str);
#ifdef EVENT__DISABLE_THREAD_SUPPORT
#define EVDNS_LOCK(base) _EVUTIL_NIL_STMT
#define EVDNS_UNLOCK(base) _EVUTIL_NIL_STMT
#define ASSERT_LOCKED(base) _EVUTIL_NIL_STMT
#define EVDNS_LOCK(base) EVUTIL_NIL_STMT_
#define EVDNS_UNLOCK(base) EVUTIL_NIL_STMT_
#define ASSERT_LOCKED(base) EVUTIL_NIL_STMT_
#else
#define EVDNS_LOCK(base) \
EVLOCK_LOCK((base)->lock, 0)
@ -451,9 +451,9 @@ evdns_set_log_fn(evdns_debug_log_fn_type fn)
#define EVDNS_LOG_CHECK
#endif
static void _evdns_log(int warn, const char *fmt, ...) EVDNS_LOG_CHECK;
static void evdns_log_(int warn, const char *fmt, ...) EVDNS_LOG_CHECK;
static void
_evdns_log(int warn, const char *fmt, ...)
evdns_log_(int warn, const char *fmt, ...)
{
va_list args;
char buf[512];
@ -472,7 +472,7 @@ _evdns_log(int warn, const char *fmt, ...)
}
#define log _evdns_log
#define log evdns_log_
/* This walks the list of inflight requests to find the */
/* one with a matching transaction id. Returns NULL on */
@ -939,8 +939,8 @@ name_parse(u8 *packet, int length, int *idx, char *name_out, int name_out_len) {
int name_end = -1;
int j = *idx;
int ptr_count = 0;
#define GET32(x) do { if (j + 4 > length) goto err; memcpy(&_t32, packet + j, 4); j += 4; x = ntohl(_t32); } while (0)
#define GET16(x) do { if (j + 2 > length) goto err; memcpy(&_t, packet + j, 2); j += 2; x = ntohs(_t); } while (0)
#define GET32(x) do { if (j + 4 > length) goto err; memcpy(&t32_, packet + j, 4); j += 4; x = ntohl(t32_); } while (0)
#define GET16(x) do { if (j + 2 > length) goto err; memcpy(&t_, packet + j, 2); j += 2; x = ntohs(t_); } while (0)
#define GET8(x) do { if (j >= length) goto err; x = packet[j++]; } while (0)
char *cp = name_out;
@ -994,8 +994,8 @@ name_parse(u8 *packet, int length, int *idx, char *name_out, int name_out_len) {
static int
reply_parse(struct evdns_base *base, u8 *packet, int length) {
int j = 0, k = 0; /* index into packet */
u16 _t; /* used by the macros */
u32 _t32; /* used by the macros */
u16 t_; /* used by the macros */
u32 t32_; /* used by the macros */
char tmp_name[256], cmp_name[256]; /* used by the macros */
int name_matches = 0;
@ -1196,7 +1196,7 @@ static int
request_parse(u8 *packet, int length, struct evdns_server_port *port, struct sockaddr *addr, ev_socklen_t addrlen)
{
int j = 0; /* index into packet */
u16 _t; /* used by the macros */
u16 t_; /* used by the macros */
char tmp_name[256]; /* used by the macros */
int i;
@ -1582,20 +1582,20 @@ dnsname_to_labels(u8 *const buf, size_t buf_len, off_t j,
struct dnslabel_table *table) {
const char *end = name + name_len;
int ref = 0;
u16 _t;
u16 t_;
#define APPEND16(x) do { \
if (j + 2 > (off_t)buf_len) \
goto overflow; \
_t = htons(x); \
memcpy(buf + j, &_t, 2); \
t_ = htons(x); \
memcpy(buf + j, &t_, 2); \
j += 2; \
} while (0)
#define APPEND32(x) do { \
if (j + 4 > (off_t)buf_len) \
goto overflow; \
_t32 = htonl(x); \
memcpy(buf + j, &_t32, 4); \
t32_ = htonl(x); \
memcpy(buf + j, &t32_, 4); \
j += 4; \
} while (0)
@ -1661,7 +1661,7 @@ evdns_request_data_build(const char *const name, const size_t name_len,
const u16 trans_id, const u16 type, const u16 class,
u8 *const buf, size_t buf_len) {
off_t j = 0; /* current offset into buf */
u16 _t; /* used by the macros */
u16 t_; /* used by the macros */
APPEND16(trans_id);
APPEND16(0x0100); /* standard query, recursion needed */
@ -1873,8 +1873,8 @@ evdns_server_request_format_response(struct server_request *req, int err)
unsigned char buf[1500];
size_t buf_len = sizeof(buf);
off_t j = 0, r;
u16 _t;
u32 _t32;
u16 t_;
u32 t32_;
int i;
u16 flags;
struct dnslabel_table table;
@ -1932,8 +1932,8 @@ evdns_server_request_format_response(struct server_request *req, int err)
if (r < 0)
goto overflow;
j = r;
_t = htons( (short) (j-name_start) );
memcpy(buf+len_idx, &_t, 2);
t_ = htons( (short) (j-name_start) );
memcpy(buf+len_idx, &t_, 2);
} else {
APPEND16(item->datalen);
if (j+item->datalen > (off_t)buf_len)
@ -2430,7 +2430,7 @@ evdns_resume(void)
}
static int
_evdns_nameserver_add_impl(struct evdns_base *base, const struct sockaddr *address, int addrlen) {
evdns_nameserver_add_impl_(struct evdns_base *base, const struct sockaddr *address, int addrlen) {
/* first check to see if we already have this nameserver */
const struct nameserver *server = base->server_head, *const started_at = base->server_head;
@ -2520,7 +2520,7 @@ evdns_base_nameserver_add(struct evdns_base *base, unsigned long int address)
sin.sin_port = htons(53);
sin.sin_family = AF_INET;
EVDNS_LOCK(base);
res = _evdns_nameserver_add_impl(base, (struct sockaddr*)&sin, sizeof(sin));
res = evdns_nameserver_add_impl_(base, (struct sockaddr*)&sin, sizeof(sin));
EVDNS_UNLOCK(base);
return res;
}
@ -2572,7 +2572,7 @@ evdns_base_nameserver_ip_add(struct evdns_base *base, const char *ip_as_string)
sockaddr_setport(sa, 53);
EVDNS_LOCK(base);
res = _evdns_nameserver_add_impl(base, sa, len);
res = evdns_nameserver_add_impl_(base, sa, len);
EVDNS_UNLOCK(base);
return res;
}
@ -2591,7 +2591,7 @@ evdns_base_nameserver_sockaddr_add(struct evdns_base *base,
int res;
EVUTIL_ASSERT(base);
EVDNS_LOCK(base);
res = _evdns_nameserver_add_impl(base, sa, len);
res = evdns_nameserver_add_impl_(base, sa, len);
EVDNS_UNLOCK(base);
return res;
}
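
The trickiest renames in this file are the macro scratch variables: GET16/APPEND16 and friends expand to references to a local (t_, t32_) that every calling function must declare, so the macros and the declarations had to move in lockstep or nothing would compile. A reduced illustration of the pattern (u8/u16 are evdns.c's local typedefs):

#define GET16(x) do {				\
		if (j + 2 > length) goto err;	\
		memcpy(&t_, packet + j, 2);	\
		j += 2;				\
		x = ntohs(t_);			\
	} while (0)

static int
parse_flags(u8 *packet, int length)
{
	int j = 0;
	u16 t_;		/* scratch used by GET16 */
	u16 flags;

	GET16(flags);
	return flags;
err:
	return -1;
}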


@ -45,13 +45,13 @@ extern "C" {
/* map union members back */
/* mutually exclusive */
#define ev_signal_next _ev.ev_signal.ev_signal_next
#define ev_io_next _ev.ev_io.ev_io_next
#define ev_io_timeout _ev.ev_io.ev_timeout
#define ev_signal_next ev_.ev_signal.ev_signal_next
#define ev_io_next ev_.ev_io.ev_io_next
#define ev_io_timeout ev_.ev_io.ev_timeout
/* used only by signals */
#define ev_ncalls _ev.ev_signal.ev_ncalls
#define ev_pncalls _ev.ev_signal.ev_pncalls
#define ev_ncalls ev_.ev_signal.ev_ncalls
#define ev_pncalls ev_.ev_signal.ev_pncalls
/* Possible values for ev_closure in struct event. */
#define EV_CLOSURE_NONE 0
@ -164,8 +164,8 @@ struct event_changelist {
#ifndef EVENT__DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
extern int _event_debug_mode_on;
#define EVENT_DEBUG_MODE_IS_ON() (_event_debug_mode_on)
extern int event_debug_mode_on_;
#define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_)
#else
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif
@ -343,9 +343,9 @@ struct event_config {
#define N_ACTIVE_CALLBACKS(base) \
((base)->event_count_active + (base)->defer_queue.active_count)
int _evsig_set_handler(struct event_base *base, int evsignal,
int evsig_set_handler_(struct event_base *base, int evsignal,
void (*fn)(int));
int _evsig_restore_handler(struct event_base *base, int evsignal);
int evsig_restore_handler_(struct event_base *base, int evsignal);
void event_active_nolock(struct event *ev, int res, short count);

event.c

@ -188,11 +188,11 @@ eq_debug_entry(const struct event_debug_entry *a,
return a->ptr == b->ptr;
}
int _event_debug_mode_on = 0;
int event_debug_mode_on_ = 0;
/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *_event_debug_map_lock = NULL;
static void *event_debug_map_lock_ = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
HT_INITIALIZER();
@ -203,11 +203,11 @@ HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
/* Macro: record that ev is now setup (that is, ready for an add) */
#define _event_debug_note_setup(ev) do { \
if (_event_debug_mode_on) { \
#define event_debug_note_setup_(ev) do { \
if (event_debug_mode_on_) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
EVLOCK_LOCK(_event_debug_map_lock, 0); \
EVLOCK_LOCK(event_debug_map_lock_, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (dent) { \
dent->added = 0; \
@ -220,110 +220,110 @@ HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
dent->added = 0; \
HT_INSERT(event_debug_map, &global_debug_map, dent); \
} \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
} \
event_debug_mode_too_late = 1; \
} while (0)
/* Macro: record that ev is no longer setup */
#define _event_debug_note_teardown(ev) do { \
if (_event_debug_mode_on) { \
#define event_debug_note_teardown_(ev) do { \
if (event_debug_mode_on_) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
EVLOCK_LOCK(_event_debug_map_lock, 0); \
EVLOCK_LOCK(event_debug_map_lock_, 0); \
dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
if (dent) \
mm_free(dent); \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
} \
event_debug_mode_too_late = 1; \
} while (0)
/* Macro: record that ev is now added */
#define _event_debug_note_add(ev) do { \
if (_event_debug_mode_on) { \
#define event_debug_note_add_(ev) do { \
if (event_debug_mode_on_) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
EVLOCK_LOCK(_event_debug_map_lock, 0); \
EVLOCK_LOCK(event_debug_map_lock_, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (dent) { \
dent->added = 1; \
} else { \
event_errx(_EVENT_ERR_ABORT, \
event_errx(EVENT_ERR_ABORT_, \
"%s: noting an add on a non-setup event %p" \
" (events: 0x%x, fd: %d, flags: 0x%x)", \
__func__, (ev), (ev)->ev_events, \
(ev)->ev_fd, (ev)->ev_flags); \
} \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
} \
event_debug_mode_too_late = 1; \
} while (0)
/* Macro: record that ev is no longer added */
#define _event_debug_note_del(ev) do { \
if (_event_debug_mode_on) { \
#define event_debug_note_del_(ev) do { \
if (event_debug_mode_on_) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
EVLOCK_LOCK(_event_debug_map_lock, 0); \
EVLOCK_LOCK(event_debug_map_lock_, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (dent) { \
dent->added = 0; \
} else { \
event_errx(_EVENT_ERR_ABORT, \
event_errx(EVENT_ERR_ABORT_, \
"%s: noting a del on a non-setup event %p" \
" (events: 0x%x, fd: %d, flags: 0x%x)", \
__func__, (ev), (ev)->ev_events, \
(ev)->ev_fd, (ev)->ev_flags); \
} \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
} \
event_debug_mode_too_late = 1; \
} while (0)
/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
#define _event_debug_assert_is_setup(ev) do { \
if (_event_debug_mode_on) { \
#define event_debug_assert_is_setup_(ev) do { \
if (event_debug_mode_on_) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
EVLOCK_LOCK(_event_debug_map_lock, 0); \
EVLOCK_LOCK(event_debug_map_lock_, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (!dent) { \
event_errx(_EVENT_ERR_ABORT, \
event_errx(EVENT_ERR_ABORT_, \
"%s called on a non-initialized event %p" \
" (events: 0x%x, fd: %d, flags: 0x%x)", \
__func__, (ev), (ev)->ev_events, \
(ev)->ev_fd, (ev)->ev_flags); \
} \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
} \
} while (0)
/* Macro: assert that ev is not added (i.e., okay to tear down or set
* up again) */
#define _event_debug_assert_not_added(ev) do { \
if (_event_debug_mode_on) { \
#define event_debug_assert_not_added_(ev) do { \
if (event_debug_mode_on_) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
EVLOCK_LOCK(_event_debug_map_lock, 0); \
EVLOCK_LOCK(event_debug_map_lock_, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (dent && dent->added) { \
event_errx(_EVENT_ERR_ABORT, \
event_errx(EVENT_ERR_ABORT_, \
"%s called on an already added event %p" \
" (events: 0x%x, fd: %d, flags: 0x%x)", \
__func__, (ev), (ev)->ev_events, \
(ev)->ev_fd, (ev)->ev_flags); \
} \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
} \
} while (0)
#else
#define _event_debug_note_setup(ev) \
#define event_debug_note_setup_(ev) \
((void)0)
#define _event_debug_note_teardown(ev) \
#define event_debug_note_teardown_(ev) \
((void)0)
#define _event_debug_note_add(ev) \
#define event_debug_note_add_(ev) \
((void)0)
#define _event_debug_note_del(ev) \
#define event_debug_note_del_(ev) \
((void)0)
#define _event_debug_assert_is_setup(ev) \
#define event_debug_assert_is_setup_(ev) \
((void)0)
#define _event_debug_assert_not_added(ev) \
#define event_debug_assert_not_added_(ev) \
((void)0)
#endif
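
All of this bookkeeping is dormant unless debug mode is switched on, and, per the checks above, the switch must be thrown before any event or base exists. A usage sketch:

int
main(void)
{
	struct event_base *base;

	event_enable_debug_mode();	/* must precede any event or base */
	base = event_base_new();
	/* event_assign/add/del are now checked against the debug map */
	event_base_free(base);
	return 0;
}
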
@ -540,13 +540,13 @@ void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
if (_event_debug_mode_on)
if (event_debug_mode_on_)
event_errx(1, "%s was called twice!", __func__);
if (event_debug_mode_too_late)
event_errx(1, "%s must be called *before* creating any events "
"or event_bases",__func__);
_event_debug_mode_on = 1;
event_debug_mode_on_ = 1;
HT_INIT(event_debug_map, &global_debug_map);
#endif
@ -558,14 +558,14 @@ event_disable_debug_mode(void)
{
struct event_debug_entry **ent, *victim;
EVLOCK_LOCK(_event_debug_map_lock, 0);
EVLOCK_LOCK(event_debug_map_lock_, 0);
for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
victim = *ent;
ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
mm_free(victim);
}
HT_CLEAR(event_debug_map, &global_debug_map);
EVLOCK_UNLOCK(_event_debug_map_lock , 0);
EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
}
#endif
@ -1845,7 +1845,7 @@ event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, shor
if (!base)
base = current_base;
_event_debug_assert_not_added(ev);
event_debug_assert_not_added_(ev);
ev->ev_base = base;
@ -1881,7 +1881,7 @@ event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, shor
ev->ev_pri = base->nactivequeues / 2;
}
_event_debug_note_setup(ev);
event_debug_note_setup_(ev);
return 0;
}
@ -1893,7 +1893,7 @@ event_base_set(struct event_base *base, struct event *ev)
if (ev->ev_flags != EVLIST_INIT)
return (-1);
_event_debug_assert_is_setup(ev);
event_debug_assert_is_setup_(ev);
ev->ev_base = base;
ev->ev_pri = base->nactivequeues/2;
@ -1928,11 +1928,11 @@ event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(
void
event_free(struct event *ev)
{
_event_debug_assert_is_setup(ev);
event_debug_assert_is_setup_(ev);
/* make sure that this event won't be coming back to haunt us. */
event_del(ev);
_event_debug_note_teardown(ev);
event_debug_note_teardown_(ev);
mm_free(ev);
}
@ -1940,8 +1940,8 @@ event_free(struct event *ev)
void
event_debug_unassign(struct event *ev)
{
_event_debug_assert_not_added(ev);
_event_debug_note_teardown(ev);
event_debug_assert_not_added_(ev);
event_debug_note_teardown_(ev);
ev->ev_flags &= ~EVLIST_INIT;
}
@ -1954,7 +1954,7 @@ event_debug_unassign(struct event *ev)
int
event_priority_set(struct event *ev, int pri)
{
_event_debug_assert_is_setup(ev);
event_debug_assert_is_setup_(ev);
if (ev->ev_flags & EVLIST_ACTIVE)
return (-1);
@ -1975,7 +1975,7 @@ event_pending(const struct event *ev, short event, struct timeval *tv)
{
int flags = 0;
_event_debug_assert_is_setup(ev);
event_debug_assert_is_setup_(ev);
if (ev->ev_flags & EVLIST_INSERTED)
flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
@ -2013,7 +2013,7 @@ event_initialized(const struct event *ev)
void
event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
{
_event_debug_assert_is_setup(event);
event_debug_assert_is_setup_(event);
if (base_out)
*base_out = event->ev_base;
@ -2036,35 +2036,35 @@ event_get_struct_event_size(void)
evutil_socket_t
event_get_fd(const struct event *ev)
{
_event_debug_assert_is_setup(ev);
event_debug_assert_is_setup_(ev);
return ev->ev_fd;
}
struct event_base *
event_get_base(const struct event *ev)
{
_event_debug_assert_is_setup(ev);
event_debug_assert_is_setup_(ev);
return ev->ev_base;
}
short
event_get_events(const struct event *ev)
{
_event_debug_assert_is_setup(ev);
event_debug_assert_is_setup_(ev);
return ev->ev_events;
}
event_callback_fn
event_get_callback(const struct event *ev)
{
_event_debug_assert_is_setup(ev);
event_debug_assert_is_setup_(ev);
return ev->ev_callback;
}
void *
event_get_callback_arg(const struct event *ev)
{
_event_debug_assert_is_setup(ev);
event_debug_assert_is_setup_(ev);
return ev->ev_arg;
}
@ -2147,7 +2147,7 @@ event_add_internal(struct event *ev, const struct timeval *tv,
int notify = 0;
EVENT_BASE_ASSERT_LOCKED(base);
_event_debug_assert_is_setup(ev);
event_debug_assert_is_setup_(ev);
event_debug((
"event_add: event: %p (fd %d), %s%s%scall %p",
@ -2273,7 +2273,7 @@ event_add_internal(struct event *ev, const struct timeval *tv,
if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
evthread_notify_base(base);
_event_debug_note_add(ev);
event_debug_note_add_(ev);
return (res);
}
@ -2367,7 +2367,7 @@ event_del_internal(struct event *ev)
if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
evthread_notify_base(base);
_event_debug_note_del(ev);
event_debug_note_del_(ev);
return (res);
}
@ -2382,7 +2382,7 @@ event_active(struct event *ev, int res, short ncalls)
EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
_event_debug_assert_is_setup(ev);
event_debug_assert_is_setup_(ev);
event_active_nolock(ev, res, ncalls);
@ -2797,9 +2797,9 @@ event_get_method(void)
}
#ifndef EVENT__DISABLE_MM_REPLACEMENT
static void *(*_mm_malloc_fn)(size_t sz) = NULL;
static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
static void (*_mm_free_fn)(void *p) = NULL;
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
static void (*mm_free_fn_)(void *p) = NULL;
void *
event_mm_malloc_(size_t sz)
@ -2807,8 +2807,8 @@ event_mm_malloc_(size_t sz)
if (sz == 0)
return NULL;
if (_mm_malloc_fn)
return _mm_malloc_fn(sz);
if (mm_malloc_fn_)
return mm_malloc_fn_(sz);
else
return malloc(sz);
}
@ -2819,12 +2819,12 @@ event_mm_calloc_(size_t count, size_t size)
if (count == 0 || size == 0)
return NULL;
if (_mm_malloc_fn) {
if (mm_malloc_fn_) {
size_t sz = count * size;
void *p = NULL;
if (count > EV_SIZE_MAX / size)
goto error;
p = _mm_malloc_fn(sz);
p = mm_malloc_fn_(sz);
if (p)
return memset(p, 0, sz);
} else {
@ -2850,12 +2850,12 @@ event_mm_strdup_(const char *str)
return NULL;
}
if (_mm_malloc_fn) {
if (mm_malloc_fn_) {
size_t ln = strlen(str);
void *p = NULL;
if (ln == EV_SIZE_MAX)
goto error;
p = _mm_malloc_fn(ln+1);
p = mm_malloc_fn_(ln+1);
if (p)
return memcpy(p, str, ln+1);
} else
@ -2873,8 +2873,8 @@ error:
void *
event_mm_realloc_(void *ptr, size_t sz)
{
if (_mm_realloc_fn)
return _mm_realloc_fn(ptr, sz);
if (mm_realloc_fn_)
return mm_realloc_fn_(ptr, sz);
else
return realloc(ptr, sz);
}
@ -2882,8 +2882,8 @@ event_mm_realloc_(void *ptr, size_t sz)
void
event_mm_free_(void *ptr)
{
if (_mm_free_fn)
_mm_free_fn(ptr);
if (mm_free_fn_)
mm_free_fn_(ptr);
else
free(ptr);
}
@ -2893,9 +2893,9 @@ event_set_mem_functions(void *(*malloc_fn)(size_t sz),
void *(*realloc_fn)(void *ptr, size_t sz),
void (*free_fn)(void *ptr))
{
_mm_malloc_fn = malloc_fn;
_mm_realloc_fn = realloc_fn;
_mm_free_fn = free_fn;
mm_malloc_fn_ = malloc_fn;
mm_realloc_fn_ = realloc_fn;
mm_free_fn_ = free_fn;
}
#endif
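
A sketch of installing replacements through the public hook; the wrapper functions are hypothetical and just fall through to the <stdlib.h> defaults:

static void *my_malloc(size_t sz) { return malloc(sz); }
static void *my_realloc(void *p, size_t sz) { return realloc(p, sz); }
static void my_free(void *p) { free(p); }

/* Early in main(), before any other libevent call: */
event_set_mem_functions(my_malloc, my_realloc, my_free);
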
@ -3119,7 +3119,7 @@ int
event_global_setup_locks_(const int enable_locks)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
EVTHREAD_SETUP_GLOBAL_LOCK(_event_debug_map_lock, 0);
EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
#endif
if (evsig_global_setup_locks_(enable_locks) < 0)
return -1;


@ -221,7 +221,7 @@ err:
}
static void
_event_iocp_port_unlock_and_free(struct event_iocp_port *port)
event_iocp_port_unlock_and_free_(struct event_iocp_port *port)
{
DeleteCriticalSection(&port->lock);
CloseHandle(port->port);
@ -262,7 +262,7 @@ event_iocp_shutdown(struct event_iocp_port *port, long waitMsec)
n = port->n_live_threads;
LeaveCriticalSection(&port->lock);
if (n == 0) {
_event_iocp_port_unlock_and_free(port);
event_iocp_port_unlock_and_free_(port);
return 0;
} else {
return -1;


@ -56,7 +56,7 @@ TAILQ_HEAD(evrpc_hook_list, evrpc_hook);
struct evrpc_hook_ctx;
TAILQ_HEAD(evrpc_pause_list, evrpc_hook_ctx);
struct _evrpc_hooks {
struct evrpc_hooks_ {
/* hooks for processing outbound and inbound rpcs */
struct evrpc_hook_list in_hooks;
struct evrpc_hook_list out_hooks;
@ -69,7 +69,7 @@ struct _evrpc_hooks {
#define paused_requests common.pause_requests
struct evrpc_base {
struct _evrpc_hooks common;
struct evrpc_hooks_ common;
/* the HTTP server under which we register our RPC calls */
struct evhttp* http_server;
@ -83,7 +83,7 @@ void evrpc_reqstate_free(struct evrpc_req_generic* rpc_state);
/* A pool for holding evhttp_connection objects */
struct evrpc_pool {
struct _evrpc_hooks common;
struct evrpc_hooks_ common;
struct event_base *base;


@ -122,7 +122,7 @@ evrpc_add_hook(void *vbase,
int (*cb)(void *, struct evhttp_request *, struct evbuffer *, void *),
void *cb_arg)
{
struct _evrpc_hooks *base = vbase;
struct evrpc_hooks_ *base = vbase;
struct evrpc_hook_list *head = NULL;
struct evrpc_hook *hook = NULL;
switch (hook_type) {
@ -168,7 +168,7 @@ evrpc_remove_hook_internal(struct evrpc_hook_list *head, void *handle)
int
evrpc_remove_hook(void *vbase, enum EVRPC_HOOK_TYPE hook_type, void *handle)
{
struct _evrpc_hooks *base = vbase;
struct evrpc_hooks_ *base = vbase;
struct evrpc_hook_list *head = NULL;
switch (hook_type) {
case EVRPC_INPUT:
@ -766,7 +766,7 @@ static int
evrpc_pause_request(void *vbase, void *ctx,
void (*cb)(void *, enum EVRPC_HOOK_RESULT))
{
struct _evrpc_hooks *base = vbase;
struct evrpc_hooks_ *base = vbase;
struct evrpc_hook_ctx *pause = mm_malloc(sizeof(*pause));
if (pause == NULL)
return (-1);
@ -781,7 +781,7 @@ evrpc_pause_request(void *vbase, void *ctx,
int
evrpc_resume_request(void *vbase, void *ctx, enum EVRPC_HOOK_RESULT res)
{
struct _evrpc_hooks *base = vbase;
struct evrpc_hooks_ *base = vbase;
struct evrpc_pause_list *head = &base->pause_requests;
struct evrpc_hook_ctx *pause;
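
A sketch of the public hook API these structures back, given an evrpc_base *rpc_base (my_input_hook is hypothetical and simply lets every request through):

static int
my_input_hook(void *ctx, struct evhttp_request *req,
    struct evbuffer *evbuf, void *cb_arg)
{
	return EVRPC_CONTINUE;
}

/* In setup code: */
void *handle = evrpc_add_hook(rpc_base, EVRPC_INPUT, my_input_hook, NULL);
/* ... later ... */
evrpc_remove_hook(rpc_base, EVRPC_INPUT, handle);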


@ -49,59 +49,59 @@ struct event_base;
#if ! defined(EVENT__DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
/* Global function pointers to lock-related functions. NULL if locking isn't
enabled. */
extern struct evthread_lock_callbacks _evthread_lock_fns;
extern struct evthread_condition_callbacks _evthread_cond_fns;
extern unsigned long (*_evthread_id_fn)(void);
extern int _evthread_lock_debugging_enabled;
extern struct evthread_lock_callbacks evthread_lock_fns_;
extern struct evthread_condition_callbacks evthread_cond_fns_;
extern unsigned long (*evthread_id_fn_)(void);
extern int evthread_lock_debugging_enabled_;
/** Return the ID of the current thread, or 1 if threading isn't enabled. */
#define EVTHREAD_GET_ID() \
(_evthread_id_fn ? _evthread_id_fn() : 1)
(evthread_id_fn_ ? evthread_id_fn_() : 1)
/** Return true iff we're in the thread that is currently (or most recently)
* running a given event_base's loop. Requires lock. */
#define EVBASE_IN_THREAD(base) \
(_evthread_id_fn == NULL || \
(base)->th_owner_id == _evthread_id_fn())
(evthread_id_fn_ == NULL || \
(base)->th_owner_id == evthread_id_fn_())
/** Return true iff we need to notify the base's main thread about changes to
* its state, because it's currently running the main loop in another
* thread. Requires lock. */
#define EVBASE_NEED_NOTIFY(base) \
(_evthread_id_fn != NULL && \
(evthread_id_fn_ != NULL && \
(base)->running_loop && \
(base)->th_owner_id != _evthread_id_fn())
(base)->th_owner_id != evthread_id_fn_())
/** Allocate a new lock, and store it in lockvar, a void*. Sets lockvar to
NULL if locking is not enabled. */
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
((lockvar) = _evthread_lock_fns.alloc ? \
_evthread_lock_fns.alloc(locktype) : NULL)
((lockvar) = evthread_lock_fns_.alloc ? \
evthread_lock_fns_.alloc(locktype) : NULL)
/** Free a given lock, if it is present and locking is enabled. */
#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
do { \
void *_lock_tmp_ = (lockvar); \
if (_lock_tmp_ && _evthread_lock_fns.free) \
_evthread_lock_fns.free(_lock_tmp_, (locktype)); \
if (_lock_tmp_ && evthread_lock_fns_.free) \
evthread_lock_fns_.free(_lock_tmp_, (locktype)); \
} while (0)
/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode) \
do { \
if (lockvar) \
_evthread_lock_fns.lock(mode, lockvar); \
evthread_lock_fns_.lock(mode, lockvar); \
} while (0)
/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode) \
do { \
if (lockvar) \
_evthread_lock_fns.unlock(mode, lockvar); \
evthread_lock_fns_.unlock(mode, lockvar); \
} while (0)
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
#define _EVLOCK_SORTLOCKS(lockvar1, lockvar2) \
#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
do { \
if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
void *tmp = lockvar1; \
@ -125,8 +125,8 @@ extern int _evthread_lock_debugging_enabled;
* locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock) \
do { \
if ((lock) && _evthread_lock_debugging_enabled) { \
EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
if ((lock) && evthread_lock_debugging_enabled_) { \
EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
} \
} while (0)
@ -136,8 +136,8 @@ static inline int EVLOCK_TRY_LOCK(void *lock);
static inline int
EVLOCK_TRY_LOCK(void *lock)
{
if (lock && _evthread_lock_fns.lock) {
int r = _evthread_lock_fns.lock(EVTHREAD_TRY, lock);
if (lock && evthread_lock_fns_.lock) {
int r = evthread_lock_fns_.lock(EVTHREAD_TRY, lock);
return !r;
} else {
/* Locking is disabled either globally or for this thing;
@ -149,79 +149,79 @@ EVLOCK_TRY_LOCK(void *lock)
/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar) \
do { \
(condvar) = _evthread_cond_fns.alloc_condition ? \
_evthread_cond_fns.alloc_condition(0) : NULL; \
(condvar) = evthread_cond_fns_.alloc_condition ? \
evthread_cond_fns_.alloc_condition(0) : NULL; \
} while (0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond) \
do { \
if (cond) \
_evthread_cond_fns.free_condition((cond)); \
evthread_cond_fns_.free_condition((cond)); \
} while (0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond) \
( (cond) ? _evthread_cond_fns.signal_condition((cond), 0) : 0 )
( (cond) ? evthread_cond_fns_.signal_condition((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond) \
( (cond) ? _evthread_cond_fns.signal_condition((cond), 1) : 0 )
( (cond) ? evthread_cond_fns_.signal_condition((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled. Must be called while
* holding 'lock'. The lock will be released until the condition is
* signalled, at which point it will be acquired again. Returns 0 for
* success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock) \
( (cond) ? _evthread_cond_fns.wait_condition((cond), (lock), NULL) : 0 )
( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
* on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
( (cond) ? _evthread_cond_fns.wait_condition((cond), (lock), (tv)) : 0 )
( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), (tv)) : 0 )
/** True iff locking functions have been configured. */
#define EVTHREAD_LOCKING_ENABLED() \
(_evthread_lock_fns.lock != NULL)
(evthread_lock_fns_.lock != NULL)
#elif ! defined(EVENT__DISABLE_THREAD_SUPPORT)
unsigned long _evthreadimpl_get_id(void);
int _evthreadimpl_is_lock_debugging_enabled(void);
void *_evthreadimpl_lock_alloc(unsigned locktype);
void _evthreadimpl_lock_free(void *lock, unsigned locktype);
int _evthreadimpl_lock_lock(unsigned mode, void *lock);
int _evthreadimpl_lock_unlock(unsigned mode, void *lock);
void *_evthreadimpl_cond_alloc(unsigned condtype);
void _evthreadimpl_cond_free(void *cond);
int _evthreadimpl_cond_signal(void *cond, int broadcast);
int _evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv);
int _evthreadimpl_locking_enabled(void);
unsigned long evthreadimpl_get_id_(void);
int evthreadimpl_is_lock_debugging_enabled_(void);
void *evthreadimpl_lock_alloc_(unsigned locktype);
void evthreadimpl_lock_free_(void *lock, unsigned locktype);
int evthreadimpl_lock_lock_(unsigned mode, void *lock);
int evthreadimpl_lock_unlock_(unsigned mode, void *lock);
void *evthreadimpl_cond_alloc_(unsigned condtype);
void evthreadimpl_cond_free_(void *cond);
int evthreadimpl_cond_signal_(void *cond, int broadcast);
int evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv);
int evthreadimpl_locking_enabled_(void);
#define EVTHREAD_GET_ID() _evthreadimpl_get_id()
#define EVTHREAD_GET_ID() evthreadimpl_get_id_()
#define EVBASE_IN_THREAD(base) \
((base)->th_owner_id == _evthreadimpl_get_id())
((base)->th_owner_id == evthreadimpl_get_id_())
#define EVBASE_NEED_NOTIFY(base) \
((base)->running_loop && \
((base)->th_owner_id != _evthreadimpl_get_id()))
((base)->th_owner_id != evthreadimpl_get_id_()))
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
((lockvar) = _evthreadimpl_lock_alloc(locktype))
((lockvar) = evthreadimpl_lock_alloc_(locktype))
#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
do { \
void *_lock_tmp_ = (lockvar); \
if (_lock_tmp_) \
_evthreadimpl_lock_free(_lock_tmp_, (locktype)); \
evthreadimpl_lock_free_(_lock_tmp_, (locktype)); \
} while (0)
/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode) \
do { \
if (lockvar) \
_evthreadimpl_lock_lock(mode, lockvar); \
evthreadimpl_lock_lock_(mode, lockvar); \
} while (0)
/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode) \
do { \
if (lockvar) \
_evthreadimpl_lock_unlock(mode, lockvar); \
evthreadimpl_lock_unlock_(mode, lockvar); \
} while (0)
/** Lock an event_base, if it is set up for locking. Acquires the lock
@ -239,8 +239,8 @@ int _evthreadimpl_locking_enabled(void);
* locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock) \
do { \
if ((lock) && _evthreadimpl_is_lock_debugging_enabled()) { \
EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
if ((lock) && evthreadimpl_is_lock_debugging_enabled_()) { \
EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
} \
} while (0)
@ -251,7 +251,7 @@ static inline int
EVLOCK_TRY_LOCK(void *lock)
{
if (lock) {
int r = _evthreadimpl_lock_lock(EVTHREAD_TRY, lock);
int r = evthreadimpl_lock_lock_(EVTHREAD_TRY, lock);
return !r;
} else {
/* Locking is disabled either globally or for this thing;
@ -263,59 +263,59 @@ EVLOCK_TRY_LOCK(void *lock)
/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar) \
do { \
(condvar) = _evthreadimpl_cond_alloc(0); \
(condvar) = evthreadimpl_cond_alloc_(0); \
} while (0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond) \
do { \
if (cond) \
_evthreadimpl_cond_free((cond)); \
evthreadimpl_cond_free_((cond)); \
} while (0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond) \
( (cond) ? _evthreadimpl_cond_signal((cond), 0) : 0 )
( (cond) ? evthreadimpl_cond_signal_((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond) \
( (cond) ? _evthreadimpl_cond_signal((cond), 1) : 0 )
( (cond) ? evthreadimpl_cond_signal_((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled. Must be called while
* holding 'lock'. The lock will be released until the condition is
* signalled, at which point it will be acquired again. Returns 0 for
* success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock) \
( (cond) ? _evthreadimpl_cond_wait((cond), (lock), NULL) : 0 )
( (cond) ? evthreadimpl_cond_wait_((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
* on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
( (cond) ? _evthreadimpl_cond_wait((cond), (lock), (tv)) : 0 )
( (cond) ? evthreadimpl_cond_wait_((cond), (lock), (tv)) : 0 )
#define EVTHREAD_LOCKING_ENABLED() \
(_evthreadimpl_locking_enabled())
(evthreadimpl_locking_enabled_())
#else /* EVENT__DISABLE_THREAD_SUPPORT */
#define EVTHREAD_GET_ID() 1
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) _EVUTIL_NIL_STMT
#define EVTHREAD_FREE_LOCK(lockvar, locktype) _EVUTIL_NIL_STMT
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
#define EVTHREAD_FREE_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
#define EVLOCK_LOCK(lockvar, mode) _EVUTIL_NIL_STMT
#define EVLOCK_UNLOCK(lockvar, mode) _EVUTIL_NIL_STMT
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) _EVUTIL_NIL_STMT
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) _EVUTIL_NIL_STMT
#define EVLOCK_LOCK(lockvar, mode) EVUTIL_NIL_STMT_
#define EVLOCK_UNLOCK(lockvar, mode) EVUTIL_NIL_STMT_
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
#define EVBASE_IN_THREAD(base) 1
#define EVBASE_NEED_NOTIFY(base) 0
#define EVBASE_ACQUIRE_LOCK(base, lock) _EVUTIL_NIL_STMT
#define EVBASE_RELEASE_LOCK(base, lock) _EVUTIL_NIL_STMT
#define EVLOCK_ASSERT_LOCKED(lock) _EVUTIL_NIL_STMT
#define EVBASE_ACQUIRE_LOCK(base, lock) EVUTIL_NIL_STMT_
#define EVBASE_RELEASE_LOCK(base, lock) EVUTIL_NIL_STMT_
#define EVLOCK_ASSERT_LOCKED(lock) EVUTIL_NIL_STMT_
#define EVLOCK_TRY_LOCK(lock) 1
#define EVTHREAD_ALLOC_COND(condvar) _EVUTIL_NIL_STMT
#define EVTHREAD_FREE_COND(cond) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_SIGNAL(cond) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_BROADCAST(cond) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_WAIT(cond, lock) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) _EVUTIL_NIL_STMT
#define EVTHREAD_ALLOC_COND(condvar) EVUTIL_NIL_STMT_
#define EVTHREAD_FREE_COND(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_SIGNAL(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_BROADCAST(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_WAIT(cond, lock) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) EVUTIL_NIL_STMT_
#define EVTHREAD_LOCKING_ENABLED() 0
@ -324,7 +324,7 @@ EVLOCK_TRY_LOCK(void *lock)
/* This code is shared between both lock impls */
#if ! defined(EVENT__DISABLE_THREAD_SUPPORT)
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
#define _EVLOCK_SORTLOCKS(lockvar1, lockvar2) \
#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
do { \
if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
void *tmp = lockvar1; \
@ -339,7 +339,7 @@ EVLOCK_TRY_LOCK(void *lock)
do { \
void *_lock1_tmplock = (lock1); \
void *_lock2_tmplock = (lock2); \
_EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock); \
EVLOCK_SORTLOCKS_(_lock1_tmplock,_lock2_tmplock); \
EVLOCK_LOCK(_lock1_tmplock,mode1); \
if (_lock2_tmplock != _lock1_tmplock) \
EVLOCK_LOCK(_lock2_tmplock,mode2); \
@ -349,14 +349,14 @@ EVLOCK_TRY_LOCK(void *lock)
do { \
void *_lock1_tmplock = (lock1); \
void *_lock2_tmplock = (lock2); \
_EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock); \
EVLOCK_SORTLOCKS_(_lock1_tmplock,_lock2_tmplock); \
if (_lock2_tmplock != _lock1_tmplock) \
EVLOCK_UNLOCK(_lock2_tmplock,mode2); \
EVLOCK_UNLOCK(_lock1_tmplock,mode1); \
} while (0)
int _evthread_is_debug_lock_held(void *lock);
void *_evthread_debug_get_real_lock(void *lock);
int evthread_is_debug_lock_held_(void *lock);
void *evthread_debug_get_real_lock_(void *lock);
void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
int enable_locks);
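
EVLOCK_LOCK2/EVLOCK_UNLOCK2 above sidestep deadlock by always taking the two locks in pointer order; a standalone sketch of the same idea (illustrative only):

static void
lock_both(void *a, void *b)
{
	/* Sort so every thread acquires a given pair in the same order. */
	if (a && b && a > b) {
		void *tmp = a; a = b; b = tmp;
	}
	EVLOCK_LOCK(a, 0);
	if (b != a)
		EVLOCK_LOCK(b, 0);
}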


@ -46,41 +46,41 @@
#endif
/* globals */
GLOBAL int _evthread_lock_debugging_enabled = 0;
GLOBAL struct evthread_lock_callbacks _evthread_lock_fns = {
GLOBAL int evthread_lock_debugging_enabled_ = 0;
GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
0, 0, NULL, NULL, NULL, NULL
};
GLOBAL unsigned long (*_evthread_id_fn)(void) = NULL;
GLOBAL struct evthread_condition_callbacks _evthread_cond_fns = {
GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
0, NULL, NULL, NULL, NULL
};
/* Used for debugging */
static struct evthread_lock_callbacks _original_lock_fns = {
static struct evthread_lock_callbacks original_lock_fns_ = {
0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks _original_cond_fns = {
static struct evthread_condition_callbacks original_cond_fns_ = {
0, NULL, NULL, NULL, NULL
};
void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
_evthread_id_fn = id_fn;
evthread_id_fn_ = id_fn;
}
int
evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
{
struct evthread_lock_callbacks *target =
_evthread_lock_debugging_enabled
? &_original_lock_fns : &_evthread_lock_fns;
evthread_lock_debugging_enabled_
? &original_lock_fns_ : &evthread_lock_fns_;
if (!cbs) {
if (target->alloc)
event_warnx("Trying to disable lock functions after "
"they have been set up will probaby not work.");
memset(target, 0, sizeof(_evthread_lock_fns));
memset(target, 0, sizeof(evthread_lock_fns_));
return 0;
}
if (target->alloc) {
@ -99,7 +99,7 @@ evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
return -1;
}
if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
memcpy(target, cbs, sizeof(_evthread_lock_fns));
memcpy(target, cbs, sizeof(evthread_lock_fns_));
return event_global_setup_locks_(1);
} else {
return -1;
@ -110,15 +110,15 @@ int
evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
{
struct evthread_condition_callbacks *target =
_evthread_lock_debugging_enabled
? &_original_cond_fns : &_evthread_cond_fns;
evthread_lock_debugging_enabled_
? &original_cond_fns_ : &evthread_cond_fns_;
if (!cbs) {
if (target->alloc_condition)
event_warnx("Trying to disable condition functions "
"after they have been set up will probaby not "
"work.");
memset(target, 0, sizeof(_evthread_cond_fns));
memset(target, 0, sizeof(evthread_cond_fns_));
return 0;
}
if (target->alloc_condition) {
@ -137,12 +137,12 @@ evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
}
if (cbs->alloc_condition && cbs->free_condition &&
cbs->signal_condition && cbs->wait_condition) {
memcpy(target, cbs, sizeof(_evthread_cond_fns));
memcpy(target, cbs, sizeof(evthread_cond_fns_));
}
if (_evthread_lock_debugging_enabled) {
_evthread_cond_fns.alloc_condition = cbs->alloc_condition;
_evthread_cond_fns.free_condition = cbs->free_condition;
_evthread_cond_fns.signal_condition = cbs->signal_condition;
if (evthread_lock_debugging_enabled_) {
evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
evthread_cond_fns_.free_condition = cbs->free_condition;
evthread_cond_fns_.signal_condition = cbs->signal_condition;
}
return 0;
}
@ -165,8 +165,8 @@ debug_lock_alloc(unsigned locktype)
struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
if (!result)
return NULL;
if (_original_lock_fns.alloc) {
if (!(result->lock = _original_lock_fns.alloc(
if (original_lock_fns_.alloc) {
if (!(result->lock = original_lock_fns_.alloc(
locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
mm_free(result);
return NULL;
@ -188,8 +188,8 @@ debug_lock_free(void *lock_, unsigned locktype)
EVUTIL_ASSERT(lock->count == 0);
EVUTIL_ASSERT(locktype == lock->locktype);
EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
if (_original_lock_fns.free) {
_original_lock_fns.free(lock->lock,
if (original_lock_fns_.free) {
original_lock_fns_.free(lock->lock,
lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
}
lock->lock = NULL;
@ -205,9 +205,9 @@ evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
++lock->count;
if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
EVUTIL_ASSERT(lock->count == 1);
if (_evthread_id_fn) {
if (evthread_id_fn_) {
unsigned long me;
me = _evthread_id_fn();
me = evthread_id_fn_();
if (lock->count > 1)
EVUTIL_ASSERT(lock->held_by == me);
lock->held_by = me;
@ -223,8 +223,8 @@ debug_lock_lock(unsigned mode, void *lock_)
EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
else
EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
if (_original_lock_fns.lock)
res = _original_lock_fns.lock(mode, lock->lock);
if (original_lock_fns_.lock)
res = original_lock_fns_.lock(mode, lock->lock);
if (!res) {
evthread_debug_lock_mark_locked(mode, lock);
}
@ -239,9 +239,9 @@ evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
else
EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
if (_evthread_id_fn) {
if (evthread_id_fn_) {
unsigned long me;
me = _evthread_id_fn();
me = evthread_id_fn_();
EVUTIL_ASSERT(lock->held_by == me);
if (lock->count == 1)
lock->held_by = 0;
@ -256,8 +256,8 @@ debug_lock_unlock(unsigned mode, void *lock_)
struct debug_lock *lock = lock_;
int res = 0;
evthread_debug_lock_mark_unlocked(mode, lock);
if (_original_lock_fns.unlock)
res = _original_lock_fns.unlock(mode, lock->lock);
if (original_lock_fns_.unlock)
res = original_lock_fns_.unlock(mode, lock->lock);
return res;
}
@ -270,7 +270,7 @@ debug_cond_wait(void *_cond, void *_lock, const struct timeval *tv)
EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
EVLOCK_ASSERT_LOCKED(_lock);
evthread_debug_lock_mark_unlocked(0, lock);
r = _original_cond_fns.wait_condition(_cond, lock->lock, tv);
r = original_cond_fns_.wait_condition(_cond, lock->lock, tv);
evthread_debug_lock_mark_locked(0, lock);
return r;
}
@ -286,30 +286,30 @@ evthread_enable_lock_debuging(void)
debug_lock_lock,
debug_lock_unlock
};
if (_evthread_lock_debugging_enabled)
if (evthread_lock_debugging_enabled_)
return;
memcpy(&_original_lock_fns, &_evthread_lock_fns,
memcpy(&original_lock_fns_, &evthread_lock_fns_,
sizeof(struct evthread_lock_callbacks));
memcpy(&_evthread_lock_fns, &cbs,
memcpy(&evthread_lock_fns_, &cbs,
sizeof(struct evthread_lock_callbacks));
memcpy(&_original_cond_fns, &_evthread_cond_fns,
memcpy(&original_cond_fns_, &evthread_cond_fns_,
sizeof(struct evthread_condition_callbacks));
_evthread_cond_fns.wait_condition = debug_cond_wait;
_evthread_lock_debugging_enabled = 1;
evthread_cond_fns_.wait_condition = debug_cond_wait;
evthread_lock_debugging_enabled_ = 1;
/* XXX return value should get checked. */
event_global_setup_locks_(0);
}
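
What evthread_enable_lock_debuging() does, stripped to its skeleton: the live callback table is copied into original_lock_fns_, then overwritten with checking wrappers that delegate to the saved copy. A hedged sketch of that save-and-interpose pattern, with invented names:

struct lock_cbs {
	int (*lock)(void *);
	int (*unlock)(void *);
};

static struct lock_cbs live_cbs;   /* table the library actually calls */
static struct lock_cbs saved_cbs;  /* the user's original callbacks */

static int
checked_lock(void *l)
{
	/* bookkeeping (owner, recursion count) would go here */
	return saved_cbs.lock ? saved_cbs.lock(l) : 0;
}

static int
checked_unlock(void *l)
{
	return saved_cbs.unlock ? saved_cbs.unlock(l) : 0;
}

static void
enable_checking(void)
{
	saved_cbs = live_cbs;             /* remember the real impls */
	live_cbs.lock = checked_lock;     /* interpose the wrappers */
	live_cbs.unlock = checked_unlock;
}
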
int
_evthread_is_debug_lock_held(void *lock_)
evthread_is_debug_lock_held_(void *lock_)
{
struct debug_lock *lock = lock_;
if (! lock->count)
return 0;
if (_evthread_id_fn) {
unsigned long me = _evthread_id_fn();
if (evthread_id_fn_) {
unsigned long me = evthread_id_fn_();
if (lock->held_by != me)
return 0;
}
@ -317,7 +317,7 @@ _evthread_is_debug_lock_held(void *lock_)
}
void *
_evthread_debug_get_real_lock(void *lock_)
evthread_debug_get_real_lock_(void *lock_)
{
struct debug_lock *lock = lock_;
return lock->lock;
@ -332,23 +332,23 @@ evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
3) we're turning on locking; debugging is not on.
4) we're turning on locking; debugging is on. */
if (!enable_locks && _original_lock_fns.alloc == NULL) {
if (!enable_locks && original_lock_fns_.alloc == NULL) {
/* Case 1: allocate a debug lock. */
EVUTIL_ASSERT(lock_ == NULL);
return debug_lock_alloc(locktype);
} else if (!enable_locks && _original_lock_fns.alloc != NULL) {
} else if (!enable_locks && original_lock_fns_.alloc != NULL) {
/* Case 2: wrap the lock in a debug lock. */
struct debug_lock *lock;
EVUTIL_ASSERT(lock_ != NULL);
if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
/* We can't wrap it: We need a recursive lock */
_original_lock_fns.free(lock_, locktype);
original_lock_fns_.free(lock_, locktype);
return debug_lock_alloc(locktype);
}
lock = mm_malloc(sizeof(struct debug_lock));
if (!lock) {
_original_lock_fns.free(lock_, locktype);
original_lock_fns_.free(lock_, locktype);
return NULL;
}
lock->lock = lock_;
@ -356,18 +356,18 @@ evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
lock->count = 0;
lock->held_by = 0;
return lock;
} else if (enable_locks && ! _evthread_lock_debugging_enabled) {
} else if (enable_locks && ! evthread_lock_debugging_enabled_) {
/* Case 3: allocate a regular lock */
EVUTIL_ASSERT(lock_ == NULL);
return _evthread_lock_fns.alloc(locktype);
return evthread_lock_fns_.alloc(locktype);
} else {
/* Case 4: Fill in a debug lock with a real lock */
struct debug_lock *lock = lock_;
EVUTIL_ASSERT(enable_locks &&
_evthread_lock_debugging_enabled);
evthread_lock_debugging_enabled_);
EVUTIL_ASSERT(lock->locktype == locktype);
EVUTIL_ASSERT(lock->lock == NULL);
lock->lock = _original_lock_fns.alloc(
lock->lock = original_lock_fns_.alloc(
locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
if (!lock->lock) {
lock->count = -200;
@ -381,76 +381,76 @@ evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
#ifndef EVTHREAD_EXPOSE_STRUCTS
unsigned long
_evthreadimpl_get_id()
evthreadimpl_get_id_()
{
return _evthread_id_fn ? _evthread_id_fn() : 1;
return evthread_id_fn_ ? evthread_id_fn_() : 1;
}
void *
_evthreadimpl_lock_alloc(unsigned locktype)
evthreadimpl_lock_alloc_(unsigned locktype)
{
return _evthread_lock_fns.alloc ?
_evthread_lock_fns.alloc(locktype) : NULL;
return evthread_lock_fns_.alloc ?
evthread_lock_fns_.alloc(locktype) : NULL;
}
void
_evthreadimpl_lock_free(void *lock, unsigned locktype)
evthreadimpl_lock_free_(void *lock, unsigned locktype)
{
if (_evthread_lock_fns.free)
_evthread_lock_fns.free(lock, locktype);
if (evthread_lock_fns_.free)
evthread_lock_fns_.free(lock, locktype);
}
int
_evthreadimpl_lock_lock(unsigned mode, void *lock)
evthreadimpl_lock_lock_(unsigned mode, void *lock)
{
if (_evthread_lock_fns.lock)
return _evthread_lock_fns.lock(mode, lock);
if (evthread_lock_fns_.lock)
return evthread_lock_fns_.lock(mode, lock);
else
return 0;
}
int
_evthreadimpl_lock_unlock(unsigned mode, void *lock)
evthreadimpl_lock_unlock_(unsigned mode, void *lock)
{
if (_evthread_lock_fns.unlock)
return _evthread_lock_fns.unlock(mode, lock);
if (evthread_lock_fns_.unlock)
return evthread_lock_fns_.unlock(mode, lock);
else
return 0;
}
void *
_evthreadimpl_cond_alloc(unsigned condtype)
evthreadimpl_cond_alloc_(unsigned condtype)
{
return _evthread_cond_fns.alloc_condition ?
_evthread_cond_fns.alloc_condition(condtype) : NULL;
return evthread_cond_fns_.alloc_condition ?
evthread_cond_fns_.alloc_condition(condtype) : NULL;
}
void
_evthreadimpl_cond_free(void *cond)
evthreadimpl_cond_free_(void *cond)
{
if (_evthread_cond_fns.free_condition)
_evthread_cond_fns.free_condition(cond);
if (evthread_cond_fns_.free_condition)
evthread_cond_fns_.free_condition(cond);
}
int
_evthreadimpl_cond_signal(void *cond, int broadcast)
evthreadimpl_cond_signal_(void *cond, int broadcast)
{
if (_evthread_cond_fns.signal_condition)
return _evthread_cond_fns.signal_condition(cond, broadcast);
if (evthread_cond_fns_.signal_condition)
return evthread_cond_fns_.signal_condition(cond, broadcast);
else
return 0;
}
int
_evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv)
evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
{
if (_evthread_cond_fns.wait_condition)
return _evthread_cond_fns.wait_condition(cond, lock, tv);
if (evthread_cond_fns_.wait_condition)
return evthread_cond_fns_.wait_condition(cond, lock, tv);
else
return 0;
}
int
_evthreadimpl_is_lock_debugging_enabled(void)
evthreadimpl_is_lock_debugging_enabled_(void)
{
return _evthread_lock_debugging_enabled;
return evthread_lock_debugging_enabled_;
}
int
_evthreadimpl_locking_enabled(void)
evthreadimpl_locking_enabled_(void)
{
return _evthread_lock_fns.lock != NULL;
return evthread_lock_fns_.lock != NULL;
}
#endif


@ -2273,7 +2273,7 @@ evutil_getenv(const char *varname)
}
long
_evutil_weakrand(void)
evutil_weakrand_(void)
{
#ifdef _WIN32
return rand();


@ -90,8 +90,8 @@ ev_arc4random_buf(void *buf, size_t n)
#define ssize_t _EVENT_SSIZE_t
#endif
#define ARC4RANDOM_EXPORT static
#define _ARC4_LOCK() EVLOCK_LOCK(arc4rand_lock, 0)
#define _ARC4_UNLOCK() EVLOCK_UNLOCK(arc4rand_lock, 0)
#define ARC4_LOCK_() EVLOCK_LOCK(arc4rand_lock, 0)
#define ARC4_UNLOCK_() EVLOCK_UNLOCK(arc4rand_lock, 0)
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *arc4rand_lock;
#endif
@ -117,11 +117,11 @@ evutil_secure_rng_init(void)
{
int val;
_ARC4_LOCK();
ARC4_LOCK_();
if (!arc4_seeded_ok)
arc4_stir();
val = arc4_seeded_ok ? 0 : -1;
_ARC4_UNLOCK();
ARC4_UNLOCK_();
return val;
}
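
ARC4_LOCK_()/ARC4_UNLOCK_() expand to EVLOCK_LOCK()/EVLOCK_UNLOCK(), which are in turn no-op statements when thread support is compiled out, so call sites such as evutil_secure_rng_init() need no #ifdefs of their own. The pattern, sketched with hypothetical names:

#include <pthread.h>

#ifdef NO_THREADS                       /* hypothetical config macro */
#define RNG_LOCK_()   ((void)0)         /* still a valid statement */
#define RNG_UNLOCK_() ((void)0)
#else
static pthread_mutex_t rng_mutex = PTHREAD_MUTEX_INITIALIZER;
#define RNG_LOCK_()   pthread_mutex_lock(&rng_mutex)
#define RNG_UNLOCK_() pthread_mutex_unlock(&rng_mutex)
#endif

static int rng_seeded;

int
rng_init(void)
{
	int val;
	RNG_LOCK_();                    /* compiles away if unthreaded */
	if (!rng_seeded)
		rng_seeded = 1;         /* real seeding would happen here */
	val = rng_seeded ? 0 : -1;
	RNG_UNLOCK_();
	return val;
}
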


@ -42,7 +42,7 @@ struct addrinfo;
struct evhttp_request;
/* Indicates an unknown request method. */
#define _EVHTTP_REQ_UNKNOWN (1<<15)
#define EVHTTP_REQ_UNKNOWN_ (1<<15)
enum evhttp_connection_state {
EVCON_DISCONNECTED, /**< not currently connected not trying either*/

http.c

@ -1532,7 +1532,7 @@ evhttp_parse_request_line(struct evhttp_request *req, char *line)
return (-1);
method_len = (uri - method) - 1;
type = _EVHTTP_REQ_UNKNOWN;
type = EVHTTP_REQ_UNKNOWN_;
/* First line */
switch (method_len) {
@ -1639,7 +1639,7 @@ evhttp_parse_request_line(struct evhttp_request *req, char *line)
break;
} /* switch */
if (type == _EVHTTP_REQ_UNKNOWN) {
if (type == EVHTTP_REQ_UNKNOWN_) {
event_debug(("%s: bad method %s on request %p from %s",
__func__, method, req, req->remote_host));
/* No error yet; we'll give a better error later when
@ -4480,20 +4480,20 @@ err:
void
evhttp_uri_free(struct evhttp_uri *uri)
{
#define _URI_FREE_STR(f) \
#define URI_FREE_STR_(f) \
if (uri->f) { \
mm_free(uri->f); \
}
_URI_FREE_STR(scheme);
_URI_FREE_STR(userinfo);
_URI_FREE_STR(host);
_URI_FREE_STR(path);
_URI_FREE_STR(query);
_URI_FREE_STR(fragment);
URI_FREE_STR_(scheme);
URI_FREE_STR_(userinfo);
URI_FREE_STR_(host);
URI_FREE_STR_(path);
URI_FREE_STR_(query);
URI_FREE_STR_(fragment);
mm_free(uri);
#undef _URI_FREE_STR
#undef URI_FREE_STR_
}
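
URI_FREE_STR_ is the define/use/#undef idiom: a helper macro scoped to a single function so the repetitive per-field cleanup stays in one place and the name never leaks into the rest of the file. The same shape in miniature (illustrative example, not libevent code):

struct point { int x, y, z; };

static void
clamp_point(struct point *p)
{
#define CLAMP_(f) do { if (p->f < 0) p->f = 0; } while (0)
	CLAMP_(x);
	CLAMP_(y);
	CLAMP_(z);
#undef CLAMP_
}
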
char *
@ -4503,7 +4503,7 @@ evhttp_uri_join(struct evhttp_uri *uri, char *buf, size_t limit)
size_t joined_size = 0;
char *output = NULL;
#define _URI_ADD(f) evbuffer_add(tmp, uri->f, strlen(uri->f))
#define URI_ADD_(f) evbuffer_add(tmp, uri->f, strlen(uri->f))
if (!uri || !buf || !limit)
return NULL;
@ -4513,14 +4513,14 @@ evhttp_uri_join(struct evhttp_uri *uri, char *buf, size_t limit)
return NULL;
if (uri->scheme) {
_URI_ADD(scheme);
URI_ADD_(scheme);
evbuffer_add(tmp, ":", 1);
}
if (uri->host) {
evbuffer_add(tmp, "//", 2);
if (uri->userinfo)
evbuffer_add_printf(tmp,"%s@", uri->userinfo);
_URI_ADD(host);
URI_ADD_(host);
if (uri->port >= 0)
evbuffer_add_printf(tmp,":%d", uri->port);
@ -4529,16 +4529,16 @@ evhttp_uri_join(struct evhttp_uri *uri, char *buf, size_t limit)
}
if (uri->path)
_URI_ADD(path);
URI_ADD_(path);
if (uri->query) {
evbuffer_add(tmp, "?", 1);
_URI_ADD(query);
URI_ADD_(query);
}
if (uri->fragment) {
evbuffer_add(tmp, "#", 1);
_URI_ADD(fragment);
URI_ADD_(fragment);
}
evbuffer_add(tmp, "\0", 1); /* NUL */
@ -4557,7 +4557,7 @@ err:
evbuffer_free(tmp);
return output;
#undef _URI_ADD
#undef URI_ADD_
}
const char *
@ -4596,7 +4596,7 @@ evhttp_uri_get_fragment(const struct evhttp_uri *uri)
return uri->fragment;
}
#define _URI_SET_STR(f) do { \
#define URI_SET_STR_(f) do { \
if (uri->f) \
mm_free(uri->f); \
if (f) { \
@ -4615,7 +4615,7 @@ evhttp_uri_set_scheme(struct evhttp_uri *uri, const char *scheme)
if (scheme && !scheme_ok(scheme, scheme+strlen(scheme)))
return -1;
_URI_SET_STR(scheme);
URI_SET_STR_(scheme);
return 0;
}
int
@ -4623,7 +4623,7 @@ evhttp_uri_set_userinfo(struct evhttp_uri *uri, const char *userinfo)
{
if (userinfo && !userinfo_ok(userinfo, userinfo+strlen(userinfo)))
return -1;
_URI_SET_STR(userinfo);
URI_SET_STR_(userinfo);
return 0;
}
int
@ -4639,7 +4639,7 @@ evhttp_uri_set_host(struct evhttp_uri *uri, const char *host)
}
}
_URI_SET_STR(host);
URI_SET_STR_(host);
return 0;
}
int
@ -4659,7 +4659,7 @@ evhttp_uri_set_path(struct evhttp_uri *uri, const char *path)
if (path && end_of_cpath(path, PART_PATH, uri->flags) != path+strlen(path))
return -1;
_URI_SET_STR(path);
URI_SET_STR_(path);
return 0;
}
int
@ -4667,7 +4667,7 @@ evhttp_uri_set_query(struct evhttp_uri *uri, const char *query)
{
if (query && end_of_cpath(query, PART_QUERY, uri->flags) != query+strlen(query))
return -1;
_URI_SET_STR(query);
URI_SET_STR_(query);
return 0;
}
int
@ -4675,6 +4675,6 @@ evhttp_uri_set_fragment(struct evhttp_uri *uri, const char *fragment)
{
if (fragment && end_of_cpath(fragment, PART_FRAGMENT, uri->flags) != fragment+strlen(fragment))
return -1;
_URI_SET_STR(fragment);
URI_SET_STR_(fragment);
return 0;
}


@ -124,7 +124,7 @@ struct evbuffer_ptr {
struct {
void *chain;
size_t pos_in_chain;
} _internal;
} internal_;
};
/** Describes a single extent of memory inside an evbuffer. Used for
@ -135,7 +135,7 @@ struct evbuffer_ptr {
#ifdef EVENT__HAVE_SYS_UIO_H
#define evbuffer_iovec iovec
/* Internal use -- defined only if we are using the native struct iovec */
#define _EVBUFFER_IOVEC_IS_NATIVE
#define EVBUFFER_IOVEC_IS_NATIVE_
#else
struct evbuffer_iovec {
/** The start of the extent of memory. */


@ -595,10 +595,10 @@ void event_base_free(struct event_base *);
/** @name Log severities
*/
/**@{*/
#define _EVENT_LOG_DEBUG 0
#define _EVENT_LOG_MSG 1
#define _EVENT_LOG_WARN 2
#define _EVENT_LOG_ERR 3
#define EVENT_LOG_DEBUG 0
#define EVENT_LOG_MSG 1
#define EVENT_LOG_WARN 2
#define EVENT_LOG_ERR 3
/**@}*/
/**
@ -611,7 +611,7 @@ typedef void (*event_log_cb)(int severity, const char *msg);
Redirect Libevent's log messages.
@param cb a function taking two arguments: an integer severity between
_EVENT_LOG_DEBUG and _EVENT_LOG_ERR, and a string. If cb is NULL,
EVENT_LOG_DEBUG and EVENT_LOG_ERR, and a string. If cb is NULL,
then the default log is used.
NOTE: The function you provide *must not* call any other libevent
@ -635,7 +635,7 @@ typedef void (*event_fatal_cb)(int err);
something is wrong with your program, or with Libevent: any subsequent calls
to Libevent may result in undefined behavior.
Libevent will (almost) always log an _EVENT_LOG_ERR message before calling
Libevent will (almost) always log an EVENT_LOG_ERR message before calling
this function; look at the last log message to see why Libevent has died.
*/
void event_set_fatal_callback(event_fatal_cb cb);


@ -66,7 +66,7 @@ extern "C" {
/* Fix so that people don't have to run with <sys/queue.h> */
#ifndef TAILQ_ENTRY
#define _EVENT_DEFINED_TQENTRY
#define EVENT_DEFINED_TQENTRY_
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
@ -75,7 +75,7 @@ struct { \
#endif /* !TAILQ_ENTRY */
#ifndef TAILQ_HEAD
#define _EVENT_DEFINED_TQHEAD
#define EVENT_DEFINED_TQHEAD_
#define TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; \
@ -85,7 +85,7 @@ struct name { \
/* Fix so that people don't have to run with <sys/queue.h> */
#ifndef LIST_ENTRY
#define _EVENT_DEFINED_LISTENTRY
#define EVENT_DEFINED_LISTENTRY_
#define LIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
@ -120,7 +120,7 @@ struct event {
/* Allows deletes in callback */
short *ev_pncalls;
} ev_signal;
} _ev;
} ev_;
short ev_events;
short ev_res; /* result passed to event callback */
@ -136,21 +136,21 @@ struct event {
TAILQ_HEAD (event_list, event);
#ifdef _EVENT_DEFINED_TQENTRY
#ifdef EVENT_DEFINED_TQENTRY_
#undef TAILQ_ENTRY
#endif
#ifdef _EVENT_DEFINED_TQHEAD
#ifdef EVENT_DEFINED_TQHEAD_
#undef TAILQ_HEAD
#endif
#ifdef _EVENT_DEFINED_LISTENTRY
#ifdef EVENT_DEFINED_LISTENTRY_
#undef LIST_ENTRY
struct event_dlist;
#undef _EVENT_DEFINED_LISTENTRY
#undef EVENT_DEFINED_LISTENTRY_
#else
LIST_HEAD (event_dlist, event);
#endif /* _EVENT_DEFINED_LISTENTRY */
#endif /* EVENT_DEFINED_LISTENTRY_ */
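
The EVENT_DEFINED_TQENTRY_/EVENT_DEFINED_TQHEAD_/EVENT_DEFINED_LISTENTRY_ guards implement define-if-missing bookkeeping: supply a fallback only when <sys/queue.h> hasn't, remember that the fallback is ours, and retract it afterwards so user code still sees the platform's definition. Sketched with an invented guard name:

#ifndef LIST_ENTRY
#define DEFINED_LISTENTRY_FALLBACK_
#define LIST_ENTRY(type) \
struct { \
	struct type *le_next; \
	struct type **le_prev; \
}
#endif

struct item {
	LIST_ENTRY(item) link;          /* works either way */
};

#ifdef DEFINED_LISTENTRY_FALLBACK_
#undef LIST_ENTRY                       /* don't shadow the real one */
#undef DEFINED_LISTENTRY_FALLBACK_
#endif
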
#ifdef __cplusplus
}


@ -34,7 +34,7 @@ extern "C" {
/* Fix so that people don't have to run with <sys/queue.h> */
/* XXXX This code is duplicated with event_struct.h */
#ifndef TAILQ_ENTRY
#define _EVENT_DEFINED_TQENTRY
#define EVENT_DEFINED_TQENTRY_
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
@ -43,7 +43,7 @@ struct { \
#endif /* !TAILQ_ENTRY */
#ifndef TAILQ_HEAD
#define _EVENT_DEFINED_TQHEAD
#define EVENT_DEFINED_TQHEAD_
#define TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; \
@ -65,11 +65,11 @@ struct evkeyval {
TAILQ_HEAD (evkeyvalq, evkeyval);
/* XXXX This code is duplicated with event_struct.h */
#ifdef _EVENT_DEFINED_TQENTRY
#ifdef EVENT_DEFINED_TQENTRY_
#undef TAILQ_ENTRY
#endif
#ifdef _EVENT_DEFINED_TQHEAD
#ifdef EVENT_DEFINED_TQHEAD_
#undef TAILQ_HEAD
#endif


@ -224,7 +224,7 @@ extern "C" {
@name Limits for integer types
These macros hold the largest or smallest values possible for the
ev_[u]int*_t types.
ev_[u]int*t_ types.
@{
*/


@ -115,9 +115,9 @@ void event_overlapped_init(struct event_overlapped *, iocp_callback cb);
struct evbuffer *evbuffer_overlapped_new(evutil_socket_t fd);
/** XXXX Document (nickm) */
evutil_socket_t _evbuffer_overlapped_get_fd(struct evbuffer *buf);
evutil_socket_t evbuffer_overlapped_get_fd_(struct evbuffer *buf);
void _evbuffer_overlapped_set_fd(struct evbuffer *buf, evutil_socket_t fd);
void evbuffer_overlapped_set_fd_(struct evbuffer *buf, evutil_socket_t fd);
/** Start reading data onto the end of an overlapped evbuffer.


@ -438,7 +438,7 @@ kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *
* if the handler for SIGCHLD is SIG_IGN, the system reaps
* zombie processes for us, and we don't get any notification.
* This appears to be the only signal with this quirk. */
if (_evsig_set_handler(base, nsignal,
if (evsig_set_handler_(base, nsignal,
nsignal == SIGCHLD ? SIG_DFL : SIG_IGN) == -1)
return (-1);
@ -467,7 +467,7 @@ kq_sig_del(struct event_base *base, int nsignal, short old, short events, void *
if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
return (-1);
if (_evsig_restore_handler(base, nsignal) == -1)
if (evsig_restore_handler_(base, nsignal) == -1)
return (-1);
return (0);
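
evsig_set_handler_() and evsig_restore_handler_() bracket the kqueue code above: install libevent's handler while remembering the previous disposition, then reinstate it when the event is deleted. The core save/restore move, sketched with sigaction and invented names (a single saved slot, for brevity):

#include <signal.h>

static struct sigaction saved_sa;

static int
install_handler(int signo, void (*fn)(int))
{
	struct sigaction sa;
	sa.sa_handler = fn;
	sa.sa_flags = SA_RESTART;
	sigemptyset(&sa.sa_mask);
	return sigaction(signo, &sa, &saved_sa); /* old one saved */
}

static int
restore_handler(int signo)
{
	return sigaction(signo, &saved_sa, NULL);
}
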


@ -37,7 +37,7 @@
#define EV_NORETURN
#endif
#define _EVENT_ERR_ABORT ((int)0xdeaddead)
#define EVENT_ERR_ABORT_ ((int)0xdeaddead)
#define USE_GLOBAL_FOR_DEBUG_LOGGING
@ -47,13 +47,13 @@
#ifdef EVENT_DEBUG_LOGGING_ENABLED
#ifdef USE_GLOBAL_FOR_DEBUG_LOGGING
extern ev_uint32_t _event_debug_logging_mask;
#define _event_debug_get_logging_mask() (_event_debug_logging_mask)
extern ev_uint32_t event_debug_logging_mask_;
#define event_debug_get_logging_mask_() (event_debug_logging_mask_)
#else
ev_uint32_t _event_debug_get_logging_mask(void);
ev_uint32_t event_debug_get_logging_mask_(void);
#endif
#else
#define _event_debug_get_logging_mask() (0)
#define event_debug_get_logging_mask_() (0)
#endif
void event_err(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3) EV_NORETURN;
@ -63,12 +63,12 @@ void event_sock_warn(evutil_socket_t sock, const char *fmt, ...) EV_CHECK_FMT(2,
void event_errx(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3) EV_NORETURN;
void event_warnx(const char *fmt, ...) EV_CHECK_FMT(1,2);
void event_msgx(const char *fmt, ...) EV_CHECK_FMT(1,2);
void _event_debugx(const char *fmt, ...) EV_CHECK_FMT(1,2);
void event_debugx_(const char *fmt, ...) EV_CHECK_FMT(1,2);
#ifdef EVENT_DEBUG_LOGGING_ENABLED
#define event_debug(x) do { \
if (_event_debug_get_logging_mask()) { \
_event_debugx x; \
if (event_debug_get_logging_mask_()) { \
event_debugx_ x; \
} \
} while (0)
#else
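
event_debug()'s doubled parentheses are the classic pre-C99 trick for a variadic-looking macro: the single macro argument is itself a complete parenthesized argument list, so "event_debugx_ x" pastes into a real call, and the mask check skips all formatting work when debug logging is off. A self-contained sketch of the same mechanism (hypothetical names):

#include <stdarg.h>
#include <stdio.h>

static unsigned debug_mask = 1;         /* 0 disables at run time */

static void
debugx(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fputc('\n', stderr);
}

/* x must be a parenthesized argument list, e.g.
 *   DEBUG_LOG(("fd %d: %s", fd, msg));            */
#define DEBUG_LOG(x) do { if (debug_mask) debugx x; } while (0)
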

log.c

@ -57,7 +57,7 @@
#include "log-internal.h"
static void _warn_helper(int severity, const char *errstr, const char *fmt,
static void warn_helper_(int severity, const char *errstr, const char *fmt,
va_list ap);
static void event_log(int severity, const char *msg);
static void event_exit(int errcode) EV_NORETURN;
@ -72,13 +72,13 @@ static event_fatal_cb fatal_fn = NULL;
#endif
#ifdef USE_GLOBAL_FOR_DEBUG_LOGGING
ev_uint32_t _event_debug_logging_mask = DEFAULT_MASK;
ev_uint32_t event_debug_logging_mask_ = DEFAULT_MASK;
#else
static ev_uint32_t _event_debug_logging_mask = DEFAULT_MASK;
static ev_uint32_t event_debug_logging_mask_ = DEFAULT_MASK;
ev_uint32_t
_event_debug_get_logging_mask(void)
event_debug_get_logging_mask_(void)
{
return _event_debug_logging_mask;
return event_debug_logging_mask_;
}
#endif
#endif /* EVENT_DEBUG_LOGGING_ENABLED */
@ -95,7 +95,7 @@ event_exit(int errcode)
if (fatal_fn) {
fatal_fn(errcode);
exit(errcode); /* should never be reached */
} else if (errcode == _EVENT_ERR_ABORT)
} else if (errcode == EVENT_ERR_ABORT_)
abort();
else
exit(errcode);
@ -107,7 +107,7 @@ event_err(int eval, const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_ERR, strerror(errno), fmt, ap);
warn_helper_(EVENT_LOG_ERR, strerror(errno), fmt, ap);
va_end(ap);
event_exit(eval);
}
@ -118,7 +118,7 @@ event_warn(const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_WARN, strerror(errno), fmt, ap);
warn_helper_(EVENT_LOG_WARN, strerror(errno), fmt, ap);
va_end(ap);
}
@ -129,7 +129,7 @@ event_sock_err(int eval, evutil_socket_t sock, const char *fmt, ...)
int err = evutil_socket_geterror(sock);
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_ERR, evutil_socket_error_to_string(err), fmt, ap);
warn_helper_(EVENT_LOG_ERR, evutil_socket_error_to_string(err), fmt, ap);
va_end(ap);
event_exit(eval);
}
@ -141,7 +141,7 @@ event_sock_warn(evutil_socket_t sock, const char *fmt, ...)
int err = evutil_socket_geterror(sock);
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_WARN, evutil_socket_error_to_string(err), fmt, ap);
warn_helper_(EVENT_LOG_WARN, evutil_socket_error_to_string(err), fmt, ap);
va_end(ap);
}
@ -151,7 +151,7 @@ event_errx(int eval, const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_ERR, NULL, fmt, ap);
warn_helper_(EVENT_LOG_ERR, NULL, fmt, ap);
va_end(ap);
event_exit(eval);
}
@ -162,7 +162,7 @@ event_warnx(const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_WARN, NULL, fmt, ap);
warn_helper_(EVENT_LOG_WARN, NULL, fmt, ap);
va_end(ap);
}
@ -172,22 +172,22 @@ event_msgx(const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_MSG, NULL, fmt, ap);
warn_helper_(EVENT_LOG_MSG, NULL, fmt, ap);
va_end(ap);
}
void
_event_debugx(const char *fmt, ...)
event_debugx_(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_DEBUG, NULL, fmt, ap);
warn_helper_(EVENT_LOG_DEBUG, NULL, fmt, ap);
va_end(ap);
}
static void
_warn_helper(int severity, const char *errstr, const char *fmt, va_list ap)
warn_helper_(int severity, const char *errstr, const char *fmt, va_list ap)
{
char buf[1024];
size_t len;
@ -223,16 +223,16 @@ event_log(int severity, const char *msg)
else {
const char *severity_str;
switch (severity) {
case _EVENT_LOG_DEBUG:
case EVENT_LOG_DEBUG:
severity_str = "debug";
break;
case _EVENT_LOG_MSG:
case EVENT_LOG_MSG:
severity_str = "msg";
break;
case _EVENT_LOG_WARN:
case EVENT_LOG_WARN:
severity_str = "warn";
break;
case _EVENT_LOG_ERR:
case EVENT_LOG_ERR:
severity_str = "err";
break;
default:


@ -209,7 +209,7 @@ evsig_init(struct event_base *base)
/* Helper: set the signal handler for evsignal to handler in base, so that
* we can restore the original handler when we clear the current one. */
int
_evsig_set_handler(struct event_base *base,
evsig_set_handler_(struct event_base *base,
int evsignal, void (__cdecl *handler)(int))
{
#ifdef EVENT__HAVE_SIGACTION
@ -299,7 +299,7 @@ evsig_add(struct event_base *base, evutil_socket_t evsignal, short old, short ev
EVSIGBASE_UNLOCK();
event_debug(("%s: %d: changing signal handler", __func__, (int)evsignal));
if (_evsig_set_handler(base, (int)evsignal, evsig_handler) == -1) {
if (evsig_set_handler_(base, (int)evsignal, evsig_handler) == -1) {
goto err;
}
@ -321,7 +321,7 @@ err:
}
int
_evsig_restore_handler(struct event_base *base, int evsignal)
evsig_restore_handler_(struct event_base *base, int evsignal)
{
int ret = 0;
struct evsig_info *sig = &base->sig;
@ -369,7 +369,7 @@ evsig_del(struct event_base *base, evutil_socket_t evsignal, short old, short ev
--base->sig.ev_n_signals_added;
EVSIGBASE_UNLOCK();
return (_evsig_restore_handler(base, (int)evsignal));
return (evsig_restore_handler_(base, (int)evsignal));
}
static void __cdecl
@ -422,7 +422,7 @@ evsig_dealloc(struct event_base *base)
for (i = 0; i < NSIG; ++i) {
if (i < base->sig.sh_old_max && base->sig.sh_old[i] != NULL)
_evsig_restore_handler(base, i);
evsig_restore_handler_(base, i);
}
EVSIGBASE_LOCK();
if (base == evsig_base) {


@ -10,8 +10,8 @@ extern "C" {
#ifndef EVENT__HAVE_STRLCPY
#include <string.h>
size_t _event_strlcpy(char *dst, const char *src, size_t siz);
#define strlcpy _event_strlcpy
size_t event_strlcpy_(char *dst, const char *src, size_t siz);
#define strlcpy event_strlcpy_
#endif
#ifdef __cplusplus


@ -45,7 +45,7 @@ static char *rcsid = "$OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp
* Returns strlen(src); if retval >= siz, truncation occurred.
*/
size_t
_event_strlcpy(dst, src, siz)
event_strlcpy_(dst, src, siz)
char *dst;
const char *src;
size_t siz;
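
The comment above states the strlcpy contract: the return value is strlen(src), the length the copy wanted to be, so callers detect truncation by comparing it against the destination size. Typical use (hypothetical caller):

#include <stddef.h>

size_t event_strlcpy_(char *dst, const char *src, size_t siz);

static int
copy_name(char *dst, size_t dstlen, const char *src)
{
	/* >= dstlen means dst holds a truncated, NUL-terminated copy */
	return event_strlcpy_(dst, src, dstlen) < dstlen;
}
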


@ -102,11 +102,11 @@ void run_legacy_test_fn(void *ptr);
struct evutil_addrinfo;
struct evutil_addrinfo *ai_find_by_family(struct evutil_addrinfo *ai, int f);
struct evutil_addrinfo *ai_find_by_protocol(struct evutil_addrinfo *ai, int p);
int _test_ai_eq(const struct evutil_addrinfo *ai, const char *sockaddr_port,
int test_ai_eq_(const struct evutil_addrinfo *ai, const char *sockaddr_port,
int socktype, int protocol, int line);
#define test_ai_eq(ai, str, s, p) do { \
if (_test_ai_eq((ai), (str), (s), (p), __LINE__)<0) \
if (test_ai_eq_((ai), (str), (s), (p), __LINE__)<0) \
goto end; \
} while (0)
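
test_ai_eq() shows the __LINE__-forwarding pattern: the macro captures the caller's line number so the helper reports where the check was written, and "goto end" routes failures to the test's cleanup label. A generic sketch with invented names (the caller must provide an end: label):

#include <stdio.h>

static int
check_eq_(int got, int want, int line)
{
	if (got != want) {
		fprintf(stderr, "line %d: got %d, want %d\n",
		    line, got, want);
		return -1;
	}
	return 0;
}

#define check_eq(g, w) do { \
	if (check_eq_((g), (w), __LINE__) < 0) \
		goto end; \
} while (0)
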


@ -65,7 +65,7 @@
/* Validates that an evbuffer is good. Returns false if it isn't, true if it
* is*/
static int
_evbuffer_validate(struct evbuffer *buf)
evbuffer_validate_(struct evbuffer *buf)
{
struct evbuffer_chain *chain;
size_t sum = 0;
@ -164,7 +164,7 @@ evbuffer_get_waste(struct evbuffer *buf, size_t *allocatedp, size_t *wastedp, si
}
#define evbuffer_validate(buf) \
TT_STMT_BEGIN if (!_evbuffer_validate(buf)) TT_DIE(("Buffer format invalid")); TT_STMT_END
TT_STMT_BEGIN if (!evbuffer_validate_(buf)) TT_DIE(("Buffer format invalid")); TT_STMT_END
static void
test_evbuffer(void *ptr)
@ -757,7 +757,7 @@ test_evbuffer_add_file(void *ptr)
data = malloc(1024*512);
tt_assert(data);
for (i = 0; i < datalen; ++i)
data[i] = _evutil_weakrand();
data[i] = evutil_weakrand_();
} else {
data = strdup("here is a relatively small string.");
tt_assert(data);


@ -823,7 +823,7 @@ static void http_request_done(struct evhttp_request *, void *);
static void http_request_empty_done(struct evhttp_request *, void *);
static void
_http_connection_test(struct basic_test_data *data, int persistent)
http_connection_test_(struct basic_test_data *data, int persistent)
{
ev_uint16_t port = 0;
struct evhttp_connection *evcon = NULL;
@ -907,12 +907,12 @@ _http_connection_test(struct basic_test_data *data, int persistent)
static void
http_connection_test(void *arg)
{
_http_connection_test(arg, 0);
http_connection_test_(arg, 0);
}
static void
http_persist_connection_test(void *arg)
{
_http_connection_test(arg, 1);
http_connection_test_(arg, 1);
}
static struct regress_dns_server_table search_table[] = {
@ -1814,7 +1814,7 @@ close_detect_cb(struct evhttp_request *req, void *arg)
static void
_http_close_detection(struct basic_test_data *data, int with_delay)
http_close_detection_(struct basic_test_data *data, int with_delay)
{
ev_uint16_t port = 0;
struct evhttp_connection *evcon = NULL;
@ -1866,12 +1866,12 @@ _http_close_detection(struct basic_test_data *data, int with_delay)
static void
http_close_detection_test(void *arg)
{
_http_close_detection(arg, 0);
http_close_detection_(arg, 0);
}
static void
http_close_detection_delay_test(void *arg)
{
_http_close_detection(arg, 1);
http_close_detection_(arg, 1);
}
static void
@ -2559,7 +2559,7 @@ http_incomplete_writecb(struct bufferevent *bev, void *arg)
}
static void
_http_incomplete_test(struct basic_test_data *data, int use_timeout)
http_incomplete_test_(struct basic_test_data *data, int use_timeout)
{
struct bufferevent *bev;
evutil_socket_t fd;
@ -2616,12 +2616,12 @@ _http_incomplete_test(struct basic_test_data *data, int use_timeout)
static void
http_incomplete_test(void *arg)
{
_http_incomplete_test(arg, 0);
http_incomplete_test_(arg, 0);
}
static void
http_incomplete_timeout_test(void *arg)
{
_http_incomplete_test(arg, 1);
http_incomplete_test_(arg, 1);
}
/*
@ -2910,7 +2910,7 @@ http_stream_in_done(struct evhttp_request *req, void *arg)
* Makes a request and reads the response in chunks.
*/
static void
_http_stream_in_test(struct basic_test_data *data, char const *url,
http_stream_in_test_(struct basic_test_data *data, char const *url,
size_t expected_len, char const *expected)
{
struct evhttp_connection *evcon;
@ -2958,10 +2958,10 @@ _http_stream_in_test(struct basic_test_data *data, char const *url,
static void
http_stream_in_test(void *arg)
{
_http_stream_in_test(arg, "/chunked", 13 + 18 + 8,
http_stream_in_test_(arg, "/chunked", 13 + 18 + 8,
"This is funnybut not hilarious.bwv 1052");
_http_stream_in_test(arg, "/test", strlen(BASIC_REQUEST_BODY),
http_stream_in_test_(arg, "/test", strlen(BASIC_REQUEST_BODY),
BASIC_REQUEST_BODY);
}


@ -590,7 +590,7 @@ done:
/* we just pause the rpc and continue it in the next callback */
struct _rpc_hook_ctx {
struct rpc_hook_ctx_ {
void *vbase;
void *ctx;
};
@ -600,7 +600,7 @@ static int hook_pause_cb_called=0;
static void
rpc_hook_pause_cb(evutil_socket_t fd, short what, void *arg)
{
struct _rpc_hook_ctx *ctx = arg;
struct rpc_hook_ctx_ *ctx = arg;
++hook_pause_cb_called;
evrpc_resume_request(ctx->vbase, ctx->ctx, EVRPC_CONTINUE);
free(arg);
@ -610,7 +610,7 @@ static int
rpc_hook_pause(void *ctx, struct evhttp_request *req, struct evbuffer *evbuf,
void *arg)
{
struct _rpc_hook_ctx *tmp = malloc(sizeof(*tmp));
struct rpc_hook_ctx_ *tmp = malloc(sizeof(*tmp));
struct timeval tv;
assert(tmp != NULL);


@ -534,22 +534,22 @@ test_evutil_log(void *ptr)
* module didn't enforce the requirement that a fatal callback
* actually exit. Now, it exits no matter what, so if we want to
* reinstate these tests, we'll need to fork for each one. */
check_error_logging(errx_fn, 2, _EVENT_LOG_ERR,
check_error_logging(errx_fn, 2, EVENT_LOG_ERR,
"Fatal error; too many kumquats (5)");
RESET();
#endif
event_warnx("Far too many %s (%d)", "wombats", 99);
LOGEQ(_EVENT_LOG_WARN, "Far too many wombats (99)");
LOGEQ(EVENT_LOG_WARN, "Far too many wombats (99)");
RESET();
event_msgx("Connecting lime to coconut");
LOGEQ(_EVENT_LOG_MSG, "Connecting lime to coconut");
LOGEQ(EVENT_LOG_MSG, "Connecting lime to coconut");
RESET();
event_debug(("A millisecond passed! We should log that!"));
#ifdef USE_DEBUG
LOGEQ(_EVENT_LOG_DEBUG, "A millisecond passed! We should log that!");
LOGEQ(EVENT_LOG_DEBUG, "A millisecond passed! We should log that!");
#else
tt_int_op(logsev,==,0);
tt_ptr_op(logmsg,==,NULL);
@ -561,13 +561,13 @@ test_evutil_log(void *ptr)
event_warn("Couldn't open %s", "/bad/file");
evutil_snprintf(buf, sizeof(buf),
"Couldn't open /bad/file: %s",strerror(ENOENT));
LOGEQ(_EVENT_LOG_WARN,buf);
LOGEQ(EVENT_LOG_WARN,buf);
RESET();
#ifdef CAN_CHECK_ERR
evutil_snprintf(buf, sizeof(buf),
"Couldn't open /very/bad/file: %s",strerror(ENOENT));
check_error_logging(err_fn, 5, _EVENT_LOG_ERR, buf);
check_error_logging(err_fn, 5, EVENT_LOG_ERR, buf);
RESET();
#endif
@ -584,11 +584,11 @@ test_evutil_log(void *ptr)
errno = EAGAIN;
#endif
event_sock_warn(fd, "Unhappy socket");
LOGEQ(_EVENT_LOG_WARN, buf);
LOGEQ(EVENT_LOG_WARN, buf);
RESET();
#ifdef CAN_CHECK_ERR
check_error_logging(sock_err_fn, 20, _EVENT_LOG_ERR, buf);
check_error_logging(sock_err_fn, 20, EVENT_LOG_ERR, buf);
RESET();
#endif
@ -767,7 +767,7 @@ ai_find_by_protocol(struct evutil_addrinfo *ai, int protocol)
int
_test_ai_eq(const struct evutil_addrinfo *ai, const char *sockaddr_port,
test_ai_eq_(const struct evutil_addrinfo *ai, const char *sockaddr_port,
int socktype, int protocol, int line)
{
struct sockaddr_storage ss;
@ -836,8 +836,8 @@ test_evutil_rand(void *arg)
for (k=0;k<32;++k) {
/* Try a few different start and end points; try to catch
* the various misaligned cases of arc4random_buf */
int startpoint = _evutil_weakrand() % 4;
int endpoint = 32 - (_evutil_weakrand() % 4);
int startpoint = evutil_weakrand_() % 4;
int endpoint = 32 - (evutil_weakrand_() % 4);
memset(buf2, 0, sizeof(buf2));


@ -58,13 +58,13 @@ extern "C" {
#endif
/* A good no-op to use in macro definitions. */
#define _EVUTIL_NIL_STMT ((void)0)
#define EVUTIL_NIL_STMT_ ((void)0)
/* A no-op that tricks the compiler into thinking a condition is used while
* definitely not making any code for it. Used to compile out asserts while
* avoiding "unused variable" warnings. The "!" forces the compiler to
* do the sizeof() on an int, in case "condition" is a bitfield value.
*/
#define _EVUTIL_NIL_CONDITION(condition) do { \
#define EVUTIL_NIL_CONDITION_(condition) do { \
(void)sizeof(!(condition)); \
} while(0)
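
The comment above is the whole story of EVUTIL_NIL_CONDITION_: sizeof never evaluates its operand, so no code is generated, yet the compiler counts every variable in the condition as used, which silences unused-variable warnings when EVUTIL_ASSERT compiles out under NDEBUG; the "!" normalizes the operand to int so even a bitfield member is accepted. In effect (illustrative names):

#define USE_CONDITION_(cond) do { (void)sizeof(!(cond)); } while (0)

static int
example(int fd)
{
	int ok = fd >= 0;       /* only referenced by the disabled assert */
	USE_CONDITION_(ok);     /* no code emitted, no warning either */
	return fd;
}
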
@ -254,7 +254,7 @@ int evutil_resolve(int family, const char *hostname, struct sockaddr *sa,
const char *evutil_getenv(const char *name);
long _evutil_weakrand(void);
long evutil_weakrand_(void);
/* Evaluates to the same boolean value as 'p', and hints to the compiler that
* we expect this value to be false. */
@ -266,13 +266,13 @@ long _evutil_weakrand(void);
/* Replacement for assert() that calls event_errx on failure. */
#ifdef NDEBUG
#define EVUTIL_ASSERT(cond) _EVUTIL_NIL_CONDITION(cond)
#define EVUTIL_ASSERT(cond) EVUTIL_NIL_CONDITION_(cond)
#define EVUTIL_FAILURE_CHECK(cond) 0
#else
#define EVUTIL_ASSERT(cond) \
do { \
if (EVUTIL_UNLIKELY(!(cond))) { \
event_errx(_EVENT_ERR_ABORT, \
event_errx(EVENT_ERR_ABORT_, \
"%s:%d: Assertion %s failed in %s", \
__FILE__,__LINE__,#cond,__func__); \
/* In case a user-supplied handler tries to */ \