mirror of
https://github.com/libevent/libevent.git
synced 2025-01-09 00:56:20 +08:00
Merge branch '21_robust_monotonic'
This commit is contained in:
commit
a163026099
25
LICENSE
25
LICENSE
@ -72,3 +72,28 @@ The arc4module is available under the following, sometimes called the
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
==============================
|
||||
|
||||
The Windows timer code is based on code from libutp, which is
|
||||
distributed under this license, sometimes called the "MIT" license.
|
||||
|
||||
|
||||
Copyright (c) 2010 BitTorrent, Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
@ -183,6 +183,7 @@ CORE_SRC = \
|
||||
evthread.c \
|
||||
evutil.c \
|
||||
evutil_rand.c \
|
||||
evutil_time.c \
|
||||
listener.c \
|
||||
log.c \
|
||||
strlcpy.c \
|
||||
@ -255,6 +256,7 @@ noinst_HEADERS = \
|
||||
ratelim-internal.h \
|
||||
ratelim-internal.h \
|
||||
strlcpy-internal.h \
|
||||
time-internal.h \
|
||||
util-internal.h
|
||||
|
||||
EVENT1_HDRS = \
|
||||
|
@ -221,6 +221,7 @@ AC_CHECK_HEADERS([ \
|
||||
sys/sendfile.h \
|
||||
sys/socket.h \
|
||||
sys/time.h \
|
||||
sys/timerfd.h \
|
||||
sys/uio.h \
|
||||
sys/wait.h \
|
||||
unistd.h \
|
||||
@ -357,6 +358,7 @@ AC_CHECK_FUNCS([ \
|
||||
strtok_r \
|
||||
strtoll \
|
||||
sysctl \
|
||||
timerfd_create \
|
||||
unsetenv \
|
||||
usleep \
|
||||
vasprintf \
|
||||
|
85
epoll.c
85
epoll.c
@ -47,6 +47,9 @@
|
||||
#ifdef EVENT__HAVE_FCNTL_H
|
||||
#include <fcntl.h>
|
||||
#endif
|
||||
#ifdef EVENT__HAVE_SYS_TIMERFD_H
|
||||
#include <sys/timerfd.h>
|
||||
#endif
|
||||
|
||||
#include "event-internal.h"
|
||||
#include "evsignal-internal.h"
|
||||
@ -55,11 +58,26 @@
|
||||
#include "log-internal.h"
|
||||
#include "evmap-internal.h"
|
||||
#include "changelist-internal.h"
|
||||
#include "time-internal.h"
|
||||
|
||||
#if defined(EVENT__HAVE_SYS_TIMERFD_H) && \
|
||||
defined(EVENT__HAVE_TIMERFD_CREATE) && \
|
||||
defined(HAVE_POSIX_MONOTONIC) && defined(TFD_NONBLOCK) && \
|
||||
defined(TFD_CLOEXEC)
|
||||
/* Note that we only use timerfd if TFD_NONBLOCK and TFD_CLOEXEC are available
|
||||
and working. This means that we can't support it on 2.6.25 (where timerfd
|
||||
was introduced) or 2.6.26, since 2.6.27 introduced those flags.
|
||||
*/
|
||||
#define USING_TIMERFD
|
||||
#endif
|
||||
|
||||
struct epollop {
|
||||
struct epoll_event *events;
|
||||
int nevents;
|
||||
int epfd;
|
||||
#ifdef USING_TIMERFD
|
||||
int timerfd;
|
||||
#endif
|
||||
};
|
||||
|
||||
static void *epoll_init(struct event_base *);
|
||||
@ -146,8 +164,38 @@ epoll_init(struct event_base *base)
|
||||
|
||||
if ((base->flags & EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST) != 0 ||
|
||||
((base->flags & EVENT_BASE_FLAG_IGNORE_ENV) == 0 &&
|
||||
evutil_getenv_("EVENT_EPOLL_USE_CHANGELIST") != NULL))
|
||||
evutil_getenv_("EVENT_EPOLL_USE_CHANGELIST") != NULL)) {
|
||||
|
||||
base->evsel = &epollops_changelist;
|
||||
}
|
||||
|
||||
#ifdef USING_TIMERFD
|
||||
/*
|
||||
The epoll interface ordinarily gives us one-millisecond precision,
|
||||
so on Linux it makes perfect sense to use the CLOCK_MONOTONIC_COARSE
|
||||
timer. But when the user has set the new PRECISE_TIMER flag for an
|
||||
event_base, we can try to use timerfd to give them finer granularity.
|
||||
*/
|
||||
if ((base->flags & EVENT_BASE_FLAG_PRECISE_TIMER) &&
|
||||
base->monotonic_timer.monotonic_clock == CLOCK_MONOTONIC) {
|
||||
int fd;
|
||||
fd = epollop->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
|
||||
if (epollop->timerfd >= 0) {
|
||||
struct epoll_event epev;
|
||||
epev.data.fd = epollop->timerfd;
|
||||
epev.events = EPOLLIN;
|
||||
if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, fd, &epev) < 0) {
|
||||
event_warn("epoll_ctl(timerfd)");
|
||||
close(fd);
|
||||
epollop->timerfd = -1;
|
||||
}
|
||||
} else {
|
||||
event_warn("timerfd_create");
|
||||
}
|
||||
} else {
|
||||
epollop->timerfd = -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
evsig_init_(base);
|
||||
|
||||
@ -508,6 +556,33 @@ epoll_dispatch(struct event_base *base, struct timeval *tv)
|
||||
int i, res;
|
||||
long timeout = -1;
|
||||
|
||||
#ifdef USING_TIMERFD
|
||||
if (epollop->timerfd >= 0) {
|
||||
struct itimerspec is;
|
||||
is.it_interval.tv_sec = 0;
|
||||
is.it_interval.tv_nsec = 0;
|
||||
if (tv == NULL) {
|
||||
/* No timeout; disarm the timer. */
|
||||
is.it_value.tv_sec = 0;
|
||||
is.it_value.tv_nsec = 0;
|
||||
} else {
|
||||
if (tv->tv_sec == 0 && tv->tv_usec == 0) {
|
||||
/* we need to exit immediately; timerfd can't
|
||||
* do that. */
|
||||
timeout = 0;
|
||||
}
|
||||
is.it_value.tv_sec = tv->tv_sec;
|
||||
is.it_value.tv_nsec = tv->tv_usec * 1000;
|
||||
}
|
||||
/* TODO: we could avoid unnecessary syscalls here by only
|
||||
calling timerfd_settime when the top timeout changes, or
|
||||
when we're called with a different timeval.
|
||||
*/
|
||||
if (timerfd_settime(epollop->timerfd, 0, &is, NULL) < 0) {
|
||||
event_warn("timerfd_settime");
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
if (tv != NULL) {
|
||||
timeout = evutil_tv_to_msec_(tv);
|
||||
if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) {
|
||||
@ -541,6 +616,10 @@ epoll_dispatch(struct event_base *base, struct timeval *tv)
|
||||
for (i = 0; i < res; i++) {
|
||||
int what = events[i].events;
|
||||
short ev = 0;
|
||||
#ifdef USING_TIMERFD
|
||||
if (events[i].data.fd == epollop->timerfd)
|
||||
continue;
|
||||
#endif
|
||||
|
||||
if (what & (EPOLLHUP|EPOLLERR)) {
|
||||
ev = EV_READ | EV_WRITE;
|
||||
@ -585,6 +664,10 @@ epoll_dealloc(struct event_base *base)
|
||||
mm_free(epollop->events);
|
||||
if (epollop->epfd >= 0)
|
||||
close(epollop->epfd);
|
||||
#ifdef USING_TIMERFD
|
||||
if (epollop->timerfd >= 0)
|
||||
close(epollop->timerfd);
|
||||
#endif
|
||||
|
||||
memset(epollop, 0, sizeof(struct epollop));
|
||||
mm_free(epollop);
|
||||
|
@ -36,10 +36,6 @@ extern "C" {
|
||||
|
||||
#include <time.h>
|
||||
#include <sys/queue.h>
|
||||
#ifdef EVENT__HAVE_MACH_MACH_TIME_H
|
||||
/* For mach_timebase_info */
|
||||
#include <mach/mach_time.h>
|
||||
#endif
|
||||
#include "event2/event_struct.h"
|
||||
#include "minheap-internal.h"
|
||||
#include "evsignal-internal.h"
|
||||
@ -62,16 +58,6 @@ extern "C" {
|
||||
#define EV_CLOSURE_SIGNAL 1
|
||||
#define EV_CLOSURE_PERSIST 2
|
||||
|
||||
/* Define HAVE_ANY_MONOTONIC iff we *might* have a working monotonic
|
||||
* clock implementation */
|
||||
#if defined(EVENT__HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
|
||||
#define HAVE_ANY_MONOTONIC 1
|
||||
#elif defined(EVENT__HAVE_MACH_ABSOLUTE_TIME)
|
||||
#define HAVE_ANY_MONOTONIC 1
|
||||
#elif defined(_WIN32)
|
||||
#define HAVE_ANY_MONOTONIC 1
|
||||
#endif
|
||||
|
||||
/** Structure to define the backend of a given event_base. */
|
||||
struct eventop {
|
||||
/** The name of this backend. */
|
||||
@ -252,9 +238,6 @@ struct event_base {
|
||||
/** Mapping from signal numbers to enabled (added) events. */
|
||||
struct event_signal_map sigmap;
|
||||
|
||||
/** Stored timeval; used to detect when time is running backwards. */
|
||||
struct timeval event_tv;
|
||||
|
||||
/** Priority queue of events with timeouts. */
|
||||
struct min_heap timeheap;
|
||||
|
||||
@ -262,27 +245,13 @@ struct event_base {
|
||||
* too often. */
|
||||
struct timeval tv_cache;
|
||||
|
||||
#if defined(EVENT__HAVE_MACH_ABSOLUTE_TIME)
|
||||
struct mach_timebase_info mach_timebase_units;
|
||||
#endif
|
||||
#if defined(EVENT__HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC) && defined(CLOCK_MONOTONIC_COARSE)
|
||||
#define CLOCK_IS_SELECTED
|
||||
int monotonic_clock;
|
||||
#endif
|
||||
#ifdef _WIN32
|
||||
DWORD last_tick_count;
|
||||
struct timeval adjust_tick_count;
|
||||
#endif
|
||||
#if defined(HAVE_ANY_MONOTONIC)
|
||||
/** True iff we should use our system's monotonic time implementation */
|
||||
/* TODO: Support systems where we don't need to detct monotonic time */
|
||||
int use_monotonic;
|
||||
struct evutil_monotonic_timer monotonic_timer;
|
||||
|
||||
/** Difference between internal time (maybe from clock_gettime) and
|
||||
* gettimeofday. */
|
||||
struct timeval tv_clock_diff;
|
||||
/** Second in which we last updated tv_clock_diff, in monotonic time. */
|
||||
time_t last_updated_clock_diff;
|
||||
#endif
|
||||
|
||||
#ifndef EVENT__DISABLE_THREAD_SUPPORT
|
||||
/* threading support */
|
||||
@ -418,4 +387,3 @@ int event_base_foreach_event_(struct event_base *base,
|
||||
#endif
|
||||
|
||||
#endif /* EVENT_INTERNAL_H_INCLUDED_ */
|
||||
|
||||
|
209
event.c
209
event.c
@ -152,7 +152,6 @@ static int event_process_active(struct event_base *);
|
||||
|
||||
static int timeout_next(struct event_base *, struct timeval **);
|
||||
static void timeout_process(struct event_base *);
|
||||
static void timeout_correct(struct event_base *, struct timeval *);
|
||||
|
||||
static inline void event_signal_closure(struct event_base *, struct event *ev);
|
||||
static inline void event_persist_closure(struct event_base *, struct event *ev);
|
||||
@ -338,48 +337,6 @@ HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
|
||||
#define EVENT_BASE_ASSERT_LOCKED(base) \
|
||||
EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
|
||||
|
||||
/* Set base->use_monotonic to 1 if we have a clock function that supports
|
||||
* monotonic time */
|
||||
static void
|
||||
detect_monotonic(struct event_base *base, const struct event_config *cfg)
|
||||
{
|
||||
#if defined(EVENT__HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
|
||||
{
|
||||
/* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris.
|
||||
* You need to check for it at runtime, because some older
|
||||
* versions won't have it working. */
|
||||
struct timespec ts;
|
||||
|
||||
if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
|
||||
base->use_monotonic = 1;
|
||||
#ifdef CLOCK_IS_SELECTED
|
||||
base->monotonic_clock = CLOCK_MONOTONIC;
|
||||
if (cfg == NULL ||
|
||||
!(cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER)) {
|
||||
if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0)
|
||||
base->monotonic_clock = CLOCK_MONOTONIC_COARSE;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
#elif defined(EVENT__HAVE_MACH_ABSOLUTE_TIME)
|
||||
{
|
||||
struct mach_timebase_info mi;
|
||||
/* OSX has mach_absolute_time() */
|
||||
if (mach_timebase_info(&mi) == 0 && mach_absolute_time() != 0) {
|
||||
base->use_monotonic = 1;
|
||||
/* mach_timebase_info tells us how to convert
|
||||
* mach_absolute_time() into nanoseconds, but we
|
||||
* want to use microseconds instead. */
|
||||
mi.denom *= 1000;
|
||||
memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
|
||||
}
|
||||
}
|
||||
#elif defined(_WIN32)
|
||||
base->use_monotonic = 1;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* How often (in seconds) do we check for changes in wall clock time relative
|
||||
* to monotonic time? Set this to -1 for 'never.' */
|
||||
#define CLOCK_SYNC_INTERVAL 5
|
||||
@ -399,60 +356,19 @@ gettime(struct event_base *base, struct timeval *tp)
|
||||
return (0);
|
||||
}
|
||||
|
||||
#ifdef HAVE_ANY_MONOTONIC
|
||||
if (base->use_monotonic) {
|
||||
#if defined(EVENT__HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
|
||||
struct timespec ts;
|
||||
#ifdef CLOCK_IS_SELECTED
|
||||
if (clock_gettime(base->monotonic_clock, &ts) == -1)
|
||||
return (-1);
|
||||
#else
|
||||
if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
|
||||
return (-1);
|
||||
#endif
|
||||
|
||||
tp->tv_sec = ts.tv_sec;
|
||||
tp->tv_usec = ts.tv_nsec / 1000;
|
||||
#elif defined(EVENT__HAVE_MACH_ABSOLUTE_TIME)
|
||||
uint64_t abstime = mach_absolute_time();
|
||||
uint64_t usec;
|
||||
usec = (abstime * base->mach_timebase_units.numer)
|
||||
/ (base->mach_timebase_units.denom);
|
||||
tp->tv_sec = usec / 1000000;
|
||||
tp->tv_usec = usec % 1000000;
|
||||
#elif defined(_WIN32)
|
||||
/* TODO: Support GetTickCount64. */
|
||||
/* TODO: Support alternate timer backends if the user asked
|
||||
* for a high-precision timer. QueryPerformanceCounter is
|
||||
* possibly a good idea, but it is also supposed to have
|
||||
* reliability issues under various circumstances. */
|
||||
DWORD ticks = GetTickCount();
|
||||
if (ticks < base->last_tick_count) {
|
||||
/* The 32-bit timer rolled over. Let's assume it only
|
||||
* happened once. Add 2**32 msec to adjust_tick_count. */
|
||||
const struct timeval tv_rollover = { 4294967, 296000 };
|
||||
evutil_timeradd(&tv_rollover, &base->adjust_tick_count, &base->adjust_tick_count);
|
||||
}
|
||||
base->last_tick_count = ticks;
|
||||
tp->tv_sec = ticks / 1000;
|
||||
tp->tv_usec = (ticks % 1000) * 1000;
|
||||
evutil_timeradd(tp, &base->adjust_tick_count, tp);
|
||||
#else
|
||||
#error "Missing monotonic time implementation."
|
||||
#endif
|
||||
if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
|
||||
< tp->tv_sec) {
|
||||
struct timeval tv;
|
||||
evutil_gettimeofday(&tv,NULL);
|
||||
evutil_timersub(&tv, tp, &base->tv_clock_diff);
|
||||
base->last_updated_clock_diff = tp->tv_sec;
|
||||
}
|
||||
|
||||
return (0);
|
||||
if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
return (evutil_gettimeofday(tp, NULL));
|
||||
if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
|
||||
< tp->tv_sec) {
|
||||
struct timeval tv;
|
||||
evutil_gettimeofday(&tv,NULL);
|
||||
evutil_timersub(&tv, tp, &base->tv_clock_diff);
|
||||
base->last_updated_clock_diff = tp->tv_sec;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
@ -469,11 +385,7 @@ event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
|
||||
if (base->tv_cache.tv_sec == 0) {
|
||||
r = evutil_gettimeofday(tv, NULL);
|
||||
} else {
|
||||
#ifdef HAVE_ANY_MONOTONIC
|
||||
evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
|
||||
#else
|
||||
*tv = base->tv_cache;
|
||||
#endif
|
||||
r = 0;
|
||||
}
|
||||
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
||||
@ -647,8 +559,27 @@ event_base_new_with_config(const struct event_config *cfg)
|
||||
event_warn("%s: calloc", __func__);
|
||||
return NULL;
|
||||
}
|
||||
detect_monotonic(base, cfg);
|
||||
gettime(base, &base->event_tv);
|
||||
|
||||
if (cfg)
|
||||
base->flags = cfg->flags;
|
||||
|
||||
should_check_environment =
|
||||
!(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
|
||||
|
||||
{
|
||||
struct timeval tmp;
|
||||
int precise_time =
|
||||
cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
|
||||
int flags;
|
||||
if (should_check_environment && !precise_time) {
|
||||
precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
|
||||
base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
|
||||
}
|
||||
flags = precise_time ? EV_MONOT_PRECISE : 0;
|
||||
evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
|
||||
|
||||
gettime(base, &tmp);
|
||||
}
|
||||
|
||||
min_heap_ctor_(&base->timeheap);
|
||||
|
||||
@ -661,8 +592,6 @@ event_base_new_with_config(const struct event_config *cfg)
|
||||
base->defer_queue.base = base;
|
||||
base->defer_queue.notify_fn = notify_base_cbq_callback;
|
||||
base->defer_queue.notify_arg = base;
|
||||
if (cfg)
|
||||
base->flags = cfg->flags;
|
||||
|
||||
evmap_io_initmap_(&base->io);
|
||||
evmap_signal_initmap_(&base->sigmap);
|
||||
@ -670,9 +599,6 @@ event_base_new_with_config(const struct event_config *cfg)
|
||||
|
||||
base->evbase = NULL;
|
||||
|
||||
should_check_environment =
|
||||
!(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
|
||||
|
||||
if (cfg) {
|
||||
memcpy(&base->max_dispatch_time,
|
||||
&cfg->max_dispatch_interval, sizeof(struct timeval));
|
||||
@ -1778,8 +1704,6 @@ event_base_loop(struct event_base *base, int flags)
|
||||
break;
|
||||
}
|
||||
|
||||
timeout_correct(base, &tv);
|
||||
|
||||
tv_p = &tv;
|
||||
if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
|
||||
timeout_next(base, &tv_p);
|
||||
@ -1799,9 +1723,6 @@ event_base_loop(struct event_base *base, int flags)
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* update last old time */
|
||||
gettime(base, &base->event_tv);
|
||||
|
||||
clear_time_cache(base);
|
||||
|
||||
res = evsel->dispatch(base, tv_p);
|
||||
@ -2087,12 +2008,8 @@ event_pending(const struct event *ev, short event, struct timeval *tv)
|
||||
if (tv != NULL && (flags & event & EV_TIMEOUT)) {
|
||||
struct timeval tmp = ev->ev_timeout;
|
||||
tmp.tv_usec &= MICROSECONDS_MASK;
|
||||
#ifdef HAVE_ANY_MONOTONIC
|
||||
/* correctly remamp to real time */
|
||||
evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
|
||||
#else
|
||||
*tv = tmp;
|
||||
#endif
|
||||
}
|
||||
|
||||
return (flags & event);
|
||||
@ -2637,66 +2554,6 @@ out:
|
||||
return (res);
|
||||
}
|
||||
|
||||
/*
|
||||
* Determines if the time is running backwards by comparing the current time
|
||||
* against the last time we checked. Not needed when using clock monotonic.
|
||||
* If time is running backwards, we adjust the firing time of every event by
|
||||
* the amount that time seems to have jumped.
|
||||
*/
|
||||
static void
|
||||
timeout_correct(struct event_base *base, struct timeval *tv)
|
||||
{
|
||||
/* Caller must hold th_base_lock. */
|
||||
struct event **pev;
|
||||
unsigned int size;
|
||||
struct timeval off;
|
||||
int i;
|
||||
|
||||
#ifdef HAVE_ANY_MONOTONIC
|
||||
if (base->use_monotonic)
|
||||
return;
|
||||
#endif
|
||||
|
||||
/* Check if time is running backwards */
|
||||
gettime(base, tv);
|
||||
|
||||
if (evutil_timercmp(tv, &base->event_tv, >=)) {
|
||||
base->event_tv = *tv;
|
||||
return;
|
||||
}
|
||||
|
||||
event_debug(("%s: time is running backwards, corrected",
|
||||
__func__));
|
||||
evutil_timersub(&base->event_tv, tv, &off);
|
||||
|
||||
/*
|
||||
* We can modify the key element of the node without destroying
|
||||
* the minheap property, because we change every element.
|
||||
*/
|
||||
pev = base->timeheap.p;
|
||||
size = base->timeheap.n;
|
||||
for (; size-- > 0; ++pev) {
|
||||
struct timeval *ev_tv = &(**pev).ev_timeout;
|
||||
evutil_timersub(ev_tv, &off, ev_tv);
|
||||
}
|
||||
for (i=0; i<base->n_common_timeouts; ++i) {
|
||||
struct event *ev;
|
||||
struct common_timeout_list *ctl =
|
||||
base->common_timeout_queues[i];
|
||||
TAILQ_FOREACH(ev, &ctl->events,
|
||||
ev_timeout_pos.ev_next_with_common_timeout) {
|
||||
struct timeval *ev_tv = &ev->ev_timeout;
|
||||
ev_tv->tv_usec &= MICROSECONDS_MASK;
|
||||
evutil_timersub(ev_tv, &off, ev_tv);
|
||||
ev_tv->tv_usec |= COMMON_TIMEOUT_MAGIC |
|
||||
(i<<COMMON_TIMEOUT_IDX_SHIFT);
|
||||
}
|
||||
}
|
||||
|
||||
/* Now remember what the new time turned out to be. */
|
||||
base->event_tv = *tv;
|
||||
}
|
||||
|
||||
/* Activate every event whose timeout has elapsed. */
|
||||
static void
|
||||
timeout_process(struct event_base *base)
|
||||
@ -3213,9 +3070,7 @@ dump_inserted_event_fn(struct event_base *base, struct event *e, void *arg)
|
||||
struct timeval tv;
|
||||
tv.tv_sec = e->ev_timeout.tv_sec;
|
||||
tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
|
||||
#if defined(HAVE_ANY_MONOTONIC)
|
||||
evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
|
||||
#endif
|
||||
fprintf(output, " Timeout=%ld.%06d",
|
||||
(long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
|
||||
}
|
||||
|
@ -43,6 +43,7 @@ struct event_base;
|
||||
|
||||
#include "mm-internal.h"
|
||||
#include "evthread-internal.h"
|
||||
#include "time-internal.h"
|
||||
|
||||
#define SPIN_COUNT 2000
|
||||
|
||||
|
85
evutil.c
85
evutil.c
@ -70,13 +70,6 @@
|
||||
#ifdef EVENT__HAVE_ARPA_INET_H
|
||||
#include <arpa/inet.h>
|
||||
#endif
|
||||
#if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT_HAVE_USLEEP) && \
|
||||
!defined(_WIN32)
|
||||
#include <sys/select.h>
|
||||
#endif
|
||||
#ifndef EVENT__HAVE_GETTIMEOFDAY
|
||||
#include <sys/timeb.h>
|
||||
#endif
|
||||
#include <time.h>
|
||||
#include <sys/stat.h>
|
||||
#ifdef EVENT__HAVE_IFADDRS_H
|
||||
@ -485,46 +478,6 @@ evutil_strtoll(const char *s, char **endptr, int base)
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifndef EVENT__HAVE_GETTIMEOFDAY
|
||||
/* No gettimeofday; this must be windows. */
|
||||
int
|
||||
evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
|
||||
{
|
||||
#ifdef _MSC_VER
|
||||
#define U64_LITERAL(n) n##ui64
|
||||
#else
|
||||
#define U64_LITERAL(n) n##llu
|
||||
#endif
|
||||
|
||||
/* Conversion logic taken from Tor, which in turn took it
|
||||
* from Perl. GetSystemTimeAsFileTime returns its value as
|
||||
* an unaligned (!) 64-bit value containing the number of
|
||||
* 100-nanosecond intervals since 1 January 1601 UTC. */
|
||||
#define EPOCH_BIAS U64_LITERAL(116444736000000000)
|
||||
#define UNITS_PER_SEC U64_LITERAL(10000000)
|
||||
#define USEC_PER_SEC U64_LITERAL(1000000)
|
||||
#define UNITS_PER_USEC U64_LITERAL(10)
|
||||
union {
|
||||
FILETIME ft_ft;
|
||||
ev_uint64_t ft_64;
|
||||
} ft;
|
||||
|
||||
if (tv == NULL)
|
||||
return -1;
|
||||
|
||||
GetSystemTimeAsFileTime(&ft.ft_ft);
|
||||
|
||||
if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) {
|
||||
/* Time before the unix epoch. */
|
||||
return -1;
|
||||
}
|
||||
ft.ft_64 -= EPOCH_BIAS;
|
||||
tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC);
|
||||
tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef _WIN32
|
||||
int
|
||||
evutil_socket_geterror(evutil_socket_t sock)
|
||||
@ -2336,18 +2289,6 @@ evutil_sockaddr_is_loopback_(const struct sockaddr *addr)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define MAX_SECONDS_IN_MSEC_LONG \
|
||||
(((LONG_MAX) - 999) / 1000)
|
||||
|
||||
long
|
||||
evutil_tv_to_msec_(const struct timeval *tv)
|
||||
{
|
||||
if (tv->tv_usec > 1000000 || tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG)
|
||||
return -1;
|
||||
|
||||
return (tv->tv_sec * 1000) + ((tv->tv_usec + 999) / 1000);
|
||||
}
|
||||
|
||||
int
|
||||
evutil_hex_char_to_int_(char c)
|
||||
{
|
||||
@ -2388,32 +2329,6 @@ evutil_load_windows_system_library_(const TCHAR *library_name)
|
||||
}
|
||||
#endif
|
||||
|
||||
void
|
||||
evutil_usleep_(const struct timeval *tv)
|
||||
{
|
||||
if (!tv)
|
||||
return;
|
||||
#if defined(_WIN32)
|
||||
{
|
||||
long msec = evutil_tv_to_msec_(tv);
|
||||
Sleep((DWORD)msec);
|
||||
}
|
||||
#elif defined(EVENT__HAVE_NANOSLEEP)
|
||||
{
|
||||
struct timespec ts;
|
||||
ts.tv_sec = tv->tv_sec;
|
||||
ts.tv_nsec = tv->tv_usec*1000;
|
||||
nanosleep(&ts, NULL);
|
||||
}
|
||||
#elif defined(EVENT__HAVE_USLEEP)
|
||||
/* Some systems don't like to usleep more than 999999 usec */
|
||||
sleep(tv->tv_sec);
|
||||
usleep(tv->tv_usec);
|
||||
#else
|
||||
select(0, NULL, NULL, NULL, tv);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Internal wrapper around 'socket' to provide Linux-style support for
|
||||
* syscall-saving methods where available.
|
||||
*
|
||||
|
485
evutil_time.c
Normal file
485
evutil_time.c
Normal file
@ -0,0 +1,485 @@
|
||||
/*
|
||||
* Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. The name of the author may not be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "event2/event-config.h"
|
||||
#include "evconfig-private.h"
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <winsock2.h>
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
#undef WIN32_LEAN_AND_MEAN
|
||||
#endif
|
||||
|
||||
#include <sys/types.h>
|
||||
#ifdef EVENT__HAVE_STDLIB_H
|
||||
#include <stdlib.h>
|
||||
#endif
|
||||
#include <errno.h>
|
||||
#include <limits.h>
|
||||
#ifndef EVENT__HAVE_GETTIMEOFDAY
|
||||
#include <sys/timeb.h>
|
||||
#endif
|
||||
#if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT_HAVE_USLEEP) && \
|
||||
!defined(_WIN32)
|
||||
#include <sys/select.h>
|
||||
#endif
|
||||
#include <time.h>
|
||||
#include <sys/stat.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "event2/util.h"
|
||||
#include "util-internal.h"
|
||||
#include "log-internal.h"
|
||||
|
||||
#ifndef EVENT__HAVE_GETTIMEOFDAY
|
||||
/* No gettimeofday; this must be windows. */
|
||||
int
|
||||
evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
|
||||
{
|
||||
#ifdef _MSC_VER
|
||||
#define U64_LITERAL(n) n##ui64
|
||||
#else
|
||||
#define U64_LITERAL(n) n##llu
|
||||
#endif
|
||||
|
||||
/* Conversion logic taken from Tor, which in turn took it
|
||||
* from Perl. GetSystemTimeAsFileTime returns its value as
|
||||
* an unaligned (!) 64-bit value containing the number of
|
||||
* 100-nanosecond intervals since 1 January 1601 UTC. */
|
||||
#define EPOCH_BIAS U64_LITERAL(116444736000000000)
|
||||
#define UNITS_PER_SEC U64_LITERAL(10000000)
|
||||
#define USEC_PER_SEC U64_LITERAL(1000000)
|
||||
#define UNITS_PER_USEC U64_LITERAL(10)
|
||||
union {
|
||||
FILETIME ft_ft;
|
||||
ev_uint64_t ft_64;
|
||||
} ft;
|
||||
|
||||
if (tv == NULL)
|
||||
return -1;
|
||||
|
||||
GetSystemTimeAsFileTime(&ft.ft_ft);
|
||||
|
||||
if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) {
|
||||
/* Time before the unix epoch. */
|
||||
return -1;
|
||||
}
|
||||
ft.ft_64 -= EPOCH_BIAS;
|
||||
tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC);
|
||||
tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#define MAX_SECONDS_IN_MSEC_LONG \
|
||||
(((LONG_MAX) - 999) / 1000)
|
||||
|
||||
long
|
||||
evutil_tv_to_msec_(const struct timeval *tv)
|
||||
{
|
||||
if (tv->tv_usec > 1000000 || tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG)
|
||||
return -1;
|
||||
|
||||
return (tv->tv_sec * 1000) + ((tv->tv_usec + 999) / 1000);
|
||||
}
|
||||
|
||||
/*
|
||||
Replacement for usleep on platforms that don't have one. Not guaranteed to
|
||||
be any more finegrained than 1 msec.
|
||||
*/
|
||||
void
|
||||
evutil_usleep_(const struct timeval *tv)
|
||||
{
|
||||
if (!tv)
|
||||
return;
|
||||
#if defined(_WIN32)
|
||||
{
|
||||
long msec = evutil_tv_to_msec_(tv);
|
||||
Sleep((DWORD)msec);
|
||||
}
|
||||
#elif defined(EVENT__HAVE_NANOSLEEP)
|
||||
{
|
||||
struct timespec ts;
|
||||
ts.tv_sec = tv->tv_sec;
|
||||
ts.tv_nsec = tv->tv_usec*1000;
|
||||
nanosleep(&ts, NULL);
|
||||
}
|
||||
#elif defined(EVENT__HAVE_USLEEP)
|
||||
/* Some systems don't like to usleep more than 999999 usec */
|
||||
sleep(tv->tv_sec);
|
||||
usleep(tv->tv_usec);
|
||||
#else
|
||||
select(0, NULL, NULL, NULL, tv);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
This function assumes it's called repeatedly with a
|
||||
not-actually-so-monotonic time source whose outputs are in 'tv'. It
|
||||
implements a trivial ratcheting mechanism so that the values never go
|
||||
backwards.
|
||||
*/
|
||||
static void
|
||||
adjust_monotonic_time(struct evutil_monotonic_timer *base,
|
||||
struct timeval *tv)
|
||||
{
|
||||
evutil_timeradd(tv, &base->adjust_monotonic_clock, tv);
|
||||
|
||||
if (evutil_timercmp(tv, &base->last_time, <)) {
|
||||
/* Guess it wasn't monotonic after all. */
|
||||
struct timeval adjust;
|
||||
evutil_timersub(&base->last_time, tv, &adjust);
|
||||
evutil_timeradd(&adjust, &base->adjust_monotonic_clock,
|
||||
&base->adjust_monotonic_clock);
|
||||
*tv = base->last_time;
|
||||
}
|
||||
base->last_time = *tv;
|
||||
}
|
||||
|
||||
#if defined(HAVE_POSIX_MONOTONIC)
|
||||
/* =====
|
||||
The POSIX clock_gettime() interface provides a few ways to get at a
|
||||
monotonic clock. CLOCK_MONOTONIC is most widely supported. Linux also
|
||||
provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec.
|
||||
|
||||
On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic.
|
||||
Platforms don't agree about whether it should jump on a sleep/resume.
|
||||
*/
|
||||
|
||||
int
|
||||
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
|
||||
int flags)
|
||||
{
|
||||
/* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris. You need to
|
||||
* check for it at runtime, because some older kernel versions won't
|
||||
* have it working. */
|
||||
const int precise = flags & EV_MONOT_PRECISE;
|
||||
const int fallback = flags & EV_MONOT_FALLBACK;
|
||||
struct timespec ts;
|
||||
|
||||
#ifdef CLOCK_MONOTONIC_COARSE
|
||||
#if CLOCK_MONOTONIC_COARSE < 0
|
||||
/* Technically speaking, nothing keeps CLOCK_* from being negative (as
|
||||
* far as I know). This check and the one below make sure that it's
|
||||
* safe for us to use -1 as an "unset" value. */
|
||||
#error "I didn't expect CLOCK_MONOTONIC_COARSE to be < 0"
|
||||
#endif
|
||||
if (! precise && ! fallback) {
|
||||
if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) {
|
||||
base->monotonic_clock = CLOCK_MONOTONIC_COARSE;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
if (!fallback && clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
|
||||
base->monotonic_clock = CLOCK_MONOTONIC;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if CLOCK_MONOTONIC < 0
|
||||
#error "I didn't expect CLOCK_MONOTONIC to be < 0"
|
||||
#endif
|
||||
|
||||
base->monotonic_clock = -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
|
||||
struct timeval *tp)
|
||||
{
|
||||
struct timespec ts;
|
||||
|
||||
if (base->monotonic_clock < 0) {
|
||||
if (evutil_gettimeofday(tp, NULL) < 0)
|
||||
return -1;
|
||||
adjust_monotonic_time(base, tp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (clock_gettime(base->monotonic_clock, &ts) == -1)
|
||||
return -1;
|
||||
tp->tv_sec = ts.tv_sec;
|
||||
tp->tv_usec = ts.tv_nsec / 1000;
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_MACH_MONOTONIC)
|
||||
/* ======
|
||||
Apple is a little late to the POSIX party. And why not? Instead of
|
||||
clock_gettime(), they provide mach_absolute_time(). Its units are not
|
||||
fixed; we need to use mach_timebase_info() to get the right functions to
|
||||
convert its units into nanoseconds.
|
||||
|
||||
To all appearances, mach_absolute_time() seems to be honest-to-goodness
|
||||
monotonic. Whether it stops during sleep or not is unspecified in
|
||||
principle, and dependent on CPU architecture in practice.
|
||||
*/
|
||||
|
||||
/* Set up "base" to use mach_absolute_time() (OSX/Mach variant).
 *
 * base  - timer state to initialize.  mach_timebase_units.numer == 0
 *         is used as the sentinel meaning "use the gettimeofday()
 *         fallback" in evutil_gettime_monotonic_().
 * flags - EV_MONOT_FALLBACK forces the gettimeofday() fallback.
 *
 * Always returns 0.
 */
int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct mach_timebase_info mi;
	memset(base, 0, sizeof(*base));
	/* OSX has mach_absolute_time() */
	if (!fallback &&
	    mach_timebase_info(&mi) == 0 &&
	    mach_absolute_time() != 0) {
		/* mach_timebase_info tells us how to convert
		 * mach_absolute_time() into nanoseconds, but we
		 * want to use microseconds instead.  Scaling the
		 * denominator by 1000 makes numer/denom a
		 * ticks-to-microseconds ratio.
		 * NOTE(review): assumes denom * 1000 does not overflow
		 * its (32-bit) field -- TODO confirm for exotic timebases. */
		mi.denom *= 1000;
		memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
	} else {
		/* Sentinel: numer == 0 means "no usable Mach timer". */
		base->mach_timebase_units.numer = 0;
	}
	return 0;
}
|
||||
|
||||
/* Fetch the current monotonic time into *tp (Mach variant).
 * Returns 0 on success, -1 on failure. */
int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t abstime, usec;
	if (base->mach_timebase_units.numer == 0) {
		/* Configure-time sentinel: no Mach timer, so use the wall
		 * clock plus the monotonic-adjustment ratchet. */
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	/* Convert raw ticks to microseconds using the (pre-scaled)
	 * numer/denom ratio from configure time.
	 * NOTE(review): abstime * numer is computed in 64 bits; assumes
	 * the product does not overflow for realistic uptimes -- TODO
	 * confirm against plausible timebase ratios. */
	abstime = mach_absolute_time();
	usec = (abstime * base->mach_timebase_units.numer)
	    / (base->mach_timebase_units.denom);
	tp->tv_sec = usec / 1000000;
	tp->tv_usec = usec % 1000000;

	return 0;
}
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_WIN32_MONOTONIC)
|
||||
/* =====
|
||||
   Turn we now to Windows.  Want monotonic time on Windows?
|
||||
|
||||
   Windows has QueryPerformanceCounter(), which gives the most high-
|
||||
resolution time. It's a pity it's not so monotonic in practice; it's
|
||||
also got some fun bugs, especially: with older Windowses, under
|
||||
virtualizations, with funny hardware, on multiprocessor systems, and so
|
||||
on. PEP418 [1] has a nice roundup of the issues here.
|
||||
|
||||
There's GetTickCount64() on Vista and later, which gives a number of 1-msec
|
||||
ticks since startup. The accuracy here might be as bad as 10-20 msec, I
|
||||
hear. There's an undocumented function (NtSetTimerResolution) that
|
||||
allegedly increases the accuracy. Good luck!
|
||||
|
||||
There's also GetTickCount(), which is only 32 bits, but seems to be
|
||||
supported on pre-Vista versions of Windows. Apparently, you can coax
|
||||
another 14 bits out of it, giving you 2231 years before rollover.
|
||||
|
||||
The less said about timeGetTime() the better.
|
||||
|
||||
"We don't care. We don't have to. We're the Phone Company."
|
||||
-- Lily Tomlin, SNL
|
||||
|
||||
Our strategy, if precise timers are turned off, is to just use the best
|
||||
GetTickCount equivalent available. If we've been asked for precise timing,
|
||||
then we mostly[2] assume that GetTickCount is monotonic, and correct
|
||||
GetPerformanceCounter to approximate it.
|
||||
|
||||
[1] http://www.python.org/dev/peps/pep-0418
|
||||
[2] Of course, we feed the Windows stuff into adjust_monotonic_time()
|
||||
anyway, just in case it isn't.
|
||||
|
||||
*/
|
||||
/*
|
||||
Parts of our logic in the win32 timer code here are closely based on
|
||||
BitTorrent's libUTP library. That code is subject to the following
|
||||
license:
|
||||
|
||||
Copyright (c) 2010 BitTorrent, Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a
|
||||
copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/* Return a 64-bit count of milliseconds since system startup, using the
 * best GetTickCount-style function found at configure time.  The 32-bit
 * fallback path mutates base to track rollovers, so callers must use a
 * single "base" consistently. */
static ev_uint64_t
evutil_GetTickCount_(struct evutil_monotonic_timer *base)
{
	if (base->GetTickCount64_fn) {
		/* Let's just use GetTickCount64 if we can. */
		return base->GetTickCount64_fn();
	} else if (base->GetTickCount_fn) {
		/* Greg Hazel assures me that this works, that BitTorrent has
		 * done it for years, and that it won't turn around and
		 * bite us. He says they found it on some game programmers'
		 * forum some time around 2007.
		 */
		ev_uint64_t v = base->GetTickCount_fn();
		return (DWORD)v | ((v >> 18) & 0xFFFFFFFF00000000);
	} else {
		/* Here's the fallback implementation. We have to use
		 * GetTickCount() with its given signature, so we only get
		 * 32 bits worth of milliseconds, which will roll over every
		 * 49 days or so. */
		DWORD ticks = GetTickCount();
		/* A smaller reading than last time means the 32-bit counter
		 * wrapped; account for it in the running adjustment. */
		if (ticks < base->last_tick_count) {
			base->adjust_tick_count += ((ev_uint64_t)1) << 32;
		}
		base->last_tick_count = ticks;
		return ticks + base->adjust_tick_count;
	}
}
|
||||
|
||||
/* Set up "base" for Windows monotonic timing.
 *
 * base  - timer state to initialize.
 * flags - EV_MONOT_PRECISE additionally enables QueryPerformanceCounter
 *         (kept honest against GetTickCount in evutil_gettime_monotonic_());
 *         EV_MONOT_FALLBACK restricts us to plain GetTickCount().
 *
 * Always returns 0.
 */
int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int precise = flags & EV_MONOT_PRECISE;
	const int fallback = flags & EV_MONOT_FALLBACK;
	HANDLE h;
	memset(base, 0, sizeof(*base));

	/* Look up GetTickCount64 (Vista+) and GetTickCount dynamically so
	 * we still run on older Windows versions that lack the former. */
	h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
	if (h != NULL && !fallback) {
		base->GetTickCount64_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount64");
		base->GetTickCount_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount");
	}

	/* Record the starting tick so later readings can be expressed as
	 * elapsed time; must happen after the function pointers are set. */
	base->first_tick = base->last_tick_count = evutil_GetTickCount_(base);
	if (precise && !fallback) {
		LARGE_INTEGER freq;
		if (QueryPerformanceFrequency(&freq)) {
			LARGE_INTEGER counter;
			QueryPerformanceCounter(&counter);
			base->first_counter = counter.QuadPart;
			base->usec_per_count = 1.0e6 / freq.QuadPart;
			base->use_performance_counter = 1;
		}
	}

	return 0;
}
|
||||
|
||||
static inline ev_int64_t
|
||||
abs64(ev_int64_t i)
|
||||
{
|
||||
return i < 0 ? -i : i;
|
||||
}
|
||||
|
||||
|
||||
/* Fetch the current monotonic time into *tp (Windows variant).
 * Always returns 0.  Uses QueryPerformanceCounter when configured for
 * precision, cross-checked against GetTickCount; otherwise just
 * GetTickCount.  Either way adjust_monotonic_time() enforces that the
 * result never moves backwards. */
int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t ticks = evutil_GetTickCount_(base);
	if (base->use_performance_counter) {
		/* Here's a trick we took from BitTorrent's libutp, at Greg
		 * Hazel's recommendation. We use QueryPerformanceCounter for
		 * our high-resolution timer, but use GetTickCount*() to keep
		 * it sane, and adjust_monotonic_time() to keep it monotonic.
		 */
		LARGE_INTEGER counter;
		ev_int64_t counter_elapsed, counter_usec_elapsed, ticks_elapsed;
		QueryPerformanceCounter(&counter);
		counter_elapsed = (ev_int64_t)
		    (counter.QuadPart - base->first_counter);
		ticks_elapsed = ticks - base->first_tick;
		/* TODO: This may upset VC6. If you need this to work with
		 * VC6, please supply an appropriate patch. */
		counter_usec_elapsed = (ev_int64_t)
		    (counter_elapsed * base->usec_per_count);

		/* ticks are msec, counter_usec_elapsed is usec; compare on
		 * a common usec scale with a 1-second tolerance. */
		if (abs64(ticks_elapsed*1000 - counter_usec_elapsed) > 1000000) {
			/* It appears that the QueryPerformanceCounter()
			 * result is more than 1 second away from
			 * GetTickCount() result. Let's adjust it to be as
			 * accurate as we can; adjust_monotonic_time() below
			 * will keep it monotonic. */
			counter_usec_elapsed = ticks_elapsed * 1000;
			/* Rebase first_counter so future readings agree with
			 * the corrected elapsed time. */
			base->first_counter = counter.QuadPart - counter_usec_elapsed / base->usec_per_count;
		}
		tp->tv_sec = counter_usec_elapsed / 1000000;
		tp->tv_usec = counter_usec_elapsed % 1000000;

	} else {
		/* We're just using GetTickCount(): ticks are milliseconds
		 * since startup. */
		tp->tv_sec = ticks / 1000;
		tp->tv_usec = (ticks % 1000) * 1000;
	}
	adjust_monotonic_time(base, tp);

	return 0;
}
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_FALLBACK_MONOTONIC)
|
||||
/* =====
|
||||
And if none of the other options work, let's just use gettimeofday(), and
|
||||
ratchet it forward so that it acts like a monotonic timer, whether it
|
||||
wants to or not.
|
||||
*/
|
||||
|
||||
int
|
||||
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
|
||||
int precise)
|
||||
{
|
||||
memset(base, 0, sizeof(*base));
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
|
||||
struct timeval *tp)
|
||||
{
|
||||
if (evutil_gettimeofday(tp, NULL) < 0)
|
||||
return -1;
|
||||
adjust_monotonic_time(base, tp);
|
||||
return 0;
|
||||
|
||||
}
|
||||
#endif
|
1
poll.c
1
poll.c
@ -51,6 +51,7 @@
|
||||
#include "evmap-internal.h"
|
||||
#include "event2/thread.h"
|
||||
#include "evthread-internal.h"
|
||||
#include "time-internal.h"
|
||||
|
||||
struct pollidx {
|
||||
int idxplus1;
|
||||
|
@ -65,6 +65,7 @@
|
||||
#include "event-internal.h"
|
||||
#include "evthread-internal.h"
|
||||
#include "log-internal.h"
|
||||
#include "time-internal.h"
|
||||
|
||||
#include "regress.h"
|
||||
|
||||
|
@ -63,6 +63,7 @@
|
||||
#include "defer-internal.h"
|
||||
#include "regress.h"
|
||||
#include "tinytest_macros.h"
|
||||
#include "time-internal.h"
|
||||
|
||||
#ifdef EVENT__HAVE_PTHREADS
|
||||
#define THREAD_T pthread_t
|
||||
|
@ -58,6 +58,7 @@
|
||||
#include "../log-internal.h"
|
||||
#include "../strlcpy-internal.h"
|
||||
#include "../mm-internal.h"
|
||||
#include "../time-internal.h"
|
||||
|
||||
#include "regress.h"
|
||||
|
||||
@ -1217,6 +1218,91 @@ end:
|
||||
;
|
||||
}
|
||||
|
||||
/* Regression test for the evutil monotonic-timer implementations.
 * setup_data selects the variant: "" (default), "precise", or
 * "fallback" (see util_testcases[]). */
static void
test_evutil_monotonic(void *data_)
{
	/* Basic sanity-test for monotonic timers. What we'd really like
	 * to do is make sure that they can't go backwards even when the
	 * system clock goes backwards. But we haven't got a good way to
	 * move the system clock backwards.
	 */
	struct basic_test_data *data = data_;
	struct evutil_monotonic_timer timer;
	const int precise = strstr(data->setup_data, "precise") != NULL;
	const int fallback = strstr(data->setup_data, "fallback") != NULL;
	struct timeval tv[10], delay;
	int total_diff = 0;

	/* wantres: sleep interval to compare against; acceptdiff: allowed
	 * average deviation; maxstep: allowed average clock granularity. */
	int flags = 0, wantres, acceptdiff, i, maxstep = 25*1000;
	if (precise)
		flags |= EV_MONOT_PRECISE;
	if (fallback)
		flags |= EV_MONOT_FALLBACK;
	if (precise || fallback) {
#ifdef _WIN32
		/* Windows timers are coarse; loosen the tolerances. */
		wantres = 10*1000;
		acceptdiff = 1000;
#else
		wantres = 300;
		acceptdiff = 100;
#endif
	} else {
		wantres = 40*1000;
		acceptdiff = 20*1000;
	}
	if (precise)
		maxstep = 500;

	TT_BLATHER(("Precise = %d", precise));
	TT_BLATHER(("Fallback = %d", fallback));

	/* First, make sure we match up with usleep. */

	delay.tv_sec = 0;
	delay.tv_usec = wantres;

	tt_int_op(evutil_configure_monotonic_time_(&timer, flags), ==, 0);

	/* Take 10 samples separated by ~wantres-microsecond sleeps. */
	for (i = 0; i < 10; ++i) {
		evutil_gettime_monotonic_(&timer, &tv[i]);
		evutil_usleep_(&delay);
	}

	/* Each sample must be strictly later than the previous, and the
	 * average gap must be within acceptdiff of the sleep length. */
	for (i = 0; i < 9; ++i) {
		struct timeval diff;
		tt_assert(evutil_timercmp(&tv[i], &tv[i+1], <));
		evutil_timersub(&tv[i+1], &tv[i], &diff);
		tt_int_op(diff.tv_sec, ==, 0);
		total_diff += diff.tv_usec;
		TT_BLATHER(("Difference = %d", (int)diff.tv_usec));
	}
	tt_int_op(abs(total_diff/9 - wantres), <, acceptdiff);

	/* Second, find out what precision we actually see. */

	/* Spin until the reading changes to measure the clock's step. */
	evutil_gettime_monotonic_(&timer, &tv[0]);
	for (i = 1; i < 10; ++i) {
		do {
			evutil_gettime_monotonic_(&timer, &tv[i]);
		} while (evutil_timercmp(&tv[i-1], &tv[i], ==));
	}

	total_diff = 0;
	for (i = 0; i < 9; ++i) {
		struct timeval diff;
		tt_assert(evutil_timercmp(&tv[i], &tv[i+1], <));
		evutil_timersub(&tv[i+1], &tv[i], &diff);
		tt_int_op(diff.tv_sec, ==, 0);
		total_diff += diff.tv_usec;
		TT_BLATHER(("Step difference = %d", (int)diff.tv_usec));
	}
	TT_BLATHER(("Average step difference = %d", total_diff / 9));
	tt_int_op(total_diff/9, <, maxstep);

end:
	;
}
|
||||
|
||||
struct testcase_t util_testcases[] = {
|
||||
{ "ipv4_parse", regress_ipv4_parse, 0, NULL, NULL },
|
||||
{ "ipv6_parse", regress_ipv6_parse, 0, NULL, NULL },
|
||||
@ -1239,6 +1325,9 @@ struct testcase_t util_testcases[] = {
|
||||
{ "mm_calloc", test_event_calloc, 0, NULL, NULL },
|
||||
{ "mm_strdup", test_event_strdup, 0, NULL, NULL },
|
||||
{ "usleep", test_evutil_usleep, 0, NULL, NULL },
|
||||
{ "monotonic", test_evutil_monotonic, 0, &basic_setup, (void*)"" },
|
||||
{ "monotonic_precise", test_evutil_monotonic, 0, &basic_setup, (void*)"precise" },
|
||||
{ "monotonic_fallback", test_evutil_monotonic, 0, &basic_setup, (void*)"fallback" },
|
||||
END_OF_TESTCASES,
|
||||
};
|
||||
|
||||
|
11
test/test.sh
11
test/test.sh
@ -42,6 +42,7 @@ setup () {
|
||||
eval "EVENT_NO$i=yes; export EVENT_NO$i"
|
||||
done
|
||||
unset EVENT_EPOLL_USE_CHANGELIST
|
||||
unset EVENT_PRECISE_TIMER
|
||||
}
|
||||
|
||||
announce () {
|
||||
@ -112,16 +113,24 @@ do_test() {
|
||||
unset EVENT_NO$1
|
||||
if test "$2" = "(changelist)" ; then
|
||||
EVENT_EPOLL_USE_CHANGELIST=yes; export EVENT_EPOLL_USE_CHANGELIST
|
||||
elif test "$2" = "(timerfd)" ; then
|
||||
EVENT_PRECISE_TIMER=1; export EVENT_PRECISE_TIMER
|
||||
elif test "$2" = "(timerfd+changelist)" ; then
|
||||
EVENT_EPOLL_USE_CHANGELIST=yes; export EVENT_EPOLL_USE_CHANGELIST
|
||||
EVENT_PRECISE_TIMER=1; export EVENT_PRECISE_TIMER
|
||||
fi
|
||||
|
||||
run_tests
|
||||
}
|
||||
|
||||
announce "Running tests:"
|
||||
|
||||
do_test EPOLL "(timerfd)"
|
||||
do_test EPOLL "(changelist)"
|
||||
do_test EPOLL "(timerfd+changelist)"
|
||||
for i in $BACKENDS; do
|
||||
do_test $i
|
||||
done
|
||||
do_test EPOLL "(changelist)"
|
||||
|
||||
if test "$FAILED" = "yes"; then
|
||||
exit 1
|
||||
|
101
time-internal.h
Normal file
101
time-internal.h
Normal file
@ -0,0 +1,101 @@
|
||||
/*
|
||||
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
|
||||
* Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. The name of the author may not be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef TIME_INTERNAL_H_INCLUDED_
|
||||
#define TIME_INTERNAL_H_INCLUDED_
|
||||
|
||||
#include "event2/event-config.h"
|
||||
#include "evconfig-private.h"
|
||||
|
||||
#ifdef EVENT__HAVE_MACH_MACH_TIME_H
|
||||
/* For mach_timebase_info */
|
||||
#include <mach/mach_time.h>
|
||||
#endif
|
||||
|
||||
#include <time.h>
|
||||
|
||||
#include "event2/util.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#if defined(EVENT__HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
|
||||
#define HAVE_POSIX_MONOTONIC
|
||||
#elif defined(EVENT__HAVE_MACH_ABSOLUTE_TIME)
|
||||
#define HAVE_MACH_MONOTONIC
|
||||
#elif defined(_WIN32)
|
||||
#define HAVE_WIN32_MONOTONIC
|
||||
#else
|
||||
#define HAVE_FALLBACK_MONOTONIC
|
||||
#endif
|
||||
|
||||
long evutil_tv_to_msec_(const struct timeval *tv);
|
||||
void evutil_usleep_(const struct timeval *tv);
|
||||
|
||||
#ifdef _WIN32
|
||||
typedef ULONGLONG (WINAPI *ev_GetTickCount_func)(void);
|
||||
#endif
|
||||
|
||||
/* State for one monotonic timer.  Initialize with
 * evutil_configure_monotonic_time_(); read with
 * evutil_gettime_monotonic_().  Only the fields for the platform
 * implementation selected above are present. */
struct evutil_monotonic_timer {

#ifdef HAVE_MACH_MONOTONIC
	/* Conversion ratio from mach_absolute_time() ticks to
	 * microseconds; numer == 0 means "use the gettimeofday()
	 * fallback". */
	struct mach_timebase_info mach_timebase_units;
#endif

#ifdef HAVE_POSIX_MONOTONIC
	/* clockid_t to pass to clock_gettime(), or -1 for the
	 * gettimeofday() fallback. */
	int monotonic_clock;
#endif

#ifdef HAVE_WIN32_MONOTONIC
	/* Dynamically-resolved tick functions; either may be NULL. */
	ev_GetTickCount_func GetTickCount64_fn;
	ev_GetTickCount_func GetTickCount_fn;
	/* Rollover tracking for the 32-bit GetTickCount() path. */
	ev_uint64_t last_tick_count;
	ev_uint64_t adjust_tick_count;

	/* Baselines captured at configure time, used to express later
	 * readings as elapsed time. */
	ev_uint64_t first_tick;
	ev_uint64_t first_counter;
	/* Microseconds per QueryPerformanceCounter count. */
	double usec_per_count;
	int use_performance_counter;
#endif

	/* Ratchet state used by adjust_monotonic_time() to keep the
	 * fallback wall-clock readings from moving backwards. */
	struct timeval adjust_monotonic_clock;
	struct timeval last_time;
};
|
||||
|
||||
#define EV_MONOT_PRECISE 1
|
||||
#define EV_MONOT_FALLBACK 2
|
||||
|
||||
int evutil_configure_monotonic_time_(struct evutil_monotonic_timer *mt,
|
||||
int flags);
|
||||
int evutil_gettime_monotonic_(struct evutil_monotonic_timer *mt, struct timeval *tv);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* TIME_INTERNAL_H_INCLUDED_ */
|
@ -43,6 +43,7 @@
|
||||
#endif
|
||||
#include "event2/util.h"
|
||||
|
||||
#include "time-internal.h"
|
||||
#include "ipv6-internal.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
@ -367,11 +368,8 @@ int evutil_sockaddr_is_loopback_(const struct sockaddr *sa);
|
||||
*/
|
||||
const char *evutil_format_sockaddr_port_(const struct sockaddr *sa, char *out, size_t outlen);
|
||||
|
||||
long evutil_tv_to_msec_(const struct timeval *tv);
|
||||
|
||||
int evutil_hex_char_to_int_(char c);
|
||||
|
||||
void evutil_usleep_(const struct timeval *tv);
|
||||
|
||||
void evutil_free_secure_rng_globals_(void);
|
||||
void evutil_free_globals_(void);
|
||||
|
@ -49,6 +49,7 @@
|
||||
#include "evmap-internal.h"
|
||||
#include "event2/thread.h"
|
||||
#include "evthread-internal.h"
|
||||
#include "time-internal.h"
|
||||
|
||||
#define XFREE(ptr) do { if (ptr) mm_free(ptr); } while (0)
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user