kqueue: make changelist and eventlist of kevent share the same array
The array used as the changelist of kevent() can be reused as the eventlist. We should make use of this feature to eliminate the extra overhead of allocating a second array.

References:
https://man.freebsd.org/cgi/man.cgi?kqueue
https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/kqueue.2.html
https://leaf.dragonflybsd.org/cgi/web-man?command=kqueue&section=2
https://man.netbsd.org/kqueue.2
https://man.openbsd.org/kqueue.2

Signed-off-by: Andy Pan <i@andypan.me>
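Background for the change below: the kqueue man pages linked above document that all changes in the changelist are applied before any pending events are read from the queue, which is why one array can serve as both the changelist and the eventlist of kevent(). A minimal standalone sketch of that pattern (not taken from this patch; the pipe, the single-slot array, and the error handling are illustrative only):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct kevent evs[1];	/* one array used as both changelist and eventlist */
	int fds[2], kq, n;

	if (pipe(fds) == -1 || (kq = kqueue()) == -1)
		return 1;

	/* Queue one change: watch the read end of the pipe. */
	EV_SET(&evs[0], fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	(void)write(fds[1], "x", 1);	/* make the fd readable so kevent() has something to report */

	/* Pass the same array as changelist and eventlist: the change is
	 * consumed first, then the returned event overwrites evs[0]. */
	n = kevent(kq, evs, 1, evs, 1, NULL);
	if (n > 0)
		printf("fd %ld is readable\n", (long)evs[0].ident);

	close(kq);
	close(fds[0]);
	close(fds[1]);
	return 0;
}

The patch applies the same idea inside libevent's kqueue backend: kqop->events is filled with changes by kq_build_changes_list() and then handed to kevent() as both arguments, so the separate kqop->changes buffer can be dropped.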
This commit is contained in:
parent afb7f48344
commit 528fbed184
kqueue.c | 143
@@ -75,9 +75,6 @@
 #define NEVENT 64
 
 struct kqop {
-	struct kevent *changes;
-	int changes_size;
-
 	struct kevent *events;
 	int events_size;
 	int kq;
@@ -138,26 +135,23 @@ kq_init(struct event_base *base)
 	kqueueop->pid = getpid();
 
 	/* Initialize fields */
-	kqueueop->changes = mm_calloc(NEVENT, sizeof(struct kevent));
-	if (kqueueop->changes == NULL)
-		goto err;
 	kqueueop->events = mm_calloc(NEVENT, sizeof(struct kevent));
 	if (kqueueop->events == NULL)
 		goto err;
-	kqueueop->events_size = kqueueop->changes_size = NEVENT;
+	kqueueop->events_size = NEVENT;
 
 	/* Check for Mac OS X kqueue bug. */
-	memset(&kqueueop->changes[0], 0, sizeof kqueueop->changes[0]);
-	kqueueop->changes[0].ident = -1;
-	kqueueop->changes[0].filter = EVFILT_READ;
-	kqueueop->changes[0].flags = EV_ADD;
+	memset(&kqueueop->events[0], 0, sizeof kqueueop->events[0]);
+	kqueueop->events[0].ident = -1;
+	kqueueop->events[0].filter = EVFILT_READ;
+	kqueueop->events[0].flags = EV_ADD;
 	/*
 	 * If kqueue works, then kevent will succeed, and it will
 	 * stick an error in events[0].  If kqueue is broken, then
 	 * kevent will fail.
 	 */
 	if (kevent(kq,
-		kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
+		kqueueop->events, 1, kqueueop->events, NEVENT, NULL) != 1 ||
 	    (int)kqueueop->events[0].ident != -1 ||
 	    !(kqueueop->events[0].flags & EV_ERROR)) {
 		event_warn("%s: detected broken kqueue; not using.", __func__);
@@ -201,6 +195,29 @@ kq_setup_kevent(struct kevent *out, evutil_socket_t fd, int filter, short change
 	}
 }
 
+static int
+kq_grow_events(struct kqop *kqop)
+{
+	size_t new_size;
+	struct kevent *new_events;
+
+	if (kqop->events_size > INT_MAX / 2 ||
+	    (size_t)kqop->events_size * 2 > EV_SIZE_MAX / sizeof(struct kevent)) {
+		event_warnx("%s: int overflow", __func__);
+		return -1;
+	}
+
+	new_size = kqop->events_size * 2;
+	new_events = mm_realloc(kqop->events, new_size * sizeof(struct kevent));
+	if (new_events == NULL) {
+		event_warn("%s: realloc", __func__);
+		return -1;
+	}
+	kqop->events = new_events;
+	kqop->events_size = new_size;
+	return 0;
+}
+
 static int
 kq_build_changes_list(const struct event_changelist *changelist,
 	struct kqop *kqop)
@@ -211,65 +228,32 @@ kq_build_changes_list(const struct event_changelist *changelist,
 	for (i = 0; i < changelist->n_changes; ++i) {
 		struct event_change *in_ch = &changelist->changes[i];
 		struct kevent *out_ch;
-		if (n_changes >= kqop->changes_size - 1) {
-			int newsize;
-			struct kevent *newchanges;
-
-			if (kqop->changes_size > INT_MAX / 2 ||
-			    (size_t)kqop->changes_size * 2 > EV_SIZE_MAX /
-			    sizeof(struct kevent)) {
-				event_warnx("%s: int overflow", __func__);
-				return (-1);
-			}
-
-			newsize = kqop->changes_size * 2;
-			newchanges = mm_realloc(kqop->changes,
-			    newsize * sizeof(struct kevent));
-			if (newchanges == NULL) {
-				event_warn("%s: realloc", __func__);
-				return (-1);
-			}
-			kqop->changes = newchanges;
-			kqop->changes_size = newsize;
-		}
 		if (in_ch->read_change) {
-			out_ch = &kqop->changes[n_changes++];
+			out_ch = &kqop->events[n_changes++];
 			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_READ,
 			    in_ch->read_change);
+			if (n_changes == kqop->events_size && kq_grow_events(kqop)) {
+				return -1;
+			}
 		}
 		if (in_ch->write_change) {
-			out_ch = &kqop->changes[n_changes++];
+			out_ch = &kqop->events[n_changes++];
 			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_WRITE,
 			    in_ch->write_change);
+			if (n_changes == kqop->events_size && kq_grow_events(kqop)) {
+				return -1;
+			}
 		}
 	}
 	return n_changes;
 }
 
-static int
-kq_grow_events(struct kqop *kqop, size_t new_size)
-{
-	struct kevent *newresult;
-
-	newresult = mm_realloc(kqop->events,
-	    new_size * sizeof(struct kevent));
-
-	if (newresult) {
-		kqop->events = newresult;
-		kqop->events_size = new_size;
-		return 0;
-	} else {
-		return -1;
-	}
-}
-
 static int
 kq_dispatch(struct event_base *base, struct timeval *tv)
 {
 	struct kqop *kqop = base->evbase;
-	struct kevent *events = kqop->events;
-	struct kevent *changes;
 	struct timespec ts, *ts_p = NULL;
+	struct kevent *events;
 	int i, n_changes, res;
 
 	if (tv != NULL) {
@@ -278,47 +262,27 @@ kq_dispatch(struct event_base *base, struct timeval *tv)
 		ts_p = &ts;
 	}
 
-	/* Build "changes" from "base->changes" */
-	EVUTIL_ASSERT(kqop->changes);
+	/* Build the changelist of kevent() from "base->changelist" */
+	EVUTIL_ASSERT(kqop->events);
 	n_changes = kq_build_changes_list(&base->changelist, kqop);
 	if (n_changes < 0)
 		return -1;
 
 	event_changelist_remove_all_(&base->changelist, base);
 
-	/* steal the changes array in case some broken code tries to call
-	 * dispatch twice at once. */
-	changes = kqop->changes;
-	kqop->changes = NULL;
-
-	/* Make sure that 'events' is at least as long as the list of changes:
-	 * otherwise errors in the changes can get reported as a -1 return
-	 * value from kevent() rather than as EV_ERROR events in the events
-	 * array.
-	 *
-	 * (We could instead handle -1 return values from kevent() by
-	 * retrying with a smaller changes array or a larger events array,
-	 * but this approach seems less risky for now.)
-	 */
-	if (kqop->events_size < n_changes) {
-		int new_size = kqop->events_size;
-		do {
-			new_size *= 2;
-		} while (new_size < n_changes);
-
-		kq_grow_events(kqop, new_size);
-		events = kqop->events;
-	}
-
+	/* Offload the events array before calling kevent() to register and
+	 * retrieve events in case some broken code tries to call dispatch
+	 * while there is already one on the fly. */
+	events = kqop->events;
+	kqop->events = NULL;
 	EVBASE_RELEASE_LOCK(base, th_base_lock);
-
-	res = kevent(kqop->kq, changes, n_changes,
-	    events, kqop->events_size, ts_p);
+	res = kevent(kqop->kq, events, n_changes, events, kqop->events_size, ts_p);
 
 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 
-	EVUTIL_ASSERT(kqop->changes == NULL);
-	kqop->changes = changes;
+	EVUTIL_ASSERT(kqop->events == NULL);
+	/* Reinstate the events array to provision the next dispatch. */
+	kqop->events = events;
 
 	if (res == -1) {
 		if (errno != EINTR) {
@@ -420,9 +384,10 @@ kq_dispatch(struct event_base *base, struct timeval *tv)
 	}
 
 	if (res == kqop->events_size) {
-		/* We used all the events space that we have. Maybe we should
-		   make it bigger. */
-		kq_grow_events(kqop, kqop->events_size * 2);
+		/* We've used up all the events space in this round.
+		 * Try to double the size of the eventlist for next round. */
+		if (kq_grow_events(kqop) < 0)
+			return -1;
 	}
 
 	return (0);
@@ -431,8 +396,6 @@ kq_dispatch(struct event_base *base, struct timeval *tv)
 static void
 kqop_free(struct kqop *kqop)
 {
-	if (kqop->changes)
-		mm_free(kqop->changes);
 	if (kqop->events)
 		mm_free(kqop->events);
 	if (kqop->kq >= 0 && kqop->pid == getpid())