Mirror of https://github.com/libevent/libevent.git
Protect min_heap_push_ against integer overflow.
Converting unsigned to size_t for the sizes of memory objects allows proper
handling of very large heaps on 64-bit systems.

Signed-off-by: Tobias Stoeckmann <tobias@stoeckmann.org>
Closes: #799
(cherry-picked)
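For context, a minimal sketch (not part of the patch) of the failure mode the
commit guards against: the heap counters were plain unsigned, so on an LP64
system where unsigned is 32 bits a sufficiently large heap can make a
"current size + 1" computation wrap around, and a capacity reservation that
silently succeeds without growing leads to an out-of-bounds write. Switching
the counters and indices to size_t ties them to the platform's address space
instead.

/* Illustration only -- not libevent code.  Shows how an unsigned
 * element count can wrap where a size_t one cannot (on LP64). */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned n32 = UINT_MAX;      /* heap already holds 2^32-1 entries */
	unsigned need32 = n32 + 1;    /* wraps to 0: "room for 0 elements" */

	size_t n64 = (size_t)UINT_MAX;
	size_t need64 = n64 + 1;      /* 2^32: still representable on LP64 */

	printf("unsigned: %u -> %u\n", n32, need32);
	printf("size_t:   %zu -> %zu\n", n64, need64);
	return 0;
}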
parent 16d8564a2c
commit 176fd56655

Changed files: event.c (18 changes), plus the matching declarations in
include/event2/event_struct.h and minheap-internal.h.
--- a/event.c
+++ b/event.c
@@ -842,7 +842,8 @@ static int event_base_free_queues_(struct event_base *base, int run_finalizers)
 static void
 event_base_free_(struct event_base *base, int run_finalizers)
 {
-	int i, n_deleted=0;
+	int i;
+	size_t n_deleted=0;
 	struct event *ev;
 	struct evwatch *watcher;
 	/* XXXX grab the lock? If there is contention when one thread frees
@@ -918,7 +919,7 @@ event_base_free_(struct event_base *base, int run_finalizers)
 	}
 
 	if (n_deleted)
-		event_debug(("%s: %d events were still set in base",
+		event_debug(("%s: "EV_SIZE_FMT" events were still set in base",
 			__func__, n_deleted));
 
 	while (LIST_FIRST(&base->once_events)) {
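n_deleted is a size_t now, so the %d conversion no longer matches its
argument. EV_SIZE_FMT is libevent's portable printf format macro for size_t
(from event2/util.h), spliced into the message by C string-literal
concatenation. A minimal standalone sketch of the pattern, assuming a C99
host where the macro can simply be "%zu":

/* Sketch of the EV_SIZE_FMT pattern; the #define here is an
 * assumption for C99 hosts -- libevent selects it per platform. */
#include <stdio.h>

#define EV_SIZE_FMT "%zu"

int main(void)
{
	size_t n_deleted = 42;
	printf("%s: "EV_SIZE_FMT" events were still set in base\n",
	       __func__, n_deleted);
	return 0;
}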
@@ -3712,7 +3713,7 @@ event_base_foreach_event_nolock_(struct event_base *base,
 	event_base_foreach_event_cb fn, void *arg)
 {
 	int r, i;
-	unsigned u;
+	size_t u;
 	struct event *ev;
 
 	/* Start out with all the EVLIST_INSERTED events. */
@@ -3865,7 +3866,7 @@ event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short event
 	/* If we want to activate timer events, loop and activate each event with
 	 * the same fd in both the timeheap and common timeouts list */
 	int i;
-	unsigned u;
+	size_t u;
 	struct event *ev;
 
 	for (u = 0; u < base->timeheap.n; ++u) {
@@ -3995,20 +3996,21 @@ void
 event_base_assert_ok_nolock_(struct event_base *base)
 {
 	int i;
+	size_t u;
 	int count;
 
 	/* First do checks on the per-fd and per-signal lists */
 	evmap_check_integrity_(base);
 
 	/* Check the heap property */
-	for (i = 1; i < (int)base->timeheap.n; ++i) {
-		int parent = (i - 1) / 2;
+	for (u = 1; u < base->timeheap.n; ++u) {
+		size_t parent = (u - 1) / 2;
 		struct event *ev, *p_ev;
-		ev = base->timeheap.p[i];
+		ev = base->timeheap.p[u];
 		p_ev = base->timeheap.p[parent];
 		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
 		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
-		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
+		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == u);
 	}
 
 	/* Check that the common timeouts are fine */
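The integrity walk above now uses a size_t index, and starting at u = 1 also
keeps (u - 1) well-defined for the unsigned type (at u = 0 it would wrap
rather than go negative). A toy version of the same invariant check on an
int array, using the same indexing scheme:

/* Toy analogue of the heap-property walk: every element must be
 * >= its parent at (u - 1) / 2.  u starts at 1 so (u - 1) never
 * wraps for the unsigned index type. */
#include <assert.h>
#include <stddef.h>

static void assert_min_heap(const int *heap, size_t n)
{
	size_t u;
	for (u = 1; u < n; ++u) {
		size_t parent = (u - 1) / 2;
		assert(heap[parent] <= heap[u]);
	}
}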
--- a/include/event2/event_struct.h
+++ b/include/event2/event_struct.h
@@ -126,7 +126,7 @@ struct event {
 	/* for managing timeouts */
 	union {
 		TAILQ_ENTRY(event) ev_next_with_common_timeout;
-		int min_heap_idx;
+		size_t min_heap_idx;
 	} ev_timeout_pos;
 	evutil_socket_t ev_fd;
 
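With min_heap_idx now unsigned, -1 can no longer mark "not on the heap", so
the patch switches the sentinel to EV_SIZE_MAX (the largest size_t value,
from event2/util.h) throughout. A standalone sketch of the same convention,
with a hypothetical NOT_QUEUED standing in for EV_SIZE_MAX:

/* Sketch of the sentinel convention; NOT_QUEUED stands in for
 * libevent's EV_SIZE_MAX and equals (size_t)-1, i.e. SIZE_MAX. */
#include <stddef.h>

#define NOT_QUEUED ((size_t)-1)

struct toy_event { size_t min_heap_idx; };

static void toy_elem_init(struct toy_event *e)
{
	e->min_heap_idx = NOT_QUEUED;          /* cf. min_heap_elem_init_ */
}

static int toy_elt_is_queued(const struct toy_event *e)
{
	return e->min_heap_idx != NOT_QUEUED;  /* cf. min_heap_erase_ test */
}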
--- a/minheap-internal.h
+++ b/minheap-internal.h
@@ -39,7 +39,7 @@
 typedef struct min_heap
 {
 	struct event** p;
-	unsigned n, a;
+	size_t n, a;
 } min_heap_t;
 
 static inline void min_heap_ctor_(min_heap_t* s);
@@ -47,25 +47,25 @@ static inline void min_heap_dtor_(min_heap_t* s);
 static inline void min_heap_elem_init_(struct event* e);
 static inline int min_heap_elt_is_top_(const struct event *e);
 static inline int min_heap_empty_(min_heap_t* s);
-static inline unsigned min_heap_size_(min_heap_t* s);
+static inline size_t min_heap_size_(min_heap_t* s);
 static inline struct event* min_heap_top_(min_heap_t* s);
-static inline int min_heap_reserve_(min_heap_t* s, unsigned n);
+static inline int min_heap_reserve_(min_heap_t* s, size_t n);
 static inline int min_heap_push_(min_heap_t* s, struct event* e);
 static inline struct event* min_heap_pop_(min_heap_t* s);
 static inline int min_heap_adjust_(min_heap_t *s, struct event* e);
 static inline int min_heap_erase_(min_heap_t* s, struct event* e);
-static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
-static inline void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e);
-static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
+static inline void min_heap_shift_up_(min_heap_t* s, size_t hole_index, struct event* e);
+static inline void min_heap_shift_up_unconditional_(min_heap_t* s, size_t hole_index, struct event* e);
+static inline void min_heap_shift_down_(min_heap_t* s, size_t hole_index, struct event* e);
 
 #define min_heap_elem_greater(a, b) \
 	(evutil_timercmp(&(a)->ev_timeout, &(b)->ev_timeout, >))
 
 void min_heap_ctor_(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
 void min_heap_dtor_(min_heap_t* s) { if (s->p) mm_free(s->p); }
-void min_heap_elem_init_(struct event* e) { e->ev_timeout_pos.min_heap_idx = -1; }
-int min_heap_empty_(min_heap_t* s) { return 0u == s->n; }
-unsigned min_heap_size_(min_heap_t* s) { return s->n; }
+void min_heap_elem_init_(struct event* e) { e->ev_timeout_pos.min_heap_idx = EV_SIZE_MAX; }
+int min_heap_empty_(min_heap_t* s) { return 0 == s->n; }
+size_t min_heap_size_(min_heap_t* s) { return s->n; }
 struct event* min_heap_top_(min_heap_t* s) { return s->n ? *s->p : 0; }
 
 int min_heap_push_(min_heap_t* s, struct event* e)
@@ -81,8 +81,8 @@ struct event* min_heap_pop_(min_heap_t* s)
 	if (s->n)
 	{
 		struct event* e = *s->p;
-		min_heap_shift_down_(s, 0u, s->p[--s->n]);
-		e->ev_timeout_pos.min_heap_idx = -1;
+		min_heap_shift_down_(s, 0, s->p[--s->n]);
+		e->ev_timeout_pos.min_heap_idx = EV_SIZE_MAX;
 		return e;
 	}
 	return 0;
@@ -95,10 +95,10 @@ int min_heap_elt_is_top_(const struct event *e)
 
 int min_heap_erase_(min_heap_t* s, struct event* e)
 {
-	if (-1 != e->ev_timeout_pos.min_heap_idx)
+	if (EV_SIZE_MAX != e->ev_timeout_pos.min_heap_idx)
 	{
 		struct event *last = s->p[--s->n];
-		unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
+		size_t parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
 		/* we replace e with the last element in the heap. We might need to
 		   shift it upward if it is less than its parent, or downward if it is
 		   greater than one or both its children. Since the children are known
@@ -108,7 +108,7 @@ int min_heap_erase_(min_heap_t* s, struct event* e)
 			min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, last);
 		else
 			min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, last);
-		e->ev_timeout_pos.min_heap_idx = -1;
+		e->ev_timeout_pos.min_heap_idx = EV_SIZE_MAX;
 		return 0;
 	}
 	return -1;
@@ -116,10 +116,10 @@ int min_heap_erase_(min_heap_t* s, struct event* e)
 
 int min_heap_adjust_(min_heap_t *s, struct event *e)
 {
-	if (-1 == e->ev_timeout_pos.min_heap_idx) {
+	if (EV_SIZE_MAX == e->ev_timeout_pos.min_heap_idx) {
 		return min_heap_push_(s, e);
 	} else {
-		unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
+		size_t parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
 		/* The position of e has changed; we shift it up or down
 		 * as needed. We can't need to do both. */
 		if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], e))
@@ -130,12 +130,12 @@ int min_heap_adjust_(min_heap_t *s, struct event *e)
 	}
 }
 
-int min_heap_reserve_(min_heap_t* s, unsigned n)
+int min_heap_reserve_(min_heap_t* s, size_t n)
 {
 	if (s->a < n)
 	{
 		struct event** p;
-		unsigned a = s->a ? s->a * 2 : 8;
+		size_t a = s->a ? s->a * 2 : 8;
 		if (a < n)
 			a = n;
 		if (!(p = (struct event**)mm_realloc(s->p, a * sizeof *p)))
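min_heap_reserve_ keeps the doubling growth policy; with size_t sizes the
remaining theoretical overflow is the a * sizeof *p byte count. A hedged
sketch (not the patch's code) of what an explicitly overflow-checked
reserve on a generic pointer array could look like:

/* Illustrative overflow-checked variant -- not libevent's code.
 * Rejects capacities whose byte count would not fit in size_t. */
#include <stdint.h>
#include <stdlib.h>

static int toy_reserve(void ***p, size_t *a, size_t n)
{
	if (*a < n) {
		void **q;
		size_t cap = *a ? *a * 2 : 8;
		if (cap < n)	/* doubling overshot or wrapped; clamp to n */
			cap = n;
		if (cap > SIZE_MAX / sizeof *q)	/* multiply would overflow */
			return -1;
		if (!(q = realloc(*p, cap * sizeof *q)))
			return -1;
		*p = q;
		*a = cap;
	}
	return 0;
}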
@@ -146,9 +146,9 @@ int min_heap_reserve_(min_heap_t* s, unsigned n)
 	return 0;
 }
 
-void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e)
+void min_heap_shift_up_unconditional_(min_heap_t* s, size_t hole_index, struct event* e)
 {
-	unsigned parent = (hole_index - 1) / 2;
+	size_t parent = (hole_index - 1) / 2;
 	do
 	{
 		(s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
@@ -158,9 +158,9 @@ void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct
 	(s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
 }
 
-void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
+void min_heap_shift_up_(min_heap_t* s, size_t hole_index, struct event* e)
 {
-	unsigned parent = (hole_index - 1) / 2;
+	size_t parent = (hole_index - 1) / 2;
 	while (hole_index && min_heap_elem_greater(s->p[parent], e))
 	{
 		(s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
@@ -170,9 +170,9 @@ void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
 	(s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
 }
 
-void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e)
+void min_heap_shift_down_(min_heap_t* s, size_t hole_index, struct event* e)
 {
-	unsigned min_child = 2 * (hole_index + 1);
+	size_t min_child = 2 * (hole_index + 1);
 	while (min_child <= s->n)
 	{
 		min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
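The decrement in that loop body is the heap's branchless child selection:
the children of hole_index sit at 2*hole_index+1 and 2*hole_index+2, the
loop starts at the right child 2*(hole_index+1), and subtracting the boolean
steps back to the left child when the right one is missing or larger. The
same idea on a plain int array:

/* Toy model of min_heap_shift_down_'s child selection.  The caller
 * guarantees min_child <= n on entry (a left child exists), so the
 * short-circuit || never reads heap[n]. */
#include <stddef.h>

static size_t pick_min_child(const int *heap, size_t n, size_t hole_index)
{
	size_t min_child = 2 * (hole_index + 1);	/* right child */
	min_child -= min_child == n			/* no right child */
		|| heap[min_child] > heap[min_child - 1];/* right > left */
	return min_child;
}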