/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef MINHEAP_INTERNAL_H_INCLUDED_
#define MINHEAP_INTERNAL_H_INCLUDED_

#include "event2/event-config.h"
#include "evconfig-private.h"
#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/util.h"
#include "util-internal.h"
#include "mm-internal.h"

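/*
 * A binary min-heap of struct event pointers, ordered by ev_timeout, used
 * to find the next timeout that should fire.  p is the element array, n the
 * number of elements stored, and a the allocated capacity of p.  Each event
 * records its own position in ev_timeout_pos.min_heap_idx (-1 when it is
 * not in the heap).
 */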
typedef struct min_heap
{
	struct event** p;
	unsigned n, a;
} min_heap_t;

static inline void	     min_heap_ctor_(min_heap_t* s);
static inline void	     min_heap_dtor_(min_heap_t* s);
static inline void	     min_heap_elem_init_(struct event* e);
static inline int	     min_heap_elt_is_top_(const struct event *e);
static inline int	     min_heap_empty_(min_heap_t* s);
static inline unsigned	     min_heap_size_(min_heap_t* s);
static inline struct event*  min_heap_top_(min_heap_t* s);
static inline int	     min_heap_reserve_(min_heap_t* s, unsigned n);
static inline int	     min_heap_push_(min_heap_t* s, struct event* e);
static inline struct event*  min_heap_pop_(min_heap_t* s);
static inline int	     min_heap_adjust_(min_heap_t *s, struct event* e);
static inline int	     min_heap_erase_(min_heap_t* s, struct event* e);
static inline void	     min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
static inline void	     min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e);
static inline void	     min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
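
/*
 * Minimal usage sketch (illustrative only; "heap" and "ev" are hypothetical
 * locals, not names used elsewhere in libevent):
 *
 *	min_heap_t heap;
 *	min_heap_ctor_(&heap);
 *	min_heap_elem_init_(ev);                    - once, before ev is first pushed
 *	if (min_heap_push_(&heap, ev) == -1)        - -1 on allocation failure
 *		...
 *	struct event *next = min_heap_top_(&heap);  - earliest timeout, or NULL
 *	min_heap_erase_(&heap, ev);                 - remove from any position
 *	min_heap_dtor_(&heap);                      - frees the array, not the events
 */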

#define min_heap_elem_greater(a, b) \
	(evutil_timercmp(&(a)->ev_timeout, &(b)->ev_timeout, >))
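/* An event is "greater" than another when its ev_timeout expires later, so
 * the event with the earliest timeout always sits at the root of the heap. */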

void min_heap_ctor_(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
void min_heap_dtor_(min_heap_t* s) { if (s->p) mm_free(s->p); }
void min_heap_elem_init_(struct event* e) { e->ev_timeout_pos.min_heap_idx = -1; }
int min_heap_empty_(min_heap_t* s) { return 0u == s->n; }
unsigned min_heap_size_(min_heap_t* s) { return s->n; }
struct event* min_heap_top_(min_heap_t* s) { return s->n ? *s->p : 0; }

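/* Insert e, keyed by its ev_timeout.  Returns 0 on success, or -1 if the
 * heap is already at its maximum size or the element array cannot grow. */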
int min_heap_push_(min_heap_t* s, struct event* e)
{
	if (s->n == UINT32_MAX || min_heap_reserve_(s, s->n + 1))
		return -1;
	min_heap_shift_up_(s, s->n++, e);
	return 0;
}

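/* Remove and return the event with the earliest timeout, or NULL if the
 * heap is empty. */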
struct event* min_heap_pop_(min_heap_t* s)
{
	if (s->n)
	{
		struct event* e = *s->p;
		min_heap_shift_down_(s, 0u, s->p[--s->n]);
		e->ev_timeout_pos.min_heap_idx = -1;
		return e;
	}
	return 0;
}

int min_heap_elt_is_top_(const struct event *e)
{
	return e->ev_timeout_pos.min_heap_idx == 0;
}

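/* Remove e from whatever position it currently occupies.  Returns 0 on
 * success, or -1 if e is not in the heap. */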
int min_heap_erase_(min_heap_t* s, struct event* e)
{
	if (-1 != e->ev_timeout_pos.min_heap_idx)
	{
		struct event *last = s->p[--s->n];
		unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
		/* We replace e with the last element in the heap.  We might need to
		   shift it upward if it is less than its parent, or downward if it
		   is greater than one or both of its children.  Since the children
		   are known to be no less than the parent, it can't need to shift
		   both up and down. */
		if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], last))
			min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, last);
		else
			min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, last);
		e->ev_timeout_pos.min_heap_idx = -1;
		return 0;
	}
	return -1;
}

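/* Restore the heap invariant after e's timeout has been changed in place;
 * if e is not in the heap yet, it is pushed instead.  Returns 0 on success,
 * or -1 if that push fails. */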
int min_heap_adjust_(min_heap_t *s, struct event *e)
{
	if (-1 == e->ev_timeout_pos.min_heap_idx) {
		return min_heap_push_(s, e);
	} else {
		unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
		/* The position of e has changed; we shift it up or down
		 * as needed.  We can't need to do both. */
		if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], e))
			min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, e);
		else
			min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, e);
		return 0;
	}
}

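/* Make room for at least n elements.  The capacity doubles (starting at 8)
 * so that repeated pushes stay amortized O(1).  Returns 0 on success, or -1
 * on overflow or allocation failure. */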
int min_heap_reserve_(min_heap_t* s, unsigned n)
{
	if (s->a < n)
	{
		struct event** p;
		unsigned a = s->a ? s->a * 2 : 8;
		if (a < n)
			a = n;
#if (SIZE_MAX == UINT32_MAX)
		if (a > SIZE_MAX / sizeof *p)
			return -1;
#endif
		if (!(p = (struct event**)mm_realloc(s->p, a * sizeof *p)))
			return -1;
		s->p = p;
		s->a = a;
	}
	return 0;
}

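/* Move e toward the root starting at hole_index.  The caller guarantees
 * that hole_index > 0 and that e sorts before its parent, so the first move
 * is performed without re-checking either condition. */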
void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e)
{
	unsigned parent = (hole_index - 1) / 2;
	do
	{
		(s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
		hole_index = parent;
		parent = (hole_index - 1) / 2;
	} while (hole_index && min_heap_elem_greater(s->p[parent], e));
	(s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
}

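/* Move e toward the root starting at hole_index, stopping once e no longer
 * sorts before its parent (or the root is reached). */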
void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
{
	unsigned parent = (hole_index - 1) / 2;
	while (hole_index && min_heap_elem_greater(s->p[parent], e))
	{
		(s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
		hole_index = parent;
		parent = (hole_index - 1) / 2;
	}
	(s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
}

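/* Move e toward the leaves starting at hole_index, repeatedly promoting the
 * smaller of the two children until neither child sorts before e. */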
void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e)
{
	unsigned min_child = 2 * (hole_index + 1);
	while (min_child <= s->n)
	{
		min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
		if (!(min_heap_elem_greater(e, s->p[min_child])))
			break;
		(s->p[hole_index] = s->p[min_child])->ev_timeout_pos.min_heap_idx = hole_index;
		hole_index = min_child;
		min_child = 2 * (hole_index + 1);
	}
	(s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
}

#endif /* MINHEAP_INTERNAL_H_INCLUDED_ */