1c43e99fdSEd Maste /*
2c43e99fdSEd Maste * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
3c43e99fdSEd Maste * Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
4c43e99fdSEd Maste * All rights reserved.
5c43e99fdSEd Maste *
6c43e99fdSEd Maste * Redistribution and use in source and binary forms, with or without
7c43e99fdSEd Maste * modification, are permitted provided that the following conditions
8c43e99fdSEd Maste * are met:
9c43e99fdSEd Maste * 1. Redistributions of source code must retain the above copyright
10c43e99fdSEd Maste * notice, this list of conditions and the following disclaimer.
11c43e99fdSEd Maste * 2. Redistributions in binary form must reproduce the above copyright
12c43e99fdSEd Maste * notice, this list of conditions and the following disclaimer in the
13c43e99fdSEd Maste * documentation and/or other materials provided with the distribution.
14c43e99fdSEd Maste * 3. The name of the author may not be used to endorse or promote products
15c43e99fdSEd Maste * derived from this software without specific prior written permission.
16c43e99fdSEd Maste *
17c43e99fdSEd Maste * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18c43e99fdSEd Maste * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19c43e99fdSEd Maste * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20c43e99fdSEd Maste * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21c43e99fdSEd Maste * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22c43e99fdSEd Maste * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23c43e99fdSEd Maste * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24c43e99fdSEd Maste * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25c43e99fdSEd Maste * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26c43e99fdSEd Maste * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27c43e99fdSEd Maste */
28c43e99fdSEd Maste #include "evconfig-private.h"
29c43e99fdSEd Maste
30c43e99fdSEd Maste #include <sys/types.h>
31c43e99fdSEd Maste #include <limits.h>
32c43e99fdSEd Maste #include <string.h>
33c43e99fdSEd Maste #include <stdlib.h>
34c43e99fdSEd Maste
35c43e99fdSEd Maste #include "event2/event.h"
36c43e99fdSEd Maste #include "event2/event_struct.h"
37c43e99fdSEd Maste #include "event2/util.h"
38c43e99fdSEd Maste #include "event2/bufferevent.h"
39c43e99fdSEd Maste #include "event2/bufferevent_struct.h"
40c43e99fdSEd Maste #include "event2/buffer.h"
41c43e99fdSEd Maste
42c43e99fdSEd Maste #include "ratelim-internal.h"
43c43e99fdSEd Maste
44c43e99fdSEd Maste #include "bufferevent-internal.h"
45c43e99fdSEd Maste #include "mm-internal.h"
46c43e99fdSEd Maste #include "util-internal.h"
47c43e99fdSEd Maste #include "event-internal.h"
48c43e99fdSEd Maste
49c43e99fdSEd Maste int
ev_token_bucket_init_(struct ev_token_bucket * bucket,const struct ev_token_bucket_cfg * cfg,ev_uint32_t current_tick,int reinitialize)50c43e99fdSEd Maste ev_token_bucket_init_(struct ev_token_bucket *bucket,
51c43e99fdSEd Maste const struct ev_token_bucket_cfg *cfg,
52c43e99fdSEd Maste ev_uint32_t current_tick,
53c43e99fdSEd Maste int reinitialize)
54c43e99fdSEd Maste {
55c43e99fdSEd Maste if (reinitialize) {
56c43e99fdSEd Maste /* on reinitialization, we only clip downwards, since we've
57c43e99fdSEd Maste already used who-knows-how-much bandwidth this tick. We
58c43e99fdSEd Maste leave "last_updated" as it is; the next update will add the
59c43e99fdSEd Maste appropriate amount of bandwidth to the bucket.
60c43e99fdSEd Maste */
61c43e99fdSEd Maste if (bucket->read_limit > (ev_int64_t) cfg->read_maximum)
62c43e99fdSEd Maste bucket->read_limit = cfg->read_maximum;
63c43e99fdSEd Maste if (bucket->write_limit > (ev_int64_t) cfg->write_maximum)
64c43e99fdSEd Maste bucket->write_limit = cfg->write_maximum;
65c43e99fdSEd Maste } else {
66c43e99fdSEd Maste bucket->read_limit = cfg->read_rate;
67c43e99fdSEd Maste bucket->write_limit = cfg->write_rate;
68c43e99fdSEd Maste bucket->last_updated = current_tick;
69c43e99fdSEd Maste }
70c43e99fdSEd Maste return 0;
71c43e99fdSEd Maste }
72c43e99fdSEd Maste
/* Add the tokens accrued since bucket->last_updated to 'bucket', clamping
 * each direction at the maximum configured in 'cfg'.  Returns 1 if the
 * bucket was updated, 0 if no whole tick has elapsed (or the tick counter
 * appears to have moved backwards). */
int
ev_token_bucket_update_(struct ev_token_bucket *bucket,
    const struct ev_token_bucket_cfg *cfg,
    ev_uint32_t current_tick)
{
	/* It's okay if the tick number overflows, since we'll just
	 * wrap around when we do the unsigned subtraction. */
	unsigned n_ticks = current_tick - bucket->last_updated;

	/* Make sure some ticks actually happened, and that time didn't
	 * roll back.  (An n_ticks above INT_MAX means current_tick
	 * wrapped around behind last_updated.) */
	if (n_ticks == 0 || n_ticks > INT_MAX)
		return 0;

	/* Naively, we would say
		bucket->limit += n_ticks * cfg->rate;

		if (bucket->limit > cfg->maximum)
			bucket->limit = cfg->maximum;

	   But we're worried about overflow, so we do it like this:
	*/

	/* If adding n_ticks * rate would reach or pass the maximum,
	 * clamp to the maximum; otherwise the addition cannot overflow. */
	if ((cfg->read_maximum - bucket->read_limit) / n_ticks < cfg->read_rate)
		bucket->read_limit = cfg->read_maximum;
	else
		bucket->read_limit += n_ticks * cfg->read_rate;


	if ((cfg->write_maximum - bucket->write_limit) / n_ticks < cfg->write_rate)
		bucket->write_limit = cfg->write_maximum;
	else
		bucket->write_limit += n_ticks * cfg->write_rate;


	bucket->last_updated = current_tick;

	return 1;
}
112c43e99fdSEd Maste
113c43e99fdSEd Maste static inline void
bufferevent_update_buckets(struct bufferevent_private * bev)114c43e99fdSEd Maste bufferevent_update_buckets(struct bufferevent_private *bev)
115c43e99fdSEd Maste {
116c43e99fdSEd Maste /* Must hold lock on bev. */
117c43e99fdSEd Maste struct timeval now;
118c43e99fdSEd Maste unsigned tick;
119c43e99fdSEd Maste event_base_gettimeofday_cached(bev->bev.ev_base, &now);
120c43e99fdSEd Maste tick = ev_token_bucket_get_tick_(&now, bev->rate_limiting->cfg);
121c43e99fdSEd Maste if (tick != bev->rate_limiting->limit.last_updated)
122c43e99fdSEd Maste ev_token_bucket_update_(&bev->rate_limiting->limit,
123c43e99fdSEd Maste bev->rate_limiting->cfg, tick);
124c43e99fdSEd Maste }
125c43e99fdSEd Maste
126c43e99fdSEd Maste ev_uint32_t
ev_token_bucket_get_tick_(const struct timeval * tv,const struct ev_token_bucket_cfg * cfg)127c43e99fdSEd Maste ev_token_bucket_get_tick_(const struct timeval *tv,
128c43e99fdSEd Maste const struct ev_token_bucket_cfg *cfg)
129c43e99fdSEd Maste {
130c43e99fdSEd Maste /* This computation uses two multiplies and a divide. We could do
131c43e99fdSEd Maste * fewer if we knew that the tick length was an integer number of
132c43e99fdSEd Maste * seconds, or if we knew it divided evenly into a second. We should
133c43e99fdSEd Maste * investigate that more.
134c43e99fdSEd Maste */
135c43e99fdSEd Maste
136c43e99fdSEd Maste /* We cast to an ev_uint64_t first, since we don't want to overflow
137c43e99fdSEd Maste * before we do the final divide. */
138c43e99fdSEd Maste ev_uint64_t msec = (ev_uint64_t)tv->tv_sec * 1000 + tv->tv_usec / 1000;
139c43e99fdSEd Maste return (unsigned)(msec / cfg->msec_per_tick);
140c43e99fdSEd Maste }
141c43e99fdSEd Maste
142c43e99fdSEd Maste struct ev_token_bucket_cfg *
ev_token_bucket_cfg_new(size_t read_rate,size_t read_burst,size_t write_rate,size_t write_burst,const struct timeval * tick_len)143c43e99fdSEd Maste ev_token_bucket_cfg_new(size_t read_rate, size_t read_burst,
144c43e99fdSEd Maste size_t write_rate, size_t write_burst,
145c43e99fdSEd Maste const struct timeval *tick_len)
146c43e99fdSEd Maste {
147c43e99fdSEd Maste struct ev_token_bucket_cfg *r;
148c43e99fdSEd Maste struct timeval g;
149c43e99fdSEd Maste if (! tick_len) {
150c43e99fdSEd Maste g.tv_sec = 1;
151c43e99fdSEd Maste g.tv_usec = 0;
152c43e99fdSEd Maste tick_len = &g;
153c43e99fdSEd Maste }
154c43e99fdSEd Maste if (read_rate > read_burst || write_rate > write_burst ||
155c43e99fdSEd Maste read_rate < 1 || write_rate < 1)
156c43e99fdSEd Maste return NULL;
157c43e99fdSEd Maste if (read_rate > EV_RATE_LIMIT_MAX ||
158c43e99fdSEd Maste write_rate > EV_RATE_LIMIT_MAX ||
159c43e99fdSEd Maste read_burst > EV_RATE_LIMIT_MAX ||
160c43e99fdSEd Maste write_burst > EV_RATE_LIMIT_MAX)
161c43e99fdSEd Maste return NULL;
162c43e99fdSEd Maste r = mm_calloc(1, sizeof(struct ev_token_bucket_cfg));
163c43e99fdSEd Maste if (!r)
164c43e99fdSEd Maste return NULL;
165c43e99fdSEd Maste r->read_rate = read_rate;
166c43e99fdSEd Maste r->write_rate = write_rate;
167c43e99fdSEd Maste r->read_maximum = read_burst;
168c43e99fdSEd Maste r->write_maximum = write_burst;
169c43e99fdSEd Maste memcpy(&r->tick_timeout, tick_len, sizeof(struct timeval));
170c43e99fdSEd Maste r->msec_per_tick = (tick_len->tv_sec * 1000) +
171c43e99fdSEd Maste (tick_len->tv_usec & COMMON_TIMEOUT_MICROSECONDS_MASK)/1000;
172c43e99fdSEd Maste return r;
173c43e99fdSEd Maste }
174c43e99fdSEd Maste
void
ev_token_bucket_cfg_free(struct ev_token_bucket_cfg *cfg)
{
	/* Release a configuration allocated by ev_token_bucket_cfg_new(). */
	mm_free(cfg);
}
180c43e99fdSEd Maste
/* Default values for max_single_read & max_single_write variables. */
#define MAX_SINGLE_READ_DEFAULT 16384
#define MAX_SINGLE_WRITE_DEFAULT 16384

/* Acquire/release the lock protecting a rate-limit group's shared state. */
#define LOCK_GROUP(g) EVLOCK_LOCK((g)->lock, 0)
#define UNLOCK_GROUP(g) EVLOCK_UNLOCK((g)->lock, 0)

/* Forward declarations: suspend or resume IO on every member of a group. */
static int bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g);
static int bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g);
static void bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g);
static void bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g);
192c43e99fdSEd Maste
193c43e99fdSEd Maste /** Helper: figure out the maximum amount we should write if is_write, or
194c43e99fdSEd Maste the maximum amount we should read if is_read. Return that maximum, or
195c43e99fdSEd Maste 0 if our bucket is wholly exhausted.
196c43e99fdSEd Maste */
static inline ev_ssize_t
bufferevent_get_rlim_max_(struct bufferevent_private *bev, int is_write)
{
	/* needs lock on bev. */
	/* Start from the per-bufferevent single-operation cap. */
	ev_ssize_t max_so_far = is_write?bev->max_single_write:bev->max_single_read;

/* Select the limit field for the direction we're asking about. */
#define LIM(x) \
	(is_write ? (x).write_limit : (x).read_limit)

/* Is the group suspended in the direction we're asking about? */
#define GROUP_SUSPENDED(g) \
	(is_write ? (g)->write_suspended : (g)->read_suspended)

	/* Sets max_so_far to MIN(x, max_so_far) */
#define CLAMPTO(x) \
	do { \
		if (max_so_far > (x)) \
			max_so_far = (x); \
	} while (0);

	if (!bev->rate_limiting)
		return max_so_far;

	/* If rate-limiting is enabled at all, update the appropriate
	   bucket, and take the smaller of our rate limit and the group
	   rate limit.
	 */

	if (bev->rate_limiting->cfg) {
		bufferevent_update_buckets(bev);
		max_so_far = LIM(bev->rate_limiting->limit);
	}
	if (bev->rate_limiting->group) {
		struct bufferevent_rate_limit_group *g =
		    bev->rate_limiting->group;
		ev_ssize_t share;
		LOCK_GROUP(g);
		if (GROUP_SUSPENDED(g)) {
			/* We can get here if we failed to lock this
			 * particular bufferevent while suspending the whole
			 * group. */
			if (is_write)
				bufferevent_suspend_write_(&bev->bev,
				    BEV_SUSPEND_BW_GROUP);
			else
				bufferevent_suspend_read_(&bev->bev,
				    BEV_SUSPEND_BW_GROUP);
			share = 0;
		} else {
			/* XXXX probably we should divide among the active
			 * members, not the total members. */
			share = LIM(g->rate_limit) / g->n_members;
			if (share < g->min_share)
				share = g->min_share;
		}
		UNLOCK_GROUP(g);
		CLAMPTO(share);
	}

	/* A negative bucket means we've overspent; report zero available. */
	if (max_so_far < 0)
		max_so_far = 0;
	return max_so_far;
}
259c43e99fdSEd Maste
260c43e99fdSEd Maste ev_ssize_t
bufferevent_get_read_max_(struct bufferevent_private * bev)261c43e99fdSEd Maste bufferevent_get_read_max_(struct bufferevent_private *bev)
262c43e99fdSEd Maste {
263c43e99fdSEd Maste return bufferevent_get_rlim_max_(bev, 0);
264c43e99fdSEd Maste }
265c43e99fdSEd Maste
266c43e99fdSEd Maste ev_ssize_t
bufferevent_get_write_max_(struct bufferevent_private * bev)267c43e99fdSEd Maste bufferevent_get_write_max_(struct bufferevent_private *bev)
268c43e99fdSEd Maste {
269c43e99fdSEd Maste return bufferevent_get_rlim_max_(bev, 1);
270c43e99fdSEd Maste }
271c43e99fdSEd Maste
/* Charge 'bytes' of reading against this bufferevent's bucket and (if any)
 * its group's bucket, suspending or resuming reads as the buckets empty or
 * refill.  Returns 0 on success, -1 if the refill timer could not be
 * scheduled. */
int
bufferevent_decrement_read_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes)
{
	/* XXXXX Make sure all users of this function check its return value */
	int r = 0;
	/* need to hold lock on bev */
	if (!bev->rate_limiting)
		return 0;

	if (bev->rate_limiting->cfg) {
		bev->rate_limiting->limit.read_limit -= bytes;
		if (bev->rate_limiting->limit.read_limit <= 0) {
			/* Bucket exhausted: stop reading and arm the refill
			 * timer for the next tick. */
			bufferevent_suspend_read_(&bev->bev, BEV_SUSPEND_BW);
			if (event_add(&bev->rate_limiting->refill_bucket_event,
				&bev->rate_limiting->cfg->tick_timeout) < 0)
				r = -1;
		} else if (bev->read_suspended & BEV_SUSPEND_BW) {
			/* Bucket has tokens again: drop the refill timer
			 * unless the write side still needs it, then resume
			 * reading. */
			if (!(bev->write_suspended & BEV_SUSPEND_BW))
				event_del(&bev->rate_limiting->refill_bucket_event);
			bufferevent_unsuspend_read_(&bev->bev, BEV_SUSPEND_BW);
		}
	}

	if (bev->rate_limiting->group) {
		/* Apply the same charge to the shared group bucket under the
		 * group lock, and update the group-wide totals. */
		LOCK_GROUP(bev->rate_limiting->group);
		bev->rate_limiting->group->rate_limit.read_limit -= bytes;
		bev->rate_limiting->group->total_read += bytes;
		if (bev->rate_limiting->group->rate_limit.read_limit <= 0) {
			bev_group_suspend_reading_(bev->rate_limiting->group);
		} else if (bev->rate_limiting->group->read_suspended) {
			bev_group_unsuspend_reading_(bev->rate_limiting->group);
		}
		UNLOCK_GROUP(bev->rate_limiting->group);
	}

	return r;
}
309c43e99fdSEd Maste
/* Charge 'bytes' of writing against this bufferevent's bucket and (if any)
 * its group's bucket, suspending or resuming writes as the buckets empty or
 * refill.  Returns 0 on success, -1 if the refill timer could not be
 * scheduled.  Mirrors bufferevent_decrement_read_buckets_. */
int
bufferevent_decrement_write_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes)
{
	/* XXXXX Make sure all users of this function check its return value */
	int r = 0;
	/* need to hold lock */
	if (!bev->rate_limiting)
		return 0;

	if (bev->rate_limiting->cfg) {
		bev->rate_limiting->limit.write_limit -= bytes;
		if (bev->rate_limiting->limit.write_limit <= 0) {
			/* Bucket exhausted: stop writing and arm the refill
			 * timer for the next tick. */
			bufferevent_suspend_write_(&bev->bev, BEV_SUSPEND_BW);
			if (event_add(&bev->rate_limiting->refill_bucket_event,
				&bev->rate_limiting->cfg->tick_timeout) < 0)
				r = -1;
		} else if (bev->write_suspended & BEV_SUSPEND_BW) {
			/* Bucket has tokens again: drop the refill timer
			 * unless the read side still needs it, then resume
			 * writing. */
			if (!(bev->read_suspended & BEV_SUSPEND_BW))
				event_del(&bev->rate_limiting->refill_bucket_event);
			bufferevent_unsuspend_write_(&bev->bev, BEV_SUSPEND_BW);
		}
	}

	if (bev->rate_limiting->group) {
		/* Apply the same charge to the shared group bucket under the
		 * group lock, and update the group-wide totals. */
		LOCK_GROUP(bev->rate_limiting->group);
		bev->rate_limiting->group->rate_limit.write_limit -= bytes;
		bev->rate_limiting->group->total_written += bytes;
		if (bev->rate_limiting->group->rate_limit.write_limit <= 0) {
			bev_group_suspend_writing_(bev->rate_limiting->group);
		} else if (bev->rate_limiting->group->write_suspended) {
			bev_group_unsuspend_writing_(bev->rate_limiting->group);
		}
		UNLOCK_GROUP(bev->rate_limiting->group);
	}

	return r;
}
347c43e99fdSEd Maste
/** Stop reading on every bufferevent in <b>g</b>.
    Caller must hold the group lock.  Always returns 0. */
static int
bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g)
{
	/* Needs group lock */
	struct bufferevent_private *bev;
	g->read_suspended = 1;
	g->pending_unsuspend_read = 0;

	/* Note that in this loop we call EVLOCK_TRY_LOCK_ instead of BEV_LOCK,
	   to prevent a deadlock.  (Ordinarily, the group lock nests inside
	   the bufferevent locks.  If we are unable to lock any individual
	   bufferevent, it will find out later when it looks at its limit
	   and sees that its group is suspended.)
	*/
	LIST_FOREACH(bev, &g->members, rate_limiting->next_in_group) {
		if (EVLOCK_TRY_LOCK_(bev->lock)) {
			bufferevent_suspend_read_(&bev->bev,
			    BEV_SUSPEND_BW_GROUP);
			EVLOCK_UNLOCK(bev->lock, 0);
		}
	}
	return 0;
}
372c43e99fdSEd Maste
/** Stop writing on every bufferevent in <b>g</b>.
    Caller must hold the group lock.  Always returns 0.
    See bev_group_suspend_reading_ for why we try-lock each member
    instead of blocking on its lock. */
static int
bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g)
{
	/* Needs group lock */
	struct bufferevent_private *bev;
	g->write_suspended = 1;
	g->pending_unsuspend_write = 0;
	LIST_FOREACH(bev, &g->members, rate_limiting->next_in_group) {
		if (EVLOCK_TRY_LOCK_(bev->lock)) {
			bufferevent_suspend_write_(&bev->bev,
			    BEV_SUSPEND_BW_GROUP);
			EVLOCK_UNLOCK(bev->lock, 0);
		}
	}
	return 0;
}
390c43e99fdSEd Maste
/** Timer callback invoked on a single bufferevent with one or more exhausted
    buckets when they are ready to refill. */
static void
bev_refill_callback_(evutil_socket_t fd, short what, void *arg)
{
	unsigned tick;
	struct timeval now;
	struct bufferevent_private *bev = arg;
	int again = 0;
	BEV_LOCK(&bev->bev);
	if (!bev->rate_limiting || !bev->rate_limiting->cfg) {
		/* Rate-limiting was disabled while this timer was pending;
		 * nothing to do. */
		BEV_UNLOCK(&bev->bev);
		return;
	}

	/* First, update the bucket */
	event_base_gettimeofday_cached(bev->bev.ev_base, &now);
	tick = ev_token_bucket_get_tick_(&now,
	    bev->rate_limiting->cfg);
	ev_token_bucket_update_(&bev->rate_limiting->limit,
	    bev->rate_limiting->cfg,
	    tick);

	/* Now unsuspend any read/write operations as appropriate. */
	if ((bev->read_suspended & BEV_SUSPEND_BW)) {
		if (bev->rate_limiting->limit.read_limit > 0)
			bufferevent_unsuspend_read_(&bev->bev, BEV_SUSPEND_BW);
		else
			again = 1;
	}
	if ((bev->write_suspended & BEV_SUSPEND_BW)) {
		if (bev->rate_limiting->limit.write_limit > 0)
			bufferevent_unsuspend_write_(&bev->bev, BEV_SUSPEND_BW);
		else
			again = 1;
	}
	if (again) {
		/* One or more of the buckets may need another refill if they
		   started negative.

		   XXXX if we need to be quiet for more ticks, we should
		   maybe figure out what timeout we really want.
		*/
		/* XXXX Handle event_add failure somehow */
		event_add(&bev->rate_limiting->refill_bucket_event,
		    &bev->rate_limiting->cfg->tick_timeout);
	}
	BEV_UNLOCK(&bev->bev);
}
440c43e99fdSEd Maste
441c43e99fdSEd Maste /** Helper: grab a random element from a bufferevent group.
442c43e99fdSEd Maste *
443c43e99fdSEd Maste * Requires that we hold the lock on the group.
444c43e99fdSEd Maste */
445c43e99fdSEd Maste static struct bufferevent_private *
bev_group_random_element_(struct bufferevent_rate_limit_group * group)446c43e99fdSEd Maste bev_group_random_element_(struct bufferevent_rate_limit_group *group)
447c43e99fdSEd Maste {
448c43e99fdSEd Maste int which;
449c43e99fdSEd Maste struct bufferevent_private *bev;
450c43e99fdSEd Maste
451c43e99fdSEd Maste /* requires group lock */
452c43e99fdSEd Maste
453c43e99fdSEd Maste if (!group->n_members)
454c43e99fdSEd Maste return NULL;
455c43e99fdSEd Maste
456c43e99fdSEd Maste EVUTIL_ASSERT(! LIST_EMPTY(&group->members));
457c43e99fdSEd Maste
458c43e99fdSEd Maste which = evutil_weakrand_range_(&group->weakrand_seed, group->n_members);
459c43e99fdSEd Maste
460c43e99fdSEd Maste bev = LIST_FIRST(&group->members);
461c43e99fdSEd Maste while (which--)
462c43e99fdSEd Maste bev = LIST_NEXT(bev, rate_limiting->next_in_group);
463c43e99fdSEd Maste
464c43e99fdSEd Maste return bev;
465c43e99fdSEd Maste }
466c43e99fdSEd Maste
/** Iterate over the elements of a rate-limiting group 'g' with a random
    starting point, assigning each to the variable 'bev', and executing the
    block 'block'.

    We do this in a half-baked effort to get fairness among group members.
    XXX Round-robin or some kind of priority queue would be even more fair.

    NOTE: this macro expects 'g', 'bev', and 'first' to be declared in the
    caller's scope; the two loops together visit every member exactly once
    (first from the random start to the end, then from the head back up to
    the start).
 */
#define FOREACH_RANDOM_ORDER(block) \
	do { \
		first = bev_group_random_element_(g); \
		for (bev = first; bev != LIST_END(&g->members); \
		    bev = LIST_NEXT(bev, rate_limiting->next_in_group)) { \
			block ; \
		} \
		for (bev = LIST_FIRST(&g->members); bev && bev != first; \
		    bev = LIST_NEXT(bev, rate_limiting->next_in_group)) { \
			block ; \
		} \
	} while (0)
486c43e99fdSEd Maste
/* Resume reading on every member of 'g'.  Caller must hold the group lock.
 * Members whose locks we cannot take right now are left suspended, and
 * pending_unsuspend_read is set so a later pass retries them. */
static void
bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g)
{
	int again = 0;
	struct bufferevent_private *bev, *first;

	g->read_suspended = 0;
	FOREACH_RANDOM_ORDER({
		if (EVLOCK_TRY_LOCK_(bev->lock)) {
			bufferevent_unsuspend_read_(&bev->bev,
			    BEV_SUSPEND_BW_GROUP);
			EVLOCK_UNLOCK(bev->lock, 0);
		} else {
			again = 1;
		}
	});
	g->pending_unsuspend_read = again;
}
505c43e99fdSEd Maste
/* Resume writing on every member of 'g'.  Caller must hold the group lock.
 * Members whose locks we cannot take right now are left suspended, and
 * pending_unsuspend_write is set so a later pass retries them. */
static void
bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g)
{
	int again = 0;
	struct bufferevent_private *bev, *first;
	g->write_suspended = 0;

	FOREACH_RANDOM_ORDER({
		if (EVLOCK_TRY_LOCK_(bev->lock)) {
			bufferevent_unsuspend_write_(&bev->bev,
			    BEV_SUSPEND_BW_GROUP);
			EVLOCK_UNLOCK(bev->lock, 0);
		} else {
			again = 1;
		}
	});
	g->pending_unsuspend_write = again;
}
524c43e99fdSEd Maste
/** Callback invoked every tick to add more elements to the group bucket
    and unsuspend group members as needed.
 */
static void
bev_group_refill_callback_(evutil_socket_t fd, short what, void *arg)
{
	struct bufferevent_rate_limit_group *g = arg;
	unsigned tick;
	struct timeval now;

	event_base_gettimeofday_cached(event_get_base(&g->master_refill_event), &now);

	LOCK_GROUP(g);

	/* Refill the shared group bucket for however many ticks elapsed. */
	tick = ev_token_bucket_get_tick_(&now, &g->rate_limit_cfg);
	ev_token_bucket_update_(&g->rate_limit, &g->rate_limit_cfg, tick);

	/* Unsuspend members if a previous unsuspend pass was incomplete
	 * (pending_unsuspend_*), or if the bucket now holds at least one
	 * member's minimum share. */
	if (g->pending_unsuspend_read ||
	    (g->read_suspended && (g->rate_limit.read_limit >= g->min_share))) {
		bev_group_unsuspend_reading_(g);
	}
	if (g->pending_unsuspend_write ||
	    (g->write_suspended && (g->rate_limit.write_limit >= g->min_share))){
		bev_group_unsuspend_writing_(g);
	}

	/* XXXX Rather than waiting to the next tick to unsuspend stuff
	 * with pending_unsuspend_write/read, we should do it on the
	 * next iteration of the mainloop.
	 */

	UNLOCK_GROUP(g);
}
558c43e99fdSEd Maste
/* Attach the rate-limit configuration 'cfg' to 'bev', or remove rate
 * limiting entirely when 'cfg' is NULL.  Returns 0 on success, -1 on
 * allocation failure. */
int
bufferevent_set_rate_limit(struct bufferevent *bev,
    struct ev_token_bucket_cfg *cfg)
{
	struct bufferevent_private *bevp = BEV_UPCAST(bev);
	int r = -1;
	struct bufferevent_rate_limit *rlim;
	struct timeval now;
	ev_uint32_t tick;
	int reinit = 0, suspended = 0;
	/* XXX reference-count cfg */

	BEV_LOCK(bev);

	if (cfg == NULL) {
		/* Removing the limit: clear the config, lift any
		 * bandwidth suspensions, and cancel the refill timer. */
		if (bevp->rate_limiting) {
			rlim = bevp->rate_limiting;
			rlim->cfg = NULL;
			bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW);
			bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW);
			if (event_initialized(&rlim->refill_bucket_event))
				event_del(&rlim->refill_bucket_event);
		}
		r = 0;
		goto done;
	}

	event_base_gettimeofday_cached(bev->ev_base, &now);
	tick = ev_token_bucket_get_tick_(&now, cfg);

	if (bevp->rate_limiting && bevp->rate_limiting->cfg == cfg) {
		/* no-op */
		r = 0;
		goto done;
	}
	if (bevp->rate_limiting == NULL) {
		/* First limit ever set on this bufferevent: allocate the
		 * rate-limit state. */
		rlim = mm_calloc(1, sizeof(struct bufferevent_rate_limit));
		if (!rlim)
			goto done;
		bevp->rate_limiting = rlim;
	} else {
		rlim = bevp->rate_limiting;
	}
	/* A non-NULL old cfg means we're replacing an existing limit,
	 * which changes how the bucket is (re)initialized below. */
	reinit = rlim->cfg != NULL;

	rlim->cfg = cfg;
	ev_token_bucket_init_(&rlim->limit, cfg, tick, reinit);

	if (reinit) {
		EVUTIL_ASSERT(event_initialized(&rlim->refill_bucket_event));
		event_del(&rlim->refill_bucket_event);
	}
	event_assign(&rlim->refill_bucket_event, bev->ev_base,
	    -1, EV_FINALIZE, bev_refill_callback_, bevp);

	/* Suspend whichever directions start with an empty bucket, and
	 * resume whichever have tokens available. */
	if (rlim->limit.read_limit > 0) {
		bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW);
	} else {
		bufferevent_suspend_read_(bev, BEV_SUSPEND_BW);
		suspended=1;
	}
	if (rlim->limit.write_limit > 0) {
		bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW);
	} else {
		bufferevent_suspend_write_(bev, BEV_SUSPEND_BW);
		suspended = 1;
	}

	/* If anything is suspended, arm the refill timer for the next tick. */
	if (suspended)
		event_add(&rlim->refill_bucket_event, &cfg->tick_timeout);

	r = 0;

done:
	BEV_UNLOCK(bev);
	return r;
}
636c43e99fdSEd Maste
637c43e99fdSEd Maste struct bufferevent_rate_limit_group *
bufferevent_rate_limit_group_new(struct event_base * base,const struct ev_token_bucket_cfg * cfg)638c43e99fdSEd Maste bufferevent_rate_limit_group_new(struct event_base *base,
639c43e99fdSEd Maste const struct ev_token_bucket_cfg *cfg)
640c43e99fdSEd Maste {
641c43e99fdSEd Maste struct bufferevent_rate_limit_group *g;
642c43e99fdSEd Maste struct timeval now;
643c43e99fdSEd Maste ev_uint32_t tick;
644c43e99fdSEd Maste
645c43e99fdSEd Maste event_base_gettimeofday_cached(base, &now);
646c43e99fdSEd Maste tick = ev_token_bucket_get_tick_(&now, cfg);
647c43e99fdSEd Maste
648c43e99fdSEd Maste g = mm_calloc(1, sizeof(struct bufferevent_rate_limit_group));
649c43e99fdSEd Maste if (!g)
650c43e99fdSEd Maste return NULL;
651c43e99fdSEd Maste memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg));
652c43e99fdSEd Maste LIST_INIT(&g->members);
653c43e99fdSEd Maste
654c43e99fdSEd Maste ev_token_bucket_init_(&g->rate_limit, cfg, tick, 0);
655c43e99fdSEd Maste
656c43e99fdSEd Maste event_assign(&g->master_refill_event, base, -1, EV_PERSIST|EV_FINALIZE,
657c43e99fdSEd Maste bev_group_refill_callback_, g);
658c43e99fdSEd Maste /*XXXX handle event_add failure */
659c43e99fdSEd Maste event_add(&g->master_refill_event, &cfg->tick_timeout);
660c43e99fdSEd Maste
661c43e99fdSEd Maste EVTHREAD_ALLOC_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
662c43e99fdSEd Maste
663c43e99fdSEd Maste bufferevent_rate_limit_group_set_min_share(g, 64);
664c43e99fdSEd Maste
665c43e99fdSEd Maste evutil_weakrand_seed_(&g->weakrand_seed,
666c43e99fdSEd Maste (ev_uint32_t) ((now.tv_sec + now.tv_usec) + (ev_intptr_t)g));
667c43e99fdSEd Maste
668c43e99fdSEd Maste return g;
669c43e99fdSEd Maste }
670c43e99fdSEd Maste
671c43e99fdSEd Maste int
bufferevent_rate_limit_group_set_cfg(struct bufferevent_rate_limit_group * g,const struct ev_token_bucket_cfg * cfg)672c43e99fdSEd Maste bufferevent_rate_limit_group_set_cfg(
673c43e99fdSEd Maste struct bufferevent_rate_limit_group *g,
674c43e99fdSEd Maste const struct ev_token_bucket_cfg *cfg)
675c43e99fdSEd Maste {
676c43e99fdSEd Maste int same_tick;
677c43e99fdSEd Maste if (!g || !cfg)
678c43e99fdSEd Maste return -1;
679c43e99fdSEd Maste
680c43e99fdSEd Maste LOCK_GROUP(g);
681c43e99fdSEd Maste same_tick = evutil_timercmp(
682c43e99fdSEd Maste &g->rate_limit_cfg.tick_timeout, &cfg->tick_timeout, ==);
683c43e99fdSEd Maste memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg));
684c43e99fdSEd Maste
685c43e99fdSEd Maste if (g->rate_limit.read_limit > (ev_ssize_t)cfg->read_maximum)
686c43e99fdSEd Maste g->rate_limit.read_limit = cfg->read_maximum;
687c43e99fdSEd Maste if (g->rate_limit.write_limit > (ev_ssize_t)cfg->write_maximum)
688c43e99fdSEd Maste g->rate_limit.write_limit = cfg->write_maximum;
689c43e99fdSEd Maste
690c43e99fdSEd Maste if (!same_tick) {
691c43e99fdSEd Maste /* This can cause a hiccup in the schedule */
692c43e99fdSEd Maste event_add(&g->master_refill_event, &cfg->tick_timeout);
693c43e99fdSEd Maste }
694c43e99fdSEd Maste
695c43e99fdSEd Maste /* The new limits might force us to adjust min_share differently. */
696c43e99fdSEd Maste bufferevent_rate_limit_group_set_min_share(g, g->configured_min_share);
697c43e99fdSEd Maste
698c43e99fdSEd Maste UNLOCK_GROUP(g);
699c43e99fdSEd Maste return 0;
700c43e99fdSEd Maste }
701c43e99fdSEd Maste
702c43e99fdSEd Maste int
bufferevent_rate_limit_group_set_min_share(struct bufferevent_rate_limit_group * g,size_t share)703c43e99fdSEd Maste bufferevent_rate_limit_group_set_min_share(
704c43e99fdSEd Maste struct bufferevent_rate_limit_group *g,
705c43e99fdSEd Maste size_t share)
706c43e99fdSEd Maste {
707c43e99fdSEd Maste if (share > EV_SSIZE_MAX)
708c43e99fdSEd Maste return -1;
709c43e99fdSEd Maste
710c43e99fdSEd Maste g->configured_min_share = share;
711c43e99fdSEd Maste
712c43e99fdSEd Maste /* Can't set share to less than the one-tick maximum. IOW, at steady
713c43e99fdSEd Maste * state, at least one connection can go per tick. */
714c43e99fdSEd Maste if (share > g->rate_limit_cfg.read_rate)
715c43e99fdSEd Maste share = g->rate_limit_cfg.read_rate;
716c43e99fdSEd Maste if (share > g->rate_limit_cfg.write_rate)
717c43e99fdSEd Maste share = g->rate_limit_cfg.write_rate;
718c43e99fdSEd Maste
719c43e99fdSEd Maste g->min_share = share;
720c43e99fdSEd Maste return 0;
721c43e99fdSEd Maste }
722c43e99fdSEd Maste
/* Release a rate-limit group.  The group must already be empty: every
 * member has to be detached with bufferevent_remove_from_rate_limit_group()
 * first, or the assertion below fires. */
void
bufferevent_rate_limit_group_free(struct bufferevent_rate_limit_group *g)
{
	LOCK_GROUP(g);
	EVUTIL_ASSERT(0 == g->n_members);
	/* Cancel the periodic refill timer before its storage goes away. */
	event_del(&g->master_refill_event);
	UNLOCK_GROUP(g);
	/* The lock must be released before it can be freed. */
	EVTHREAD_FREE_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(g);
}
733c43e99fdSEd Maste
/* Attach 'bev' to rate-limit group 'g'.  Allocates the per-bufferevent
 * rate-limit state on first use, moves the bufferevent out of any previous
 * group, and suspends it immediately if the new group is currently out of
 * budget.  Returns 0 on success (including the no-op case where bev is
 * already in g), -1 on allocation failure. */
int
bufferevent_add_to_rate_limit_group(struct bufferevent *bev,
    struct bufferevent_rate_limit_group *g)
{
	int wsuspend, rsuspend;
	struct bufferevent_private *bevp = BEV_UPCAST(bev);
	BEV_LOCK(bev);

	/* Lazily allocate the rate-limit state: a bufferevent that has never
	 * had an individual or group limit carries none. */
	if (!bevp->rate_limiting) {
		struct bufferevent_rate_limit *rlim;
		rlim = mm_calloc(1, sizeof(struct bufferevent_rate_limit));
		if (!rlim) {
			BEV_UNLOCK(bev);
			return -1;
		}
		event_assign(&rlim->refill_bucket_event, bev->ev_base,
		    -1, EV_FINALIZE, bev_refill_callback_, bevp);
		bevp->rate_limiting = rlim;
	}

	/* Already a member of this very group: nothing to do. */
	if (bevp->rate_limiting->group == g) {
		BEV_UNLOCK(bev);
		return 0;
	}
	/* A bufferevent belongs to at most one group at a time. */
	if (bevp->rate_limiting->group)
		bufferevent_remove_from_rate_limit_group(bev);

	LOCK_GROUP(g);
	bevp->rate_limiting->group = g;
	++g->n_members;
	LIST_INSERT_HEAD(&g->members, bevp, rate_limiting->next_in_group);

	/* Snapshot the group's suspension state while holding its lock; the
	 * suspend calls below need only the bufferevent lock. */
	rsuspend = g->read_suspended;
	wsuspend = g->write_suspended;

	UNLOCK_GROUP(g);

	/* If the group is currently over budget, the new member must start
	 * out suspended like everyone else. */
	if (rsuspend)
		bufferevent_suspend_read_(bev, BEV_SUSPEND_BW_GROUP);
	if (wsuspend)
		bufferevent_suspend_write_(bev, BEV_SUSPEND_BW_GROUP);

	BEV_UNLOCK(bev);
	return 0;
}
779c43e99fdSEd Maste
/* Public entry point: detach 'bev' from its group (if any) and lift any
 * group-imposed read/write suspensions.  Always returns 0. */
int
bufferevent_remove_from_rate_limit_group(struct bufferevent *bev)
{
	return bufferevent_remove_from_rate_limit_group_internal_(bev, 1);
}
785c43e99fdSEd Maste
/* Detach 'bev' from its rate-limit group, if it has one.  When 'unsuspend'
 * is nonzero, also clear the group-bandwidth suspension flags so I/O can
 * resume; internal callers that are tearing the bufferevent down pass 0
 * and skip that step.  Always returns 0. */
int
bufferevent_remove_from_rate_limit_group_internal_(struct bufferevent *bev,
    int unsuspend)
{
	struct bufferevent_private *bevp = BEV_UPCAST(bev);
	BEV_LOCK(bev);
	if (bevp->rate_limiting && bevp->rate_limiting->group) {
		struct bufferevent_rate_limit_group *g =
		    bevp->rate_limiting->group;
		LOCK_GROUP(g);
		bevp->rate_limiting->group = NULL;
		--g->n_members;
		LIST_REMOVE(bevp, rate_limiting->next_in_group);
		UNLOCK_GROUP(g);
	}
	if (unsuspend) {
		bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW_GROUP);
		bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW_GROUP);
	}
	BEV_UNLOCK(bev);
	return 0;
}
808c43e99fdSEd Maste
809c43e99fdSEd Maste /* ===
810c43e99fdSEd Maste * API functions to expose rate limits.
811c43e99fdSEd Maste *
812c43e99fdSEd Maste * Don't use these from inside Libevent; they're meant to be for use by
813c43e99fdSEd Maste * the program.
814c43e99fdSEd Maste * === */
815c43e99fdSEd Maste
/* Mostly you don't want to use this function from inside libevent;
 * bufferevent_get_read_max_() is more likely what you want. */
818c43e99fdSEd Maste ev_ssize_t
bufferevent_get_read_limit(struct bufferevent * bev)819c43e99fdSEd Maste bufferevent_get_read_limit(struct bufferevent *bev)
820c43e99fdSEd Maste {
821c43e99fdSEd Maste ev_ssize_t r;
822c43e99fdSEd Maste struct bufferevent_private *bevp;
823c43e99fdSEd Maste BEV_LOCK(bev);
824c43e99fdSEd Maste bevp = BEV_UPCAST(bev);
825c43e99fdSEd Maste if (bevp->rate_limiting && bevp->rate_limiting->cfg) {
826c43e99fdSEd Maste bufferevent_update_buckets(bevp);
827c43e99fdSEd Maste r = bevp->rate_limiting->limit.read_limit;
828c43e99fdSEd Maste } else {
829c43e99fdSEd Maste r = EV_SSIZE_MAX;
830c43e99fdSEd Maste }
831c43e99fdSEd Maste BEV_UNLOCK(bev);
832c43e99fdSEd Maste return r;
833c43e99fdSEd Maste }
834c43e99fdSEd Maste
/* Mostly you don't want to use this function from inside libevent;
 * bufferevent_get_write_max_() is more likely what you want. */
837c43e99fdSEd Maste ev_ssize_t
bufferevent_get_write_limit(struct bufferevent * bev)838c43e99fdSEd Maste bufferevent_get_write_limit(struct bufferevent *bev)
839c43e99fdSEd Maste {
840c43e99fdSEd Maste ev_ssize_t r;
841c43e99fdSEd Maste struct bufferevent_private *bevp;
842c43e99fdSEd Maste BEV_LOCK(bev);
843c43e99fdSEd Maste bevp = BEV_UPCAST(bev);
844c43e99fdSEd Maste if (bevp->rate_limiting && bevp->rate_limiting->cfg) {
845c43e99fdSEd Maste bufferevent_update_buckets(bevp);
846c43e99fdSEd Maste r = bevp->rate_limiting->limit.write_limit;
847c43e99fdSEd Maste } else {
848c43e99fdSEd Maste r = EV_SSIZE_MAX;
849c43e99fdSEd Maste }
850c43e99fdSEd Maste BEV_UNLOCK(bev);
851c43e99fdSEd Maste return r;
852c43e99fdSEd Maste }
853c43e99fdSEd Maste
854c43e99fdSEd Maste int
bufferevent_set_max_single_read(struct bufferevent * bev,size_t size)855c43e99fdSEd Maste bufferevent_set_max_single_read(struct bufferevent *bev, size_t size)
856c43e99fdSEd Maste {
857c43e99fdSEd Maste struct bufferevent_private *bevp;
858c43e99fdSEd Maste BEV_LOCK(bev);
859c43e99fdSEd Maste bevp = BEV_UPCAST(bev);
860c43e99fdSEd Maste if (size == 0 || size > EV_SSIZE_MAX)
861c43e99fdSEd Maste bevp->max_single_read = MAX_SINGLE_READ_DEFAULT;
862c43e99fdSEd Maste else
863c43e99fdSEd Maste bevp->max_single_read = size;
864c43e99fdSEd Maste BEV_UNLOCK(bev);
865c43e99fdSEd Maste return 0;
866c43e99fdSEd Maste }
867c43e99fdSEd Maste
868c43e99fdSEd Maste int
bufferevent_set_max_single_write(struct bufferevent * bev,size_t size)869c43e99fdSEd Maste bufferevent_set_max_single_write(struct bufferevent *bev, size_t size)
870c43e99fdSEd Maste {
871c43e99fdSEd Maste struct bufferevent_private *bevp;
872c43e99fdSEd Maste BEV_LOCK(bev);
873c43e99fdSEd Maste bevp = BEV_UPCAST(bev);
874c43e99fdSEd Maste if (size == 0 || size > EV_SSIZE_MAX)
875c43e99fdSEd Maste bevp->max_single_write = MAX_SINGLE_WRITE_DEFAULT;
876c43e99fdSEd Maste else
877c43e99fdSEd Maste bevp->max_single_write = size;
878c43e99fdSEd Maste BEV_UNLOCK(bev);
879c43e99fdSEd Maste return 0;
880c43e99fdSEd Maste }
881c43e99fdSEd Maste
882c43e99fdSEd Maste ev_ssize_t
bufferevent_get_max_single_read(struct bufferevent * bev)883c43e99fdSEd Maste bufferevent_get_max_single_read(struct bufferevent *bev)
884c43e99fdSEd Maste {
885c43e99fdSEd Maste ev_ssize_t r;
886c43e99fdSEd Maste
887c43e99fdSEd Maste BEV_LOCK(bev);
888c43e99fdSEd Maste r = BEV_UPCAST(bev)->max_single_read;
889c43e99fdSEd Maste BEV_UNLOCK(bev);
890c43e99fdSEd Maste return r;
891c43e99fdSEd Maste }
892c43e99fdSEd Maste
893c43e99fdSEd Maste ev_ssize_t
bufferevent_get_max_single_write(struct bufferevent * bev)894c43e99fdSEd Maste bufferevent_get_max_single_write(struct bufferevent *bev)
895c43e99fdSEd Maste {
896c43e99fdSEd Maste ev_ssize_t r;
897c43e99fdSEd Maste
898c43e99fdSEd Maste BEV_LOCK(bev);
899c43e99fdSEd Maste r = BEV_UPCAST(bev)->max_single_write;
900c43e99fdSEd Maste BEV_UNLOCK(bev);
901c43e99fdSEd Maste return r;
902c43e99fdSEd Maste }
903c43e99fdSEd Maste
/* Return how many bytes 'bev' may read right now, combining its
 * individual limit, its group's limit, and the single-read cap (as
 * computed by bufferevent_get_read_max_()). */
ev_ssize_t
bufferevent_get_max_to_read(struct bufferevent *bev)
{
	ev_ssize_t r;
	BEV_LOCK(bev);
	r = bufferevent_get_read_max_(BEV_UPCAST(bev));
	BEV_UNLOCK(bev);
	return r;
}
913c43e99fdSEd Maste
/* Return how many bytes 'bev' may write right now, combining its
 * individual limit, its group's limit, and the single-write cap (as
 * computed by bufferevent_get_write_max_()). */
ev_ssize_t
bufferevent_get_max_to_write(struct bufferevent *bev)
{
	ev_ssize_t r;
	BEV_LOCK(bev);
	r = bufferevent_get_write_max_(BEV_UPCAST(bev));
	BEV_UNLOCK(bev);
	return r;
}
923c43e99fdSEd Maste
924c43e99fdSEd Maste const struct ev_token_bucket_cfg *
bufferevent_get_token_bucket_cfg(const struct bufferevent * bev)925c43e99fdSEd Maste bufferevent_get_token_bucket_cfg(const struct bufferevent *bev) {
926c43e99fdSEd Maste struct bufferevent_private *bufev_private = BEV_UPCAST(bev);
927c43e99fdSEd Maste struct ev_token_bucket_cfg *cfg;
928c43e99fdSEd Maste
929c43e99fdSEd Maste BEV_LOCK(bev);
930c43e99fdSEd Maste
931c43e99fdSEd Maste if (bufev_private->rate_limiting) {
932c43e99fdSEd Maste cfg = bufev_private->rate_limiting->cfg;
933c43e99fdSEd Maste } else {
934c43e99fdSEd Maste cfg = NULL;
935c43e99fdSEd Maste }
936c43e99fdSEd Maste
937c43e99fdSEd Maste BEV_UNLOCK(bev);
938c43e99fdSEd Maste
939c43e99fdSEd Maste return cfg;
940c43e99fdSEd Maste }
941c43e99fdSEd Maste
/* Mostly you don't want to use this function from inside libevent;
 * bufferevent_get_read_max_() is more likely what you want. */
/* Return the group's current read bucket level, in bytes (may be
 * negative if the group has overdrawn its budget this tick). */
ev_ssize_t
bufferevent_rate_limit_group_get_read_limit(
    struct bufferevent_rate_limit_group *grp)
{
	ev_ssize_t r;
	LOCK_GROUP(grp);
	r = grp->rate_limit.read_limit;
	UNLOCK_GROUP(grp);
	return r;
}
954c43e99fdSEd Maste
955c43e99fdSEd Maste /* Mostly you don't want to use this function from inside libevent;
956c43e99fdSEd Maste * bufferevent_get_write_max_() is more likely what you want. */
/* Return the group's current write bucket level, in bytes (may be
 * negative if the group has overdrawn its budget this tick). */
ev_ssize_t
bufferevent_rate_limit_group_get_write_limit(
    struct bufferevent_rate_limit_group *grp)
{
	ev_ssize_t r;
	LOCK_GROUP(grp);
	r = grp->rate_limit.write_limit;
	UNLOCK_GROUP(grp);
	return r;
}
967c43e99fdSEd Maste
/* Externally charge 'decr' bytes against bev's individual read bucket
 * (a negative 'decr' refunds bytes).  Suspends or resumes reading when
 * the bucket crosses zero.  Requires that an individual rate limit has
 * been configured.  Returns 0 on success, -1 if scheduling the refill
 * event fails. */
int
bufferevent_decrement_read_limit(struct bufferevent *bev, ev_ssize_t decr)
{
	int r = 0;
	ev_ssize_t old_limit, new_limit;
	struct bufferevent_private *bevp;
	BEV_LOCK(bev);
	bevp = BEV_UPCAST(bev);
	EVUTIL_ASSERT(bevp->rate_limiting && bevp->rate_limiting->cfg);
	old_limit = bevp->rate_limiting->limit.read_limit;

	new_limit = (bevp->rate_limiting->limit.read_limit -= decr);
	if (old_limit > 0 && new_limit <= 0) {
		/* Bucket just ran dry: stop reading and schedule a refill. */
		bufferevent_suspend_read_(bev, BEV_SUSPEND_BW);
		if (event_add(&bevp->rate_limiting->refill_bucket_event,
			&bevp->rate_limiting->cfg->tick_timeout) < 0)
			r = -1;
	} else if (old_limit <= 0 && new_limit > 0) {
		/* Bucket refilled externally: only cancel the shared refill
		 * event if the write side isn't still waiting on it. */
		if (!(bevp->write_suspended & BEV_SUSPEND_BW))
			event_del(&bevp->rate_limiting->refill_bucket_event);
		bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW);
	}

	BEV_UNLOCK(bev);
	return r;
}
994c43e99fdSEd Maste
/* Externally charge 'decr' bytes against bev's individual write bucket
 * (a negative 'decr' refunds bytes).  Suspends or resumes writing when
 * the bucket crosses zero.  Requires that an individual rate limit has
 * been configured.  Returns 0 on success, -1 if scheduling the refill
 * event fails. */
int
bufferevent_decrement_write_limit(struct bufferevent *bev, ev_ssize_t decr)
{
	/* XXXX this is mostly copy-and-paste from
	 * bufferevent_decrement_read_limit */
	int r = 0;
	ev_ssize_t old_limit, new_limit;
	struct bufferevent_private *bevp;
	BEV_LOCK(bev);
	bevp = BEV_UPCAST(bev);
	EVUTIL_ASSERT(bevp->rate_limiting && bevp->rate_limiting->cfg);
	old_limit = bevp->rate_limiting->limit.write_limit;

	new_limit = (bevp->rate_limiting->limit.write_limit -= decr);
	if (old_limit > 0 && new_limit <= 0) {
		/* Bucket just ran dry: stop writing and schedule a refill. */
		bufferevent_suspend_write_(bev, BEV_SUSPEND_BW);
		if (event_add(&bevp->rate_limiting->refill_bucket_event,
			&bevp->rate_limiting->cfg->tick_timeout) < 0)
			r = -1;
	} else if (old_limit <= 0 && new_limit > 0) {
		/* Bucket refilled externally: only cancel the shared refill
		 * event if the read side isn't still waiting on it. */
		if (!(bevp->read_suspended & BEV_SUSPEND_BW))
			event_del(&bevp->rate_limiting->refill_bucket_event);
		bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW);
	}

	BEV_UNLOCK(bev);
	return r;
}
1023c43e99fdSEd Maste
1024c43e99fdSEd Maste int
bufferevent_rate_limit_group_decrement_read(struct bufferevent_rate_limit_group * grp,ev_ssize_t decr)1025c43e99fdSEd Maste bufferevent_rate_limit_group_decrement_read(
1026c43e99fdSEd Maste struct bufferevent_rate_limit_group *grp, ev_ssize_t decr)
1027c43e99fdSEd Maste {
1028c43e99fdSEd Maste int r = 0;
1029c43e99fdSEd Maste ev_ssize_t old_limit, new_limit;
1030c43e99fdSEd Maste LOCK_GROUP(grp);
1031c43e99fdSEd Maste old_limit = grp->rate_limit.read_limit;
1032c43e99fdSEd Maste new_limit = (grp->rate_limit.read_limit -= decr);
1033c43e99fdSEd Maste
1034c43e99fdSEd Maste if (old_limit > 0 && new_limit <= 0) {
1035c43e99fdSEd Maste bev_group_suspend_reading_(grp);
1036c43e99fdSEd Maste } else if (old_limit <= 0 && new_limit > 0) {
1037c43e99fdSEd Maste bev_group_unsuspend_reading_(grp);
1038c43e99fdSEd Maste }
1039c43e99fdSEd Maste
1040c43e99fdSEd Maste UNLOCK_GROUP(grp);
1041c43e99fdSEd Maste return r;
1042c43e99fdSEd Maste }
1043c43e99fdSEd Maste
1044c43e99fdSEd Maste int
bufferevent_rate_limit_group_decrement_write(struct bufferevent_rate_limit_group * grp,ev_ssize_t decr)1045c43e99fdSEd Maste bufferevent_rate_limit_group_decrement_write(
1046c43e99fdSEd Maste struct bufferevent_rate_limit_group *grp, ev_ssize_t decr)
1047c43e99fdSEd Maste {
1048c43e99fdSEd Maste int r = 0;
1049c43e99fdSEd Maste ev_ssize_t old_limit, new_limit;
1050c43e99fdSEd Maste LOCK_GROUP(grp);
1051c43e99fdSEd Maste old_limit = grp->rate_limit.write_limit;
1052c43e99fdSEd Maste new_limit = (grp->rate_limit.write_limit -= decr);
1053c43e99fdSEd Maste
1054c43e99fdSEd Maste if (old_limit > 0 && new_limit <= 0) {
1055c43e99fdSEd Maste bev_group_suspend_writing_(grp);
1056c43e99fdSEd Maste } else if (old_limit <= 0 && new_limit > 0) {
1057c43e99fdSEd Maste bev_group_unsuspend_writing_(grp);
1058c43e99fdSEd Maste }
1059c43e99fdSEd Maste
1060c43e99fdSEd Maste UNLOCK_GROUP(grp);
1061c43e99fdSEd Maste return r;
1062c43e99fdSEd Maste }
1063c43e99fdSEd Maste
1064c43e99fdSEd Maste void
bufferevent_rate_limit_group_get_totals(struct bufferevent_rate_limit_group * grp,ev_uint64_t * total_read_out,ev_uint64_t * total_written_out)1065c43e99fdSEd Maste bufferevent_rate_limit_group_get_totals(struct bufferevent_rate_limit_group *grp,
1066c43e99fdSEd Maste ev_uint64_t *total_read_out, ev_uint64_t *total_written_out)
1067c43e99fdSEd Maste {
1068c43e99fdSEd Maste EVUTIL_ASSERT(grp != NULL);
1069c43e99fdSEd Maste if (total_read_out)
1070c43e99fdSEd Maste *total_read_out = grp->total_read;
1071c43e99fdSEd Maste if (total_written_out)
1072c43e99fdSEd Maste *total_written_out = grp->total_written;
1073c43e99fdSEd Maste }
1074c43e99fdSEd Maste
1075c43e99fdSEd Maste void
bufferevent_rate_limit_group_reset_totals(struct bufferevent_rate_limit_group * grp)1076c43e99fdSEd Maste bufferevent_rate_limit_group_reset_totals(struct bufferevent_rate_limit_group *grp)
1077c43e99fdSEd Maste {
1078c43e99fdSEd Maste grp->total_read = grp->total_written = 0;
1079c43e99fdSEd Maste }
1080c43e99fdSEd Maste
/* Initialize the rate-limit fields of a freshly constructed
 * bufferevent_private: no limit attached yet, default caps on single
 * read/write operations.  Always returns 0. */
int
bufferevent_ratelim_init_(struct bufferevent_private *bev)
{
	bev->rate_limiting = NULL;
	bev->max_single_read = MAX_SINGLE_READ_DEFAULT;
	bev->max_single_write = MAX_SINGLE_WRITE_DEFAULT;

	return 0;
}
1090