Lines matching refs:g — cross-reference hits for the identifier g in libevent's bufferevent rate-limiting code (bufferevent_ratelim.c). The leading number on each hit is the source line number; the trailing note names the enclosing function and whether g is a local or an argument there.
148 struct timeval g; in ev_token_bucket_cfg_new() local
150 g.tv_sec = 1; in ev_token_bucket_cfg_new()
151 g.tv_usec = 0; in ev_token_bucket_cfg_new()
152 tick_len = &g; in ev_token_bucket_cfg_new()
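
These three lines are the fallback inside ev_token_bucket_cfg_new(): when the caller passes NULL for tick_len, a local one-second struct timeval is substituted, so the configured rates are read as bytes per second. A minimal caller-side sketch, with illustrative sizes that are not from the source:

    #include <event2/bufferevent.h>

    /* NULL tick_len falls back to the 1-second tick built above, so 4096
     * here means "4096 bytes per second", with an 8192-byte burst. */
    struct ev_token_bucket_cfg *cfg =
        ev_token_bucket_cfg_new(4096, 8192, 4096, 8192, NULL);
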
185 #define LOCK_GROUP(g) EVLOCK_LOCK((g)->lock, 0) argument
186 #define UNLOCK_GROUP(g) EVLOCK_UNLOCK((g)->lock, 0) argument
188 static int bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g);
189 static int bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g);
190 static void bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g);
191 static void bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g);
206 #define GROUP_SUSPENDED(g) \ in bufferevent_get_rlim_max_() argument
207 (is_write ? (g)->write_suspended : (g)->read_suspended) in bufferevent_get_rlim_max_()
229 struct bufferevent_rate_limit_group *g = in bufferevent_get_rlim_max_() local
232 LOCK_GROUP(g); in bufferevent_get_rlim_max_()
233 if (GROUP_SUSPENDED(g)) { in bufferevent_get_rlim_max_()
247 share = LIM(g->rate_limit) / g->n_members; in bufferevent_get_rlim_max_()
248 if (share < g->min_share) in bufferevent_get_rlim_max_()
249 share = g->min_share; in bufferevent_get_rlim_max_()
251 UNLOCK_GROUP(g); in bufferevent_get_rlim_max_()
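
Inside bufferevent_get_rlim_max_(), GROUP_SUSPENDED() picks the read or write flag depending on direction, and a member's allowance is the group's remaining budget split evenly across n_members, floored at min_share so a member always gets a usefully sized transfer even in a large group. A hedged sketch of just that arithmetic; the variable names are illustrative locals, not the source's:

    /* Even split of the group's remaining tokens, never below min_share. */
    ev_ssize_t share = group_limit / n_members;
    if (share < min_share)
        share = min_share;
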
350 bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g) in bev_group_suspend_reading_() argument
354 g->read_suspended = 1; in bev_group_suspend_reading_()
355 g->pending_unsuspend_read = 0; in bev_group_suspend_reading_()
363 LIST_FOREACH(bev, &g->members, rate_limiting->next_in_group) { in bev_group_suspend_reading_()
375 bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g) in bev_group_suspend_writing_() argument
379 g->write_suspended = 1; in bev_group_suspend_writing_()
380 g->pending_unsuspend_write = 0; in bev_group_suspend_writing_()
381 LIST_FOREACH(bev, &g->members, rate_limiting->next_in_group) { in bev_group_suspend_writing_()
476 first = bev_group_random_element_(g); \
477 for (bev = first; bev != LIST_END(&g->members); \
481 for (bev = LIST_FIRST(&g->members); bev && bev != first; \
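
Lines 476-481 are a random-start traversal of the member list: iteration begins at a randomly chosen member, runs to the end of the list, then wraps around from the head back to the starting element, so the same bufferevents are not always served first when tokens are scarce. An illustrative wrap-around walk over a plain array (handle_member, members, and n are hypothetical; the source iterates its LIST using the weak RNG seeded at lines 665-666):

    int start = rand() % n;               /* stand-in for the source's weak RNG */
    for (int i = 0; i < n; ++i)
        handle_member(members[(start + i) % n]);
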
488 bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g) in bev_group_unsuspend_reading_() argument
493 g->read_suspended = 0; in bev_group_unsuspend_reading_()
503 g->pending_unsuspend_read = again; in bev_group_unsuspend_reading_()
507 bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g) in bev_group_unsuspend_writing_() argument
511 g->write_suspended = 0; in bev_group_unsuspend_writing_()
522 g->pending_unsuspend_write = again; in bev_group_unsuspend_writing_()
531 struct bufferevent_rate_limit_group *g = arg; in bev_group_refill_callback_() local
535 event_base_gettimeofday_cached(event_get_base(&g->master_refill_event), &now); in bev_group_refill_callback_()
537 LOCK_GROUP(g); in bev_group_refill_callback_()
539 tick = ev_token_bucket_get_tick_(&now, &g->rate_limit_cfg); in bev_group_refill_callback_()
540 ev_token_bucket_update_(&g->rate_limit, &g->rate_limit_cfg, tick); in bev_group_refill_callback_()
542 if (g->pending_unsuspend_read || in bev_group_refill_callback_()
543 (g->read_suspended && (g->rate_limit.read_limit >= g->min_share))) { in bev_group_refill_callback_()
544 bev_group_unsuspend_reading_(g); in bev_group_refill_callback_()
546 if (g->pending_unsuspend_write || in bev_group_refill_callback_()
547 (g->write_suspended && (g->rate_limit.write_limit >= g->min_share))){ in bev_group_refill_callback_()
548 bev_group_unsuspend_writing_(g); in bev_group_refill_callback_()
556 UNLOCK_GROUP(g); in bev_group_refill_callback_()
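
bev_group_refill_callback_() fires from the group's persistent refill timer: it reads the event base's cached time, converts it to a tick count, credits the shared bucket for the elapsed ticks, and lifts the group suspension when either an earlier unsuspend pass was left unfinished (pending_unsuspend_*) or the refilled balance has reached at least one min_share. A condensed, read-side-only restatement of that decision, locking omitted:

    tick = ev_token_bucket_get_tick_(&now, &g->rate_limit_cfg);
    ev_token_bucket_update_(&g->rate_limit, &g->rate_limit_cfg, tick);
    if (g->pending_unsuspend_read ||
        (g->read_suspended && g->rate_limit.read_limit >= g->min_share))
            bev_group_unsuspend_reading_(g);
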
641 struct bufferevent_rate_limit_group *g; in bufferevent_rate_limit_group_new() local
648 g = mm_calloc(1, sizeof(struct bufferevent_rate_limit_group)); in bufferevent_rate_limit_group_new()
649 if (!g) in bufferevent_rate_limit_group_new()
651 memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg)); in bufferevent_rate_limit_group_new()
652 LIST_INIT(&g->members); in bufferevent_rate_limit_group_new()
654 ev_token_bucket_init_(&g->rate_limit, cfg, tick, 0); in bufferevent_rate_limit_group_new()
656 event_assign(&g->master_refill_event, base, -1, EV_PERSIST|EV_FINALIZE, in bufferevent_rate_limit_group_new()
657 bev_group_refill_callback_, g); in bufferevent_rate_limit_group_new()
659 event_add(&g->master_refill_event, &cfg->tick_timeout); in bufferevent_rate_limit_group_new()
661 EVTHREAD_ALLOC_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE); in bufferevent_rate_limit_group_new()
663 bufferevent_rate_limit_group_set_min_share(g, 64); in bufferevent_rate_limit_group_new()
665 evutil_weakrand_seed_(&g->weakrand_seed, in bufferevent_rate_limit_group_new()
666 (ev_uint32_t) ((now.tv_sec + now.tv_usec) + (ev_intptr_t)g)); in bufferevent_rate_limit_group_new()
668 return g; in bufferevent_rate_limit_group_new()
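
bufferevent_rate_limit_group_new() copies the cfg (line 651), initializes the shared bucket, arms a persistent refill timer with the cfg's tick_timeout, allocates a recursive lock, defaults min_share to 64 bytes, and seeds the weak RNG used for the random-order traversal. A caller-side sketch with illustrative rates; because the cfg is copied, freeing it immediately afterwards should be safe:

    #include <event2/event.h>
    #include <event2/bufferevent.h>

    static struct bufferevent_rate_limit_group *
    make_group(struct event_base *base)
    {
        /* Whole group shares 1 MiB/s each way; NULL tick_len = 1-second tick. */
        struct ev_token_bucket_cfg *cfg =
            ev_token_bucket_cfg_new(1048576, 1048576, 1048576, 1048576, NULL);
        struct bufferevent_rate_limit_group *grp =
            bufferevent_rate_limit_group_new(base, cfg);
        ev_token_bucket_cfg_free(cfg);   /* line 651 copied it into the group */
        return grp;
    }
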
673 struct bufferevent_rate_limit_group *g, in bufferevent_rate_limit_group_set_cfg() argument
677 if (!g || !cfg) in bufferevent_rate_limit_group_set_cfg()
680 LOCK_GROUP(g); in bufferevent_rate_limit_group_set_cfg()
682 &g->rate_limit_cfg.tick_timeout, &cfg->tick_timeout, ==); in bufferevent_rate_limit_group_set_cfg()
683 memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg)); in bufferevent_rate_limit_group_set_cfg()
685 if (g->rate_limit.read_limit > (ev_ssize_t)cfg->read_maximum) in bufferevent_rate_limit_group_set_cfg()
686 g->rate_limit.read_limit = cfg->read_maximum; in bufferevent_rate_limit_group_set_cfg()
687 if (g->rate_limit.write_limit > (ev_ssize_t)cfg->write_maximum) in bufferevent_rate_limit_group_set_cfg()
688 g->rate_limit.write_limit = cfg->write_maximum; in bufferevent_rate_limit_group_set_cfg()
692 event_add(&g->master_refill_event, &cfg->tick_timeout); in bufferevent_rate_limit_group_set_cfg()
696 bufferevent_rate_limit_group_set_min_share(g, g->configured_min_share); in bufferevent_rate_limit_group_set_cfg()
698 UNLOCK_GROUP(g); in bufferevent_rate_limit_group_set_cfg()
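
bufferevent_rate_limit_group_set_cfg() swaps in the new cfg under the group lock, clamps the current read/write balances to the new maxima, re-adds the refill timer when the tick length changed (the comparison at line 682), and re-applies the caller's configured_min_share against the new rates. A caller-side sketch for tightening a running group; the 256 KiB/s figure is illustrative:

    static void
    throttle_group(struct bufferevent_rate_limit_group *grp)
    {
        struct ev_token_bucket_cfg *slower =
            ev_token_bucket_cfg_new(262144, 262144, 262144, 262144, NULL);
        bufferevent_rate_limit_group_set_cfg(grp, slower);
        ev_token_bucket_cfg_free(slower);   /* line 683 copies it into the group */
    }
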
704 struct bufferevent_rate_limit_group *g, in bufferevent_rate_limit_group_set_min_share() argument
710 g->configured_min_share = share; in bufferevent_rate_limit_group_set_min_share()
714 if (share > g->rate_limit_cfg.read_rate) in bufferevent_rate_limit_group_set_min_share()
715 share = g->rate_limit_cfg.read_rate; in bufferevent_rate_limit_group_set_min_share()
716 if (share > g->rate_limit_cfg.write_rate) in bufferevent_rate_limit_group_set_min_share()
717 share = g->rate_limit_cfg.write_rate; in bufferevent_rate_limit_group_set_min_share()
719 g->min_share = share; in bufferevent_rate_limit_group_set_min_share()
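
set_min_share() remembers the caller's value in configured_min_share, then clamps it to the group's read and write rates; otherwise a single member's minimum allowance could exceed what the whole group earns per tick and suspension would never lift. Caller-side sketch (grp as in the earlier sketches; 4096 is illustrative):

    /* Hand each member at least 4 KiB per tick so it is never woken up
     * for a sub-packet-sized read or write. */
    bufferevent_rate_limit_group_set_min_share(grp, 4096);
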
724 bufferevent_rate_limit_group_free(struct bufferevent_rate_limit_group *g) in bufferevent_rate_limit_group_free() argument
726 LOCK_GROUP(g); in bufferevent_rate_limit_group_free()
727 EVUTIL_ASSERT(0 == g->n_members); in bufferevent_rate_limit_group_free()
728 event_del(&g->master_refill_event); in bufferevent_rate_limit_group_free()
729 UNLOCK_GROUP(g); in bufferevent_rate_limit_group_free()
730 EVTHREAD_FREE_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE); in bufferevent_rate_limit_group_free()
731 mm_free(g); in bufferevent_rate_limit_group_free()
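
bufferevent_rate_limit_group_free() asserts that n_members is zero (line 727), deletes the refill timer, and frees the lock and the structure, so every member must be detached first. Teardown sketch, assuming bev and grp from the earlier sketches:

    /* Detach members before freeing the group: line 727 asserts n_members == 0. */
    bufferevent_remove_from_rate_limit_group(bev);
    bufferevent_rate_limit_group_free(grp);
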
736 struct bufferevent_rate_limit_group *g) in bufferevent_add_to_rate_limit_group() argument
754 if (bevp->rate_limiting->group == g) { in bufferevent_add_to_rate_limit_group()
761 LOCK_GROUP(g); in bufferevent_add_to_rate_limit_group()
762 bevp->rate_limiting->group = g; in bufferevent_add_to_rate_limit_group()
763 ++g->n_members; in bufferevent_add_to_rate_limit_group()
764 LIST_INSERT_HEAD(&g->members, bevp, rate_limiting->next_in_group); in bufferevent_add_to_rate_limit_group()
766 rsuspend = g->read_suspended; in bufferevent_add_to_rate_limit_group()
767 wsuspend = g->write_suspended; in bufferevent_add_to_rate_limit_group()
769 UNLOCK_GROUP(g); in bufferevent_add_to_rate_limit_group()
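
bufferevent_add_to_rate_limit_group() takes the group lock, points the member's rate_limiting->group at the group, bumps n_members, links the member into the list, and snapshots the group's suspended flags so a newly added bufferevent immediately inherits any suspension already in force. Caller-side sketch; base, fd, and grp are assumed from the surrounding accept path:

    struct bufferevent *bev =
        bufferevent_socket_new(base, fd, BEV_OPT_CLOSE_ON_FREE);
    if (bev)
        bufferevent_add_to_rate_limit_group(bev, grp);   /* shares grp's budget */
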
793 struct bufferevent_rate_limit_group *g = in bufferevent_remove_from_rate_limit_group_internal_() local
795 LOCK_GROUP(g); in bufferevent_remove_from_rate_limit_group_internal_()
797 --g->n_members; in bufferevent_remove_from_rate_limit_group_internal_()
799 UNLOCK_GROUP(g); in bufferevent_remove_from_rate_limit_group_internal_()
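
The internal removal path locks the group, decrements n_members, unlinks the member, and unlocks; callers reach it through the public bufferevent_remove_from_rate_limit_group(). Sketch for retiring a single connection while the group keeps running:

    /* Detach one closing connection; the group and its other members live on. */
    bufferevent_remove_from_rate_limit_group(bev);
    bufferevent_free(bev);
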