Lines Matching +full:gain +full:- +full:scaling +full:- +full:p
1 /*-
2 * Copyright (c) 1991-1997 Regents of the University of California.
65 #define reset_cutoff(ifd) { ifd->cutoff_ = RM_MAXDEPTH; }
108 * the heuristics for setting the top-level variable (cutoff_) become:
109 * 1. if a packet arrives for a not-overlimit class, set cutoff
121 * rmc_newclass(...) - Create a new resource management class at priority
147 * limit (this parameter determines the steady-state burst
152 * class is f, we want to allow b packet bursts, and the gain of the
153 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
155 * ptime = s * nsPerByte * (1 - f) / f
156 * maxidle = ptime * (1 - g^b) / g^b
157 * minidle = -ptime * (1 / (f - 1))
158 * offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
164 * integers. This scaling also means that the 'unscaled' values of
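
Taken at face value, the four formulas above can be evaluated directly. Below is a minimal user-space sketch of that arithmetic, before any RM_FILTER_GAIN power-of-two scaling is applied; the packet size s, fraction f, burst b, link speed, and the RM_FILTER_GAIN value of 5 are illustrative assumptions, not values fixed by this excerpt. Compile with -lm.

#include <math.h>
#include <stdio.h>

#define RM_FILTER_GAIN	5	/* assumed; not shown in this excerpt */

int
main(void)
{
	double nsPerByte = 800.0;		/* e.g. a 10 Mbit/s link */
	double s = 1500.0, f = 0.5, b = 16.0;	/* hypothetical class */
	double g = 1.0 - pow(2.0, -RM_FILTER_GAIN);
	double ptime, maxidle, minidle, offtime;

	ptime = s * nsPerByte * (1.0 - f) / f;
	maxidle = ptime * (1.0 - pow(g, b)) / pow(g, b);
	/* note: rmc_newclass() later clamps a positive minidle_ to 0 */
	minidle = -ptime * (1.0 / (f - 1.0));
	offtime = ptime * (1.0 + 1.0 / (1.0 - g) *
	    (1.0 - pow(g, b - 1.0)) / pow(g, b - 1.0));
	printf("ptime %.0f maxidle %.0f minidle %.0f offtime %.0f ns\n",
	    ptime, maxidle, minidle, offtime);
	return (0);
}
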
218 CALLOUT_INIT(&cl->callout_); in rmc_newclass()
219 cl->q_ = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO); in rmc_newclass()
220 if (cl->q_ == NULL) { in rmc_newclass()
228 cl->children_ = NULL; in rmc_newclass()
229 cl->parent_ = parent; in rmc_newclass()
230 cl->borrow_ = borrow; in rmc_newclass()
231 cl->leaf_ = 1; in rmc_newclass()
232 cl->ifdat_ = ifd; in rmc_newclass()
233 cl->pri_ = pri; in rmc_newclass()
234 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */ in rmc_newclass()
235 cl->depth_ = 0; in rmc_newclass()
236 cl->qthresh_ = 0; in rmc_newclass()
237 cl->ns_per_byte_ = nsecPerByte; in rmc_newclass()
239 qlimit(cl->q_) = maxq; in rmc_newclass()
240 qtype(cl->q_) = Q_DROPHEAD; in rmc_newclass()
241 qlen(cl->q_) = 0; in rmc_newclass()
242 cl->flags_ = flags; in rmc_newclass()
245 cl->minidle_ = (minidle * (int)nsecPerByte) / 8; in rmc_newclass()
246 if (cl->minidle_ > 0) in rmc_newclass()
247 cl->minidle_ = 0; in rmc_newclass()
249 cl->minidle_ = minidle; in rmc_newclass()
251 cl->maxidle_ = (maxidle * nsecPerByte) / 8; in rmc_newclass()
252 if (cl->maxidle_ == 0) in rmc_newclass()
253 cl->maxidle_ = 1; in rmc_newclass()
255 cl->avgidle_ = cl->maxidle_; in rmc_newclass()
256 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN; in rmc_newclass()
257 if (cl->offtime_ == 0) in rmc_newclass()
258 cl->offtime_ = 1; in rmc_newclass()
260 cl->avgidle_ = 0; in rmc_newclass()
261 cl->offtime_ = (offtime * nsecPerByte) / 8; in rmc_newclass()
263 cl->overlimit = action; in rmc_newclass()
281 cl->red_ = red_alloc(0, 0, in rmc_newclass()
282 qlimit(cl->q_) * 10/100, in rmc_newclass()
283 qlimit(cl->q_) * 30/100, in rmc_newclass()
285 if (cl->red_ != NULL) in rmc_newclass()
286 qtype(cl->q_) = Q_RED; in rmc_newclass()
290 cl->red_ = (red_t *)rio_alloc(0, NULL, in rmc_newclass()
292 if (cl->red_ != NULL) in rmc_newclass()
293 qtype(cl->q_) = Q_RIO; in rmc_newclass()
300 cl->codel_ = codel_alloc(5, 100, 0); in rmc_newclass()
301 if (cl->codel_ != NULL) in rmc_newclass()
302 qtype(cl->q_) = Q_CODEL; in rmc_newclass()
310 IFQ_LOCK(ifd->ifq_); in rmc_newclass()
311 if ((peer = ifd->active_[pri]) != NULL) { in rmc_newclass()
313 cl->peer_ = peer; in rmc_newclass()
314 while (peer->peer_ != ifd->active_[pri]) in rmc_newclass()
315 peer = peer->peer_; in rmc_newclass()
316 peer->peer_ = cl; in rmc_newclass()
318 ifd->active_[pri] = cl; in rmc_newclass()
319 cl->peer_ = cl; in rmc_newclass()
322 if (cl->parent_) { in rmc_newclass()
323 cl->next_ = parent->children_; in rmc_newclass()
324 parent->children_ = cl; in rmc_newclass()
325 parent->leaf_ = 0; in rmc_newclass()
337 if (ifd->wrr_) { in rmc_newclass()
338 ifd->num_[pri]++; in rmc_newclass()
339 ifd->alloc_[pri] += cl->allotment_; in rmc_newclass()
342 IFQ_UNLOCK(ifd->ifq_); in rmc_newclass()
355 ifd = cl->ifdat_; in rmc_modclass()
356 old_allotment = cl->allotment_; in rmc_modclass()
359 IFQ_LOCK(ifd->ifq_); in rmc_modclass()
360 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */ in rmc_modclass()
361 cl->qthresh_ = 0; in rmc_modclass()
362 cl->ns_per_byte_ = nsecPerByte; in rmc_modclass()
364 qlimit(cl->q_) = maxq; in rmc_modclass()
367 cl->minidle_ = (minidle * nsecPerByte) / 8; in rmc_modclass()
368 if (cl->minidle_ > 0) in rmc_modclass()
369 cl->minidle_ = 0; in rmc_modclass()
371 cl->minidle_ = minidle; in rmc_modclass()
373 cl->maxidle_ = (maxidle * nsecPerByte) / 8; in rmc_modclass()
374 if (cl->maxidle_ == 0) in rmc_modclass()
375 cl->maxidle_ = 1; in rmc_modclass()
377 cl->avgidle_ = cl->maxidle_; in rmc_modclass()
378 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN; in rmc_modclass()
379 if (cl->offtime_ == 0) in rmc_modclass()
380 cl->offtime_ = 1; in rmc_modclass()
382 cl->avgidle_ = 0; in rmc_modclass()
383 cl->offtime_ = (offtime * nsecPerByte) / 8; in rmc_modclass()
389 if (ifd->wrr_) { in rmc_modclass()
390 ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment; in rmc_modclass()
393 IFQ_UNLOCK(ifd->ifq_); in rmc_modclass()
400 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
418 if (ifd->num_[i] == 0) in rmc_wrr_set_weights()
419 ifd->M_[i] = 0; in rmc_wrr_set_weights()
421 ifd->M_[i] = ifd->alloc_[i] / in rmc_wrr_set_weights()
422 (ifd->num_[i] * ifd->maxpkt_); in rmc_wrr_set_weights()
430 if (ifd->active_[i] != NULL) { in rmc_wrr_set_weights()
431 clh = cl = ifd->active_[i]; in rmc_wrr_set_weights()
433 /* safe-guard for slow link or alloc_ == 0 */ in rmc_wrr_set_weights()
434 if (ifd->M_[i] == 0) in rmc_wrr_set_weights()
435 cl->w_allotment_ = 0; in rmc_wrr_set_weights()
437 cl->w_allotment_ = cl->allotment_ / in rmc_wrr_set_weights()
438 ifd->M_[i]; in rmc_wrr_set_weights()
439 cl = cl->peer_; in rmc_wrr_set_weights()
449 return (ifd->M_[pri]); in rmc_get_weight()
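
Concretely, M_[i] normalizes the per-priority allotments so that one full round moves roughly num_[i] * maxpkt_ bytes. A toy computation of the same division as rmc_wrr_set_weights(), with hypothetical numbers (two classes at one priority, 1500-byte MTU):

#include <stdio.h>

int
main(void)
{
	int maxpkt = 1500, num = 2;		/* hypothetical priority ring */
	int allot_a = 750000, allot_b = 250000;	/* bytes/sec */
	int M = (allot_a + allot_b) / (num * maxpkt);

	/* w_allotment_: ~2252 and ~750 bytes per round, a 3:1 split */
	printf("M %d, w_a %d, w_b %d\n", M, allot_a / M, allot_b / M);
	return (0);
}
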
456 * rmc_depth_compute(struct rm_class *cl) - This function computes the
465 rm_class_t *t = cl, *p; in rmc_depth_compute() local
471 p = t->parent_; in rmc_depth_compute()
472 if (p && (t->depth_ >= p->depth_)) { in rmc_depth_compute()
473 p->depth_ = t->depth_ + 1; in rmc_depth_compute()
474 t = p; in rmc_depth_compute()
482 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
492 rm_class_t *p, *t; in rmc_depth_recompute() local
494 p = cl; in rmc_depth_recompute()
495 while (p != NULL) { in rmc_depth_recompute()
496 if ((t = p->children_) == NULL) { in rmc_depth_recompute()
497 p->depth_ = 0; in rmc_depth_recompute()
502 if (t->depth_ > cdepth) in rmc_depth_recompute()
503 cdepth = t->depth_; in rmc_depth_recompute()
504 t = t->next_; in rmc_depth_recompute()
507 if (p->depth_ == cdepth + 1) in rmc_depth_recompute()
511 p->depth_ = cdepth + 1; in rmc_depth_recompute()
514 p = p->parent_; in rmc_depth_recompute()
519 if (cl->depth_ >= 1) { in rmc_depth_recompute()
520 if (cl->children_ == NULL) { in rmc_depth_recompute()
521 cl->depth_ = 0; in rmc_depth_recompute()
522 } else if ((t = cl->children_) != NULL) { in rmc_depth_recompute()
524 if (t->children_ != NULL) in rmc_depth_recompute()
526 t = t->next_; in rmc_depth_recompute()
536 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
537 * function deletes a class from the link-sharing structure and frees
546 struct rm_class *p, *head, *previous; in rmc_delete_class() local
549 ASSERT(cl->children_ == NULL); in rmc_delete_class()
551 if (cl->sleeping_) in rmc_delete_class()
552 CALLOUT_STOP(&cl->callout_); in rmc_delete_class()
555 IFQ_LOCK(ifd->ifq_); in rmc_delete_class()
558 * XXX - this may not be the desired behavior. Packets should be in rmc_delete_class()
559 * re-queued. in rmc_delete_class()
567 if (cl->parent_ != NULL) { in rmc_delete_class()
568 head = cl->parent_->children_; in rmc_delete_class()
569 p = previous = head; in rmc_delete_class()
570 if (head->next_ == NULL) { in rmc_delete_class()
572 cl->parent_->children_ = NULL; in rmc_delete_class()
573 cl->parent_->leaf_ = 1; in rmc_delete_class()
574 } else while (p != NULL) { in rmc_delete_class()
575 if (p == cl) { in rmc_delete_class()
577 cl->parent_->children_ = cl->next_; in rmc_delete_class()
579 previous->next_ = cl->next_; in rmc_delete_class()
580 cl->next_ = NULL; in rmc_delete_class()
581 p = NULL; in rmc_delete_class()
583 previous = p; in rmc_delete_class()
584 p = p->next_; in rmc_delete_class()
592 if ((p = ifd->active_[cl->pri_]) != NULL) { in rmc_delete_class()
597 if (p != p->peer_) { in rmc_delete_class()
598 while (p->peer_ != cl) in rmc_delete_class()
599 p = p->peer_; in rmc_delete_class()
600 p->peer_ = cl->peer_; in rmc_delete_class()
602 if (ifd->active_[cl->pri_] == cl) in rmc_delete_class()
603 ifd->active_[cl->pri_] = cl->peer_; in rmc_delete_class()
605 ASSERT(p == cl); in rmc_delete_class()
606 ifd->active_[cl->pri_] = NULL; in rmc_delete_class()
613 if (ifd->wrr_) { in rmc_delete_class()
614 ifd->alloc_[cl->pri_] -= cl->allotment_; in rmc_delete_class()
615 ifd->num_[cl->pri_]--; in rmc_delete_class()
620 * Re-compute the depth of the tree. in rmc_delete_class()
623 rmc_depth_recompute(cl->parent_); in rmc_delete_class()
625 rmc_depth_recompute(ifd->root_); in rmc_delete_class()
628 IFQ_UNLOCK(ifd->ifq_); in rmc_delete_class()
634 if (cl->red_ != NULL) { in rmc_delete_class()
636 if (q_is_rio(cl->q_)) in rmc_delete_class()
637 rio_destroy((rio_t *)cl->red_); in rmc_delete_class()
640 if (q_is_red(cl->q_)) in rmc_delete_class()
641 red_destroy(cl->red_); in rmc_delete_class()
644 if (q_is_codel(cl->q_)) in rmc_delete_class()
645 codel_destroy(cl->codel_); in rmc_delete_class()
648 free(cl->q_, M_DEVBUF); in rmc_delete_class()
654 * rmc_init(...) - Initialize the resource management data structures
659 * 'restart' is the driver-specific routine that the generic 'delay
681 mtu = ifq->altq_ifp->if_mtu; in rmc_init()
682 ifd->ifq_ = ifq; in rmc_init()
683 ifd->restart = restart; in rmc_init()
684 ifd->maxqueued_ = maxqueued; in rmc_init()
685 ifd->ns_per_byte_ = nsecPerByte; in rmc_init()
686 ifd->maxpkt_ = mtu; in rmc_init()
687 ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0; in rmc_init()
688 ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0; in rmc_init()
690 ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16; in rmc_init()
692 ifd->maxiftime_ /= 4; in rmc_init()
696 CBQTRACE(rmc_init, 'INIT', ifd->cutoff_); in rmc_init()
702 ifd->alloc_[i] = 0; in rmc_init()
703 ifd->M_[i] = 0; in rmc_init()
704 ifd->num_[i] = 0; in rmc_init()
705 ifd->na_[i] = 0; in rmc_init()
706 ifd->active_[i] = NULL; in rmc_init()
712 ifd->qi_ = 0; in rmc_init()
713 ifd->qo_ = 0; in rmc_init()
715 ifd->class_[i] = NULL; in rmc_init()
716 ifd->curlen_[i] = 0; in rmc_init()
717 ifd->borrowed_[i] = NULL; in rmc_init()
721 * Create the root class of the link-sharing structure. in rmc_init()
723 if ((ifd->root_ = rmc_newclass(0, ifd, in rmc_init()
731 ifd->root_->depth_ = 0; in rmc_init()
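
Rates throughout this file are carried as nanoseconds per byte. Assuming RM_NS_PER_SEC is 10^9 (its definition is not in this excerpt), the conversions used by rmc_init() and rmc_newclass() work out as follows for a hypothetical 10 Mbit/s link:

#include <stdio.h>

#define RM_NS_PER_SEC	1000000000	/* assumed */

int
main(void)
{
	unsigned bps = 10000000;	/* 10 Mbit/s */
	unsigned nsecPerByte = RM_NS_PER_SEC / (bps / 8);	/* 800 */
	unsigned allotment = RM_NS_PER_SEC / nsecPerByte;	/* 1250000 B/s */
	unsigned mtu = 1500;
	unsigned maxiftime = mtu * nsecPerByte / 1000 * 16;	/* 19200 */

	printf("ns/byte %u, allotment %u B/s, maxiftime %u\n",
	    nsecPerByte, allotment, maxiftime);
	return (0);
}
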
736 * rmc_queue_packet(struct rm_class *cl, mbuf_t *m) - Add packet given by
743 * -1 when packet drop occurs
749 struct rm_ifdat *ifd = cl->ifdat_; in rmc_queue_packet()
750 int cpri = cl->pri_; in rmc_queue_packet()
751 int is_empty = qempty(cl->q_); in rmc_queue_packet()
754 if (ifd->cutoff_ > 0) { in rmc_queue_packet()
755 if (TV_LT(&cl->undertime_, &now)) { in rmc_queue_packet()
756 if (ifd->cutoff_ > cl->depth_) in rmc_queue_packet()
757 ifd->cutoff_ = cl->depth_; in rmc_queue_packet()
758 CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_); in rmc_queue_packet()
767 struct rm_class *borrow = cl->borrow_; in rmc_queue_packet()
770 borrow->depth_ < ifd->cutoff_) { in rmc_queue_packet()
771 if (TV_LT(&borrow->undertime_, &now)) { in rmc_queue_packet()
772 ifd->cutoff_ = borrow->depth_; in rmc_queue_packet()
773 CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_); in rmc_queue_packet()
776 borrow = borrow->borrow_; in rmc_queue_packet()
780 else if ((ifd->cutoff_ > 1) && cl->borrow_) { in rmc_queue_packet()
781 if (TV_LT(&cl->borrow_->undertime_, &now)) { in rmc_queue_packet()
782 ifd->cutoff_ = cl->borrow_->depth_; in rmc_queue_packet()
784 cl->borrow_->depth_); in rmc_queue_packet()
792 return (-1); in rmc_queue_packet()
795 CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle); in rmc_queue_packet()
796 ifd->na_[cpri]++; in rmc_queue_packet()
799 if (qlen(cl->q_) > qlimit(cl->q_)) { in rmc_queue_packet()
802 return (-1); in rmc_queue_packet()
809 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
817 rm_class_t *p, *bp; in rmc_tl_satisfied() local
819 for (i = RM_MAXPRIO - 1; i >= 0; i--) { in rmc_tl_satisfied()
820 if ((bp = ifd->active_[i]) != NULL) { in rmc_tl_satisfied()
821 p = bp; in rmc_tl_satisfied()
823 if (!rmc_satisfied(p, now)) { in rmc_tl_satisfied()
824 ifd->cutoff_ = p->depth_; in rmc_tl_satisfied()
827 p = p->peer_; in rmc_tl_satisfied()
828 } while (p != bp); in rmc_tl_satisfied()
836 * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
842 rm_class_t *p; in rmc_satisfied() local
846 if (TV_LT(now, &cl->undertime_)) in rmc_satisfied()
848 if (cl->depth_ == 0) { in rmc_satisfied()
849 if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_)) in rmc_satisfied()
854 if (cl->children_ != NULL) { in rmc_satisfied()
855 p = cl->children_; in rmc_satisfied()
856 while (p != NULL) { in rmc_satisfied()
857 if (!rmc_satisfied(p, now)) in rmc_satisfied()
859 p = p->next_; in rmc_satisfied()
868 * 0 if overlimit. As a side-effect, this routine will invoke the
875 rm_class_t *p = cl; in rmc_under_limit() local
877 struct rm_ifdat *ifd = cl->ifdat_; in rmc_under_limit()
879 ifd->borrowed_[ifd->qi_] = NULL; in rmc_under_limit()
884 if (cl->parent_ == NULL) in rmc_under_limit()
887 if (cl->sleeping_) { in rmc_under_limit()
888 if (TV_LT(now, &cl->undertime_)) in rmc_under_limit()
891 CALLOUT_STOP(&cl->callout_); in rmc_under_limit()
892 cl->sleeping_ = 0; in rmc_under_limit()
893 cl->undertime_.tv_sec = 0; in rmc_under_limit()
898 while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) { in rmc_under_limit()
899 if (((cl = cl->borrow_) == NULL) || in rmc_under_limit()
900 (cl->depth_ > ifd->cutoff_)) { in rmc_under_limit()
917 CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_); in rmc_under_limit()
919 if (top != NULL && top->avgidle_ == top->minidle_) in rmc_under_limit()
921 p->overtime_ = *now; in rmc_under_limit()
922 (p->overlimit)(p, top); in rmc_under_limit()
924 p->overtime_ = *now; in rmc_under_limit()
925 (p->overlimit)(p, NULL); in rmc_under_limit()
932 if (cl != p) in rmc_under_limit()
933 ifd->borrowed_[ifd->qi_] = cl; in rmc_under_limit()
938 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR, as opposed to
939 * packet-by-packet round robin.
941 * The heart of the weighted round-robin scheduler, which decides which
943 * weighted round-robin within priorities.
945 * Each able-to-send class gets to send until its byte allocation is
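
The replenish-then-send bookkeeping that the loop below performs on bytes_alloc_ can be seen in miniature in this standalone sketch; the fixed 1500-byte packets and the weights from the earlier example are both hypothetical:

#include <stdio.h>

int
main(void)
{
	int w_allot[2] = { 2252, 750 };	/* bytes per round, ~3:1 */
	int alloc[2] = { 0, 0 };
	int pktlen = 1500, round, i;

	for (round = 0; round < 3; round++)
		for (i = 0; i < 2; i++) {
			if (alloc[i] <= 0)
				alloc[i] += w_allot[i];	/* top up, as with w_allotment_ */
			while (alloc[i] > 0) {	/* send while the allocation lasts */
				printf("round %d: class %d sends %d bytes\n",
				    round, i, pktlen);
				alloc[i] -= pktlen;	/* the deficit carries over */
			}
		}
	return (0);
}
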
969 if (op == ALTDQ_REMOVE && ifd->pollcache_) { in _rmc_wrr_dequeue_next()
970 cl = ifd->pollcache_; in _rmc_wrr_dequeue_next()
971 cpri = cl->pri_; in _rmc_wrr_dequeue_next()
972 if (ifd->efficient_) { in _rmc_wrr_dequeue_next()
974 if (cl->undertime_.tv_sec != 0 && in _rmc_wrr_dequeue_next()
978 ifd->pollcache_ = NULL; in _rmc_wrr_dequeue_next()
983 ifd->pollcache_ = NULL; in _rmc_wrr_dequeue_next()
984 ifd->borrowed_[ifd->qi_] = NULL; in _rmc_wrr_dequeue_next()
989 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) { in _rmc_wrr_dequeue_next()
990 if (ifd->na_[cpri] == 0) in _rmc_wrr_dequeue_next()
996 * of the weighted round-robin mechanism. in _rmc_wrr_dequeue_next()
999 * "M[cl->pri_])" times "cl->allotment" is greater than in _rmc_wrr_dequeue_next()
1003 cl = ifd->active_[cpri]; in _rmc_wrr_dequeue_next()
1006 if ((deficit < 2) && (cl->bytes_alloc_ <= 0)) in _rmc_wrr_dequeue_next()
1007 cl->bytes_alloc_ += cl->w_allotment_; in _rmc_wrr_dequeue_next()
1008 if (!qempty(cl->q_)) { in _rmc_wrr_dequeue_next()
1009 if ((cl->undertime_.tv_sec == 0) || in _rmc_wrr_dequeue_next()
1011 if (cl->bytes_alloc_ > 0 || deficit > 1) in _rmc_wrr_dequeue_next()
1017 ifd->borrowed_[ifd->qi_] = NULL; in _rmc_wrr_dequeue_next()
1020 else if (first == NULL && cl->borrow_ != NULL) in _rmc_wrr_dequeue_next()
1024 cl->bytes_alloc_ = 0; in _rmc_wrr_dequeue_next()
1025 cl = cl->peer_; in _rmc_wrr_dequeue_next()
1026 } while (cl != ifd->active_[cpri]); in _rmc_wrr_dequeue_next()
1041 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) { in _rmc_wrr_dequeue_next()
1042 ifd->cutoff_++; in _rmc_wrr_dequeue_next()
1043 CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_); in _rmc_wrr_dequeue_next()
1050 * of the link-sharing structure are overlimit. in _rmc_wrr_dequeue_next()
1053 CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_); in _rmc_wrr_dequeue_next()
1055 if (!ifd->efficient_ || first == NULL) in _rmc_wrr_dequeue_next()
1059 cpri = cl->pri_; in _rmc_wrr_dequeue_next()
1060 #if 0 /* too time-consuming for nothing */ in _rmc_wrr_dequeue_next()
1061 if (cl->sleeping_) in _rmc_wrr_dequeue_next()
1062 CALLOUT_STOP(&cl->callout_); in _rmc_wrr_dequeue_next()
1063 cl->sleeping_ = 0; in _rmc_wrr_dequeue_next()
1064 cl->undertime_.tv_sec = 0; in _rmc_wrr_dequeue_next()
1066 ifd->borrowed_[ifd->qi_] = cl->borrow_; in _rmc_wrr_dequeue_next()
1067 ifd->cutoff_ = cl->borrow_->depth_; in _rmc_wrr_dequeue_next()
1077 if (qempty(cl->q_)) in _rmc_wrr_dequeue_next()
1078 ifd->na_[cpri]--; in _rmc_wrr_dequeue_next()
1083 if (cl->bytes_alloc_ > 0) in _rmc_wrr_dequeue_next()
1084 cl->bytes_alloc_ -= m_pktlen(m); in _rmc_wrr_dequeue_next()
1086 if ((cl->bytes_alloc_ <= 0) || first == cl) in _rmc_wrr_dequeue_next()
1087 ifd->active_[cl->pri_] = cl->peer_; in _rmc_wrr_dequeue_next()
1089 ifd->active_[cl->pri_] = cl; in _rmc_wrr_dequeue_next()
1091 ifd->class_[ifd->qi_] = cl; in _rmc_wrr_dequeue_next()
1092 ifd->curlen_[ifd->qi_] = m_pktlen(m); in _rmc_wrr_dequeue_next()
1093 ifd->now_[ifd->qi_] = now; in _rmc_wrr_dequeue_next()
1094 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_; in _rmc_wrr_dequeue_next()
1095 ifd->queued_++; in _rmc_wrr_dequeue_next()
1099 ifd->pollcache_ = cl; in _rmc_wrr_dequeue_next()
1124 if (op == ALTDQ_REMOVE && ifd->pollcache_) { in _rmc_prr_dequeue_next()
1125 cl = ifd->pollcache_; in _rmc_prr_dequeue_next()
1126 cpri = cl->pri_; in _rmc_prr_dequeue_next()
1127 ifd->pollcache_ = NULL; in _rmc_prr_dequeue_next()
1131 ifd->pollcache_ = NULL; in _rmc_prr_dequeue_next()
1132 ifd->borrowed_[ifd->qi_] = NULL; in _rmc_prr_dequeue_next()
1137 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) { in _rmc_prr_dequeue_next()
1138 if (ifd->na_[cpri] == 0) in _rmc_prr_dequeue_next()
1140 cl = ifd->active_[cpri]; in _rmc_prr_dequeue_next()
1143 if (!qempty(cl->q_)) { in _rmc_prr_dequeue_next()
1144 if ((cl->undertime_.tv_sec == 0) || in _rmc_prr_dequeue_next()
1147 if (first == NULL && cl->borrow_ != NULL) in _rmc_prr_dequeue_next()
1150 cl = cl->peer_; in _rmc_prr_dequeue_next()
1151 } while (cl != ifd->active_[cpri]); in _rmc_prr_dequeue_next()
1159 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) { in _rmc_prr_dequeue_next()
1160 ifd->cutoff_++; in _rmc_prr_dequeue_next()
1167 * of the link-sharing structure are overlimit. in _rmc_prr_dequeue_next()
1170 if (!ifd->efficient_ || first == NULL) in _rmc_prr_dequeue_next()
1174 cpri = cl->pri_; in _rmc_prr_dequeue_next()
1175 #if 0 /* too time-consuming for nothing */ in _rmc_prr_dequeue_next()
1176 if (cl->sleeping_) in _rmc_prr_dequeue_next()
1177 CALLOUT_STOP(&cl->callout_); in _rmc_prr_dequeue_next()
1178 cl->sleeping_ = 0; in _rmc_prr_dequeue_next()
1179 cl->undertime_.tv_sec = 0; in _rmc_prr_dequeue_next()
1181 ifd->borrowed_[ifd->qi_] = cl->borrow_; in _rmc_prr_dequeue_next()
1182 ifd->cutoff_ = cl->borrow_->depth_; in _rmc_prr_dequeue_next()
1192 if (qempty(cl->q_)) in _rmc_prr_dequeue_next()
1193 ifd->na_[cpri]--; in _rmc_prr_dequeue_next()
1195 ifd->active_[cpri] = cl->peer_; in _rmc_prr_dequeue_next()
1197 ifd->class_[ifd->qi_] = cl; in _rmc_prr_dequeue_next()
1198 ifd->curlen_[ifd->qi_] = m_pktlen(m); in _rmc_prr_dequeue_next()
1199 ifd->now_[ifd->qi_] = now; in _rmc_prr_dequeue_next()
1200 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_; in _rmc_prr_dequeue_next()
1201 ifd->queued_++; in _rmc_prr_dequeue_next()
1205 ifd->pollcache_ = cl; in _rmc_prr_dequeue_next()
1212 * rmc_dequeue_next(struct rm_ifdat *ifd, struct timeval *now) - this function
1216 * Otherwise, packet-by-packet round robin is invoked.
1227 if (ifd->queued_ >= ifd->maxqueued_) in rmc_dequeue_next()
1229 else if (ifd->wrr_) in rmc_dequeue_next()
1238 * estimators updated. This routine is called by the driver's output-
1239 * packet-completion interrupt service routine.
1260 if ((cl = ifd->class_[ifd->qo_]) == NULL) in rmc_update_class_util()
1263 pktlen = ifd->curlen_[ifd->qo_]; in rmc_update_class_util()
1264 borrowed = ifd->borrowed_[ifd->qo_]; in rmc_update_class_util()
1267 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen); in rmc_update_class_util()
1277 * as a result, ifd->qi_ and ifd->qo_ are always synced. in rmc_update_class_util()
1279 nowp = &ifd->now_[ifd->qo_]; in rmc_update_class_util()
1282 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_; in rmc_update_class_util()
1285 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000; in rmc_update_class_util()
1288 if (TV_LT(nowp, &ifd->ifnow_)) { in rmc_update_class_util()
1297 TV_DELTA(&ifd->ifnow_, nowp, iftime); in rmc_update_class_util()
1298 if (iftime+pkt_time < ifd->maxiftime_) { in rmc_update_class_util()
1299 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_); in rmc_update_class_util()
1301 TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_); in rmc_update_class_util()
1304 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_); in rmc_update_class_util()
1307 if (TV_LT(nowp, &ifd->ifnow_)) { in rmc_update_class_util()
1308 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_); in rmc_update_class_util()
1310 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_); in rmc_update_class_util()
1315 TV_DELTA(&ifd->ifnow_, &cl->last_, idle); in rmc_update_class_util()
1321 cl->avgidle_ = cl->maxidle_; in rmc_update_class_util()
1325 pkt_time = pktlen * cl->ns_per_byte_; in rmc_update_class_util()
1328 pkt_time = pktlen * cl->ns_per_byte_ / 1000; in rmc_update_class_util()
1330 idle -= pkt_time; in rmc_update_class_util()
1332 avgidle = cl->avgidle_; in rmc_update_class_util()
1333 avgidle += idle - (avgidle >> RM_FILTER_GAIN); in rmc_update_class_util()
1334 cl->avgidle_ = avgidle; in rmc_update_class_util()
1338 CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle); in rmc_update_class_util()
1344 if (avgidle < cl->minidle_) in rmc_update_class_util()
1345 avgidle = cl->avgidle_ = cl->minidle_; in rmc_update_class_util()
1349 (((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN); in rmc_update_class_util()
1350 TV_ADD_DELTA(nowp, tidle, &cl->undertime_); in rmc_update_class_util()
1351 ++cl->stats_.over; in rmc_update_class_util()
1353 cl->avgidle_ = in rmc_update_class_util()
1354 (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle; in rmc_update_class_util()
1355 cl->undertime_.tv_sec = 0; in rmc_update_class_util()
1356 if (cl->sleeping_) { in rmc_update_class_util()
1357 CALLOUT_STOP(&cl->callout_); in rmc_update_class_util()
1358 cl->sleeping_ = 0; in rmc_update_class_util()
1364 ++cl->stats_.borrows; in rmc_update_class_util()
1368 cl->last_ = ifd->ifnow_; in rmc_update_class_util()
1369 cl->last_pkttime_ = pkt_time; in rmc_update_class_util()
1372 if (cl->parent_ == NULL) { in rmc_update_class_util()
1374 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen); in rmc_update_class_util()
1378 cl = cl->parent_; in rmc_update_class_util()
1384 cl = ifd->class_[ifd->qo_]; in rmc_update_class_util()
1385 if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) { in rmc_update_class_util()
1387 if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) { in rmc_update_class_util()
1389 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_); in rmc_update_class_util()
1391 ifd->cutoff_ = borrowed->depth_; in rmc_update_class_util()
1392 CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_); in rmc_update_class_util()
1395 if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) { in rmc_update_class_util()
1400 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_); in rmc_update_class_util()
1402 ifd->cutoff_ = borrowed->depth_; in rmc_update_class_util()
1403 CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_); in rmc_update_class_util()
1411 ifd->borrowed_[ifd->qo_] = NULL; in rmc_update_class_util()
1412 ifd->class_[ifd->qo_] = NULL; in rmc_update_class_util()
1413 ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_; in rmc_update_class_util()
1414 ifd->queued_--; in rmc_update_class_util()
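
The avgidle_ update above, avgidle += idle - (avgidle >> RM_FILTER_GAIN), is an exponentially weighted moving average stored pre-scaled by 2^RM_FILTER_GAIN so it runs in integer arithmetic: writing A = 2^g * a, the update is equivalent to a = (1 - 2^-g) * a + 2^-g * idle. A toy run with made-up idle samples:

#include <stdio.h>

#define RM_FILTER_GAIN	5	/* assumed; not shown in this excerpt */

int
main(void)
{
	int avgidle = 0, i;
	int idle[] = { 400, 400, -200, 400 };	/* hypothetical samples, ns */

	for (i = 0; i < 4; i++) {
		/* the same update as rmc_update_class_util() */
		avgidle += idle[i] - (avgidle >> RM_FILTER_GAIN);
		printf("sample %d: scaled %d (unscaled ~%d)\n",
		    i, avgidle, avgidle >> RM_FILTER_GAIN);
	}
	return (0);
}
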
1419 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
1420 * over-limit action routines. These get invoked by rmc_under_limit()
1430 struct rm_ifdat *ifd = cl->ifdat_; in rmc_drop_action()
1432 ASSERT(qlen(cl->q_) > 0); in rmc_drop_action()
1434 if (qempty(cl->q_)) in rmc_drop_action()
1435 ifd->na_[cl->pri_]--; in rmc_drop_action()
1440 struct rm_ifdat *ifd = cl->ifdat_; in rmc_dropall()
1442 if (!qempty(cl->q_)) { in rmc_dropall()
1443 _flushq(cl->q_); in rmc_dropall()
1445 ifd->na_[cl->pri_]--; in rmc_dropall()
1455 t2.tv_sec = tv->tv_sec - t2.tv_sec; in hzto()
1456 t2.tv_usec = tv->tv_usec - t2.tv_usec; in hzto()
1462 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
1477 cl->stats_.overactions++; in rmc_delay_action()
1478 TV_DELTA(&cl->undertime_, &cl->overtime_, delay); in rmc_delay_action()
1480 delay += cl->offtime_; in rmc_delay_action()
1483 if (!cl->sleeping_) { in rmc_delay_action()
1484 CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle); in rmc_delay_action()
1487 extradelay = borrow->offtime_; in rmc_delay_action()
1490 extradelay = cl->offtime_; in rmc_delay_action()
1501 extradelay -= cl->last_pkttime_; in rmc_delay_action()
1504 TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_); in rmc_delay_action()
1508 cl->sleeping_ = 1; in rmc_delay_action()
1509 cl->stats_.delays++; in rmc_delay_action()
1520 t = hzto(&cl->undertime_); in rmc_delay_action()
1523 CALLOUT_RESET(&cl->callout_, t, rmc_restart, cl); in rmc_delay_action()
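
hzto(), partially shown above, turns the absolute undertime_ into a tick count for the callout. A sketch of the usual timeval-delta-to-ticks arithmetic, assuming an hz of 1000; the actual conversion is not shown in this excerpt:

#include <stdio.h>

int
main(void)
{
	long hz = 1000;			/* assumed tick rate */
	long d_sec = 0, d_usec = 2500;	/* delta until undertime_ */
	long t = d_sec * hz + d_usec * hz / 1000000;

	if (t < 1)
		t = 1;			/* never earlier than the next tick */
	printf("CALLOUT_RESET for %ld tick(s)\n", t);
	return (0);
}
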
1529 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
1548 struct rm_ifdat *ifd = cl->ifdat_; in rmc_restart()
1554 IFQ_LOCK(ifd->ifq_); in rmc_restart()
1555 CURVNET_SET(ifd->ifq_->altq_ifp->if_vnet); in rmc_restart()
1556 if (cl->sleeping_) { in rmc_restart()
1557 cl->sleeping_ = 0; in rmc_restart()
1558 cl->undertime_.tv_sec = 0; in rmc_restart()
1560 if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) { in rmc_restart()
1561 CBQTRACE(rmc_restart, 'trts', cl->stats_.handle); in rmc_restart()
1562 (ifd->restart)(ifd->ifq_); in rmc_restart()
1566 IFQ_UNLOCK(ifd->ifq_); in rmc_restart()
1573 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
1595 if (q_is_rio(cl->q_)) in _rmc_addq()
1596 return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_); in _rmc_addq()
1599 if (q_is_red(cl->q_)) in _rmc_addq()
1600 return red_addq(cl->red_, cl->q_, m, cl->pktattr_); in _rmc_addq()
1603 if (q_is_codel(cl->q_)) in _rmc_addq()
1604 return codel_addq(cl->codel_, cl->q_, m); in _rmc_addq()
1607 if (cl->flags_ & RMCF_CLEARDSCP) in _rmc_addq()
1608 write_dsfield(m, cl->pktattr_, 0); in _rmc_addq()
1610 _addq(cl->q_, m); in _rmc_addq()
1620 if ((m = _getq(cl->q_)) != NULL) in _rmc_dropq()
1628 if (q_is_rio(cl->q_)) in _rmc_getq()
1629 return rio_getq((rio_t *)cl->red_, cl->q_); in _rmc_getq()
1632 if (q_is_red(cl->q_)) in _rmc_getq()
1633 return red_getq(cl->red_, cl->q_); in _rmc_getq()
1636 if (q_is_codel(cl->q_)) in _rmc_getq()
1637 return codel_getq(cl->codel_, cl->q_); in _rmc_getq()
1639 return _getq(cl->q_); in _rmc_getq()
1645 return qhead(cl->q_); in _rmc_pollq()
1681 for (fp = rmc_funcs; fp->func != NULL; fp++) in rmc_funcname()
1682 if (fp->func == func) in rmc_funcname()
1683 return (fp->name); in rmc_funcname()
1689 int i, *p; in cbqtrace_dump() local
1693 p = (int *)&cbqtrace_buffer[counter]; in cbqtrace_dump()
1696 printf("[0x%x] ", *p++); in cbqtrace_dump()
1697 printf("%s: ", rmc_funcname((void *)*p++)); in cbqtrace_dump()
1698 cp = (char *)p++; in cbqtrace_dump()
1700 printf("%d\n",*p++); in cbqtrace_dump()
1702 if (p >= (int *)&cbqtrace_buffer[NCBQTRACE]) in cbqtrace_dump()
1703 p = (int *)cbqtrace_buffer; in cbqtrace_dump()
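
The four-letter CBQTRACE tags seen throughout ('ffoc', 'ytpe', 'yled', ...) are multi-character constants, and cbqtrace_dump() prints them byte by byte, so on a little-endian host they come out reversed ('ffoc' prints as "coff", i.e. cutoff). A minimal demonstration; byte order of multi-character constants is implementation-defined, and little-endian storage is assumed here:

#include <stdio.h>

int
main(void)
{
	int code = 'ffoc';	/* stored as the bytes 'c','o','f','f' on LE */
	char *cp = (char *)&code;

	printf("%c%c%c%c\n", cp[0], cp[1], cp[2], cp[3]);	/* "coff" */
	return (0);
}
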
1719 m->m_nextpkt = m0->m_nextpkt; in _addq()
1722 m0->m_nextpkt = m; in _addq()
1734 if ((m0 = m->m_nextpkt) != m) in _getq()
1735 m->m_nextpkt = m0->m_nextpkt; in _getq()
1740 qlen(q)--; in _getq()
1741 m0->m_nextpkt = NULL; in _getq()
1755 m0 = m0->m_nextpkt; in _getq_tail()
1757 prev->m_nextpkt = m->m_nextpkt; in _getq_tail()
1763 qlen(q)--; in _getq_tail()
1764 m->m_nextpkt = NULL; in _getq_tail()
1777 if (m->m_nextpkt == m) { in _getq_random()
1786 m = m->m_nextpkt; in _getq_random()
1788 prev->m_nextpkt = m->m_nextpkt; in _getq_random()
1792 qlen(q)--; in _getq_random()
1793 m->m_nextpkt = NULL; in _getq_random()
1805 m0 = m0->m_nextpkt; in _removeq()
1807 prev->m_nextpkt = m->m_nextpkt; in _removeq()
1812 qlen(q)--; in _removeq()
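
The queue primitives above (_addq(), _getq(), _getq_tail(), _removeq()) all operate on a circular, singly linked mbuf list in which only a tail pointer is kept and tail->m_nextpkt is the head. A self-contained sketch of the same structure, using a hypothetical node type in place of an mbuf:

#include <stdio.h>

struct node { struct node *next; int id; };

static struct node *tail = NULL;	/* tail->next is the head */

static void
addq(struct node *n)
{
	if (tail == NULL)
		n->next = n;		/* sole element points at itself */
	else {
		n->next = tail->next;	/* new tail points at the head */
		tail->next = n;
	}
	tail = n;
}

static struct node *
getq(void)
{
	struct node *head;

	if (tail == NULL)
		return (NULL);
	head = tail->next;
	if (head == tail)		/* removing the last element */
		tail = NULL;
	else
		tail->next = head->next;
	head->next = NULL;
	return (head);
}

int
main(void)
{
	struct node n[3] = { { NULL, 0 }, { NULL, 1 }, { NULL, 2 } };
	struct node *p;
	int i;

	for (i = 0; i < 3; i++)
		addq(&n[i]);
	while ((p = getq()) != NULL)
		printf("dequeued %d\n", p->id);	/* 0, 1, 2: FIFO order */
	return (0);
}
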