Lines matching "row" and "hold" in net/sched/sch_htb.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
50 classes have level TC_HTB_MAXDEPTH-1. Interior nodes have level
78 struct rb_root row; member
82 /* When class changes from state 1->2 and disconnects from
101 int quantum; /* but stored for parent-to-leaf return */
172 /* time of nearest event per level (row) */
191 clc = qdisc_class_find(&q->clhash, handle); in htb_find()
202 #define HTB_DIRECT ((struct htb_class *)-1L)
205 * htb_classify - classify a packet into class
210 * It returns NULL if the packet should be dropped or -1 if the packet
228 /* allow selecting the class by setting skb->priority to a valid classid; in htb_classify()
232 if (skb->priority == sch->handle) in htb_classify()
234 cl = htb_find(skb->priority, sch); in htb_classify()
236 if (cl->level == 0) in htb_classify()
238 /* Start with inner filter chain if a non-leaf class is selected */ in htb_classify()
239 tcf = rcu_dereference_bh(cl->filter_list); in htb_classify()
241 tcf = rcu_dereference_bh(q->filter_list); in htb_classify()
259 if (res.classid == sch->handle) in htb_classify()
265 if (!cl->level) in htb_classify()
269 tcf = rcu_dereference_bh(cl->filter_list); in htb_classify()
272 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); in htb_classify()
273 if (!cl || cl->level) in htb_classify()
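As a quick illustration of the default-class fallback above (TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls)): the classid is just the qdisc's 16-bit major joined with the configured minor. A minimal userspace sketch, with the TC_H_* macros redefined locally to match their uapi definitions and the handle/defcls values invented for the example:

/* Handle arithmetic behind the HTB default-class lookup. */
#include <stdio.h>
#include <stdint.h>

#define TC_H_MAJ_MASK 0xFFFF0000U
#define TC_H_MIN_MASK 0x0000FFFFU
#define TC_H_MAJ(h)   ((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h)   ((h) & TC_H_MIN_MASK)
#define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

int main(void)
{
	uint32_t handle = 0x00010000;	/* qdisc handle 1:0 */
	uint32_t defcls = 0x20;		/* hypothetical default minor 0x20 */
	uint32_t classid = TC_H_MAKE(TC_H_MAJ(handle), defcls);

	/* prints "default classid = 1:20" */
	printf("default classid = %x:%x\n",
	       TC_H_MAJ(classid) >> 16, TC_H_MIN(classid));
	return 0;
}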
279 * htb_add_to_id_tree - adds class to the round robin list
290 struct rb_node **p = &root->rb_node, *parent = NULL; in htb_add_to_id_tree()
297 if (cl->common.classid > c->common.classid) in htb_add_to_id_tree()
298 p = &parent->rb_right; in htb_add_to_id_tree()
300 p = &parent->rb_left; in htb_add_to_id_tree()
302 rb_link_node(&cl->node[prio], parent, p); in htb_add_to_id_tree()
303 rb_insert_color(&cl->node[prio], root); in htb_add_to_id_tree()
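The id tree above is keyed purely on classid: descend right when the new class's id is greater, left otherwise, then link and rebalance. A minimal sketch of the same walk against a plain, unbalanced binary search tree (struct toy_class and toy_insert are invented for the example; the kernel's rb_link_node()/rb_insert_color() additionally recolor and rebalance):

#include <stddef.h>

struct toy_class {
	unsigned int classid;
	struct toy_class *left, *right;
};

static void toy_insert(struct toy_class **root, struct toy_class *cl)
{
	struct toy_class **p = root;

	while (*p) {
		/* same comparison as htb_add_to_id_tree() */
		if (cl->classid > (*p)->classid)
			p = &(*p)->right;
		else
			p = &(*p)->left;
	}
	cl->left = cl->right = NULL;
	*p = cl;
}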
307 * htb_add_to_wait_tree - adds class to the event queue with delay
313 * change its mode at time cl->pq_key (in nanoseconds). Make sure that the class is not
319 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL; in htb_add_to_wait_tree()
321 cl->pq_key = q->now + delay; in htb_add_to_wait_tree()
322 if (cl->pq_key == q->now) in htb_add_to_wait_tree()
323 cl->pq_key++; in htb_add_to_wait_tree()
326 if (q->near_ev_cache[cl->level] > cl->pq_key) in htb_add_to_wait_tree()
327 q->near_ev_cache[cl->level] = cl->pq_key; in htb_add_to_wait_tree()
333 if (cl->pq_key >= c->pq_key) in htb_add_to_wait_tree()
334 p = &parent->rb_right; in htb_add_to_wait_tree()
336 p = &parent->rb_left; in htb_add_to_wait_tree()
338 rb_link_node(&cl->pq_node, parent, p); in htb_add_to_wait_tree()
339 rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_add_to_wait_tree()
343 * htb_next_rb_node - finds next node in binary tree
356 * htb_add_class_to_row - add class to its row
361 * The class is added to the row at the priorities marked in mask.
367 q->row_mask[cl->level] |= mask; in htb_add_class_to_row()
371 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio); in htb_add_class_to_row()
388 * htb_remove_class_from_row - removes class from its row
393 * The class is removed from the row at the priorities marked in mask.
400 struct htb_level *hlevel = &q->hlevel[cl->level]; in htb_remove_class_from_row()
404 struct htb_prio *hprio = &hlevel->hprio[prio]; in htb_remove_class_from_row()
407 if (hprio->ptr == cl->node + prio) in htb_remove_class_from_row()
408 htb_next_rb_node(&hprio->ptr); in htb_remove_class_from_row()
410 htb_safe_rb_erase(cl->node + prio, &hprio->row); in htb_remove_class_from_row()
411 if (!hprio->row.rb_node) in htb_remove_class_from_row()
414 q->row_mask[cl->level] &= ~m; in htb_remove_class_from_row()
418 * htb_activate_prios - creates the active class's feed chain
423 * for the priorities it participates in. cl->cmode must be the new in htb_activate_prios()
424 * (activated) mode. It does nothing if cl->prio_activity == 0.
428 struct htb_class *p = cl->parent; in htb_activate_prios()
429 long m, mask = cl->prio_activity; in htb_activate_prios()
431 while (cl->cmode == HTB_MAY_BORROW && p && mask) { in htb_activate_prios()
436 if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio))) in htb_activate_prios()
440 if (p->inner.clprio[prio].feed.rb_node) in htb_activate_prios()
446 htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio); in htb_activate_prios()
448 p->prio_activity |= mask; in htb_activate_prios()
450 p = cl->parent; in htb_activate_prios()
453 if (cl->cmode == HTB_CAN_SEND && mask) in htb_activate_prios()
458 * htb_deactivate_prios - removes class from the feed chain
462 * cl->cmode must represent the old mode (before deactivation). It does in htb_deactivate_prios()
463 * nothing if cl->prio_activity == 0. The class is removed from all feed in htb_deactivate_prios()
468 struct htb_class *p = cl->parent; in htb_deactivate_prios()
469 long m, mask = cl->prio_activity; in htb_deactivate_prios()
471 while (cl->cmode == HTB_MAY_BORROW && p && mask) { in htb_deactivate_prios()
478 if (p->inner.clprio[prio].ptr == cl->node + prio) { in htb_deactivate_prios()
480 * parent feed - forget the pointer but remember in htb_deactivate_prios()
483 p->inner.clprio[prio].last_ptr_id = cl->common.classid; in htb_deactivate_prios()
484 p->inner.clprio[prio].ptr = NULL; in htb_deactivate_prios()
487 htb_safe_rb_erase(cl->node + prio, in htb_deactivate_prios()
488 &p->inner.clprio[prio].feed); in htb_deactivate_prios()
490 if (!p->inner.clprio[prio].feed.rb_node) in htb_deactivate_prios()
494 p->prio_activity &= ~mask; in htb_deactivate_prios()
496 p = cl->parent; in htb_deactivate_prios()
499 if (cl->cmode == HTB_CAN_SEND && mask) in htb_deactivate_prios()
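Both htb_activate_prios() and htb_deactivate_prios() above walk the set bits of cl->prio_activity, handling one priority per iteration. A self-contained sketch of that bitmask walk; walk_prios is a made-up name and __builtin_ctzl() stands in for the kernel's ffz(~m) idiom:

#include <stdio.h>

static void walk_prios(unsigned long mask)
{
	unsigned long m = mask;

	while (m) {
		int prio = __builtin_ctzl(m);	/* lowest set bit */

		m &= ~(1UL << prio);
		printf("link/unlink feed for prio %d\n", prio);
	}
}

int main(void)
{
	walk_prios(0x5);	/* priorities 0 and 2 active */
	return 0;
}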
506 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0; in htb_lowater()
513 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0; in htb_hiwater()
520 * htb_class_mode - computes and returns current class mode
524 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
525 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
528 * at cl->{c,}tokens == 0 but rather there is a hysteresis over the in htb_class_mode()
529 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of in htb_class_mode()
537 if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) { in htb_class_mode()
538 *diff = -toks; in htb_class_mode()
542 if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl)) in htb_class_mode()
545 *diff = -toks; in htb_class_mode()
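Putting htb_lowater(), htb_hiwater() and the comparisons above together, a userspace sketch of the three-state mode decision. The toy_* names are invented for the example, and note that the real helpers only apply these thresholds when the htb_hysteresis module parameter is set, as far as I recall:

#include <stdint.h>

enum toy_cmode { TOY_CANT_SEND, TOY_MAY_BORROW, TOY_CAN_SEND };

struct toy_class {
	enum toy_cmode cmode;
	int64_t tokens, buffer;		/* rate bucket, in ns of credit */
	int64_t ctokens, cbuffer;	/* ceil bucket */
};

static enum toy_cmode toy_class_mode(const struct toy_class *cl, int64_t *diff)
{
	int64_t lowater = cl->cmode != TOY_CANT_SEND ? -cl->cbuffer : 0;
	int64_t hiwater = cl->cmode == TOY_CAN_SEND ? -cl->buffer : 0;
	int64_t toks;

	if ((toks = cl->ctokens + *diff) < lowater) {
		*diff = -toks;		/* time until the ceil bucket recovers */
		return TOY_CANT_SEND;
	}
	if ((toks = cl->tokens + *diff) >= hiwater)
		return TOY_CAN_SEND;

	*diff = -toks;			/* time until the rate bucket recovers */
	return TOY_MAY_BORROW;
}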
550 * htb_change_class_mode - changes the class's mode
558 * be different from old one and cl->pq_key has to be valid if changing
566 if (new_mode == cl->cmode) in htb_change_class_mode()
570 cl->overlimits++; in htb_change_class_mode()
571 q->overlimits++; in htb_change_class_mode()
574 if (cl->prio_activity) { /* not necessary: speed optimization */ in htb_change_class_mode()
575 if (cl->cmode != HTB_CANT_SEND) in htb_change_class_mode()
577 cl->cmode = new_mode; in htb_change_class_mode()
581 cl->cmode = new_mode; in htb_change_class_mode()
585 * htb_activate - inserts leaf cl into appropriate active feeds
595 WARN_ON(cl->level || !cl->leaf.q); in htb_activate()
597 if (!cl->prio_activity) { in htb_activate()
598 cl->prio_activity = 1 << cl->prio; in htb_activate()
604 * htb_deactivate - remove leaf cl from active feeds
609 * with a non-active leaf. It also removes the class from the drop list. in htb_deactivate()
613 if (!cl->prio_activity) in htb_deactivate()
616 cl->prio_activity = 0; in htb_deactivate()
629 if (q->direct_queue.qlen < q->direct_qlen) { in htb_enqueue()
630 __qdisc_enqueue_tail(skb, &q->direct_queue); in htb_enqueue()
631 q->direct_pkts++; in htb_enqueue()
642 } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, in htb_enqueue()
646 cl->drops++; in htb_enqueue()
653 sch->qstats.backlog += len; in htb_enqueue()
654 sch->q.qlen++; in htb_enqueue()
660 s64 toks = diff + cl->tokens; in htb_accnt_tokens()
662 if (toks > cl->buffer) in htb_accnt_tokens()
663 toks = cl->buffer; in htb_accnt_tokens()
664 toks -= (s64) psched_l2t_ns(&cl->rate, bytes); in htb_accnt_tokens()
665 if (toks <= -cl->mbuffer) in htb_accnt_tokens()
666 toks = 1 - cl->mbuffer; in htb_accnt_tokens()
668 cl->tokens = toks; in htb_accnt_tokens()
673 s64 toks = diff + cl->ctokens; in htb_accnt_ctokens()
675 if (toks > cl->cbuffer) in htb_accnt_ctokens()
676 toks = cl->cbuffer; in htb_accnt_ctokens()
677 toks -= (s64) psched_l2t_ns(&cl->ceil, bytes); in htb_accnt_ctokens()
678 if (toks <= -cl->mbuffer) in htb_accnt_ctokens()
679 toks = 1 - cl->mbuffer; in htb_accnt_ctokens()
681 cl->ctokens = toks; in htb_accnt_ctokens()
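Both accounting helpers above apply the same token-bucket update: add credit for the elapsed time, cap it at the configured burst, subtract the transmission time of the packet, and bound the accumulated debt. A stand-alone sketch, with len_to_ns() standing in for psched_l2t_ns() and all names invented for the example:

#include <stdint.h>

static int64_t len_to_ns(uint64_t rate_bytes_ps, unsigned int bytes)
{
	return (int64_t)((uint64_t)bytes * 1000000000ULL / rate_bytes_ps);
}

static int64_t account_tokens(int64_t tokens, int64_t buffer, int64_t mbuffer,
			      int64_t diff_ns, uint64_t rate, unsigned int bytes)
{
	int64_t toks = tokens + diff_ns;	/* credit for elapsed time */

	if (toks > buffer)
		toks = buffer;			/* cap the burst */
	toks -= len_to_ns(rate, bytes);		/* pay for this packet */
	if (toks <= -mbuffer)
		toks = 1 - mbuffer;		/* bound the accumulated debt */
	return toks;
}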
685 * htb_charge_class - charges amount "bytes" to leaf and ancestors
707 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_charge_class()
708 if (cl->level >= level) { in htb_charge_class()
709 if (cl->level == level) in htb_charge_class()
710 cl->xstats.lends++; in htb_charge_class()
713 cl->xstats.borrows++; in htb_charge_class()
714 cl->tokens += diff; /* we moved t_c; update tokens */ in htb_charge_class()
717 cl->t_c = q->now; in htb_charge_class()
719 old_mode = cl->cmode; in htb_charge_class()
722 if (old_mode != cl->cmode) { in htb_charge_class()
724 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_charge_class()
725 if (cl->cmode != HTB_CAN_SEND) in htb_charge_class()
730 if (cl->level) in htb_charge_class()
731 bstats_update(&cl->bstats, skb); in htb_charge_class()
733 cl = cl->parent; in htb_charge_class()
738 * htb_do_events - make mode changes to classes at the level
740 * @level: which wait_pq in 'q->hlevel'
744 * next pending event (0 for no event in pq, q->now for too many events).
745 * Note: only events with cl->pq_key <= q->now are applied.
755 struct rb_root *wait_pq = &q->hlevel[level].wait_pq; in htb_do_events()
766 if (cl->pq_key > q->now) in htb_do_events()
767 return cl->pq_key; in htb_do_events()
770 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_do_events()
772 if (cl->cmode != HTB_CAN_SEND) in htb_do_events()
776 /* too much load - let's continue after a break for scheduling */ in htb_do_events()
777 if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { in htb_do_events()
779 q->warned |= HTB_WARN_TOOMANYEVENTS; in htb_do_events()
782 return q->now; in htb_do_events()
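The return contract documented above (0 when the wait queue is empty, the next pq_key when it lies in the future, q->now once the per-call budget is used up) fits in a few lines. This sketch replays it over a sorted array of event keys; do_events() and its parameters are invented for the example:

#include <stdint.h>
#include <stddef.h>

static int64_t do_events(const int64_t *keys, size_t *idx, size_t nkeys,
			 int64_t now, int budget)
{
	for (int i = 0; i < budget; i++) {
		if (*idx >= nkeys)
			return 0;		/* no event pending */
		if (keys[*idx] > now)
			return keys[*idx];	/* next event is in the future */
		(*idx)++;			/* "apply" the event */
	}
	return now;				/* too many events: resume after a break */
}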
785 /* Returns class->node+prio from id-tree where the class's id is >= id. NULL in htb_id_find_next_upper()
796 if (id > cl->common.classid) { in htb_id_find_next_upper()
797 n = n->rb_right; in htb_id_find_next_upper()
798 } else if (id < cl->common.classid) { in htb_id_find_next_upper()
800 n = n->rb_left; in htb_id_find_next_upper()
809 * htb_lookup_leaf - returns next leaf class in DRR order
824 if (unlikely(!hprio->row.rb_node)) in htb_lookup_leaf()
827 sp->root = hprio->row.rb_node; in htb_lookup_leaf()
828 sp->pptr = &hprio->ptr; in htb_lookup_leaf()
829 sp->pid = &hprio->last_ptr_id; in htb_lookup_leaf()
832 if (!*sp->pptr && *sp->pid) { in htb_lookup_leaf()
833 /* ptr was invalidated but id is valid - try to recover in htb_lookup_leaf()
836 *sp->pptr = in htb_lookup_leaf()
837 htb_id_find_next_upper(prio, sp->root, *sp->pid); in htb_lookup_leaf()
839 *sp->pid = 0; /* ptr is valid now, so drop this hint as it in htb_lookup_leaf()
842 if (!*sp->pptr) { /* we are at right end; rewind & go up */ in htb_lookup_leaf()
843 *sp->pptr = sp->root; in htb_lookup_leaf()
844 while ((*sp->pptr)->rb_left) in htb_lookup_leaf()
845 *sp->pptr = (*sp->pptr)->rb_left; in htb_lookup_leaf()
847 sp--; in htb_lookup_leaf()
848 if (!*sp->pptr) { in htb_lookup_leaf()
852 htb_next_rb_node(sp->pptr); in htb_lookup_leaf()
858 cl = rb_entry(*sp->pptr, struct htb_class, node[prio]); in htb_lookup_leaf()
859 if (!cl->level) in htb_lookup_leaf()
861 clp = &cl->inner.clprio[prio]; in htb_lookup_leaf()
862 (++sp)->root = clp->feed.rb_node; in htb_lookup_leaf()
863 sp->pptr = &clp->ptr; in htb_lookup_leaf()
864 sp->pid = &clp->last_ptr_id; in htb_lookup_leaf()
879 struct htb_level *hlevel = &q->hlevel[level]; in htb_dequeue_tree()
880 struct htb_prio *hprio = &hlevel->hprio[prio]; in htb_dequeue_tree()
882 /* look initial class up in the row */ in htb_dequeue_tree()
890 /* class can be empty - it is unlikely but can be true if leaf in htb_dequeue_tree()
895 if (unlikely(cl->leaf.q->q.qlen == 0)) { in htb_dequeue_tree()
899 /* row/level might become empty */ in htb_dequeue_tree()
900 if ((q->row_mask[level] & (1 << prio)) == 0) in htb_dequeue_tree()
911 skb = cl->leaf.q->dequeue(cl->leaf.q); in htb_dequeue_tree()
915 qdisc_warn_nonwc("htb", cl->leaf.q); in htb_dequeue_tree()
916 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr: in htb_dequeue_tree()
917 &q->hlevel[0].hprio[prio].ptr); in htb_dequeue_tree()
923 bstats_update(&cl->bstats, skb); in htb_dequeue_tree()
924 cl->leaf.deficit[level] -= qdisc_pkt_len(skb); in htb_dequeue_tree()
925 if (cl->leaf.deficit[level] < 0) { in htb_dequeue_tree()
926 cl->leaf.deficit[level] += cl->quantum; in htb_dequeue_tree()
927 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr : in htb_dequeue_tree()
928 &q->hlevel[0].hprio[prio].ptr); in htb_dequeue_tree()
933 if (!cl->leaf.q->q.qlen) in htb_dequeue_tree()
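The deficit round robin bookkeeping above (charge qdisc_pkt_len(skb) against leaf.deficit[level]; once it goes negative, top it up by quantum and advance to the next class) in stand-alone form. The toy array replaces the rbtree walk and every name is invented for the example:

#include <stdio.h>

struct toy_leaf {
	int deficit;
	int quantum;
};

/* Returns the index of the class that owns the next turn after a packet
 * of pkt_len bytes was dequeued from class cur. */
static int drr_charge(struct toy_leaf *cls, int ncls, int cur, int pkt_len)
{
	cls[cur].deficit -= pkt_len;
	if (cls[cur].deficit < 0) {
		cls[cur].deficit += cls[cur].quantum;
		return (cur + 1) % ncls;	/* htb_next_rb_node() analogue */
	}
	return cur;				/* class keeps the turn */
}

int main(void)
{
	struct toy_leaf cls[2] = { { 0, 1500 }, { 0, 1500 } };
	int cur = 0;

	for (int i = 0; i < 4; i++) {
		printf("packet %d from class %d\n", i, cur);
		cur = drr_charge(cls, 2, cur, 1000);
	}
	return 0;
}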
949 skb = __qdisc_dequeue_head(&q->direct_queue); in htb_dequeue()
954 sch->q.qlen--; in htb_dequeue()
958 if (!sch->q.qlen) in htb_dequeue()
960 q->now = ktime_get_ns(); in htb_dequeue()
963 next_event = q->now + 5LLU * NSEC_PER_SEC; in htb_dequeue()
966 /* common case optimization - skip event handler quickly */ in htb_dequeue()
968 s64 event = q->near_ev_cache[level]; in htb_dequeue()
970 if (q->now >= event) { in htb_dequeue()
973 event = q->now + NSEC_PER_SEC; in htb_dequeue()
974 q->near_ev_cache[level] = event; in htb_dequeue()
980 m = ~q->row_mask[level]; in htb_dequeue()
981 while (m != (int)(-1)) { in htb_dequeue()
990 if (likely(next_event > q->now)) in htb_dequeue()
991 qdisc_watchdog_schedule_ns(&q->watchdog, next_event); in htb_dequeue()
993 schedule_work(&q->work); in htb_dequeue()
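The level scan above walks q->row_mask[level] one active priority at a time using ffz(). A small sketch of that bit scan, with __builtin_ctz(~m) in place of ffz(m) and scan_row as a made-up name:

#include <stdio.h>

static void scan_row(unsigned int row_mask)
{
	int m = (int)~row_mask;

	while (m != -1) {
		int prio = __builtin_ctz(~m);	/* first unvisited active prio */

		m |= 1 << prio;
		printf("try dequeue at prio %d\n", prio);
	}
}

int main(void)
{
	scan_row(0x9);	/* priorities 0 and 3 active */
	return 0;
}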
1006 for (i = 0; i < q->clhash.hashsize; i++) { in htb_reset()
1007 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_reset()
1008 if (cl->level) in htb_reset()
1009 memset(&cl->inner, 0, sizeof(cl->inner)); in htb_reset()
1011 if (cl->leaf.q && !q->offload) in htb_reset()
1012 qdisc_reset(cl->leaf.q); in htb_reset()
1014 cl->prio_activity = 0; in htb_reset()
1015 cl->cmode = HTB_CAN_SEND; in htb_reset()
1018 qdisc_watchdog_cancel(&q->watchdog); in htb_reset()
1019 __qdisc_reset_queue(&q->direct_queue); in htb_reset()
1020 memset(q->hlevel, 0, sizeof(q->hlevel)); in htb_reset()
1021 memset(q->row_mask, 0, sizeof(q->row_mask)); in htb_reset()
1038 struct Qdisc *sch = q->watchdog.qdisc; in htb_work_func()
1047 return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt); in htb_offload()
1062 qdisc_watchdog_init(&q->watchdog, sch); in htb_init()
1063 INIT_WORK(&q->work, htb_work_func); in htb_init()
1066 return -EINVAL; in htb_init()
1068 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in htb_init()
1078 return -EINVAL; in htb_init()
1081 if (gopt->version != HTB_VER >> 16) in htb_init()
1082 return -EINVAL; in htb_init()
1087 if (sch->parent != TC_H_ROOT) { in htb_init()
1089 return -EOPNOTSUPP; in htb_init()
1092 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) { in htb_init()
1093 NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on"); in htb_init()
1094 return -EOPNOTSUPP; in htb_init()
1097 q->num_direct_qdiscs = dev->real_num_tx_queues; in htb_init()
1098 q->direct_qdiscs = kcalloc(q->num_direct_qdiscs, in htb_init()
1099 sizeof(*q->direct_qdiscs), in htb_init()
1101 if (!q->direct_qdiscs) in htb_init()
1102 return -ENOMEM; in htb_init()
1105 err = qdisc_class_hash_init(&q->clhash); in htb_init()
1110 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); in htb_init()
1112 q->direct_qlen = qdisc_dev(sch)->tx_queue_len; in htb_init()
1114 if ((q->rate2quantum = gopt->rate2quantum) < 1) in htb_init()
1115 q->rate2quantum = 1; in htb_init()
1116 q->defcls = gopt->defcls; in htb_init()
1121 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) { in htb_init()
1126 TC_H_MAKE(sch->handle, 0), extack); in htb_init()
1128 return -ENOMEM; in htb_init()
1131 q->direct_qdiscs[ntx] = qdisc; in htb_init()
1132 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; in htb_init()
1135 sch->flags |= TCQ_F_MQROOT; in htb_init()
1139 .parent_classid = TC_H_MAJ(sch->handle) >> 16, in htb_init()
1140 .classid = TC_H_MIN(q->defcls), in htb_init()
1147 /* Defer this assignment, so that htb_destroy skips offload-related in htb_init()
1150 q->offload = true; in htb_init()
1161 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) { in htb_attach_offload()
1162 struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx]; in htb_attach_offload()
1164 old = dev_graft_qdisc(qdisc->dev_queue, qdisc); in htb_attach_offload()
1168 for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) { in htb_attach_offload()
1175 kfree(q->direct_qdiscs); in htb_attach_offload()
1176 q->direct_qdiscs = NULL; in htb_attach_offload()
1185 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in htb_attach_software()
1199 if (q->offload) in htb_attach()
1211 if (q->offload) in htb_dump()
1212 sch->flags |= TCQ_F_OFFLOADED; in htb_dump()
1214 sch->flags &= ~TCQ_F_OFFLOADED; in htb_dump()
1216 sch->qstats.overlimits = q->overlimits; in htb_dump()
1217 /* It's safe to not acquire the qdisc lock. As we hold RTNL, in htb_dump()
1221 gopt.direct_pkts = q->direct_pkts; in htb_dump()
1223 gopt.rate2quantum = q->rate2quantum; in htb_dump()
1224 gopt.defcls = q->defcls; in htb_dump()
1231 nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen)) in htb_dump()
1233 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD)) in htb_dump()
1240 return -1; in htb_dump()
1251 /* It's safe to not acquire the qdisc lock. As we hold RTNL, in htb_dump_class()
1254 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT; in htb_dump_class()
1255 tcm->tcm_handle = cl->common.classid; in htb_dump_class()
1256 if (!cl->level && cl->leaf.q) in htb_dump_class()
1257 tcm->tcm_info = cl->leaf.q->handle; in htb_dump_class()
1265 psched_ratecfg_getrate(&opt.rate, &cl->rate); in htb_dump_class()
1266 opt.buffer = PSCHED_NS2TICKS(cl->buffer); in htb_dump_class()
1267 psched_ratecfg_getrate(&opt.ceil, &cl->ceil); in htb_dump_class()
1268 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer); in htb_dump_class()
1269 opt.quantum = cl->quantum; in htb_dump_class()
1270 opt.prio = cl->prio; in htb_dump_class()
1271 opt.level = cl->level; in htb_dump_class()
1274 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD)) in htb_dump_class()
1276 if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) && in htb_dump_class()
1277 nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps, in htb_dump_class()
1280 if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) && in htb_dump_class()
1281 nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps, in htb_dump_class()
1289 return -1; in htb_dump_class()
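The PSCHED_NS2TICKS()/PSCHED_TICKS2NS() conversions used in the dump above are plain shifts by PSCHED_SHIFT, which is 6 in current kernels as far as I know (so one tick is 64 ns). A local reproduction, with TOY_ prefixes marking the redefinitions:

#include <stdint.h>
#include <assert.h>

#define TOY_PSCHED_SHIFT	6
#define TOY_TICKS2NS(x)		((int64_t)(x) << TOY_PSCHED_SHIFT)
#define TOY_NS2TICKS(x)		((x) >> TOY_PSCHED_SHIFT)

int main(void)
{
	int64_t buffer_ns = TOY_TICKS2NS(1600);		/* e.g. a 1600-tick buffer */

	assert(TOY_NS2TICKS(buffer_ns) == 1600);	/* shifts round-trip exactly */
	return 0;
}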
1299 gnet_stats_basic_sync_init(&cl->bstats); in htb_offload_aggregate_stats()
1301 for (i = 0; i < q->clhash.hashsize; i++) { in htb_offload_aggregate_stats()
1302 hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) { in htb_offload_aggregate_stats()
1305 while (p && p->level < cl->level) in htb_offload_aggregate_stats()
1306 p = p->parent; in htb_offload_aggregate_stats()
1311 bytes += u64_stats_read(&c->bstats_bias.bytes); in htb_offload_aggregate_stats()
1312 packets += u64_stats_read(&c->bstats_bias.packets); in htb_offload_aggregate_stats()
1313 if (c->level == 0) { in htb_offload_aggregate_stats()
1314 bytes += u64_stats_read(&c->leaf.q->bstats.bytes); in htb_offload_aggregate_stats()
1315 packets += u64_stats_read(&c->leaf.q->bstats.packets); in htb_offload_aggregate_stats()
1319 _bstats_update(&cl->bstats, bytes, packets); in htb_offload_aggregate_stats()
1328 .drops = cl->drops, in htb_dump_class_stats()
1329 .overlimits = cl->overlimits, in htb_dump_class_stats()
1333 if (!cl->level && cl->leaf.q) in htb_dump_class_stats()
1334 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog); in htb_dump_class_stats()
1336 cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens), in htb_dump_class_stats()
1338 cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens), in htb_dump_class_stats()
1341 if (q->offload) { in htb_dump_class_stats()
1342 if (!cl->level) { in htb_dump_class_stats()
1343 if (cl->leaf.q) in htb_dump_class_stats()
1344 cl->bstats = cl->leaf.q->bstats; in htb_dump_class_stats()
1346 gnet_stats_basic_sync_init(&cl->bstats); in htb_dump_class_stats()
1347 _bstats_update(&cl->bstats, in htb_dump_class_stats()
1348 u64_stats_read(&cl->bstats_bias.bytes), in htb_dump_class_stats()
1349 u64_stats_read(&cl->bstats_bias.packets)); in htb_dump_class_stats()
1355 if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || in htb_dump_class_stats()
1356 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || in htb_dump_class_stats()
1358 return -1; in htb_dump_class_stats()
1360 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); in htb_dump_class_stats()
1371 if (!q->offload) in htb_select_queue()
1372 return sch->dev_queue; in htb_select_queue()
1376 .classid = TC_H_MIN(tcm->tcm_parent), in htb_select_queue()
1379 if (err || offload_opt.qid >= dev->num_tx_queues) in htb_select_queue()
1387 struct net_device *dev = dev_queue->dev; in htb_graft_helper()
1390 if (dev->flags & IFF_UP) in htb_graft_helper()
1394 new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; in htb_graft_helper()
1395 if (dev->flags & IFF_UP) in htb_graft_helper()
1405 queue = cl->leaf.offload_queue; in htb_offload_get_queue()
1406 if (!(cl->leaf.q->flags & TCQ_F_BUILTIN)) in htb_offload_get_queue()
1407 WARN_ON(cl->leaf.q->dev_queue != queue); in htb_offload_get_queue()
1424 if (dev->flags & IFF_UP) in htb_offload_move_qdisc()
1427 WARN_ON(qdisc != cl_old->leaf.q); in htb_offload_move_qdisc()
1430 if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN)) in htb_offload_move_qdisc()
1431 cl_old->leaf.q->dev_queue = queue_new; in htb_offload_move_qdisc()
1432 cl_old->leaf.offload_queue = queue_new; in htb_offload_move_qdisc()
1437 qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q); in htb_offload_move_qdisc()
1438 if (dev->flags & IFF_UP) in htb_offload_move_qdisc()
1440 WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN)); in htb_offload_move_qdisc()
1447 struct netdev_queue *dev_queue = sch->dev_queue; in htb_graft()
1452 if (cl->level) in htb_graft()
1453 return -EINVAL; in htb_graft()
1455 if (q->offload) in htb_graft()
1460 cl->common.classid, extack); in htb_graft()
1462 return -ENOBUFS; in htb_graft()
1465 if (q->offload) { in htb_graft()
1466 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */ in htb_graft()
1471 *old = qdisc_replace(sch, new, &cl->leaf.q); in htb_graft()
1473 if (q->offload) { in htb_graft()
1484 return !cl->level ? cl->leaf.q : NULL; in htb_leaf()
1496 if (!cl->parent) in htb_parent_last_child()
1499 if (cl->parent->children > 1) in htb_parent_last_child()
1509 struct htb_class *parent = cl->parent; in htb_parent_to_leaf()
1511 WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity); in htb_parent_to_leaf()
1513 if (parent->cmode != HTB_CAN_SEND) in htb_parent_to_leaf()
1514 htb_safe_rb_erase(&parent->pq_node, in htb_parent_to_leaf()
1515 &q->hlevel[parent->level].wait_pq); in htb_parent_to_leaf()
1517 parent->level = 0; in htb_parent_to_leaf()
1518 memset(&parent->inner, 0, sizeof(parent->inner)); in htb_parent_to_leaf()
1519 parent->leaf.q = new_q ? new_q : &noop_qdisc; in htb_parent_to_leaf()
1520 parent->tokens = parent->buffer; in htb_parent_to_leaf()
1521 parent->ctokens = parent->cbuffer; in htb_parent_to_leaf()
1522 parent->t_c = ktime_get_ns(); in htb_parent_to_leaf()
1523 parent->cmode = HTB_CAN_SEND; in htb_parent_to_leaf()
1524 if (q->offload) in htb_parent_to_leaf()
1525 parent->leaf.offload_queue = cl->leaf.offload_queue; in htb_parent_to_leaf()
1534 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */ in htb_parent_to_leaf_offload()
1538 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN)); in htb_parent_to_leaf_offload()
1547 struct Qdisc *q = cl->leaf.q; in htb_destroy_class_offload()
1551 if (cl->level) in htb_destroy_class_offload()
1552 return -EINVAL; in htb_destroy_class_offload()
1562 /* Last qdisc grafted should be the same as cl->leaf.q when in htb_destroy_class_offload()
1568 if (cl->parent) { in htb_destroy_class_offload()
1569 _bstats_update(&cl->parent->bstats_bias, in htb_destroy_class_offload()
1570 u64_stats_read(&q->bstats.bytes), in htb_destroy_class_offload()
1571 u64_stats_read(&q->bstats.packets)); in htb_destroy_class_offload()
1578 .classid = cl->common.classid, in htb_destroy_class_offload()
1593 if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) { in htb_destroy_class_offload()
1594 u32 classid = TC_H_MAJ(sch->handle) | in htb_destroy_class_offload()
1606 if (!cl->level) { in htb_destroy_class()
1607 WARN_ON(!cl->leaf.q); in htb_destroy_class()
1608 qdisc_put(cl->leaf.q); in htb_destroy_class()
1610 gen_kill_estimator(&cl->rate_est); in htb_destroy_class()
1611 tcf_block_put(cl->block); in htb_destroy_class()
1625 cancel_work_sync(&q->work); in htb_destroy()
1626 qdisc_watchdog_cancel(&q->watchdog); in htb_destroy()
1632 tcf_block_put(q->block); in htb_destroy()
1634 for (i = 0; i < q->clhash.hashsize; i++) { in htb_destroy()
1635 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_destroy()
1636 tcf_block_put(cl->block); in htb_destroy()
1637 cl->block = NULL; in htb_destroy()
1644 for (i = 0; i < q->clhash.hashsize; i++) { in htb_destroy()
1645 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], in htb_destroy()
1649 if (!q->offload) { in htb_destroy()
1656 if (cl->level) in htb_destroy()
1664 qdisc_class_hash_remove(&q->clhash, in htb_destroy()
1665 &cl->common); in htb_destroy()
1666 if (cl->parent) in htb_destroy()
1667 cl->parent->children--; in htb_destroy()
1676 qdisc_class_hash_destroy(&q->clhash); in htb_destroy()
1677 __qdisc_reset_queue(&q->direct_queue); in htb_destroy()
1679 if (q->offload) { in htb_destroy()
1686 if (!q->direct_qdiscs) in htb_destroy()
1688 for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++) in htb_destroy()
1689 qdisc_put(q->direct_qdiscs[i]); in htb_destroy()
1690 kfree(q->direct_qdiscs); in htb_destroy()
1706 if (cl->children || qdisc_class_in_use(&cl->common)) { in htb_delete()
1708 return -EBUSY; in htb_delete()
1711 if (!cl->level && htb_parent_last_child(cl)) in htb_delete()
1714 if (q->offload) { in htb_delete()
1722 struct netdev_queue *dev_queue = sch->dev_queue; in htb_delete()
1724 if (q->offload) in htb_delete()
1728 cl->parent->common.classid, in htb_delete()
1730 if (q->offload) in htb_delete()
1736 if (!cl->level) in htb_delete()
1737 qdisc_purge_queue(cl->leaf.q); in htb_delete()
1740 qdisc_class_hash_remove(&q->clhash, &cl->common); in htb_delete()
1741 if (cl->parent) in htb_delete()
1742 cl->parent->children--; in htb_delete()
1746 if (cl->cmode != HTB_CAN_SEND) in htb_delete()
1747 htb_safe_rb_erase(&cl->pq_node, in htb_delete()
1748 &q->hlevel[cl->level].wait_pq); in htb_delete()
1763 int err = -EINVAL; in htb_change_class()
1784 err = -EINVAL; in htb_change_class()
1791 if (!hopt->rate.rate || !hopt->ceil.rate) in htb_change_class()
1794 if (q->offload) { in htb_change_class()
1796 if (hopt->rate.overhead || hopt->ceil.overhead) { in htb_change_class()
1800 if (hopt->rate.mpu || hopt->ceil.mpu) { in htb_change_class()
1807 if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) in htb_change_class()
1808 qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB], in htb_change_class()
1811 if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) in htb_change_class()
1812 qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB], in htb_change_class()
1838 if (!classid || TC_H_MAJ(classid ^ sch->handle) || in htb_change_class()
1843 if (parent && parent->parent && parent->parent->level < 2) { in htb_change_class()
1847 err = -ENOBUFS; in htb_change_class()
1852 gnet_stats_basic_sync_init(&cl->bstats); in htb_change_class()
1853 gnet_stats_basic_sync_init(&cl->bstats_bias); in htb_change_class()
1855 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); in htb_change_class()
1861 err = gen_new_estimator(&cl->bstats, NULL, in htb_change_class()
1862 &cl->rate_est, in htb_change_class()
1870 cl->children = 0; in htb_change_class()
1871 RB_CLEAR_NODE(&cl->pq_node); in htb_change_class()
1874 RB_CLEAR_NODE(&cl->node[prio]); in htb_change_class()
1876 cl->common.classid = classid; in htb_change_class()
1885 * -- thanks to Karlis Peisenieks in htb_change_class()
1887 if (!q->offload) { in htb_change_class()
1888 dev_queue = sch->dev_queue; in htb_change_class()
1889 } else if (!(parent && !parent->level)) { in htb_change_class()
1893 .classid = cl->common.classid, in htb_change_class()
1895 TC_H_MIN(parent->common.classid) : in htb_change_class()
1897 .rate = max_t(u64, hopt->rate.rate, rate64), in htb_change_class()
1898 .ceil = max_t(u64, hopt->ceil.rate, ceil64), in htb_change_class()
1899 .prio = hopt->prio, in htb_change_class()
1900 .quantum = hopt->quantum, in htb_change_class()
1913 WARN_ON(old_q != parent->leaf.q); in htb_change_class()
1916 .classid = cl->common.classid, in htb_change_class()
1918 TC_H_MIN(parent->common.classid), in htb_change_class()
1919 .rate = max_t(u64, hopt->rate.rate, rate64), in htb_change_class()
1920 .ceil = max_t(u64, hopt->ceil.rate, ceil64), in htb_change_class()
1921 .prio = hopt->prio, in htb_change_class()
1922 .quantum = hopt->quantum, in htb_change_class()
1932 _bstats_update(&parent->bstats_bias, in htb_change_class()
1933 u64_stats_read(&old_q->bstats.bytes), in htb_change_class()
1934 u64_stats_read(&old_q->bstats.packets)); in htb_change_class()
1939 if (q->offload) { in htb_change_class()
1940 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */ in htb_change_class()
1945 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN)); in htb_change_class()
1948 if (parent && !parent->level) { in htb_change_class()
1950 qdisc_purge_queue(parent->leaf.q); in htb_change_class()
1951 parent_qdisc = parent->leaf.q; in htb_change_class()
1955 if (parent->cmode != HTB_CAN_SEND) { in htb_change_class()
1956 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq); in htb_change_class()
1957 parent->cmode = HTB_CAN_SEND; in htb_change_class()
1959 parent->level = (parent->parent ? parent->parent->level in htb_change_class()
1960 : TC_HTB_MAXDEPTH) - 1; in htb_change_class()
1961 memset(&parent->inner, 0, sizeof(parent->inner)); in htb_change_class()
1965 cl->leaf.q = new_q ? new_q : &noop_qdisc; in htb_change_class()
1966 if (q->offload) in htb_change_class()
1967 cl->leaf.offload_queue = dev_queue; in htb_change_class()
1969 cl->parent = parent; in htb_change_class()
1972 cl->tokens = PSCHED_TICKS2NS(hopt->buffer); in htb_change_class()
1973 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer); in htb_change_class()
1974 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */ in htb_change_class()
1975 cl->t_c = ktime_get_ns(); in htb_change_class()
1976 cl->cmode = HTB_CAN_SEND; in htb_change_class()
1979 qdisc_class_hash_insert(&q->clhash, &cl->common); in htb_change_class()
1981 parent->children++; in htb_change_class()
1982 if (cl->leaf.q != &noop_qdisc) in htb_change_class()
1983 qdisc_hash_add(cl->leaf.q, true); in htb_change_class()
1986 err = gen_replace_estimator(&cl->bstats, NULL, in htb_change_class()
1987 &cl->rate_est, in htb_change_class()
1995 if (q->offload) { in htb_change_class()
2000 .classid = cl->common.classid, in htb_change_class()
2001 .rate = max_t(u64, hopt->rate.rate, rate64), in htb_change_class()
2002 .ceil = max_t(u64, hopt->ceil.rate, ceil64), in htb_change_class()
2003 .prio = hopt->prio, in htb_change_class()
2004 .quantum = hopt->quantum, in htb_change_class()
2021 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64); in htb_change_class()
2022 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64); in htb_change_class()
2025 * is really a leaf before changing cl->leaf! in htb_change_class()
2027 if (!cl->level) { in htb_change_class()
2028 u64 quantum = cl->rate.rate_bytes_ps; in htb_change_class()
2030 do_div(quantum, q->rate2quantum); in htb_change_class()
2031 cl->quantum = min_t(u64, quantum, INT_MAX); in htb_change_class()
2033 if (!hopt->quantum && cl->quantum < 1000) { in htb_change_class()
2034 warn = -1; in htb_change_class()
2035 cl->quantum = 1000; in htb_change_class()
2037 if (!hopt->quantum && cl->quantum > 200000) { in htb_change_class()
2039 cl->quantum = 200000; in htb_change_class()
2041 if (hopt->quantum) in htb_change_class()
2042 cl->quantum = hopt->quantum; in htb_change_class()
2043 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO) in htb_change_class()
2044 cl->prio = TC_HTB_NUMPRIO - 1; in htb_change_class()
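The quantum selection above, condensed into one helper: the default is rate_bytes_ps / rate2quantum, and the [1000, 200000] byte clamp (the "small"/"big" warning printed just below) only applies when the user did not pass an explicit quantum. compute_quantum() is a made-up name:

#include <stdint.h>

static int compute_quantum(uint64_t rate_bytes_ps, int rate2quantum,
			   int user_quantum)
{
	uint64_t quantum = rate_bytes_ps / rate2quantum;

	if (user_quantum)
		return user_quantum;	/* explicit tc quantum wins */
	if (quantum < 1000)
		quantum = 1000;		/* "quantum is small" case */
	if (quantum > 200000)
		quantum = 200000;	/* "quantum is big" case */
	return (int)quantum;
}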
2047 cl->buffer = PSCHED_TICKS2NS(hopt->buffer); in htb_change_class()
2048 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); in htb_change_class()
2056 cl->common.classid, (warn == -1 ? "small" : "big")); in htb_change_class()
2058 qdisc_class_hash_grow(sch, &q->clhash); in htb_change_class()
2064 gen_kill_estimator(&cl->rate_est); in htb_change_class()
2066 tcf_block_put(cl->block); in htb_change_class()
2078 return cl ? cl->block : q->block; in htb_tcf_block()
2086 /*if (cl && !cl->level) return 0; in htb_bind_filter()
2090 * ---- in htb_bind_filter()
2091 * 19.6.2002 As Werner explained it is ok - bind filter is just in htb_bind_filter()
2092 * another way to "lock" the class - unlike "get" this lock can in htb_bind_filter()
2096 qdisc_class_get(&cl->common); in htb_bind_filter()
2104 qdisc_class_put(&cl->common); in htb_unbind_filter()
2113 if (arg->stop) in htb_walk()
2116 for (i = 0; i < q->clhash.hashsize; i++) { in htb_walk()
2117 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_walk()