Lines matching 'grp'
1 // SPDX-License-Identifier: GPL-2.0-only
26 "Reducing the Execution Time of Fair-Queueing Schedulers."
27 http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf
67 ^.__grp->index = 0
68 *.__grp->slot_shift
85 The per-scheduler-instance data contain all the data structures
97 * Shifts used for aggregate<->group mapping. We allow class weights that are
102 * grp->index is the index of the group; and grp->slot_shift
137 struct list_head alist; /* Link for active-classes list. */
150 struct qfq_group *grp; member
172 unsigned long full_slots; /* non-empty slots */
190 u32 min_slot_shift; /* Index of the group-0 bit in the bitmaps. */
207 return !list_empty(&cl->alist); in cl_is_active()
215 clc = qdisc_class_find(&q->clhash, classid); in qfq_find_class()
247 index -= !(slot_size - (1ULL << (index + min_slot_shift - 1))); in qfq_calc_index()
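The matched line at 247 is the tail of the group-index computation that the comments around lines 97-102 describe: an aggregate is mapped to the group whose slot size bounds maxlen * inv_w. Below is a self-contained userspace guess at that computation, reconstructed around the single matched line; the fls helper, main() and the sample numbers are mine, only the correction line is quoted from the listing.

#include <stdint.h>
#include <stdio.h>

/* 1-based position of the highest set bit (stand-in for the kernel's __fls()+1) */
static int fls64_sketch(uint64_t x)
{
	int r = 0;
	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Map (inv_w, maxlen) to a group index: roughly log2(maxlen * inv_w) - min_slot_shift */
static int calc_index_sketch(uint32_t inv_w, unsigned int maxlen, uint32_t min_slot_shift)
{
	uint64_t slot_size = (uint64_t)maxlen * inv_w;
	uint64_t size_map = slot_size >> min_slot_shift;
	int index = 0;

	if (size_map) {
		index = fls64_sketch(size_map);
		/* the matched correction: drop one group when slot_size is an exact power of two */
		index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));
		if (index < 0)
			index = 0;
	}
	return index;
}

int main(void)
{
	/* hypothetical weights/lengths, just to see the index grow with maxlen */
	printf("%d\n", calc_index_sketch(1U << 10, 1514, 20));   /* -> 1 */
	printf("%d\n", calc_index_sketch(1U << 10, 65536, 20));  /* -> 6 (power-of-two case) */
	return 0;
}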
265 INIT_LIST_HEAD(&agg->active); in qfq_init_agg()
266 hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); in qfq_init_agg()
268 agg->lmax = lmax; in qfq_init_agg()
269 agg->class_weight = weight; in qfq_init_agg()
277 hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next) in qfq_find_agg()
278 if (agg->lmax == lmax && agg->class_weight == weight) in qfq_find_agg()
291 if (new_num_classes == q->max_agg_classes) in qfq_update_agg()
292 hlist_del_init(&agg->nonfull_next); in qfq_update_agg()
294 if (agg->num_classes > new_num_classes && in qfq_update_agg()
295 new_num_classes == q->max_agg_classes - 1) /* agg no more full */ in qfq_update_agg()
296 hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); in qfq_update_agg()
299 * agg->initial_budget > agg->budgetmax in qfq_update_agg()
302 agg->budgetmax = new_num_classes * agg->lmax; in qfq_update_agg()
303 new_agg_weight = agg->class_weight * new_num_classes; in qfq_update_agg()
304 agg->inv_w = ONE_FP/new_agg_weight; in qfq_update_agg()
306 if (agg->grp == NULL) { in qfq_update_agg()
307 int i = qfq_calc_index(agg->inv_w, agg->budgetmax, in qfq_update_agg()
308 q->min_slot_shift); in qfq_update_agg()
309 agg->grp = &q->groups[i]; in qfq_update_agg()
312 q->wsum += in qfq_update_agg()
313 (int) agg->class_weight * (new_num_classes - agg->num_classes); in qfq_update_agg()
314 q->iwsum = ONE_FP / q->wsum; in qfq_update_agg()
316 agg->num_classes = new_num_classes; in qfq_update_agg()
324 cl->agg = agg; in qfq_add_to_agg()
326 qfq_update_agg(q, agg, agg->num_classes+1); in qfq_add_to_agg()
327 if (cl->qdisc->q.qlen > 0) { /* adding an active class */ in qfq_add_to_agg()
328 list_add_tail(&cl->alist, &agg->active); in qfq_add_to_agg()
329 if (list_first_entry(&agg->active, struct qfq_class, alist) == in qfq_add_to_agg()
330 cl && q->in_serv_agg != agg) /* agg was inactive */ in qfq_add_to_agg()
339 hlist_del_init(&agg->nonfull_next); in qfq_destroy_agg()
340 q->wsum -= agg->class_weight; in qfq_destroy_agg()
341 if (q->wsum != 0) in qfq_destroy_agg()
342 q->iwsum = ONE_FP / q->wsum; in qfq_destroy_agg()
344 if (q->in_serv_agg == agg) in qfq_destroy_agg()
345 q->in_serv_agg = qfq_choose_next_agg(q); in qfq_destroy_agg()
352 struct qfq_aggregate *agg = cl->agg; in qfq_deactivate_class()
355 list_del_init(&cl->alist); /* remove from RR queue of the aggregate */ in qfq_deactivate_class()
356 if (list_empty(&agg->active)) /* agg is now inactive */ in qfq_deactivate_class()
363 struct qfq_aggregate *agg = cl->agg; in qfq_rm_from_agg()
365 cl->agg = NULL; in qfq_rm_from_agg()
366 if (agg->num_classes == 1) { /* agg being emptied, destroy it */ in qfq_rm_from_agg()
370 qfq_update_agg(q, agg, agg->num_classes-1); in qfq_rm_from_agg()
376 if (cl->qdisc->q.qlen > 0) /* class is active */ in qfq_deact_rm_from_agg()
391 return -EINVAL; in qfq_change_agg()
397 return -ENOBUFS; in qfq_change_agg()
421 return -EINVAL; in qfq_change_class()
439 return -EINVAL; in qfq_change_class()
448 old_weight = cl->agg->class_weight; in qfq_change_class()
449 old_lmax = cl->agg->lmax; in qfq_change_class()
455 delta_w = weight - (cl ? old_weight : 0); in qfq_change_class()
457 if (q->wsum + delta_w > QFQ_MAX_WSUM) { in qfq_change_class()
460 delta_w, q->wsum); in qfq_change_class()
461 return -EINVAL; in qfq_change_class()
466 err = gen_replace_estimator(&cl->bstats, NULL, in qfq_change_class()
467 &cl->rate_est, in qfq_change_class()
481 return -ENOBUFS; in qfq_change_class()
483 gnet_stats_basic_sync_init(&cl->bstats); in qfq_change_class()
484 cl->common.classid = classid; in qfq_change_class()
485 cl->deficit = lmax; in qfq_change_class()
486 INIT_LIST_HEAD(&cl->alist); in qfq_change_class()
488 cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, in qfq_change_class()
490 if (cl->qdisc == NULL) in qfq_change_class()
491 cl->qdisc = &noop_qdisc; in qfq_change_class()
494 err = gen_new_estimator(&cl->bstats, NULL, in qfq_change_class()
495 &cl->rate_est, in qfq_change_class()
503 if (cl->qdisc != &noop_qdisc) in qfq_change_class()
504 qdisc_hash_add(cl->qdisc, true); in qfq_change_class()
513 err = -ENOBUFS; in qfq_change_class()
514 gen_kill_estimator(&cl->rate_est); in qfq_change_class()
523 qdisc_class_hash_insert(&q->clhash, &cl->common); in qfq_change_class()
526 qdisc_class_hash_grow(sch, &q->clhash); in qfq_change_class()
532 qdisc_put(cl->qdisc); in qfq_change_class()
539 gen_kill_estimator(&cl->rate_est); in qfq_destroy_class()
540 qdisc_put(cl->qdisc); in qfq_destroy_class()
550 if (qdisc_class_in_use(&cl->common)) { in qfq_delete_class()
552 return -EBUSY; in qfq_delete_class()
557 qdisc_purge_queue(cl->qdisc); in qfq_delete_class()
558 qdisc_class_hash_remove(&q->clhash, &cl->common); in qfq_delete_class()
580 return q->block; in qfq_tcf_block()
589 qdisc_class_get(&cl->common); in qfq_bind_tcf()
598 qdisc_class_put(&cl->common); in qfq_unbind_tcf()
608 new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, in qfq_graft_class()
609 cl->common.classid, NULL); in qfq_graft_class()
614 *old = qdisc_replace(sch, new, &cl->qdisc); in qfq_graft_class()
622 return cl->qdisc; in qfq_class_leaf()
632 tcm->tcm_parent = TC_H_ROOT; in qfq_dump_class()
633 tcm->tcm_handle = cl->common.classid; in qfq_dump_class()
634 tcm->tcm_info = cl->qdisc->handle; in qfq_dump_class()
641 class_weight = cl->agg->class_weight; in qfq_dump_class()
642 lmax = cl->agg->lmax; in qfq_dump_class()
651 return -EMSGSIZE; in qfq_dump_class()
663 xstats.weight = cl->agg->class_weight; in qfq_dump_class_stats()
664 xstats.lmax = cl->agg->lmax; in qfq_dump_class_stats()
667 if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || in qfq_dump_class_stats()
668 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || in qfq_dump_class_stats()
669 qdisc_qstats_copy(d, cl->qdisc) < 0) in qfq_dump_class_stats()
670 return -1; in qfq_dump_class_stats()
681 if (arg->stop) in qfq_walk()
684 for (i = 0; i < q->clhash.hashsize; i++) { in qfq_walk()
685 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in qfq_walk()
701 if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) { in qfq_classify()
702 pr_debug("qfq_classify: found %d\n", skb->priority); in qfq_classify()
703 cl = qfq_find_class(sch, skb->priority); in qfq_classify()
709 fl = rcu_dereference_bh(q->filter_list); in qfq_classify()
735 return (s64)(a - b) > 0; in qfq_gt()
741 return ts & ~((1ULL << shift) - 1); in qfq_round_down()
749 return &q->groups[index]; in qfq_ffs()
754 return bitmap & ~((1UL << from) - 1); in mask_from()
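The three one-liners above (qfq_gt, qfq_round_down, mask_from) carry most of the timestamp arithmetic, so here is a tiny userspace check of their behaviour. The bodies are copied as shown in the listing; main() and the sample values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* wrap-safe "a is later than b" comparison, as in qfq_gt() */
static int gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

/* round a timestamp down to a slot boundary, as in qfq_round_down() */
static uint64_t round_down(uint64_t ts, uint64_t shift)
{
	return ts & ~((1ULL << shift) - 1);
}

/* keep only the bits for groups with index >= from, as in mask_from() */
static unsigned long mask_from_sketch(unsigned long bitmap, int from)
{
	return bitmap & ~((1UL << from) - 1);
}

int main(void)
{
	printf("%d\n", gt(5, UINT64_MAX));                             /* 1: comparison survives wraparound */
	printf("%#llx\n", (unsigned long long)round_down(0x12345, 8)); /* 0x12300 */
	printf("%#lx\n", mask_from_sketch(0xf5UL, 4));                 /* 0xf0 */
	return 0;
}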
759 * First compute eligibility comparing grp->S, q->V,
762 static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp) in qfq_calc_state() argument
765 unsigned int state = qfq_gt(grp->S, q->V); in qfq_calc_state()
766 unsigned long mask = mask_from(q->bitmaps[ER], grp->index); in qfq_calc_state()
771 if (qfq_gt(grp->F, next->F)) in qfq_calc_state()
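The two comparisons above decide the group's state: ineligible if its start time is still ahead of the system virtual time V, and blocked if an already-ready group with a lower index finishes earlier. A compact sketch of how they combine; the state names come from the bitmaps used throughout the listing (ER, IR, EB, IB), but their numeric values here are an assumption.

#include <stdio.h>

/* ER = eligible & ready, IR = ineligible & ready,
 * EB = eligible & blocked, IB = ineligible & blocked (values assumed) */
enum group_state { ER_ = 0, IR_ = 1, EB_ = 2, IB_ = 3 };

static int calc_state_sketch(int start_after_V, int finishes_after_next_ready)
{
	int state = start_after_V ? IR_ : ER_;   /* grp->S > q->V    -> Ineligible */
	if (finishes_after_next_ready)
		state |= EB_;                    /* grp->F > next->F -> Blocked    */
	return state;
}

int main(void)
{
	printf("%d\n", calc_state_sketch(0, 1));  /* 2: eligible but blocked (EB) */
	printf("%d\n", calc_state_sketch(1, 0));  /* 1: not yet eligible (IR)     */
	return 0;
}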
781 * q->bitmaps[dst] |= q->bitmaps[src] & mask;
782 * q->bitmaps[src] &= ~mask;
788 q->bitmaps[dst] |= q->bitmaps[src] & mask; in qfq_move_groups()
789 q->bitmaps[src] &= ~mask; in qfq_move_groups()
794 unsigned long mask = mask_from(q->bitmaps[ER], index + 1); in qfq_unblock_groups()
799 if (!qfq_gt(next->F, old_F)) in qfq_unblock_groups()
803 mask = (1UL << index) - 1; in qfq_unblock_groups()
811 old_V ^= q->V;
812 old_V >>= q->min_slot_shift;
820 unsigned long vslot = q->V >> q->min_slot_shift; in qfq_make_eligible()
821 unsigned long old_vslot = q->oldV >> q->min_slot_shift; in qfq_make_eligible()
830 mask = (1UL << last_flip_pos) - 1; in qfq_make_eligible()
839 * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
840 * and not a '-1' because the start time of the group may be moved
842 * this would cause non-empty slots to be right-shifted by one
856 * As for the first event, i.e., an out-of-order service, the
862 * The following function deals with this problem by backward-shifting
864 * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
866 * worst-case guarantees of these aggregates are not violated. In
867 * fact, in case of no out-of-order service, the timestamps of agg
870 * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
871 * service is postponed because of the backward-shift would have
888 * quite complex, the above-discussed capping of the slot index is
892 static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg, in qfq_slot_insert() argument
895 u64 slot = (roundedS - grp->S) >> grp->slot_shift; in qfq_slot_insert()
898 if (unlikely(slot > QFQ_MAX_SLOTS - 2)) { in qfq_slot_insert()
899 u64 deltaS = roundedS - grp->S - in qfq_slot_insert()
900 ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift); in qfq_slot_insert()
901 agg->S -= deltaS; in qfq_slot_insert()
902 agg->F -= deltaS; in qfq_slot_insert()
903 slot = QFQ_MAX_SLOTS - 2; in qfq_slot_insert()
906 i = (grp->front + slot) % QFQ_MAX_SLOTS; in qfq_slot_insert()
908 hlist_add_head(&agg->next, &grp->slots[i]); in qfq_slot_insert()
909 __set_bit(slot, &grp->full_slots); in qfq_slot_insert()
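A quick numeric walk through the capping implemented above, matching the long comment before qfq_slot_insert: with QFQ_MAX_SLOTS assumed to be 32 and a slot_shift of 10, an aggregate whose rounded start time lands 40 slots past grp->S is pulled back to slot QFQ_MAX_SLOTS-2 and its timestamps backward-shifted by the same deltaS. The concrete values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define MAX_SLOTS 32   /* stands in for QFQ_MAX_SLOTS (assumed value) */

int main(void)
{
	uint64_t grp_S = 0, slot_shift = 10;
	uint64_t agg_S = 40ULL << slot_shift, agg_F = agg_S + 2048;
	uint64_t roundedS = agg_S & ~((1ULL << slot_shift) - 1);
	uint64_t slot = (roundedS - grp_S) >> slot_shift;

	if (slot > MAX_SLOTS - 2) {
		uint64_t deltaS = roundedS - grp_S -
				  ((uint64_t)(MAX_SLOTS - 2) << slot_shift);
		agg_S -= deltaS;     /* agg->S -= deltaS */
		agg_F -= deltaS;     /* agg->F -= deltaS */
		slot = MAX_SLOTS - 2;
	}

	/* prints: slot=30 S=30720 F=32768 */
	printf("slot=%llu S=%llu F=%llu\n",
	       (unsigned long long)slot,
	       (unsigned long long)agg_S,
	       (unsigned long long)agg_F);
	return 0;
}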
913 static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp) in qfq_slot_head() argument
915 return hlist_entry(grp->slots[grp->front].first, in qfq_slot_head()
922 static void qfq_front_slot_remove(struct qfq_group *grp) in qfq_front_slot_remove() argument
924 struct qfq_aggregate *agg = qfq_slot_head(grp); in qfq_front_slot_remove()
927 hlist_del(&agg->next); in qfq_front_slot_remove()
928 if (hlist_empty(&grp->slots[grp->front])) in qfq_front_slot_remove()
929 __clear_bit(0, &grp->full_slots); in qfq_front_slot_remove()
933 * Returns the first aggregate in the first non-empty bucket of the
935 * non-empty bucket is at position 0 in full_slots.
937 static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp) in qfq_slot_scan() argument
941 pr_debug("qfq slot_scan: grp %u full %#lx\n", in qfq_slot_scan()
942 grp->index, grp->full_slots); in qfq_slot_scan()
944 if (grp->full_slots == 0) in qfq_slot_scan()
947 i = __ffs(grp->full_slots); /* zero based */ in qfq_slot_scan()
949 grp->front = (grp->front + i) % QFQ_MAX_SLOTS; in qfq_slot_scan()
950 grp->full_slots >>= i; in qfq_slot_scan()
953 return qfq_slot_head(grp); in qfq_slot_scan()
960 * because we use ffs() to find the first non-empty slot.
965 static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS) in qfq_slot_rotate() argument
967 unsigned int i = (grp->S - roundedS) >> grp->slot_shift; in qfq_slot_rotate()
969 grp->full_slots <<= i; in qfq_slot_rotate()
970 grp->front = (grp->front - i) % QFQ_MAX_SLOTS; in qfq_slot_rotate()
975 struct qfq_group *grp; in qfq_update_eligible() local
978 ineligible = q->bitmaps[IR] | q->bitmaps[IB]; in qfq_update_eligible()
980 if (!q->bitmaps[ER]) { in qfq_update_eligible()
981 grp = qfq_ffs(q, ineligible); in qfq_update_eligible()
982 if (qfq_gt(grp->S, q->V)) in qfq_update_eligible()
983 q->V = grp->S; in qfq_update_eligible()
993 struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc); in agg_dequeue()
998 cl->deficit -= (int) len; in agg_dequeue()
1000 if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */ in agg_dequeue()
1001 list_del_init(&cl->alist); in agg_dequeue()
1002 else if (cl->deficit < qdisc_peek_len(cl->qdisc)) { in agg_dequeue()
1003 cl->deficit += agg->lmax; in agg_dequeue()
1004 list_move_tail(&cl->alist, &agg->active); in agg_dequeue()
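The agg_dequeue fragment above is the classic deficit-round-robin step inside an aggregate: spend the deficit on the dequeued packet, and if the class cannot afford its next packet, top it up by the aggregate's lmax and rotate it to the tail. A minimal model of that decision with made-up packet sizes (the empty-queue branch from line 1000 is omitted here):

#include <stdio.h>

int main(void)
{
	int lmax = 1500;          /* agg->lmax, also the refill amount */
	int deficit = 1500;       /* cl->deficit */
	int sent = 1200;          /* length of the packet just dequeued */
	int next_len = 1400;      /* length of the class's next packet */

	deficit -= sent;                      /* cl->deficit -= (int) len */
	if (deficit < next_len) {             /* cannot afford the next packet */
		deficit += lmax;              /* cl->deficit += agg->lmax */
		printf("rotate class to tail, deficit now %d\n", deficit);
	} else {
		printf("class keeps the head, deficit %d\n", deficit);
	}
	return 0;
}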
1016 *cl = list_first_entry(&agg->active, struct qfq_class, alist); in qfq_peek_skb()
1017 skb = (*cl)->qdisc->ops->peek((*cl)->qdisc); in qfq_peek_skb()
1019 qdisc_warn_nonwc("qfq_dequeue", (*cl)->qdisc); in qfq_peek_skb()
1032 * agg->initial_budget - agg->budget > agg->budgetmax in charge_actual_service()
1034 u32 service_received = min(agg->budgetmax, in charge_actual_service()
1035 agg->initial_budget - agg->budget); in charge_actual_service()
1037 agg->F = agg->S + (u64)service_received * agg->inv_w; in charge_actual_service()
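charge_actual_service() above advances the aggregate's finish time by the bytes actually served, scaled by inv_w. Since inv_w = ONE_FP / weight elsewhere in the listing (qfq_update_agg, line 304), this is service/weight in fixed point. A small check; FRAC_BITS = 30 and the sample weight/lengths are assumptions.

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 30                  /* assumed fixed-point fractional bits */
#define ONE_FP    (1ULL << FRAC_BITS)

int main(void)
{
	uint32_t weight = 4;
	uint32_t inv_w = ONE_FP / weight;       /* agg->inv_w = ONE_FP / new_agg_weight */
	uint64_t S = 0, served = 3000;          /* bytes actually sent this round */

	/* agg->F = agg->S + (u64)service_received * agg->inv_w */
	uint64_t F = S + (uint64_t)served * inv_w;

	/* in unscaled units this is served / weight = 750 */
	printf("F = %llu (raw %llu)\n",
	       (unsigned long long)(F >> FRAC_BITS),
	       (unsigned long long)F);
	return 0;
}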
1056 int slot_shift = agg->grp->slot_shift; in qfq_update_start()
1058 roundedF = qfq_round_down(agg->F, slot_shift); in qfq_update_start()
1059 limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift); in qfq_update_start()
1061 if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) { in qfq_update_start()
1063 mask = mask_from(q->bitmaps[ER], agg->grp->index); in qfq_update_start()
1066 if (qfq_gt(roundedF, next->F)) { in qfq_update_start()
1067 if (qfq_gt(limit, next->F)) in qfq_update_start()
1068 agg->S = next->F; in qfq_update_start()
1070 agg->S = limit; in qfq_update_start()
1074 agg->S = q->V; in qfq_update_start()
1076 agg->S = agg->F; in qfq_update_start()
1080 * service. In particular, assign to agg->F its maximum possible
1091 agg->S = agg->F; in qfq_update_agg_ts()
1093 agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w; in qfq_update_agg_ts()
1101 struct qfq_aggregate *in_serv_agg = q->in_serv_agg; in qfq_dequeue()
1104 /* next-packet len, 0 means no more active classes in in-service agg */ in qfq_dequeue()
1110 if (!list_empty(&in_serv_agg->active)) in qfq_dequeue()
1114 * If there are no active classes in the in-service aggregate, in qfq_dequeue()
1118 if (len == 0 || in_serv_agg->budget < len) { in qfq_dequeue()
1122 in_serv_agg->initial_budget = in_serv_agg->budget = in qfq_dequeue()
1123 in_serv_agg->budgetmax; in qfq_dequeue()
1125 if (!list_empty(&in_serv_agg->active)) { in qfq_dequeue()
1131 * just keep it as the in-service one. This in qfq_dequeue()
1138 } else if (sch->q.qlen == 0) { /* no aggregate to serve */ in qfq_dequeue()
1139 q->in_serv_agg = NULL; in qfq_dequeue()
1147 in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q); in qfq_dequeue()
1153 sch->q.qlen--; in qfq_dequeue()
1158 sch->q.qlen++; in qfq_dequeue()
1169 if (unlikely(in_serv_agg->budget < len)) in qfq_dequeue()
1170 in_serv_agg->budget = 0; in qfq_dequeue()
1172 in_serv_agg->budget -= len; in qfq_dequeue()
1174 q->V += (u64)len * q->iwsum; in qfq_dequeue()
1176 len, (unsigned long long) in_serv_agg->F, in qfq_dequeue()
1177 (unsigned long long) q->V); in qfq_dequeue()
1184 struct qfq_group *grp; in qfq_choose_next_agg() local
1189 q->oldV = q->V; in qfq_choose_next_agg()
1191 if (!q->bitmaps[ER]) in qfq_choose_next_agg()
1194 grp = qfq_ffs(q, q->bitmaps[ER]); in qfq_choose_next_agg()
1195 old_F = grp->F; in qfq_choose_next_agg()
1197 agg = qfq_slot_head(grp); in qfq_choose_next_agg()
1200 qfq_front_slot_remove(grp); in qfq_choose_next_agg()
1202 new_front_agg = qfq_slot_scan(grp); in qfq_choose_next_agg()
1205 __clear_bit(grp->index, &q->bitmaps[ER]); in qfq_choose_next_agg()
1207 u64 roundedS = qfq_round_down(new_front_agg->S, in qfq_choose_next_agg()
1208 grp->slot_shift); in qfq_choose_next_agg()
1211 if (grp->S == roundedS) in qfq_choose_next_agg()
1213 grp->S = roundedS; in qfq_choose_next_agg()
1214 grp->F = roundedS + (2ULL << grp->slot_shift); in qfq_choose_next_agg()
1215 __clear_bit(grp->index, &q->bitmaps[ER]); in qfq_choose_next_agg()
1216 s = qfq_calc_state(q, grp); in qfq_choose_next_agg()
1217 __set_bit(grp->index, &q->bitmaps[s]); in qfq_choose_next_agg()
1220 qfq_unblock_groups(q, grp->index, old_F); in qfq_choose_next_agg()
1241 pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); in qfq_enqueue()
1243 if (unlikely(cl->agg->lmax < len)) { in qfq_enqueue()
1245 cl->agg->lmax, len, cl->common.classid); in qfq_enqueue()
1246 err = qfq_change_agg(sch, cl, cl->agg->class_weight, len); in qfq_enqueue()
1248 cl->qstats.drops++; in qfq_enqueue()
1253 gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; in qfq_enqueue()
1254 err = qdisc_enqueue(skb, cl->qdisc, to_free); in qfq_enqueue()
1258 cl->qstats.drops++; in qfq_enqueue()
1264 _bstats_update(&cl->bstats, len, gso_segs); in qfq_enqueue()
1265 sch->qstats.backlog += len; in qfq_enqueue()
1266 ++sch->q.qlen; in qfq_enqueue()
1268 agg = cl->agg; in qfq_enqueue()
1271 if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && in qfq_enqueue()
1272 list_first_entry(&agg->active, struct qfq_class, alist) in qfq_enqueue()
1273 == cl && cl->deficit < len) in qfq_enqueue()
1274 list_move_tail(&cl->alist, &agg->active); in qfq_enqueue()
1280 cl->deficit = agg->lmax; in qfq_enqueue()
1281 list_add_tail(&cl->alist, &agg->active); in qfq_enqueue()
1283 if (list_first_entry(&agg->active, struct qfq_class, alist) != cl || in qfq_enqueue()
1284 q->in_serv_agg == agg) in qfq_enqueue()
1285 return err; /* non-empty or in service, nothing else to do */ in qfq_enqueue()
1297 struct qfq_group *grp = agg->grp; in qfq_schedule_agg() local
1301 roundedS = qfq_round_down(agg->S, grp->slot_shift); in qfq_schedule_agg()
1305 * If agg->S >= grp->S we don't need to adjust the in qfq_schedule_agg()
1307 * Otherwise grp->S is decreasing, we must make room in qfq_schedule_agg()
1312 if (grp->full_slots) { in qfq_schedule_agg()
1313 if (!qfq_gt(grp->S, agg->S)) in qfq_schedule_agg()
1316 /* create a slot for this agg->S */ in qfq_schedule_agg()
1317 qfq_slot_rotate(grp, roundedS); in qfq_schedule_agg()
1319 __clear_bit(grp->index, &q->bitmaps[IR]); in qfq_schedule_agg()
1320 __clear_bit(grp->index, &q->bitmaps[IB]); in qfq_schedule_agg()
1321 } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) && in qfq_schedule_agg()
1322 q->in_serv_agg == NULL) in qfq_schedule_agg()
1323 q->V = roundedS; in qfq_schedule_agg()
1325 grp->S = roundedS; in qfq_schedule_agg()
1326 grp->F = roundedS + (2ULL << grp->slot_shift); in qfq_schedule_agg()
1327 s = qfq_calc_state(q, grp); in qfq_schedule_agg()
1328 __set_bit(grp->index, &q->bitmaps[s]); in qfq_schedule_agg()
1331 s, q->bitmaps[s], in qfq_schedule_agg()
1332 (unsigned long long) agg->S, in qfq_schedule_agg()
1333 (unsigned long long) agg->F, in qfq_schedule_agg()
1334 (unsigned long long) q->V); in qfq_schedule_agg()
1337 qfq_slot_insert(grp, agg, roundedS); in qfq_schedule_agg()
1345 agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */ in qfq_activate_agg()
1348 if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */ in qfq_activate_agg()
1349 q->in_serv_agg = agg; /* start serving this aggregate */ in qfq_activate_agg()
1351 q->oldV = q->V = agg->S; in qfq_activate_agg()
1352 } else if (agg != q->in_serv_agg) in qfq_activate_agg()
1356 static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, in qfq_slot_remove() argument
1362 roundedS = qfq_round_down(agg->S, grp->slot_shift); in qfq_slot_remove()
1363 offset = (roundedS - grp->S) >> grp->slot_shift; in qfq_slot_remove()
1365 i = (grp->front + offset) % QFQ_MAX_SLOTS; in qfq_slot_remove()
1367 hlist_del(&agg->next); in qfq_slot_remove()
1368 if (hlist_empty(&grp->slots[i])) in qfq_slot_remove()
1369 __clear_bit(offset, &grp->full_slots); in qfq_slot_remove()
1381 struct qfq_group *grp = agg->grp; in qfq_deactivate_agg() local
1386 if (agg == q->in_serv_agg) { in qfq_deactivate_agg()
1388 q->in_serv_agg = qfq_choose_next_agg(q); in qfq_deactivate_agg()
1392 agg->F = agg->S; in qfq_deactivate_agg()
1393 qfq_slot_remove(q, grp, agg); in qfq_deactivate_agg()
1395 if (!grp->full_slots) { in qfq_deactivate_agg()
1396 __clear_bit(grp->index, &q->bitmaps[IR]); in qfq_deactivate_agg()
1397 __clear_bit(grp->index, &q->bitmaps[EB]); in qfq_deactivate_agg()
1398 __clear_bit(grp->index, &q->bitmaps[IB]); in qfq_deactivate_agg()
1400 if (test_bit(grp->index, &q->bitmaps[ER]) && in qfq_deactivate_agg()
1401 !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) { in qfq_deactivate_agg()
1402 mask = q->bitmaps[ER] & ((1UL << grp->index) - 1); in qfq_deactivate_agg()
1404 mask = ~((1UL << __fls(mask)) - 1); in qfq_deactivate_agg()
1410 __clear_bit(grp->index, &q->bitmaps[ER]); in qfq_deactivate_agg()
1411 } else if (hlist_empty(&grp->slots[grp->front])) { in qfq_deactivate_agg()
1412 agg = qfq_slot_scan(grp); in qfq_deactivate_agg()
1413 roundedS = qfq_round_down(agg->S, grp->slot_shift); in qfq_deactivate_agg()
1414 if (grp->S != roundedS) { in qfq_deactivate_agg()
1415 __clear_bit(grp->index, &q->bitmaps[ER]); in qfq_deactivate_agg()
1416 __clear_bit(grp->index, &q->bitmaps[IR]); in qfq_deactivate_agg()
1417 __clear_bit(grp->index, &q->bitmaps[EB]); in qfq_deactivate_agg()
1418 __clear_bit(grp->index, &q->bitmaps[IB]); in qfq_deactivate_agg()
1419 grp->S = roundedS; in qfq_deactivate_agg()
1420 grp->F = roundedS + (2ULL << grp->slot_shift); in qfq_deactivate_agg()
1421 s = qfq_calc_state(q, grp); in qfq_deactivate_agg()
1422 __set_bit(grp->index, &q->bitmaps[s]); in qfq_deactivate_agg()
1432 if (list_empty(&cl->alist)) in qfq_qlen_notify()
1441 struct qfq_group *grp; in qfq_init_qdisc() local
1445 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in qfq_init_qdisc()
1449 err = qdisc_class_hash_init(&q->clhash); in qfq_init_qdisc()
1453 max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1, in qfq_init_qdisc()
1457 q->max_agg_classes = 1<<max_cl_shift; in qfq_init_qdisc()
1461 q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX; in qfq_init_qdisc()
1464 grp = &q->groups[i]; in qfq_init_qdisc()
1465 grp->index = i; in qfq_init_qdisc()
1466 grp->slot_shift = q->min_slot_shift + i; in qfq_init_qdisc()
1468 INIT_HLIST_HEAD(&grp->slots[j]); in qfq_init_qdisc()
1471 INIT_HLIST_HEAD(&q->nonfull_aggs); in qfq_init_qdisc()
1482 for (i = 0; i < q->clhash.hashsize; i++) { in qfq_reset_qdisc()
1483 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in qfq_reset_qdisc()
1484 if (cl->qdisc->q.qlen > 0) in qfq_reset_qdisc()
1487 qdisc_reset(cl->qdisc); in qfq_reset_qdisc()
1499 tcf_block_put(q->block); in qfq_destroy_qdisc()
1501 for (i = 0; i < q->clhash.hashsize; i++) { in qfq_destroy_qdisc()
1502 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], in qfq_destroy_qdisc()
1508 qdisc_class_hash_destroy(&q->clhash); in qfq_destroy_qdisc()