1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
5 * Copyright (C) 2014-2018 Jonathan Morton <chromatix99@gmail.com>
6 * Copyright (C) 2015-2018 Toke Høiland-Jørgensen <toke@toke.dk>
7 * Copyright (C) 2014-2018 Dave Täht <dave.taht@gmail.com>
8 * Copyright (C) 2015-2018 Sebastian Moeller <moeller0@gmx.de>
9 * (C) 2015-2018 Kevin Darbyshire-Bryant <kevin@darbyshire-bryant.me.uk>
10 * Copyright (C) 2017-2018 Ryan Mounce <ryan@mounce.com.au>
16 * easy-to-use package:
18 * - An overall bandwidth shaper, to move the bottleneck away from dumb CPE
24 * - A Diffserv-aware priority queue, giving more priority to certain classes,
28 * - Each priority tin has a separate Flow Queue system, to isolate traffic
31 * set-associative hash function.
33 * - Each queue is actively managed by Cobalt, which is a combination of the
36 * latency low. The codel parameters are auto-tuned based on the bandwidth
46 * priority-based weight (high) or a bandwidth-based weight (low) is used for
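The pieces described above fit together as a three-level hierarchy: the qdisc holds a set of tins (priority/bandwidth classes), each tin holds an array of flow queues, and each flow queue carries its own COBALT state and DRR deficit. A rough structural sketch, with assumed names and heavily simplified fields rather than the kernel's actual layout:

/* Illustrative layout only; field names and sizes are assumptions */
#include <stdint.h>

#define NUM_TINS   4      /* e.g. diffserv4 */
#define NUM_QUEUES 1024   /* flow queues per tin */

struct toy_cobalt_vars {          /* per-flow AQM state */
	uint32_t count;           /* codel drop count */
	uint32_t p_drop;          /* BLUE drop probability, 0.32 fixed point */
	uint64_t drop_next;       /* next scheduled drop/mark time, ns */
};

struct toy_flow {                 /* one isolated flow queue */
	struct toy_cobalt_vars cvars;
	int deficit;              /* DRR deficit, bytes */
};

struct toy_tin {                  /* one priority/bandwidth class */
	struct toy_flow flows[NUM_QUEUES];
	uint64_t tin_rate_ns;     /* tin shaper: scaled ns per byte */
	int tin_deficit;          /* DRR deficit among tins */
};

struct toy_cake {                 /* the qdisc as a whole */
	struct toy_tin tins[NUM_TINS];
	uint64_t rate_ns;         /* overall shaper: scaled ns per byte */
	uint64_t time_next_packet;/* shaper virtual clock, ns */
};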
84 /* struct cobalt_params - contains codel and blue parameters
85 * @interval: codel initial drop rate
86 * @target: maximum persistent sojourn time & blue update rate
87 * @mtu_time: serialisation delay of maximum-size packet
99 /* struct cobalt_vars - contains codel and blue variables
127 /* this stuff is all needed per-flow at dequeue time */
147 u16 t:3, b:10; member
269 * respond to congestion signals in a TCP-like way. BLUE is more effective on
286 return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data; in get_cobalt_cb()
291 return get_cobalt_cb(skb)->enqueue_time; in cobalt_get_enqueue_time()
297 get_cobalt_cb(skb)->enqueue_time = now; in cobalt_set_enqueue_time()
384 * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
394 invsqrt = vars->rec_inv_sqrt; in cobalt_newton_step()
396 val = (3LL << 32) - ((u64)vars->count * invsqrt2); in cobalt_newton_step()
399 val = (val * invsqrt) >> (32 - 2 + 1); in cobalt_newton_step()
401 vars->rec_inv_sqrt = val; in cobalt_newton_step()
406 if (vars->count < REC_INV_SQRT_CACHE) in cobalt_invsqrt()
407 vars->rec_inv_sqrt = inv_sqrt_cache[vars->count]; in cobalt_invsqrt()
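cobalt_newton_step() above keeps rec_inv_sqrt ~= 2^32/sqrt(count) in 0.32 fixed point via the iteration x' = (x/2) * (3 - count * x^2); small counts come from a precomputed cache, so each step starts from a good seed. A standalone userspace sketch of the same arithmetic (the demo loop and names are illustrative assumptions, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <math.h>

/* one 0.32 fixed-point Newton step towards 2^32 / sqrt(count) */
uint32_t newton_step(uint32_t count, uint32_t rec_inv_sqrt)
{
	uint64_t invsqrt  = rec_inv_sqrt;
	uint64_t invsqrt2 = (invsqrt * invsqrt) >> 32;             /* x^2 */
	uint64_t val = (3ULL << 32) - (uint64_t)count * invsqrt2;  /* 3 - count*x^2;
								      stays positive when
								      seeded sensibly */

	val >>= 2;                               /* avoid overflow in the multiply below */
	val = (val * invsqrt) >> (32 - 2 + 1);   /* (x/2) * (3 - count*x^2) */
	return (uint32_t)val;
}

int main(void)
{
	uint32_t x = ~0U;   /* ~1.0, exact for count == 1 */

	/* COBALT re-runs a step whenever count changes, so the previous value
	 * is always a good seed for the next count.
	 */
	for (uint32_t count = 2; count <= 10; count++) {
		x = newton_step(count, x);
		printf("count=%2u  approx=%.4f  exact=%.4f\n",
		       count, x / 4294967296.0, 1.0 / sqrt((double)count));
	}
	return 0;
}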
438 if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) { in cobalt_queue_full()
439 up = !vars->p_drop; in cobalt_queue_full()
440 vars->p_drop += p->p_inc; in cobalt_queue_full()
441 if (vars->p_drop < p->p_inc) in cobalt_queue_full()
442 vars->p_drop = ~0; in cobalt_queue_full()
443 vars->blue_timer = now; in cobalt_queue_full()
445 vars->dropping = true; in cobalt_queue_full()
446 vars->drop_next = now; in cobalt_queue_full()
447 if (!vars->count) in cobalt_queue_full()
448 vars->count = 1; in cobalt_queue_full()
462 if (vars->p_drop && in cobalt_queue_empty()
463 ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) { in cobalt_queue_empty()
464 if (vars->p_drop < p->p_dec) in cobalt_queue_empty()
465 vars->p_drop = 0; in cobalt_queue_empty()
467 vars->p_drop -= p->p_dec; in cobalt_queue_empty()
468 vars->blue_timer = now; in cobalt_queue_empty()
469 down = !vars->p_drop; in cobalt_queue_empty()
471 vars->dropping = false; in cobalt_queue_empty()
473 if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) { in cobalt_queue_empty()
474 vars->count--; in cobalt_queue_empty()
476 vars->drop_next = cobalt_control(vars->drop_next, in cobalt_queue_empty()
477 p->interval, in cobalt_queue_empty()
478 vars->rec_inv_sqrt); in cobalt_queue_empty()
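The BLUE half of COBALT, visible in cobalt_queue_full() and cobalt_queue_empty() above, treats p_drop as a 0.32 fixed-point probability: it is bumped and saturated towards ~0 (about 1.0) while the queue overflows, and walked back down to zero while the queue sits empty. A minimal sketch of that bookkeeping (helper names are assumptions):

#include <stdint.h>

/* raise the drop probability when the queue overflows (cf. cobalt_queue_full) */
uint32_t blue_increase(uint32_t p_drop, uint32_t p_inc)
{
	p_drop += p_inc;
	if (p_drop < p_inc)     /* wrapped past 1.0: saturate instead */
		p_drop = ~0U;
	return p_drop;
}

/* lower it again while the queue stays empty (cf. cobalt_queue_empty) */
uint32_t blue_decrease(uint32_t p_drop, uint32_t p_dec)
{
	return p_drop < p_dec ? 0 : p_drop - p_dec;
}

/* p_drop / 2^32 is the probability; with the defaults set in cake_set_rate()
 * below, p_inc = 1 << 24 is a 1/256 step and p_dec = 1 << 20 is a 1/4096 step.
 */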
510 * as possible to 1.0 in fixed-point. in cobalt_should_drop()
514 schedule = ktime_sub(now, vars->drop_next); in cobalt_should_drop()
515 over_target = sojourn > p->target && in cobalt_should_drop()
516 sojourn > p->mtu_time * bulk_flows * 2 && in cobalt_should_drop()
517 sojourn > p->mtu_time * 4; in cobalt_should_drop()
518 next_due = vars->count && ktime_to_ns(schedule) >= 0; in cobalt_should_drop()
520 vars->ecn_marked = false; in cobalt_should_drop()
523 if (!vars->dropping) { in cobalt_should_drop()
524 vars->dropping = true; in cobalt_should_drop()
525 vars->drop_next = cobalt_control(now, in cobalt_should_drop()
526 p->interval, in cobalt_should_drop()
527 vars->rec_inv_sqrt); in cobalt_should_drop()
529 if (!vars->count) in cobalt_should_drop()
530 vars->count = 1; in cobalt_should_drop()
531 } else if (vars->dropping) { in cobalt_should_drop()
532 vars->dropping = false; in cobalt_should_drop()
535 if (next_due && vars->dropping) { in cobalt_should_drop()
537 if (!(vars->ecn_marked = INET_ECN_set_ce(skb))) in cobalt_should_drop()
540 vars->count++; in cobalt_should_drop()
541 if (!vars->count) in cobalt_should_drop()
542 vars->count--; in cobalt_should_drop()
544 vars->drop_next = cobalt_control(vars->drop_next, in cobalt_should_drop()
545 p->interval, in cobalt_should_drop()
546 vars->rec_inv_sqrt); in cobalt_should_drop()
547 schedule = ktime_sub(now, vars->drop_next); in cobalt_should_drop()
550 vars->count--; in cobalt_should_drop()
552 vars->drop_next = cobalt_control(vars->drop_next, in cobalt_should_drop()
553 p->interval, in cobalt_should_drop()
554 vars->rec_inv_sqrt); in cobalt_should_drop()
555 schedule = ktime_sub(now, vars->drop_next); in cobalt_should_drop()
556 next_due = vars->count && ktime_to_ns(schedule) >= 0; in cobalt_should_drop()
561 if (vars->p_drop && reason == SKB_NOT_DROPPED_YET && in cobalt_should_drop()
562 get_random_u32() < vars->p_drop) in cobalt_should_drop()
566 if (!vars->count) in cobalt_should_drop()
567 vars->drop_next = ktime_add_ns(now, p->interval); in cobalt_should_drop()
569 vars->drop_next = now; in cobalt_should_drop()
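cobalt_should_drop() spaces its marks/drops on the CoDel control law: each signal is scheduled interval/sqrt(count) after the previous one, computed as (interval * rec_inv_sqrt) >> 32 using the fixed-point reciprocal square root maintained above. A small sketch of the resulting schedule for a 100 ms interval (names and the floating-point seed are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <math.h>

/* next signalling time: t + interval / sqrt(count), cf. cobalt_control() */
uint64_t control_law(uint64_t t_ns, uint64_t interval_ns, uint32_t rec_inv_sqrt)
{
	return t_ns + ((interval_ns * rec_inv_sqrt) >> 32);
}

int main(void)
{
	uint64_t interval = 100ULL * 1000 * 1000;  /* 100 ms in ns */
	uint64_t t = 0;

	for (uint32_t count = 1; count <= 8; count++) {
		/* idealized seed; the real code maintains this incrementally */
		uint32_t ris = (uint32_t)((double)UINT32_MAX / sqrt((double)count));
		uint64_t next = control_law(t, interval, ris);

		printf("count=%u  gap=%.1f ms\n", count, (next - t) / 1e6);
		t = next;
	}
	return 0;
}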
579 bool rev = !skb->_nfct, upd = false; in cake_update_flowkeys()
589 if (ip != keys->addrs.v4addrs.src) { in cake_update_flowkeys()
590 keys->addrs.v4addrs.src = ip; in cake_update_flowkeys()
594 if (ip != keys->addrs.v4addrs.dst) { in cake_update_flowkeys()
595 keys->addrs.v4addrs.dst = ip; in cake_update_flowkeys()
599 if (keys->ports.ports) { in cake_update_flowkeys()
603 if (port != keys->ports.src) { in cake_update_flowkeys()
604 keys->ports.src = port; in cake_update_flowkeys()
608 if (port != keys->ports.dst) { in cake_update_flowkeys()
609 keys->ports.dst = port; in cake_update_flowkeys()
638 q->hosts[flow->srchost].srchost_bulk_flow_count)) in cake_dec_srchost_bulk_flow_count()
639 q->hosts[flow->srchost].srchost_bulk_flow_count--; in cake_dec_srchost_bulk_flow_count()
647 q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES)) in cake_inc_srchost_bulk_flow_count()
648 q->hosts[flow->srchost].srchost_bulk_flow_count++; in cake_inc_srchost_bulk_flow_count()
656 q->hosts[flow->dsthost].dsthost_bulk_flow_count)) in cake_dec_dsthost_bulk_flow_count()
657 q->hosts[flow->dsthost].dsthost_bulk_flow_count--; in cake_dec_dsthost_bulk_flow_count()
665 q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES)) in cake_inc_dsthost_bulk_flow_count()
666 q->hosts[flow->dsthost].dsthost_bulk_flow_count++; in cake_inc_dsthost_bulk_flow_count()
677 q->hosts[flow->srchost].srchost_bulk_flow_count); in cake_get_flow_quantum()
681 q->hosts[flow->dsthost].dsthost_bulk_flow_count); in cake_get_flow_quantum()
686 return (q->flow_quantum * quantum_div[host_load] + in cake_get_flow_quantum()
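cake_get_flow_quantum() scales the per-flow DRR quantum down by the bulk-flow count of the busiest host the flow belongs to, using a 0.16 fixed-point reciprocal table so the hot path multiplies and shifts instead of dividing. A sketch of the idea, assuming the table holds 65535/i and using plain rounding where the kernel adds its own rounding/dither term:

#include <stdint.h>
#include <stdio.h>

#define MAX_HOST_LOAD 1024     /* assumed to mirror CAKE_QUEUES */

static uint16_t quantum_div[MAX_HOST_LOAD + 1];

int main(void)
{
	uint16_t flow_quantum = 1514;   /* bytes, cf. cake_set_rate() below */

	for (int i = 1; i <= MAX_HOST_LOAD; i++)
		quantum_div[i] = 65535 / i;   /* reciprocal in 0.16 fixed point */

	for (int host_load = 1; host_load <= 8; host_load++) {
		/* ~ flow_quantum / host_load without a per-packet division */
		uint32_t q = ((uint32_t)flow_quantum * quantum_div[host_load] +
			      (1 << 15)) >> 16;

		printf("host_load=%d  per-flow quantum=%u bytes\n", host_load, q);
	}
	return 0;
}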
699 bool use_skbhash = skb->l4_hash; in cake_hash()
759 * side-effect it sorts the src and dst addresses. in cake_hash()
766 flow_hash = flow_override - 1; in cake_hash()
768 flow_hash = skb->hash; in cake_hash()
770 dsthost_hash = host_override - 1; in cake_hash()
771 srchost_hash = host_override - 1; in cake_hash()
784 /* set-associative hashing */ in cake_hash()
786 if (likely(q->tags[reduced_hash] == flow_hash && in cake_hash()
787 q->flows[reduced_hash].set)) { in cake_hash()
788 q->way_directs++; in cake_hash()
791 u32 outer_hash = reduced_hash - inner_hash; in cake_hash()
801 if (q->tags[outer_hash + k] == flow_hash) { in cake_hash()
803 q->way_hits++; in cake_hash()
805 if (!q->flows[outer_hash + k].set) { in cake_hash()
820 if (!q->flows[outer_hash + k].set) { in cake_hash()
821 q->way_misses++; in cake_hash()
831 q->way_collisions++; in cake_hash()
835 if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { in cake_hash()
836 cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode); in cake_hash()
837 cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode); in cake_hash()
842 q->tags[reduced_hash] = flow_hash; in cake_hash()
847 outer_hash = srchost_idx - inner_hash; in cake_hash()
850 if (q->hosts[outer_hash + k].srchost_tag == in cake_hash()
856 if (!q->hosts[outer_hash + k].srchost_bulk_flow_count) in cake_hash()
859 q->hosts[outer_hash + k].srchost_tag = srchost_hash; in cake_hash()
862 q->flows[reduced_hash].srchost = srchost_idx; in cake_hash()
864 if (q->flows[reduced_hash].set == CAKE_SET_BULK) in cake_hash()
865 cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode); in cake_hash()
871 outer_hash = dsthost_idx - inner_hash; in cake_hash()
874 if (q->hosts[outer_hash + k].dsthost_tag == in cake_hash()
880 if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count) in cake_hash()
883 q->hosts[outer_hash + k].dsthost_tag = dsthost_hash; in cake_hash()
886 q->flows[reduced_hash].dsthost = dsthost_idx; in cake_hash()
888 if (q->flows[reduced_hash].set == CAKE_SET_BULK) in cake_hash()
889 cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode); in cake_hash()
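The cake_hash() excerpt above is an 8-way set-associative mapping from flow hashes onto the 1024 queues: the reduced hash picks a home slot, the surrounding set of eight ways is probed by tag, and a miss claims an idle way (or, on a full collision, falls back to the home way and evicts its previous user). A compact sketch of that lookup/allocate path with assumed names; the host tagging and bulk-flow accounting seen above are omitted:

#include <stdint.h>
#include <stdbool.h>

#define QUEUES 1024
#define WAYS   8

struct toy_hash {
	uint32_t tags[QUEUES];   /* full flow hash remembered per queue */
	bool     in_use[QUEUES]; /* queue currently owned by some flow? */
};

/* return a queue index for flow_hash, reusing or allocating a way in its set */
uint32_t toy_lookup(struct toy_hash *h, uint32_t flow_hash)
{
	uint32_t reduced = flow_hash % QUEUES;
	uint32_t inner   = reduced % WAYS;
	uint32_t outer   = reduced - inner;       /* first index of the set */
	uint32_t i, k;

	/* fast path: the directly-indexed way still holds this flow */
	if (h->tags[reduced] == flow_hash && h->in_use[reduced])
		return reduced;

	/* probe the other ways of the same set for a tag hit */
	for (i = 0, k = inner; i < WAYS; i++, k = (k + 1) % WAYS)
		if (h->tags[outer + k] == flow_hash)
			return outer + k;

	/* miss: take the first idle way, if any */
	for (i = 0, k = inner; i < WAYS; i++, k = (k + 1) % WAYS)
		if (!h->in_use[outer + k])
			break;
	if (i == WAYS)
		k = inner;        /* full collision: accept it, reuse the home way */

	h->tags[outer + k]   = flow_hash;
	h->in_use[outer + k] = true;
	return outer + k;
}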
901 struct sk_buff *skb = flow->head; in dequeue_head()
904 flow->head = skb->next; in dequeue_head()
915 if (!flow->head) in flow_queue_add()
916 flow->head = skb; in flow_queue_add()
918 flow->tail->next = skb; in flow_queue_add()
919 flow->tail = skb; in flow_queue_add()
920 skb->next = NULL; in flow_queue_add()
934 if (iph->version == 4 && iph->protocol == IPPROTO_IPV6) in cake_get_iphdr()
935 return skb_header_pointer(skb, offset + iph->ihl * 4, in cake_get_iphdr()
938 else if (iph->version == 4) in cake_get_iphdr()
941 else if (iph->version == 6) in cake_get_iphdr()
963 if (ipv6h->version == 4) { in cake_get_tcphdr()
965 offset += iph->ihl * 4; in cake_get_tcphdr()
967 /* special-case 6in4 tunnelling, as that is a common way to get in cake_get_tcphdr()
970 if (iph->protocol == IPPROTO_IPV6) { in cake_get_tcphdr()
974 if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP) in cake_get_tcphdr()
979 } else if (iph->protocol != IPPROTO_TCP) { in cake_get_tcphdr()
983 } else if (ipv6h->version == 6) { in cake_get_tcphdr()
984 if (ipv6h->nexthdr != IPPROTO_TCP) in cake_get_tcphdr()
993 if (!tcph || tcph->doff < 5) in cake_get_tcphdr()
1004 int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr); in cake_get_tcpopt()
1014 length--; in cake_get_tcpopt()
1028 ptr += opsize - 2; in cake_get_tcpopt()
1029 length -= opsize; in cake_get_tcpopt()
1040 * @return -1, 0 or 1 as normal compare functions
1046 u32 ack_seq_a = ntohl(tcph_a->ack_seq); in cake_tcph_sack_compare()
1055 oplen_a -= TCPOLEN_SACK_BASE; in cake_tcph_sack_compare()
1056 oplen_b -= TCPOLEN_SACK_BASE; in cake_tcph_sack_compare()
1060 return -1; in cake_tcph_sack_compare()
1070 u32 start_a = get_unaligned_be32(&sack_a->start_seq); in cake_tcph_sack_compare()
1071 u32 end_a = get_unaligned_be32(&sack_a->end_seq); in cake_tcph_sack_compare()
1077 return -1; in cake_tcph_sack_compare()
1079 bytes_a += end_a - start_a; in cake_tcph_sack_compare()
1082 u32 start_b = get_unaligned_be32(&sack_tmp->start_seq); in cake_tcph_sack_compare()
1083 u32 end_b = get_unaligned_be32(&sack_tmp->end_seq); in cake_tcph_sack_compare()
1087 bytes_b += end_b - start_b; in cake_tcph_sack_compare()
1094 oplen_tmp -= sizeof(*sack_tmp); in cake_tcph_sack_compare()
1099 return -1; in cake_tcph_sack_compare()
1101 oplen_a -= sizeof(*sack_a); in cake_tcph_sack_compare()
1106 /* If we made it this far, all ranges SACKed by A are covered by B, so in cake_tcph_sack_compare()
1107 * either the SACKs are equal, or B SACKs more bytes. in cake_tcph_sack_compare()
1130 int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr); in cake_tcph_may_drop()
1153 length--; in cake_tcph_may_drop()
1191 ptr += opsize - 2; in cake_tcph_may_drop()
1192 length -= opsize; in cake_tcph_may_drop()
1201 bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE; in cake_ack_filter()
1216 if (flow->head == flow->tail) in cake_ack_filter()
1219 skb = flow->tail; in cake_ack_filter()
1236 * of the queue looking for pure ACKs with the same 5-tuple as the in cake_ack_filter()
1239 for (skb_check = flow->head; in cake_ack_filter()
1241 skb_prev = skb_check, skb_check = skb_check->next) { in cake_ack_filter()
1246 /* only TCP packets with matching 5-tuple are eligible, and only in cake_ack_filter()
1249 if (!tcph_check || iph->version != iph_check->version || in cake_ack_filter()
1250 tcph_check->source != tcph->source || in cake_ack_filter()
1251 tcph_check->dest != tcph->dest) in cake_ack_filter()
1254 if (iph_check->version == 4) { in cake_ack_filter()
1255 if (iph_check->saddr != iph->saddr || in cake_ack_filter()
1256 iph_check->daddr != iph->daddr) in cake_ack_filter()
1259 seglen = iph_totlen(skb, iph_check) - in cake_ack_filter()
1260 (4 * iph_check->ihl); in cake_ack_filter()
1261 } else if (iph_check->version == 6) { in cake_ack_filter()
1265 if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) || in cake_ack_filter()
1266 ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr)) in cake_ack_filter()
1269 seglen = ntohs(ipv6h_check->payload_len); in cake_ack_filter()
1283 num_found--; in cake_ack_filter()
1293 (seglen - __tcp_hdrlen(tcph_check)) != 0 || in cake_ack_filter()
1294 after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq))) in cake_ack_filter()
1306 (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) && in cake_ack_filter()
1336 if (elig_ack && aggressive && elig_ack->next == skb && in cake_ack_filter()
1345 elig_ack_prev->next = elig_ack->next; in cake_ack_filter()
1347 flow->head = elig_ack->next; in cake_ack_filter()
1356 avg -= avg >> shift; in cake_ewma()
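cake_ewma() is a shift-based exponentially weighted moving average: avg = avg - avg/2^shift + sample/2^shift, i.e. a gain of 1/2^shift per sample. A two-line sketch, plus the asymmetric way the dequeue path uses it for the delay statistics (fast gain when moving towards the extreme, slow gain otherwise):

#include <stdint.h>

uint64_t toy_ewma(uint64_t avg, uint64_t sample, uint32_t shift)
{
	avg -= avg >> shift;       /* decay the old average by 1/2^shift   */
	avg += sample >> shift;    /* blend in 1/2^shift of the new sample */
	return avg;
}

/* cake tracks a fast-attack / slow-decay peak delay roughly like this
 * (cf. the peak_delay update in the cake_dequeue() excerpt below):
 *
 *     peak = toy_ewma(peak, delay, delay > peak ? 2 : 8);
 */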
1363 if (q->rate_flags & CAKE_FLAG_OVERHEAD) in cake_calc_overhead()
1364 len -= off; in cake_calc_overhead()
1366 if (q->max_netlen < len) in cake_calc_overhead()
1367 q->max_netlen = len; in cake_calc_overhead()
1368 if (q->min_netlen > len) in cake_calc_overhead()
1369 q->min_netlen = len; in cake_calc_overhead()
1371 len += q->rate_overhead; in cake_calc_overhead()
1373 if (len < q->rate_mpu) in cake_calc_overhead()
1374 len = q->rate_mpu; in cake_calc_overhead()
1376 if (q->atm_mode == CAKE_ATM_ATM) { in cake_calc_overhead()
1380 } else if (q->atm_mode == CAKE_ATM_PTM) { in cake_calc_overhead()
1388 if (q->max_adjlen < len) in cake_calc_overhead()
1389 q->max_adjlen = len; in cake_calc_overhead()
1390 if (q->min_adjlen > len) in cake_calc_overhead()
1391 q->min_adjlen = len; in cake_calc_overhead()
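The two atm_mode branches above adjust the shaped length for link-layer framing: ATM/AAL5 carries payload in 48-byte cells that occupy 53 bytes on the wire, and PTM's 64b/65b encoding costs roughly one extra byte per 64 bytes or part thereof. A sketch of that adjustment; the exact expressions are reconstructed here, not quoted from the elided lines:

#include <stdint.h>

enum toy_atm_mode { TOY_ATM_NONE, TOY_ATM_ATM, TOY_ATM_PTM };

uint32_t toy_framing_len(uint32_t len, enum toy_atm_mode mode)
{
	if (mode == TOY_ATM_ATM) {
		/* round up to whole 48-byte cell payloads, 53 bytes each on the wire */
		len = (len + 47) / 48 * 53;
	} else if (mode == TOY_ATM_PTM) {
		/* 64b/65b encoding: one extra byte per 64 bytes or part thereof */
		len += (len + 63) / 64;
	}
	return len;
}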
1404 q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8); in cake_overhead()
1406 if (!shinfo->gso_size) in cake_overhead()
1410 if (!skb->encapsulation) in cake_overhead()
1416 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | in cake_overhead()
1433 if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) in cake_overhead()
1434 segs = DIV_ROUND_UP(skb->len - hdr_len, in cake_overhead()
1435 shinfo->gso_size); in cake_overhead()
1437 segs = shinfo->gso_segs; in cake_overhead()
1439 len = shinfo->gso_size + hdr_len; in cake_overhead()
1440 last_len = skb->len - shinfo->gso_size * (segs - 1); in cake_overhead()
1442 return (cake_calc_overhead(q, len, off) * (segs - 1) + in cake_overhead()
1448 struct cake_heap_entry ii = q->overflow_heap[i]; in cake_heap_swap()
1449 struct cake_heap_entry jj = q->overflow_heap[j]; in cake_heap_swap()
1451 q->overflow_heap[i] = jj; in cake_heap_swap()
1452 q->overflow_heap[j] = ii; in cake_heap_swap()
1454 q->tins[ii.t].overflow_idx[ii.b] = j; in cake_heap_swap()
1455 q->tins[jj.t].overflow_idx[jj.b] = i; in cake_heap_swap()
1460 struct cake_heap_entry ii = q->overflow_heap[i]; in cake_heap_get_backlog()
1462 return q->tins[ii.t].backlogs[ii.b]; in cake_heap_get_backlog()
1505 u16 p = (i - 1) >> 1; in cake_heapify_up()
1519 struct cake_tin_data *b, in cake_advance_shaper() argument
1523 u32 len = get_cobalt_cb(skb)->adjusted_len; in cake_advance_shaper()
1528 if (q->rate_ns) { in cake_advance_shaper()
1529 u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft; in cake_advance_shaper()
1530 u64 global_dur = (len * q->rate_ns) >> q->rate_shft; in cake_advance_shaper()
1533 if (ktime_before(b->time_next_packet, now) && !drop) in cake_advance_shaper()
1534 b->time_next_packet = ktime_add_ns(b->time_next_packet, in cake_advance_shaper()
1537 else if (ktime_before(b->time_next_packet, in cake_advance_shaper()
1539 b->time_next_packet = ktime_add_ns(now, tin_dur); in cake_advance_shaper()
1541 q->time_next_packet = ktime_add_ns(q->time_next_packet, in cake_advance_shaper()
1544 q->failsafe_next_packet = \ in cake_advance_shaper()
1545 ktime_add_ns(q->failsafe_next_packet, in cake_advance_shaper()
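cake_advance_shaper() charges each packet to its tin and to the global shaper by converting the framing-adjusted length into a transmit duration with the precomputed ns-per-byte factor, then advancing the corresponding virtual clocks; dequeue is gated on those clocks. A deliberately simplified sketch (the drop/failsafe special cases shown above are omitted):

#include <stdint.h>

struct toy_shaper {
	uint64_t rate_ns;            /* scaled ns per byte, cf. cake_set_rate() */
	uint32_t rate_shft;          /* scale factor for rate_ns */
	uint64_t time_next_packet;   /* virtual clock: earliest next send, ns */
};

/* charge one packet of adjusted length len_bytes against the shaper;
 * cake snaps time_next_packet back to "now" separately, when the queue
 * becomes empty (see the cake_enqueue() excerpt below).
 */
void toy_charge(struct toy_shaper *s, uint32_t len_bytes)
{
	uint64_t dur = ((uint64_t)len_bytes * s->rate_ns) >> s->rate_shft;

	s->time_next_packet += dur;
}

/* dequeue is then gated on the virtual clock, roughly: */
int toy_may_send(const struct toy_shaper *s, uint64_t now_ns)
{
	return s->time_next_packet <= now_ns;
}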
1557 struct cake_tin_data *b; in cake_drop() local
1561 if (!q->overflow_timeout) { in cake_drop()
1563 /* Build fresh max-heap */ in cake_drop()
1564 for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2 - 1; i >= 0; i--) in cake_drop()
1567 q->overflow_timeout = 65535; in cake_drop()
1570 qq = q->overflow_heap[0]; in cake_drop()
1572 idx = qq.b; in cake_drop()
1574 b = &q->tins[tin]; in cake_drop()
1575 flow = &b->flows[idx]; in cake_drop()
1579 q->overflow_timeout = 0; in cake_drop()
1583 if (cobalt_queue_full(&flow->cvars, &b->cparams, now)) in cake_drop()
1584 b->unresponsive_flow_count++; in cake_drop()
1587 q->buffer_used -= skb->truesize; in cake_drop()
1588 b->backlogs[idx] -= len; in cake_drop()
1589 b->tin_backlog -= len; in cake_drop()
1590 sch->qstats.backlog -= len; in cake_drop()
1592 flow->dropped++; in cake_drop()
1593 b->tin_dropped++; in cake_drop()
1595 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_drop()
1596 cake_advance_shaper(q, b, skb, now, true); in cake_drop()
1599 sch->q.qlen--; in cake_drop()
1655 return 0x38; /* CS7 - Net Control */ in cake_handle_diffserv()
1658 /* If there is no Diffserv field, treat as best-effort */ in cake_handle_diffserv()
1671 /* Tin selection: Default to diffserv-based selection, allow overriding in cake_select_tin()
1672 * using firewall marks or skb->priority. Call DSCP parsing early if in cake_select_tin()
1675 mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft; in cake_select_tin()
1676 wash = !!(q->rate_flags & CAKE_FLAG_WASH); in cake_select_tin()
1680 if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) in cake_select_tin()
1683 else if (mark && mark <= q->tin_cnt) in cake_select_tin()
1684 tin = q->tin_order[mark - 1]; in cake_select_tin()
1686 else if (TC_H_MAJ(skb->priority) == sch->handle && in cake_select_tin()
1687 TC_H_MIN(skb->priority) > 0 && in cake_select_tin()
1688 TC_H_MIN(skb->priority) <= q->tin_cnt) in cake_select_tin()
1689 tin = q->tin_order[TC_H_MIN(skb->priority) - 1]; in cake_select_tin()
1694 tin = q->tin_index[dscp]; in cake_select_tin()
1696 if (unlikely(tin >= q->tin_cnt)) in cake_select_tin()
1700 return &q->tins[tin]; in cake_select_tin()
1712 filter = rcu_dereference_bh(q->filter_list); in cake_classify()
1751 struct cake_tin_data *b; in cake_enqueue() local
1756 idx = cake_classify(sch, &b, skb, q->flow_mode, &ret); in cake_enqueue()
1763 tin = (u32)(b - q->tins); in cake_enqueue()
1764 idx--; in cake_enqueue()
1765 flow = &b->flows[idx]; in cake_enqueue()
1768 if (!b->tin_backlog) { in cake_enqueue()
1769 if (ktime_before(b->time_next_packet, now)) in cake_enqueue()
1770 b->time_next_packet = now; in cake_enqueue()
1772 if (!sch->q.qlen) { in cake_enqueue()
1773 if (ktime_before(q->time_next_packet, now)) { in cake_enqueue()
1774 q->failsafe_next_packet = now; in cake_enqueue()
1775 q->time_next_packet = now; in cake_enqueue()
1776 } else if (ktime_after(q->time_next_packet, now) && in cake_enqueue()
1777 ktime_after(q->failsafe_next_packet, now)) { in cake_enqueue()
1779 min(ktime_to_ns(q->time_next_packet), in cake_enqueue()
1781 q->failsafe_next_packet)); in cake_enqueue()
1782 sch->qstats.overlimits++; in cake_enqueue()
1783 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_enqueue()
1788 if (unlikely(len > b->max_skblen)) in cake_enqueue()
1789 b->max_skblen = len; in cake_enqueue()
1791 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { in cake_enqueue()
1802 qdisc_skb_cb(segs)->pkt_len = segs->len; in cake_enqueue()
1804 get_cobalt_cb(segs)->adjusted_len = cake_overhead(q, in cake_enqueue()
1808 sch->q.qlen++; in cake_enqueue()
1810 slen += segs->len; in cake_enqueue()
1811 q->buffer_used += segs->truesize; in cake_enqueue()
1812 b->packets++; in cake_enqueue()
1816 b->bytes += slen; in cake_enqueue()
1817 b->backlogs[idx] += slen; in cake_enqueue()
1818 b->tin_backlog += slen; in cake_enqueue()
1819 sch->qstats.backlog += slen; in cake_enqueue()
1820 q->avg_window_bytes += slen; in cake_enqueue()
1822 qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen); in cake_enqueue()
1827 get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb); in cake_enqueue()
1830 if (q->ack_filter) in cake_enqueue()
1834 b->ack_drops++; in cake_enqueue()
1835 sch->qstats.drops++; in cake_enqueue()
1836 b->bytes += qdisc_pkt_len(ack); in cake_enqueue()
1837 len -= qdisc_pkt_len(ack); in cake_enqueue()
1838 q->buffer_used += skb->truesize - ack->truesize; in cake_enqueue()
1839 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_enqueue()
1840 cake_advance_shaper(q, b, ack, now, true); in cake_enqueue()
1845 sch->q.qlen++; in cake_enqueue()
1846 q->buffer_used += skb->truesize; in cake_enqueue()
1850 b->packets++; in cake_enqueue()
1851 b->bytes += len; in cake_enqueue()
1852 b->backlogs[idx] += len; in cake_enqueue()
1853 b->tin_backlog += len; in cake_enqueue()
1854 sch->qstats.backlog += len; in cake_enqueue()
1855 q->avg_window_bytes += len; in cake_enqueue()
1858 if (q->overflow_timeout) in cake_enqueue()
1859 cake_heapify_up(q, b->overflow_idx[idx]); in cake_enqueue()
1862 if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) { in cake_enqueue()
1864 ktime_to_ns(ktime_sub(now, q->last_packet_time)); in cake_enqueue()
1869 /* filter out short-term bursts, eg. wifi aggregation */ in cake_enqueue()
1870 q->avg_packet_interval = \ in cake_enqueue()
1871 cake_ewma(q->avg_packet_interval, in cake_enqueue()
1873 (packet_interval > q->avg_packet_interval ? in cake_enqueue()
1876 q->last_packet_time = now; in cake_enqueue()
1878 if (packet_interval > q->avg_packet_interval) { in cake_enqueue()
1881 q->avg_window_begin)); in cake_enqueue()
1882 u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC; in cake_enqueue() local
1884 b = div64_u64(b, window_interval); in cake_enqueue()
1885 q->avg_peak_bandwidth = in cake_enqueue()
1886 cake_ewma(q->avg_peak_bandwidth, b, in cake_enqueue()
1887 b > q->avg_peak_bandwidth ? 2 : 8); in cake_enqueue()
1888 q->avg_window_bytes = 0; in cake_enqueue()
1889 q->avg_window_begin = now; in cake_enqueue()
1892 ktime_add_ms(q->last_reconfig_time, in cake_enqueue()
1894 q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4; in cake_enqueue()
1899 q->avg_window_bytes = 0; in cake_enqueue()
1900 q->last_packet_time = now; in cake_enqueue()
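Under CAKE_FLAG_AUTORATE_INGRESS, the block above estimates the incoming link capacity: when a gap in arrivals closes a burst window, bytes-in-window divided by the window duration is folded into an asymmetric EWMA (fast attack, slow decay), and the shaper is periodically reprogrammed to 15/16 of that estimate. A condensed sketch of the estimator with assumed names and an added divide-by-zero guard:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct toy_autorate {
	uint64_t window_bytes;      /* bytes seen in the current burst window */
	uint64_t window_begin_ns;   /* when the window started */
	uint64_t peak_bps;          /* smoothed capacity estimate, bytes/sec */
};

static uint64_t toy_ewma64(uint64_t avg, uint64_t sample, uint32_t shift)
{
	return avg - (avg >> shift) + (sample >> shift);
}

/* called when a gap in arrivals closes the current burst window */
void toy_window_end(struct toy_autorate *a, uint64_t now_ns)
{
	uint64_t span = now_ns - a->window_begin_ns;

	if (span) {   /* guard added for the sketch only */
		uint64_t bps = a->window_bytes * NSEC_PER_SEC / span;

		/* fast attack (shift 2) upwards, slow decay (shift 8) downwards */
		a->peak_bps = toy_ewma64(a->peak_bps, bps,
					 bps > a->peak_bps ? 2 : 8);
	}
	a->window_bytes = 0;
	a->window_begin_ns = now_ns;
	/* the shaper is then reprogrammed to peak_bps * 15 / 16 */
}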
1904 if (!flow->set || flow->set == CAKE_SET_DECAYING) { in cake_enqueue()
1905 if (!flow->set) { in cake_enqueue()
1906 list_add_tail(&flow->flowchain, &b->new_flows); in cake_enqueue()
1908 b->decaying_flow_count--; in cake_enqueue()
1909 list_move_tail(&flow->flowchain, &b->new_flows); in cake_enqueue()
1911 flow->set = CAKE_SET_SPARSE; in cake_enqueue()
1912 b->sparse_flow_count++; in cake_enqueue()
1914 flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode); in cake_enqueue()
1915 } else if (flow->set == CAKE_SET_SPARSE_WAIT) { in cake_enqueue()
1919 flow->set = CAKE_SET_BULK; in cake_enqueue()
1920 b->sparse_flow_count--; in cake_enqueue()
1921 b->bulk_flow_count++; in cake_enqueue()
1923 cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode); in cake_enqueue()
1924 cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode); in cake_enqueue()
1927 if (q->buffer_used > q->buffer_max_used) in cake_enqueue()
1928 q->buffer_max_used = q->buffer_used; in cake_enqueue()
1930 if (q->buffer_used > q->buffer_limit) { in cake_enqueue()
1935 while (q->buffer_used > q->buffer_limit) { in cake_enqueue()
1943 b->drop_overlimit += dropped; in cake_enqueue()
1954 struct cake_tin_data *b = &q->tins[q->cur_tin]; in cake_dequeue_one() local
1955 struct cake_flow *flow = &b->flows[q->cur_flow]; in cake_dequeue_one()
1959 if (flow->head) { in cake_dequeue_one()
1962 b->backlogs[q->cur_flow] -= len; in cake_dequeue_one()
1963 b->tin_backlog -= len; in cake_dequeue_one()
1964 sch->qstats.backlog -= len; in cake_dequeue_one()
1965 q->buffer_used -= skb->truesize; in cake_dequeue_one()
1966 sch->q.qlen--; in cake_dequeue_one()
1968 if (q->overflow_timeout) in cake_dequeue_one()
1969 cake_heapify(q, b->overflow_idx[q->cur_flow]); in cake_dequeue_one()
1980 q->cur_tin = tin; in cake_clear_tin()
1981 for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++) in cake_clear_tin()
1989 struct cake_tin_data *b = &q->tins[q->cur_tin]; in cake_dequeue() local
2000 if (!sch->q.qlen) in cake_dequeue()
2004 if (ktime_after(q->time_next_packet, now) && in cake_dequeue()
2005 ktime_after(q->failsafe_next_packet, now)) { in cake_dequeue()
2006 u64 next = min(ktime_to_ns(q->time_next_packet), in cake_dequeue()
2007 ktime_to_ns(q->failsafe_next_packet)); in cake_dequeue()
2009 sch->qstats.overlimits++; in cake_dequeue()
2010 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_dequeue()
2015 if (!q->rate_ns) { in cake_dequeue()
2021 while (b->tin_deficit < 0 || in cake_dequeue()
2022 !(b->sparse_flow_count + b->bulk_flow_count)) { in cake_dequeue()
2023 if (b->tin_deficit <= 0) in cake_dequeue()
2024 b->tin_deficit += b->tin_quantum; in cake_dequeue()
2025 if (b->sparse_flow_count + b->bulk_flow_count) in cake_dequeue()
2028 q->cur_tin++; in cake_dequeue()
2029 b++; in cake_dequeue()
2030 if (q->cur_tin >= q->tin_cnt) { in cake_dequeue()
2031 q->cur_tin = 0; in cake_dequeue()
2032 b = q->tins; in cake_dequeue()
2035 /* It's possible for q->qlen to be in cake_dequeue()
2048 * - Highest-priority tin with queue and meeting schedule, or in cake_dequeue()
2049 * - The earliest-scheduled tin with queue. in cake_dequeue()
2054 for (tin = 0; tin < q->tin_cnt; tin++) { in cake_dequeue()
2055 b = q->tins + tin; in cake_dequeue()
2056 if ((b->sparse_flow_count + b->bulk_flow_count) > 0) { in cake_dequeue()
2058 ktime_sub(b->time_next_packet, now); in cake_dequeue()
2069 q->cur_tin = best_tin; in cake_dequeue()
2070 b = q->tins + best_tin; in cake_dequeue()
2073 if (unlikely(!(b->sparse_flow_count + b->bulk_flow_count))) in cake_dequeue()
2079 head = &b->decaying_flows; in cake_dequeue()
2081 head = &b->new_flows; in cake_dequeue()
2083 head = &b->old_flows; in cake_dequeue()
2085 head = &b->decaying_flows; in cake_dequeue()
2092 q->cur_flow = flow - b->flows; in cake_dequeue()
2096 if (flow->deficit <= 0) { in cake_dequeue()
2098 * rotations. No non-empty flow can go into the decaying in cake_dequeue()
2101 if (flow->set == CAKE_SET_SPARSE) { in cake_dequeue()
2102 if (flow->head) { in cake_dequeue()
2103 b->sparse_flow_count--; in cake_dequeue()
2104 b->bulk_flow_count++; in cake_dequeue()
2106 cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode); in cake_dequeue()
2107 cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode); in cake_dequeue()
2109 flow->set = CAKE_SET_BULK; in cake_dequeue()
2115 flow->set = CAKE_SET_SPARSE_WAIT; in cake_dequeue()
2119 flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode); in cake_dequeue()
2120 list_move_tail(&flow->flowchain, &b->old_flows); in cake_dequeue()
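The deficit handling above is classic deficit round robin: a flow that has used up its deficit gets one quantum added and is rotated to the back of the old_flows list, so over time each backlogged flow sends roughly one quantum of bytes per round regardless of its packet sizes. A self-contained toy of that behaviour (list rotation reduced to a plain loop; not cake's actual new/old/decaying machinery):

#include <stdio.h>

#define NFLOWS 3

int main(void)
{
	const int quantum = 1514;
	const int pktsize[NFLOWS] = {1500, 300, 64};  /* very different packet sizes */
	int deficit[NFLOWS] = {0};
	long sent[NFLOWS] = {0};

	for (int round = 0; round < 1000; round++)
		for (int i = 0; i < NFLOWS; i++) {
			deficit[i] += quantum;      /* credit one quantum per visit */
			while (deficit[i] > 0) {    /* "send" while credit remains;
						       may overshoot, like cake */
				deficit[i] -= pktsize[i];
				sent[i] += pktsize[i];
			}
		}

	/* each flow ends up within one packet of the same byte share */
	for (int i = 0; i < NFLOWS; i++)
		printf("flow %d: %ld bytes (pkt %d)\n", i, sent[i], pktsize[i]);
	return 0;
}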
2130 if (cobalt_queue_empty(&flow->cvars, &b->cparams, now)) in cake_dequeue()
2131 b->unresponsive_flow_count--; in cake_dequeue()
2133 if (flow->cvars.p_drop || flow->cvars.count || in cake_dequeue()
2134 ktime_before(now, flow->cvars.drop_next)) { in cake_dequeue()
2138 list_move_tail(&flow->flowchain, in cake_dequeue()
2139 &b->decaying_flows); in cake_dequeue()
2140 if (flow->set == CAKE_SET_BULK) { in cake_dequeue()
2141 b->bulk_flow_count--; in cake_dequeue()
2143 cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode); in cake_dequeue()
2144 cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode); in cake_dequeue()
2146 b->decaying_flow_count++; in cake_dequeue()
2147 } else if (flow->set == CAKE_SET_SPARSE || in cake_dequeue()
2148 flow->set == CAKE_SET_SPARSE_WAIT) { in cake_dequeue()
2149 b->sparse_flow_count--; in cake_dequeue()
2150 b->decaying_flow_count++; in cake_dequeue()
2152 flow->set = CAKE_SET_DECAYING; in cake_dequeue()
2155 list_del_init(&flow->flowchain); in cake_dequeue()
2156 if (flow->set == CAKE_SET_SPARSE || in cake_dequeue()
2157 flow->set == CAKE_SET_SPARSE_WAIT) in cake_dequeue()
2158 b->sparse_flow_count--; in cake_dequeue()
2159 else if (flow->set == CAKE_SET_BULK) { in cake_dequeue()
2160 b->bulk_flow_count--; in cake_dequeue()
2162 cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode); in cake_dequeue()
2163 cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode); in cake_dequeue()
2165 b->decaying_flow_count--; in cake_dequeue()
2167 flow->set = CAKE_SET_NONE; in cake_dequeue()
2172 reason = cobalt_should_drop(&flow->cvars, &b->cparams, now, skb, in cake_dequeue()
2173 (b->bulk_flow_count * in cake_dequeue()
2174 !!(q->rate_flags & in cake_dequeue()
2177 if (reason == SKB_NOT_DROPPED_YET || !flow->head) in cake_dequeue()
2181 if (q->rate_flags & CAKE_FLAG_INGRESS) { in cake_dequeue()
2182 len = cake_advance_shaper(q, b, skb, in cake_dequeue()
2184 flow->deficit -= len; in cake_dequeue()
2185 b->tin_deficit -= len; in cake_dequeue()
2187 flow->dropped++; in cake_dequeue()
2188 b->tin_dropped++; in cake_dequeue()
2192 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_dequeue()
2196 b->tin_ecn_mark += !!flow->cvars.ecn_marked; in cake_dequeue()
2201 b->avge_delay = cake_ewma(b->avge_delay, delay, 8); in cake_dequeue()
2202 b->peak_delay = cake_ewma(b->peak_delay, delay, in cake_dequeue()
2203 delay > b->peak_delay ? 2 : 8); in cake_dequeue()
2204 b->base_delay = cake_ewma(b->base_delay, delay, in cake_dequeue()
2205 delay < b->base_delay ? 2 : 8); in cake_dequeue()
2207 len = cake_advance_shaper(q, b, skb, now, false); in cake_dequeue()
2208 flow->deficit -= len; in cake_dequeue()
2209 b->tin_deficit -= len; in cake_dequeue()
2211 if (ktime_after(q->time_next_packet, now) && sch->q.qlen) { in cake_dequeue()
2212 u64 next = min(ktime_to_ns(q->time_next_packet), in cake_dequeue()
2213 ktime_to_ns(q->failsafe_next_packet)); in cake_dequeue()
2215 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_dequeue()
2216 } else if (!sch->q.qlen) { in cake_dequeue()
2219 for (i = 0; i < q->tin_cnt; i++) { in cake_dequeue()
2220 if (q->tins[i].decaying_flow_count) { in cake_dequeue()
2223 q->tins[i].cparams.target); in cake_dequeue()
2225 qdisc_watchdog_schedule_ns(&q->watchdog, in cake_dequeue()
2232 if (q->overflow_timeout) in cake_dequeue()
2233 q->overflow_timeout--; in cake_dequeue()
2243 if (!q->tins) in cake_reset()
2270 static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu, in cake_set_rate() argument
2273 /* convert byte-rate into time-per-byte in cake_set_rate()
2282 b->flow_quantum = 1514; in cake_set_rate()
2283 if (rate) { in cake_set_rate()
2284 b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL); in cake_set_rate()
2287 rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate)); in cake_set_rate()
2290 rate_shft--; in cake_set_rate()
2294 b->tin_rate_bps = rate; in cake_set_rate()
2295 b->tin_rate_ns = rate_ns; in cake_set_rate()
2296 b->tin_rate_shft = rate_shft; in cake_set_rate()
2300 b->cparams.target = max((byte_target_ns * 3) / 2, target_ns); in cake_set_rate()
2301 b->cparams.interval = max(rtt_est_ns + in cake_set_rate()
2302 b->cparams.target - target_ns, in cake_set_rate()
2303 b->cparams.target * 2); in cake_set_rate()
2304 b->cparams.mtu_time = byte_target_ns; in cake_set_rate()
2305 b->cparams.p_inc = 1 << 24; /* 1/256 */ in cake_set_rate()
2306 b->cparams.p_dec = 1 << 20; /* 1/4096 */ in cake_set_rate()
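cake_set_rate() turns the configured byte rate into a scaled nanoseconds-per-byte factor (rate_ns, rate_shft) so the shaper can multiply and shift per packet instead of dividing, and keeps the factor small enough that len * rate_ns cannot overflow. A worked sketch for a 10 Mbit/s (1.25 MB/s) link; the 64-byte/s floor is an assumption standing in for the kernel's MIN_RATE:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define MIN_RATE     64ULL          /* assumed floor; keeps the division sane */

int main(void)
{
	uint64_t rate = 1250000;    /* bytes per second, i.e. a 10 Mbit/s link */
	uint32_t rate_shft = 34;
	uint64_t rate_ns = (NSEC_PER_SEC << rate_shft) /
			   (rate > MIN_RATE ? rate : MIN_RATE);

	/* keep the factor within 34 bits so len * rate_ns cannot overflow */
	while (rate_ns >> 34) {
		rate_ns >>= 1;
		rate_shft--;
	}

	/* a 1514-byte packet then costs this many ns of shaper time: */
	uint64_t dur = (1514 * rate_ns) >> rate_shft;

	printf("rate_ns=%llu shft=%u  1514B -> %llu ns (~%.3f ms)\n",
	       (unsigned long long)rate_ns, rate_shft,
	       (unsigned long long)dur, dur / 1e6);
	return 0;
}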
2312 struct cake_tin_data *b = &q->tins[0]; in cake_config_besteffort() local
2314 u64 rate = q->rate_bps; in cake_config_besteffort() local
2316 q->tin_cnt = 1; in cake_config_besteffort()
2318 q->tin_index = besteffort; in cake_config_besteffort()
2319 q->tin_order = normal_order; in cake_config_besteffort()
2321 cake_set_rate(b, rate, mtu, in cake_config_besteffort()
2322 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_besteffort()
2323 b->tin_quantum = 65535; in cake_config_besteffort()
2330 /* convert high-level (user visible) parameters into internal format */ in cake_config_precedence()
2333 u64 rate = q->rate_bps; in cake_config_precedence() local
2337 q->tin_cnt = 8; in cake_config_precedence()
2338 q->tin_index = precedence; in cake_config_precedence()
2339 q->tin_order = normal_order; in cake_config_precedence()
2341 for (i = 0; i < q->tin_cnt; i++) { in cake_config_precedence()
2342 struct cake_tin_data *b = &q->tins[i]; in cake_config_precedence() local
2344 cake_set_rate(b, rate, mtu, us_to_ns(q->target), in cake_config_precedence()
2345 us_to_ns(q->interval)); in cake_config_precedence()
2347 b->tin_quantum = max_t(u16, 1U, quantum); in cake_config_precedence()
2350 rate *= 7; in cake_config_precedence()
2351 rate >>= 3; in cake_config_precedence()
2362 * Default Forwarding (DF/CS0) - Best Effort
2366 * Assured Forwarding 1 (AF1x) - x3
2367 * Assured Forwarding 2 (AF2x) - x3
2368 * Assured Forwarding 3 (AF3x) - x3
2369 * Assured Forwarding 4 (AF4x) - x3
2388 * Network Control (CS6,CS7) - routing traffic
2389 * Telephony (EF,VA) - aka. VoIP streams
2390 * Signalling (CS5) - VoIP setup
2391 * Multimedia Conferencing (AF4x) - aka. video calls
2392 * Realtime Interactive (CS4) - eg. games
2393 * Multimedia Streaming (AF3x) - eg. YouTube, NetFlix, Twitch
2395 * Low-Latency Data (AF2x,TOS4) - eg. database
2396 * Ops, Admin, Management (CS2) - eg. ssh
2398 * High-Throughput Data (AF1x,TOS2) - eg. web traffic
2399 * Low-Priority Data (LE,CS1) - eg. BitTorrent
2422 u64 rate = q->rate_bps; in cake_config_diffserv8() local
2426 q->tin_cnt = 8; in cake_config_diffserv8()
2429 q->tin_index = diffserv8; in cake_config_diffserv8()
2430 q->tin_order = normal_order; in cake_config_diffserv8()
2433 for (i = 0; i < q->tin_cnt; i++) { in cake_config_diffserv8()
2434 struct cake_tin_data *b = &q->tins[i]; in cake_config_diffserv8() local
2436 cake_set_rate(b, rate, mtu, us_to_ns(q->target), in cake_config_diffserv8()
2437 us_to_ns(q->interval)); in cake_config_diffserv8()
2439 b->tin_quantum = max_t(u16, 1U, quantum); in cake_config_diffserv8()
2442 rate *= 7; in cake_config_diffserv8()
2443 rate >>= 3; in cake_config_diffserv8()
2454 /* Further pruned list of traffic classes for four-class system: in cake_config_diffserv4()
2466 u64 rate = q->rate_bps; in cake_config_diffserv4() local
2469 q->tin_cnt = 4; in cake_config_diffserv4()
2472 q->tin_index = diffserv4; in cake_config_diffserv4()
2473 q->tin_order = bulk_order; in cake_config_diffserv4()
2476 cake_set_rate(&q->tins[0], rate, mtu, in cake_config_diffserv4()
2477 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2478 cake_set_rate(&q->tins[1], rate >> 4, mtu, in cake_config_diffserv4()
2479 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2480 cake_set_rate(&q->tins[2], rate >> 1, mtu, in cake_config_diffserv4()
2481 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2482 cake_set_rate(&q->tins[3], rate >> 2, mtu, in cake_config_diffserv4()
2483 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2485 /* bandwidth-sharing weights */ in cake_config_diffserv4()
2486 q->tins[0].tin_quantum = quantum; in cake_config_diffserv4()
2487 q->tins[1].tin_quantum = quantum >> 4; in cake_config_diffserv4()
2488 q->tins[2].tin_quantum = quantum >> 1; in cake_config_diffserv4()
2489 q->tins[3].tin_quantum = quantum >> 2; in cake_config_diffserv4()
2503 u64 rate = q->rate_bps; in cake_config_diffserv3() local
2506 q->tin_cnt = 3; in cake_config_diffserv3()
2509 q->tin_index = diffserv3; in cake_config_diffserv3()
2510 q->tin_order = bulk_order; in cake_config_diffserv3()
2513 cake_set_rate(&q->tins[0], rate, mtu, in cake_config_diffserv3()
2514 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2515 cake_set_rate(&q->tins[1], rate >> 4, mtu, in cake_config_diffserv3()
2516 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2517 cake_set_rate(&q->tins[2], rate >> 2, mtu, in cake_config_diffserv3()
2518 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2520 /* bandwidth-sharing weights */ in cake_config_diffserv3()
2521 q->tins[0].tin_quantum = quantum; in cake_config_diffserv3()
2522 q->tins[1].tin_quantum = quantum >> 4; in cake_config_diffserv3()
2523 q->tins[2].tin_quantum = quantum >> 2; in cake_config_diffserv3()
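The tin_quantum values above are the DRR weights between tins when every tin is backlogged: diffserv3 splits roughly 16:1:4 and diffserv4 roughly 16:1:8:4, with an idle tin's share simply redistributed to the others. A quick check of the diffserv4 shares, assuming the conventional Best Effort / Bulk / Video / Voice tin order:

#include <stdio.h>

int main(void)
{
	/* diffserv4 tin quanta from the excerpt: q, q>>4, q>>1, q>>2 */
	const char *name[4] = {"Best Effort", "Bulk", "Video", "Voice"};
	unsigned int quantum = 1024, share[4], total = 0;

	share[0] = quantum;
	share[1] = quantum >> 4;
	share[2] = quantum >> 1;
	share[3] = quantum >> 2;

	for (int i = 0; i < 4; i++)
		total += share[i];

	for (int i = 0; i < 4; i++)
		printf("%-12s %5.1f%% of the link under full contention\n",
		       name[i], 100.0 * share[i] / total);
	return 0;
}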
2533 switch (q->tin_mode) { in cake_reconfigure()
2556 for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) { in cake_reconfigure()
2558 q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time; in cake_reconfigure()
2561 q->rate_ns = q->tins[ft].tin_rate_ns; in cake_reconfigure()
2562 q->rate_shft = q->tins[ft].tin_rate_shft; in cake_reconfigure()
2564 if (q->buffer_config_limit) { in cake_reconfigure()
2565 q->buffer_limit = q->buffer_config_limit; in cake_reconfigure()
2566 } else if (q->rate_bps) { in cake_reconfigure()
2567 u64 t = q->rate_bps * q->interval; in cake_reconfigure()
2570 q->buffer_limit = max_t(u32, t, 4U << 20); in cake_reconfigure()
2572 q->buffer_limit = ~0; in cake_reconfigure()
2575 sch->flags &= ~TCQ_F_CAN_BYPASS; in cake_reconfigure()
2577 q->buffer_limit = min(q->buffer_limit, in cake_reconfigure()
2578 max(sch->limit * psched_mtu(qdisc_dev(sch)), in cake_reconfigure()
2579 q->buffer_config_limit)); in cake_reconfigure()
2596 flow_mode = q->flow_mode; in cake_change()
2605 return -EOPNOTSUPP; in cake_change()
2610 WRITE_ONCE(q->rate_bps, in cake_change()
2614 WRITE_ONCE(q->tin_mode, in cake_change()
2617 rate_flags = q->rate_flags; in cake_change()
2631 WRITE_ONCE(q->atm_mode, in cake_change()
2635 WRITE_ONCE(q->rate_overhead, in cake_change()
2639 q->max_netlen = 0; in cake_change()
2640 q->max_adjlen = 0; in cake_change()
2641 q->min_netlen = ~0; in cake_change()
2642 q->min_adjlen = ~0; in cake_change()
2648 q->max_netlen = 0; in cake_change()
2649 q->max_adjlen = 0; in cake_change()
2650 q->min_netlen = ~0; in cake_change()
2651 q->min_adjlen = ~0; in cake_change()
2655 WRITE_ONCE(q->rate_mpu, in cake_change()
2661 WRITE_ONCE(q->interval, max(interval, 1U)); in cake_change()
2667 WRITE_ONCE(q->target, max(target, 1U)); in cake_change()
2685 WRITE_ONCE(q->ack_filter, in cake_change()
2689 WRITE_ONCE(q->buffer_config_limit, in cake_change()
2700 WRITE_ONCE(q->fwmark_mask, nla_get_u32(tb[TCA_CAKE_FWMARK])); in cake_change()
2701 WRITE_ONCE(q->fwmark_shft, in cake_change()
2702 q->fwmark_mask ? __ffs(q->fwmark_mask) : 0); in cake_change()
2705 WRITE_ONCE(q->rate_flags, rate_flags); in cake_change()
2706 WRITE_ONCE(q->flow_mode, flow_mode); in cake_change()
2707 if (q->tins) { in cake_change()
2720 qdisc_watchdog_cancel(&q->watchdog); in cake_destroy()
2721 tcf_block_put(q->block); in cake_destroy()
2722 kvfree(q->tins); in cake_destroy()
2731 sch->limit = 10240; in cake_init()
2732 q->tin_mode = CAKE_DIFFSERV_DIFFSERV3; in cake_init()
2733 q->flow_mode = CAKE_FLOW_TRIPLE; in cake_init()
2735 q->rate_bps = 0; /* unlimited by default */ in cake_init()
2737 q->interval = 100000; /* 100ms default */ in cake_init()
2738 q->target = 5000; /* 5ms: codel RFC argues in cake_init()
2741 q->rate_flags |= CAKE_FLAG_SPLIT_GSO; in cake_init()
2742 q->cur_tin = 0; in cake_init()
2743 q->cur_flow = 0; in cake_init()
2745 qdisc_watchdog_init(&q->watchdog, sch); in cake_init()
2754 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in cake_init()
2762 q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data), in cake_init()
2764 if (!q->tins) in cake_init()
2765 return -ENOMEM; in cake_init()
2768 struct cake_tin_data *b = q->tins + i; in cake_init() local
2770 INIT_LIST_HEAD(&b->new_flows); in cake_init()
2771 INIT_LIST_HEAD(&b->old_flows); in cake_init()
2772 INIT_LIST_HEAD(&b->decaying_flows); in cake_init()
2773 b->sparse_flow_count = 0; in cake_init()
2774 b->bulk_flow_count = 0; in cake_init()
2775 b->decaying_flow_count = 0; in cake_init()
2778 struct cake_flow *flow = b->flows + j; in cake_init()
2781 INIT_LIST_HEAD(&flow->flowchain); in cake_init()
2782 cobalt_vars_init(&flow->cvars); in cake_init()
2784 q->overflow_heap[k].t = i; in cake_init()
2785 q->overflow_heap[k].b = j; in cake_init()
2786 b->overflow_idx[j] = k; in cake_init()
2791 q->avg_peak_bandwidth = q->rate_bps; in cake_init()
2792 q->min_netlen = ~0; in cake_init()
2793 q->min_adjlen = ~0; in cake_init()
2809 READ_ONCE(q->rate_bps), TCA_CAKE_PAD)) in cake_dump()
2812 flow_mode = READ_ONCE(q->flow_mode); in cake_dump()
2816 if (nla_put_u32(skb, TCA_CAKE_RTT, READ_ONCE(q->interval))) in cake_dump()
2819 if (nla_put_u32(skb, TCA_CAKE_TARGET, READ_ONCE(q->target))) in cake_dump()
2823 READ_ONCE(q->buffer_config_limit))) in cake_dump()
2826 rate_flags = READ_ONCE(q->rate_flags); in cake_dump()
2835 if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, READ_ONCE(q->ack_filter))) in cake_dump()
2842 if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, READ_ONCE(q->tin_mode))) in cake_dump()
2849 if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, READ_ONCE(q->rate_overhead))) in cake_dump()
2856 if (nla_put_u32(skb, TCA_CAKE_ATM, READ_ONCE(q->atm_mode))) in cake_dump()
2859 if (nla_put_u32(skb, TCA_CAKE_MPU, READ_ONCE(q->rate_mpu))) in cake_dump()
2866 if (nla_put_u32(skb, TCA_CAKE_FWMARK, READ_ONCE(q->fwmark_mask))) in cake_dump()
2872 return -1; in cake_dump()
2877 struct nlattr *stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP); in cake_dump_stats()
2883 return -1; in cake_dump_stats()
2886 if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \ in cake_dump_stats()
2890 if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \ in cake_dump_stats()
2895 PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth); in cake_dump_stats()
2896 PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit); in cake_dump_stats()
2897 PUT_STAT_U32(MEMORY_USED, q->buffer_max_used); in cake_dump_stats()
2898 PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16)); in cake_dump_stats()
2899 PUT_STAT_U32(MAX_NETLEN, q->max_netlen); in cake_dump_stats()
2900 PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen); in cake_dump_stats()
2901 PUT_STAT_U32(MIN_NETLEN, q->min_netlen); in cake_dump_stats()
2902 PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen); in cake_dump_stats()
2907 tstats = nla_nest_start_noflag(d->skb, TCA_CAKE_STATS_TIN_STATS); in cake_dump_stats()
2912 if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \ in cake_dump_stats()
2916 if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \ in cake_dump_stats()
2921 for (i = 0; i < q->tin_cnt; i++) { in cake_dump_stats()
2922 struct cake_tin_data *b = &q->tins[q->tin_order[i]]; in cake_dump_stats() local
2924 ts = nla_nest_start_noflag(d->skb, i + 1); in cake_dump_stats()
2928 PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps); in cake_dump_stats()
2929 PUT_TSTAT_U64(SENT_BYTES64, b->bytes); in cake_dump_stats()
2930 PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog); in cake_dump_stats()
2933 ktime_to_us(ns_to_ktime(b->cparams.target))); in cake_dump_stats()
2935 ktime_to_us(ns_to_ktime(b->cparams.interval))); in cake_dump_stats()
2937 PUT_TSTAT_U32(SENT_PACKETS, b->packets); in cake_dump_stats()
2938 PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped); in cake_dump_stats()
2939 PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark); in cake_dump_stats()
2940 PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops); in cake_dump_stats()
2943 ktime_to_us(ns_to_ktime(b->peak_delay))); in cake_dump_stats()
2945 ktime_to_us(ns_to_ktime(b->avge_delay))); in cake_dump_stats()
2947 ktime_to_us(ns_to_ktime(b->base_delay))); in cake_dump_stats()
2949 PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits); in cake_dump_stats()
2950 PUT_TSTAT_U32(WAY_MISSES, b->way_misses); in cake_dump_stats()
2951 PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions); in cake_dump_stats()
2953 PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count + in cake_dump_stats()
2954 b->decaying_flow_count); in cake_dump_stats()
2955 PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count); in cake_dump_stats()
2956 PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count); in cake_dump_stats()
2957 PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen); in cake_dump_stats()
2959 PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum); in cake_dump_stats()
2960 nla_nest_end(d->skb, ts); in cake_dump_stats()
2966 nla_nest_end(d->skb, tstats); in cake_dump_stats()
2967 return nla_nest_end(d->skb, stats); in cake_dump_stats()
2970 nla_nest_cancel(d->skb, stats); in cake_dump_stats()
2971 return -1; in cake_dump_stats()
3001 return q->block; in cake_tcf_block()
3007 tcm->tcm_handle |= TC_H_MIN(cl); in cake_dump_class()
3018 u32 idx = cl - 1; in cake_dump_class_stats()
3020 if (idx < CAKE_QUEUES * q->tin_cnt) { in cake_dump_class_stats()
3021 const struct cake_tin_data *b = \ in cake_dump_class_stats() local
3022 &q->tins[q->tin_order[idx / CAKE_QUEUES]]; in cake_dump_class_stats()
3025 flow = &b->flows[idx % CAKE_QUEUES]; in cake_dump_class_stats()
3027 if (flow->head) { in cake_dump_class_stats()
3029 skb = flow->head; in cake_dump_class_stats()
3032 skb = skb->next; in cake_dump_class_stats()
3036 qs.backlog = b->backlogs[idx % CAKE_QUEUES]; in cake_dump_class_stats()
3037 qs.drops = flow->dropped; in cake_dump_class_stats()
3040 return -1; in cake_dump_class_stats()
3044 stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP); in cake_dump_class_stats()
3046 return -1; in cake_dump_class_stats()
3049 if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \ in cake_dump_class_stats()
3053 if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \ in cake_dump_class_stats()
3057 PUT_STAT_S32(DEFICIT, flow->deficit); in cake_dump_class_stats()
3058 PUT_STAT_U32(DROPPING, flow->cvars.dropping); in cake_dump_class_stats()
3059 PUT_STAT_U32(COBALT_COUNT, flow->cvars.count); in cake_dump_class_stats()
3060 PUT_STAT_U32(P_DROP, flow->cvars.p_drop); in cake_dump_class_stats()
3061 if (flow->cvars.p_drop) { in cake_dump_class_stats()
3065 flow->cvars.blue_timer))); in cake_dump_class_stats()
3067 if (flow->cvars.dropping) { in cake_dump_class_stats()
3071 flow->cvars.drop_next))); in cake_dump_class_stats()
3074 if (nla_nest_end(d->skb, stats) < 0) in cake_dump_class_stats()
3075 return -1; in cake_dump_class_stats()
3081 nla_nest_cancel(d->skb, stats); in cake_dump_class_stats()
3082 return -1; in cake_dump_class_stats()
3090 if (arg->stop) in cake_walk()
3093 for (i = 0; i < q->tin_cnt; i++) { in cake_walk()
3094 struct cake_tin_data *b = &q->tins[q->tin_order[i]]; in cake_walk() local
3097 if (list_empty(&b->flows[j].flowchain)) { in cake_walk()
3098 arg->count++; in cake_walk()