Lines matching full:q

108 	u64	last_qdelay;	/* Q delay val at the last probability update */
147 static u64 head_enqueue_time(struct Qdisc *q)
149 struct sk_buff *skb = qdisc_peek_head(q);
170 static ktime_t next_pi2_timeout(struct dualpi2_sched_data *q)
172 return ktime_add_ns(ktime_get_ns(), q->pi2_tupdate);
185 static bool skb_apply_step(struct sk_buff *skb, struct dualpi2_sched_data *q)
187 return skb_is_l4s(skb) && qdisc_qlen(q->l_queue) >= q->min_qlen_step;
190 static bool dualpi2_mark(struct dualpi2_sched_data *q, struct sk_buff *skb)
193 q->ecn_mark++;
199 static void dualpi2_reset_c_protection(struct dualpi2_sched_data *q)
201 q->c_protection_credit = q->c_protection_init;
209 struct dualpi2_sched_data *q, u32 wc)
211 q->c_protection_wc = wc;
212 q->c_protection_wl = MAX_WC - wc;
213 q->c_protection_init = (s32)psched_mtu(qdisc_dev(sch)) *
214 ((int)q->c_protection_wc - (int)q->c_protection_wl);
215 dualpi2_reset_c_protection(q);
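The fragments from dualpi2_reset_c_protection() and dualpi2_calculate_c_protection() (lines 199-215) set the C-queue protection weights and the signed initial credit. A minimal userspace sketch of that arithmetic follows; MAX_WC = 100 and a 1500-byte MTU are assumptions of this sketch (the listing only shows wc arriving as a parameter and the MTU coming from psched_mtu()).

/* Hedged sketch of the credit arithmetic at lines 211-215.  MAX_WC = 100 is
 * assumed (consistent with the "wc=10%, wl=90%" comment at line 904); the
 * kernel takes the MTU from psched_mtu(qdisc_dev(sch)). */
#include <stdio.h>

#define MAX_WC 100 /* assumed */

int main(void)
{
	int mtu = 1500;              /* assumed MTU */
	int wc = 10;                 /* default weight, line 904 */
	int wl = MAX_WC - wc;        /* 90 */
	int init = mtu * (wc - wl);  /* 1500 * (10 - 90) = -120000 */

	printf("wc=%d wl=%d c_protection_init=%d\n", wc, wl, init);
	return 0;
}

A negative starting credit biases the first dequeues toward the L queue (per the credit test at line 527) until the configured share has been consumed.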
230 static bool dualpi2_classic_marking(struct dualpi2_sched_data *q,
237 dualpi2_mark(q, skb);
247 * @q->drop_overload), apply classic drops first before marking.
253 static bool dualpi2_scalable_marking(struct dualpi2_sched_data *q,
260 if (!q->drop_overload ||
272 dualpi2_mark(q, skb);
282 static bool must_drop(struct Qdisc *sch, struct dualpi2_sched_data *q,
292 prob = READ_ONCE(q->pi2_prob);
293 local_l_prob = (u64)prob * q->coupling_factor;
298 return dualpi2_classic_marking(q, skb, prob, overload);
300 return dualpi2_scalable_marking(q, skb, local_l_prob, prob,
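must_drop() (lines 282-300) derives the per-queue probabilities from the single PI2 output: line 293 multiplies the base probability by the coupling factor for the L queue. A hedged sketch of the resulting relation is below; the squaring of the base probability for the classic queue is not visible in this listing and is taken from the DualQ Coupled AQM design (RFC 9332), so treat it as an assumption.

/* Hedged sketch of the coupled probabilities around must_drop() (lines
 * 282-300).  p_L = k * p' is visible at line 293; p_C = p'^2 comes from the
 * DualQ Coupled AQM design (RFC 9332) and is an assumption here. */
#include <stdio.h>

int main(void)
{
	double p_base = 0.02;         /* q->pi2_prob, normalised to [0, 1] */
	unsigned int k = 2;           /* q->coupling_factor default, line 908 */

	double p_l = k * p_base;      /* scalable (L4S) marking probability */
	if (p_l > 1.0)
		p_l = 1.0;
	double p_c = p_base * p_base; /* classic drop/mark probability */

	printf("p'=%.4f p_L=%.4f p_C=%.6f\n", p_base, p_l, p_c);
	return 0;
}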
341 static int dualpi2_skb_classify(struct dualpi2_sched_data *q,
350 if (cb->ect & q->ecn_mask) {
355 if (TC_H_MAJ(skb->priority) == q->sch->handle &&
361 fl = rcu_dereference_bh(q->tcf_filters);
388 struct dualpi2_sched_data *q = qdisc_priv(sch);
392 unlikely((u64)q->memory_used + skb->truesize > q->memory_limit)) {
395 qdisc_qstats_overlimit(q->l_queue);
400 if (q->drop_early && must_drop(sch, q, skb)) {
408 q->memory_used += skb->truesize;
409 if (q->memory_used > q->max_memory_used)
410 q->max_memory_used = q->memory_used;
412 if (qdisc_qlen(sch) > q->maxq)
413 q->maxq = qdisc_qlen(sch);
417 dualpi2_skb_cb(skb)->apply_step = skb_apply_step(skb, q);
420 ++sch->q.qlen;
422 ++q->packets_in_l;
423 if (!q->l_head_ts)
424 q->l_head_ts = cb->ts;
425 return qdisc_enqueue_tail(skb, q->l_queue);
427 ++q->packets_in_c;
428 if (!q->c_head_ts)
429 q->c_head_ts = cb->ts;
444 struct dualpi2_sched_data *q = qdisc_priv(sch);
447 err = dualpi2_skb_classify(q, skb);
455 if (q->split_gso && skb_is_gso(skb)) {
518 struct dualpi2_sched_data *q,
526 c_len = qdisc_qlen(sch) - qdisc_qlen(q->l_queue);
527 if (qdisc_qlen(q->l_queue) && (!c_len || q->c_protection_credit <= 0)) {
528 skb = __qdisc_dequeue_head(&q->l_queue->q);
529 WRITE_ONCE(q->l_head_ts, head_enqueue_time(q->l_queue));
531 *credit_change = q->c_protection_wc;
532 qdisc_qstats_backlog_dec(q->l_queue, skb);
535 --sch->q.qlen;
536 q->memory_used -= skb->truesize;
538 skb = __qdisc_dequeue_head(&sch->q);
539 WRITE_ONCE(q->c_head_ts, head_enqueue_time(sch));
540 if (qdisc_qlen(q->l_queue))
541 *credit_change = ~((s32)q->c_protection_wl) + 1;
542 q->memory_used -= skb->truesize;
544 dualpi2_reset_c_protection(q);
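dequeue_packet() (lines 518-544) implements the weighted credit scheduler that protects the classic queue: serve the L queue while it has packets and either the C queue is empty or the credit is exhausted, add wc per L dequeue, subtract wl per C dequeue (line 541 negates wl via two's complement), and reset the credit once the qdisc drains. The sketch below replays that rule on packet counts; the kernel's byte-based credit (the MTU-scaled initial credit at line 213) is simplified away here.

/* Hedged sketch of the credit scheduler in dequeue_packet() (lines 518-544).
 * Credits are counted per packet in this sketch; the kernel initialises the
 * credit in bytes (line 213), so this is a simplification, not a claim about
 * the kernel's accounting. */
#include <stdio.h>

int main(void)
{
	int l_qlen = 6, c_qlen = 6;
	int credit = 0;
	const int wc = 10, wl = 90;   /* default 10%/90% split, line 904 */

	while (l_qlen || c_qlen) {
		if (l_qlen && (!c_qlen || credit <= 0)) {
			l_qlen--;
			credit += wc;      /* line 531 */
			printf("L, credit=%d\n", credit);
		} else {
			c_qlen--;
			credit -= wl;      /* line 541 */
			printf("C, credit=%d\n", credit);
		}
	}
	return 0;
}

In this simplified model the long-run split when both queues stay backlogged approaches wl:wc, i.e. the C queue keeps roughly a 10% dequeue share with the defaults.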
552 static int do_step_aqm(struct dualpi2_sched_data *q, struct sk_buff *skb,
557 if (q->step_in_packets)
558 qdelay = qdisc_qlen(q->l_queue);
562 if (dualpi2_skb_cb(skb)->apply_step && qdelay > q->step_thresh) {
568 if (dualpi2_mark(q, skb))
569 ++q->step_marks;
571 qdisc_bstats_update(q->l_queue, skb);
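do_step_aqm() (lines 552-571), together with skb_apply_step() (lines 185-187), is the step threshold on the L queue: only L4S packets in a sufficiently long L queue are eligible, and they are marked once the queue measure exceeds step_thresh, counted in packets or in time depending on step_in_packets. The time-based branch below (queue delay as now minus the enqueue timestamp) is an assumption consistent with the timestamp handling elsewhere in the listing, not code shown above.

/* Hedged sketch of the L-queue "step" marking decision (lines 552-571,
 * eligibility from lines 185-187).  The delay computation is assumed. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool step_mark(bool is_l4s, uint32_t l_qlen, uint32_t min_qlen_step,
		      bool step_in_packets, uint64_t step_thresh,
		      uint64_t now_ns, uint64_t enqueue_ns)
{
	/* skb_apply_step(): only L4S traffic in a long-enough L queue. */
	if (!(is_l4s && l_qlen >= min_qlen_step))
		return false;

	uint64_t qdelay = step_in_packets ? l_qlen : now_ns - enqueue_ns;

	return qdelay > step_thresh;
}

int main(void)
{
	/* Defaults from dualpi2_reset_default(): 1 ms threshold, time-based. */
	bool mark = step_mark(true, 3, 0, false, 1000000ULL,
			      5000000ULL, 2000000ULL);
	printf("step mark: %s\n", mark ? "yes" : "no");
	return 0;
}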
575 static void drop_and_retry(struct dualpi2_sched_data *q, struct sk_buff *skb,
578 ++q->deferred_drops_cnt;
579 q->deferred_drops_len += qdisc_pkt_len(skb);
586 struct dualpi2_sched_data *q = qdisc_priv(sch);
593 while ((skb = dequeue_packet(sch, q, &credit_change, now))) {
594 if (!q->drop_early && must_drop(sch, q, skb)) {
595 drop_and_retry(q, skb, sch,
600 if (skb_in_l_queue(skb) && do_step_aqm(q, skb, now)) {
601 qdisc_qstats_drop(q->l_queue);
602 drop_and_retry(q, skb, sch,
607 q->c_protection_credit += credit_change;
612 if (q->deferred_drops_cnt) {
613 qdisc_tree_reduce_backlog(sch, q->deferred_drops_cnt,
614 q->deferred_drops_len);
615 q->deferred_drops_cnt = 0;
616 q->deferred_drops_len = 0;
627 static void get_queue_delays(struct dualpi2_sched_data *q, u64 *qdelay_c,
633 qc = READ_ONCE(q->c_head_ts);
634 ql = READ_ONCE(q->l_head_ts);
642 struct dualpi2_sched_data *q = qdisc_priv(sch);
649 get_queue_delays(q, &qdelay_c, &qdelay_l);
655 delta = ((s64)qdelay - (s64)q->pi2_target) * q->pi2_alpha;
656 delta += ((s64)qdelay - (s64)q->last_qdelay) * q->pi2_beta;
657 q->last_qdelay = qdelay;
661 new_prob = __scale_delta(delta) + q->pi2_prob;
662 if (new_prob < q->pi2_prob)
665 new_prob = q->pi2_prob - __scale_delta(~delta + 1);
666 if (new_prob > q->pi2_prob)
673 if (!q->drop_overload)
674 return min_t(u32, new_prob, MAX_PROB / q->coupling_factor);
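calculate_probability() (lines 642-674) is the PI controller itself: the delta at lines 655-656 is a proportional-integral step on the queue-delay error, the result is added to the previous probability with wrap-around clamping (lines 661-666), and when drop-on-overload is disabled the probability is capped so the coupled L probability cannot exceed the maximum (line 674). A floating-point sketch of that update follows; the kernel's fixed-point scaling (__scale_delta(), dualpi2_scale_alpha_beta()) is not reproduced, and the example delay values are made up.

/* Hedged, floating-point sketch of the PI update in calculate_probability()
 * (lines 642-674).  The kernel works in fixed point and clamps on
 * wrap-around; this sketch only shows the controller equation and the
 * no-drop-on-overload cap. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	double prob = 0.01;          /* current q->pi2_prob, normalised */
	double last_qdelay = 0.018;  /* seconds, example value */
	double qdelay = 0.020;       /* max of C and L queue delay, example */
	double target = 0.015;       /* 15 ms default, line 896 */
	double alpha = 0.16;         /* per the comment at line 898 */
	double beta = 3.2;           /* per the comment at line 899 */
	unsigned int coupling = 2;   /* q->coupling_factor default, line 908 */
	bool drop_overload = true;

	/* Proportional-integral step on the queue-delay error (lines 655-656). */
	double delta = (qdelay - target) * alpha + (qdelay - last_qdelay) * beta;
	double new_prob = prob + delta;

	if (new_prob < 0.0)
		new_prob = 0.0;
	if (new_prob > 1.0)
		new_prob = 1.0;
	/* Without drop-on-overload, keep coupling * prob within bounds (line 674). */
	if (!drop_overload && new_prob > 1.0 / coupling)
		new_prob = 1.0 / coupling;

	printf("prob: %.6f -> %.6f\n", prob, new_prob);
	return 0;
}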
712 struct dualpi2_sched_data *q = timer_container_of(q, timer, pi2_timer);
713 struct Qdisc *sch = q->sch;
720 WRITE_ONCE(q->pi2_prob, calculate_probability(sch));
721 hrtimer_set_expires(&q->pi2_timer, next_pi2_timeout(q));
763 struct dualpi2_sched_data *q;
781 q = qdisc_priv(sch);
788 WRITE_ONCE(q->memory_limit, get_memory_limit(sch, limit));
792 WRITE_ONCE(q->memory_limit,
798 WRITE_ONCE(q->pi2_target, target * NSEC_PER_USEC);
804 WRITE_ONCE(q->pi2_tupdate, convert_us_to_nsec(tupdate));
810 WRITE_ONCE(q->pi2_alpha, dualpi2_scale_alpha_beta(alpha));
816 WRITE_ONCE(q->pi2_beta, dualpi2_scale_alpha_beta(beta));
822 WRITE_ONCE(q->step_in_packets, true);
823 WRITE_ONCE(q->step_thresh, step_th);
827 WRITE_ONCE(q->step_in_packets, false);
828 WRITE_ONCE(q->step_thresh, convert_us_to_nsec(step_th));
832 WRITE_ONCE(q->min_qlen_step,
838 WRITE_ONCE(q->coupling_factor, coupling);
844 WRITE_ONCE(q->drop_overload, (bool)drop_overload);
850 WRITE_ONCE(q->drop_early, (bool)drop_early);
856 dualpi2_calculate_c_protection(sch, q, wc);
862 WRITE_ONCE(q->ecn_mask, ecn_mask);
868 WRITE_ONCE(q->split_gso, (bool)split_gso);
874 q->memory_used > q->memory_limit) {
877 q->memory_used -= skb->truesize;
891 struct dualpi2_sched_data *q = qdisc_priv(sch);
893 q->sch->limit = 10000; /* Max 125ms at 1Gbps */
894 q->memory_limit = get_memory_limit(sch, q->sch->limit);
896 q->pi2_target = 15 * NSEC_PER_MSEC;
897 q->pi2_tupdate = 16 * NSEC_PER_MSEC;
898 q->pi2_alpha = dualpi2_scale_alpha_beta(41); /* ~0.16 Hz * 256 */
899 q->pi2_beta = dualpi2_scale_alpha_beta(819); /* ~3.20 Hz * 256 */
901 q->step_thresh = 1 * NSEC_PER_MSEC;
902 q->step_in_packets = false;
904 dualpi2_calculate_c_protection(q->sch, q, 10); /* wc=10%, wl=90% */
906 q->ecn_mask = TC_DUALPI2_ECN_MASK_L4S_ECT; /* INET_ECN_ECT_1 */
907 q->min_qlen_step = 0; /* Always apply step mark in L-queue */
908 q->coupling_factor = 2; /* window fairness for equal RTTs */
909 q->drop_overload = TC_DUALPI2_DROP_OVERLOAD_DROP; /* Drop overload */
910 q->drop_early = TC_DUALPI2_DROP_EARLY_DROP_DEQUEUE; /* Drop dequeue */
911 q->split_gso = TC_DUALPI2_SPLIT_GSO_SPLIT_GSO; /* Split GSO */
917 struct dualpi2_sched_data *q = qdisc_priv(sch);
920 q->l_queue = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
922 if (!q->l_queue)
925 err = tcf_block_get(&q->tcf_block, &q->tcf_filters, sch, extack);
929 q->sch = sch;
931 hrtimer_setup(&q->pi2_timer, dualpi2_timer, CLOCK_MONOTONIC,
941 hrtimer_start(&q->pi2_timer, next_pi2_timeout(q),
948 struct dualpi2_sched_data *q = qdisc_priv(sch);
953 step_in_pkts = READ_ONCE(q->step_in_packets);
954 step_th = READ_ONCE(q->step_thresh);
963 READ_ONCE(q->memory_limit)) ||
965 convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
967 convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
969 dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
971 dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
974 READ_ONCE(q->min_qlen_step)) ||
976 READ_ONCE(q->coupling_factor)) ||
978 READ_ONCE(q->drop_overload)) ||
980 READ_ONCE(q->drop_early)) ||
982 READ_ONCE(q->c_protection_wc)) ||
983 nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
984 nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
990 READ_ONCE(q->memory_limit)) ||
992 convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
994 convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
996 dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
998 dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
1002 READ_ONCE(q->min_qlen_step)) ||
1004 READ_ONCE(q->coupling_factor)) ||
1006 READ_ONCE(q->drop_overload)) ||
1008 READ_ONCE(q->drop_early)) ||
1010 READ_ONCE(q->c_protection_wc)) ||
1011 nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
1012 nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
1024 struct dualpi2_sched_data *q = qdisc_priv(sch);
1026 .prob = READ_ONCE(q->pi2_prob),
1027 .packets_in_c = q->packets_in_c,
1028 .packets_in_l = q->packets_in_l,
1029 .maxq = q->maxq,
1030 .ecn_mark = q->ecn_mark,
1031 .credit = q->c_protection_credit,
1032 .step_marks = q->step_marks,
1033 .memory_used = q->memory_used,
1034 .max_memory_used = q->max_memory_used,
1035 .memory_limit = q->memory_limit,
1039 get_queue_delays(q, &qc, &ql);
1051 struct dualpi2_sched_data *q = qdisc_priv(sch);
1054 qdisc_reset_queue(q->l_queue);
1055 q->c_head_ts = 0;
1056 q->l_head_ts = 0;
1057 q->pi2_prob = 0;
1058 q->packets_in_c = 0;
1059 q->packets_in_l = 0;
1060 q->maxq = 0;
1061 q->ecn_mark = 0;
1062 q->step_marks = 0;
1063 q->memory_used = 0;
1064 q->max_memory_used = 0;
1065 dualpi2_reset_c_protection(q);
1070 struct dualpi2_sched_data *q = qdisc_priv(sch);
1072 q->pi2_tupdate = 0;
1073 hrtimer_cancel(&q->pi2_timer);
1074 if (q->l_queue)
1075 qdisc_put(q->l_queue);
1076 tcf_block_put(q->tcf_block);
1095 static void dualpi2_unbind(struct Qdisc *q, unsigned long cl)
1102 struct dualpi2_sched_data *q = qdisc_priv(sch);
1106 return q->tcf_block;