xref: /linux/net/sched/sch_dualpi2.c (revision 0fc8f6200d2313278fbf4539bbab74677c685531)
1 // SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
2 /* Copyright (C) 2024 Nokia
3  *
4  * Author: Koen De Schepper <koen.de_schepper@nokia-bell-labs.com>
5  * Author: Olga Albisser <olga@albisser.org>
6  * Author: Henrik Steen <henrist@henrist.net>
7  * Author: Olivier Tilmans <olivier.tilmans@nokia.com>
8  * Author: Chia-Yu Chang <chia-yu.chang@nokia-bell-labs.com>
9  *
10  * DualPI Improved with a Square (dualpi2):
11  * - Supports congestion controls that comply with the Prague requirements
12  *   in RFC9331 (e.g. TCP-Prague)
13  * - Supports coupled dual-queue with PI2 as defined in RFC9332
14  * - Supports ECN L4S-identifier (IP.ECN==0b*1)
15  *
16  * note: Although DCTCP and BBRv3 can use shallow-threshold ECN marks,
17  *   they do not meet the 'Prague L4S Requirements' listed in RFC 9331
18  *   Section 4, so they can only be used with DualPI2 in a datacenter
19  *   context.
20  *
21  * References:
22  * - RFC9332: https://datatracker.ietf.org/doc/html/rfc9332
23  * - De Schepper, Koen, et al. "PI 2: A linearized AQM for both classic and
24  *   scalable TCP."  in proc. ACM CoNEXT'16, 2016.
25  */
26 
27 #include <linux/errno.h>
28 #include <linux/hrtimer.h>
29 #include <linux/if_vlan.h>
30 #include <linux/kernel.h>
31 #include <linux/limits.h>
32 #include <linux/module.h>
33 #include <linux/skbuff.h>
34 #include <linux/types.h>
35 
36 #include <net/gso.h>
37 #include <net/inet_ecn.h>
38 #include <net/pkt_cls.h>
39 #include <net/pkt_sched.h>
40 
/* Using 32b enables support for flows with windows of up to ~8.6 * 1e9
 * packets, i.e., twice the maximal snd_cwnd.
 * MAX_PROB must be consistent with the RNG in dualpi2_roll().
 */
45 #define MAX_PROB U32_MAX
46 
47 /* alpha/beta values exchanged over netlink are in units of 256ns */
48 #define ALPHA_BETA_SHIFT 8
49 
/* Scaled values of alpha/beta must fit in 32b to avoid overflow in later
 * computations. Consequently (see dualpi2_scale_alpha_beta()), their
 * netlink-provided values can use at most 31b, i.e. be at most (2^23)-1
 * (~4MHz) as those are given in 1/256th. This enables tuning alpha/beta to
 * control flows whose maximal RTTs can be in usec up to few secs.
 */
56 #define ALPHA_BETA_MAX ((1U << 31) - 1)
57 
/* Internal alpha/beta are in units of 64ns.
 * This makes it possible to use all alpha/beta values in the allowed range
 * without loss of precision due to rounding when scaling them internally,
 * e.g., scale_alpha_beta(1) will not round down to 0.
 */
63 #define ALPHA_BETA_GRANULARITY 6
64 
65 #define ALPHA_BETA_SCALING (ALPHA_BETA_SHIFT - ALPHA_BETA_GRANULARITY)
66 
67 /* We express the weights (wc, wl) in %, i.e., wc + wl = 100 */
68 #define MAX_WC 100
69 
/* Per-qdisc private state. Fields are grouped by role: the two queues,
 * attached tc filters, PI2 controller state, step AQM configuration, WRR
 * C-queue starvation protection, general knobs, and statistics.
 */
struct dualpi2_sched_data {
	struct Qdisc *l_queue;	/* The L4S Low latency queue (L-queue) */
	struct Qdisc *sch;	/* The Classic queue (C-queue) */

	/* Registered tc filters */
	struct tcf_proto __rcu *tcf_filters;
	struct tcf_block *tcf_block;

	/* PI2 parameters */
	u64	pi2_target;	/* Target delay in nanoseconds */
	u32	pi2_tupdate;	/* Timer frequency in nanoseconds */
	u32	pi2_prob;	/* Base PI probability */
	u32	pi2_alpha;	/* Gain factor for the integral rate response */
	u32	pi2_beta;	/* Gain factor for the proportional response */
	struct hrtimer pi2_timer; /* prob update timer */

	/* Step AQM (L-queue only) parameters */
	u32	step_thresh;	/* Step threshold */
	bool	step_in_packets; /* Step thresh in packets (1) or time (0) */

	/* C-queue starvation protection */
	s32	c_protection_credit; /* Credit (sign indicates which queue) */
	s32	c_protection_init; /* Reset value of the credit */
	u8	c_protection_wc; /* C-queue weight (between 0 and MAX_WC) */
	u8	c_protection_wl; /* L-queue weight (MAX_WC - wc) */

	/* General dualQ parameters */
	u32	memory_limit;	/* Memory limit of both queues */
	u8	coupling_factor;/* Coupling factor (k) between both queues */
	u8	ecn_mask;	/* Mask to match packets into L-queue */
	u32	min_qlen_step;	/* Minimum queue length to apply step thresh */
	bool	drop_early;	/* Drop at enqueue (1) instead of dequeue  (0) */
	bool	drop_overload;	/* Drop (1) on overload, or overflow (0) */
	bool	split_gso;	/* Split aggregated skb (1) or leave as is (0) */

	/* Statistics */
	u64	c_head_ts;	/* Enqueue timestamp of the C-queue head */
	u64	l_head_ts;	/* Enqueue timestamp of the L-queue head */
	u64	last_qdelay;	/* Q delay val at the last probability update */
	u32	packets_in_c;	/* Enqueue packet counter of the C-queue */
	u32	packets_in_l;	/* Enqueue packet counter of the L-queue */
	u32	maxq;		/* Maximum queue size of the C-queue */
	u32	ecn_mark;	/* ECN mark pkt counter due to PI probability */
	u32	step_marks;	/* ECN mark pkt counter due to step AQM */
	u32	memory_used;	/* Memory used of both queues */
	u32	max_memory_used;/* Maximum used memory */

	/* Deferred drop statistics */
	u32	deferred_drops_cnt;	/* Packets dropped */
	u32	deferred_drops_len;	/* Bytes dropped */
};
121 
/* Per-packet control block, stored in the qdisc private area of skb->cb.
 * Filled at classification/enqueue time, consumed at dequeue.
 */
struct dualpi2_skb_cb {
	u64 ts;			/* Timestamp at enqueue */
	u8 apply_step:1,	/* Can we apply the step threshold */
	   classified:2,	/* Packet classification results */
	   ect:2;		/* Packet ECT codepoint */
};
128 
/* Values stored in dualpi2_skb_cb.classified; also usable as the minor part
 * of skb->priority or of a tc filter classid to force a queue choice.
 */
enum dualpi2_classification_results {
	DUALPI2_C_CLASSIC	= 0,	/* C-queue */
	DUALPI2_C_L4S		= 1,	/* L-queue (scale mark/classic drop) */
	DUALPI2_C_LLLL		= 2,	/* L-queue (no drops/marks) */
	__DUALPI2_C_MAX			/* Keep last */
};
135 
136 static struct dualpi2_skb_cb *dualpi2_skb_cb(struct sk_buff *skb)
137 {
138 	qdisc_cb_private_validate(skb, sizeof(struct dualpi2_skb_cb));
139 	return (struct dualpi2_skb_cb *)qdisc_skb_cb(skb)->data;
140 }
141 
142 static u64 dualpi2_sojourn_time(struct sk_buff *skb, u64 reference)
143 {
144 	return reference - dualpi2_skb_cb(skb)->ts;
145 }
146 
147 static u64 head_enqueue_time(struct Qdisc *q)
148 {
149 	struct sk_buff *skb = qdisc_peek_head(q);
150 
151 	return skb ? dualpi2_skb_cb(skb)->ts : 0;
152 }
153 
/* Convert a netlink-provided alpha/beta value into the internal fixed-point
 * form: multiply by MAX_PROB, drop ALPHA_BETA_SCALING bits, then divide by
 * NSEC_PER_SEC so that later delay computations operate on nanoseconds.
 */
static u32 dualpi2_scale_alpha_beta(u32 param)
{
	u64 tmp = ((u64)param * MAX_PROB >> ALPHA_BETA_SCALING);

	do_div(tmp, NSEC_PER_SEC);
	return tmp;
}
161 
/* Inverse of dualpi2_scale_alpha_beta(): recover the netlink representation
 * of an internally-scaled alpha/beta value for dumping back to userspace.
 */
static u32 dualpi2_unscale_alpha_beta(u32 param)
{
	u64 tmp = ((u64)param * NSEC_PER_SEC << ALPHA_BETA_SCALING);

	do_div(tmp, MAX_PROB);
	return tmp;
}
169 
170 static ktime_t next_pi2_timeout(struct dualpi2_sched_data *q)
171 {
172 	return ktime_add_ns(ktime_get_ns(), q->pi2_tupdate);
173 }
174 
175 static bool skb_is_l4s(struct sk_buff *skb)
176 {
177 	return dualpi2_skb_cb(skb)->classified == DUALPI2_C_L4S;
178 }
179 
180 static bool skb_in_l_queue(struct sk_buff *skb)
181 {
182 	return dualpi2_skb_cb(skb)->classified != DUALPI2_C_CLASSIC;
183 }
184 
185 static bool skb_apply_step(struct sk_buff *skb, struct dualpi2_sched_data *q)
186 {
187 	return skb_is_l4s(skb) && qdisc_qlen(q->l_queue) >= q->min_qlen_step;
188 }
189 
190 static bool dualpi2_mark(struct dualpi2_sched_data *q, struct sk_buff *skb)
191 {
192 	if (INET_ECN_set_ce(skb)) {
193 		q->ecn_mark++;
194 		return true;
195 	}
196 	return false;
197 }
198 
/* Restart the WRR credit from its configured initial value. */
static void dualpi2_reset_c_protection(struct dualpi2_sched_data *q)
{
	q->c_protection_credit = q->c_protection_init;
}
203 
204 /* This computes the initial credit value and WRR weight for the L queue (wl)
205  * from the weight of the C queue (wc).
206  * If wl > wc, the scheduler will start with the L queue when reset.
207  */
208 static void dualpi2_calculate_c_protection(struct Qdisc *sch,
209 					   struct dualpi2_sched_data *q, u32 wc)
210 {
211 	q->c_protection_wc = wc;
212 	q->c_protection_wl = MAX_WC - wc;
213 	q->c_protection_init = (s32)psched_mtu(qdisc_dev(sch)) *
214 		((int)q->c_protection_wc - (int)q->c_protection_wl);
215 	dualpi2_reset_c_protection(q);
216 }
217 
218 static bool dualpi2_roll(u32 prob)
219 {
220 	return get_random_u32() <= prob;
221 }
222 
223 /* Packets in the C-queue are subject to a marking probability pC, which is the
224  * square of the internal PI probability (i.e., have an overall lower mark/drop
225  * probability). If the qdisc is overloaded, ignore ECT values and only drop.
226  *
227  * Note that this marking scheme is also applied to L4S packets during overload.
228  * Return true if packet dropping is required in C queue
229  */
230 static bool dualpi2_classic_marking(struct dualpi2_sched_data *q,
231 				    struct sk_buff *skb, u32 prob,
232 				    bool overload)
233 {
234 	if (dualpi2_roll(prob) && dualpi2_roll(prob)) {
235 		if (overload || dualpi2_skb_cb(skb)->ect == INET_ECN_NOT_ECT)
236 			return true;
237 		dualpi2_mark(q, skb);
238 	}
239 	return false;
240 }
241 
242 /* Packets in the L-queue are subject to a marking probability pL given by the
243  * internal PI probability scaled by the coupling factor.
244  *
245  * On overload (i.e., @local_l_prob is >= 100%):
246  * - if the qdisc is configured to trade losses to preserve latency (i.e.,
247  *   @q->drop_overload), apply classic drops first before marking.
248  * - otherwise, preserve the "no loss" property of ECN at the cost of queueing
249  *   delay, eventually resulting in taildrop behavior once sch->limit is
250  *   reached.
251  * Return true if packet dropping is required in L queue
252  */
static bool dualpi2_scalable_marking(struct dualpi2_sched_data *q,
				     struct sk_buff *skb,
				     u64 local_l_prob, u32 prob,
				     bool overload)
{
	if (overload) {
		/* Apply classic drop */
		if (!q->drop_overload ||
		    !(dualpi2_roll(prob) && dualpi2_roll(prob)))
			goto mark;
		return true;
	}

	/* We can safely cut the upper 32b as overload==false */
	if (dualpi2_roll(local_l_prob)) {
		/* Non-ECT packets could have classified as L4S by filters. */
		if (dualpi2_skb_cb(skb)->ect == INET_ECN_NOT_ECT)
			return true;
mark:
		dualpi2_mark(q, skb);
	}
	return false;
}
276 
277 /* Decide whether a given packet must be dropped (or marked if ECT), according
278  * to the PI2 probability.
279  *
280  * Never mark/drop if we have a standing queue of less than 2 MTUs.
281  */
static bool must_drop(struct Qdisc *sch, struct dualpi2_sched_data *q,
		      struct sk_buff *skb)
{
	u64 local_l_prob;
	bool overload;
	u32 prob;

	/* No mark/drop with less than 2 MTUs of standing queue */
	if (sch->qstats.backlog < 2 * psched_mtu(qdisc_dev(sch)))
		return false;

	/* Pairs with the WRITE_ONCE() in dualpi2_timer() */
	prob = READ_ONCE(q->pi2_prob);
	/* L-queue probability is the base probability scaled by the coupling
	 * factor (k); exceeding MAX_PROB signals overload.
	 */
	local_l_prob = (u64)prob * q->coupling_factor;
	overload = local_l_prob > MAX_PROB;

	switch (dualpi2_skb_cb(skb)->classified) {
	case DUALPI2_C_CLASSIC:
		return dualpi2_classic_marking(q, skb, prob, overload);
	case DUALPI2_C_L4S:
		return dualpi2_scalable_marking(q, skb, local_l_prob, prob,
						overload);
	default: /* DUALPI2_C_LLLL */
		return false;
	}
}
306 
/* Cache @skb's ECN codepoint in its control block, making sure the IP header
 * is pullable and writable so a CE mark can later be applied in place.
 */
static void dualpi2_read_ect(struct sk_buff *skb)
{
	struct dualpi2_skb_cb *cb = dualpi2_skb_cb(skb);
	int wlen = skb_network_offset(skb);

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		wlen += sizeof(struct iphdr);
		if (!pskb_may_pull(skb, wlen) ||
		    skb_try_make_writable(skb, wlen))
			goto not_ecn;

		cb->ect = ipv4_get_dsfield(ip_hdr(skb)) & INET_ECN_MASK;
		break;
	case htons(ETH_P_IPV6):
		wlen += sizeof(struct ipv6hdr);
		if (!pskb_may_pull(skb, wlen) ||
		    skb_try_make_writable(skb, wlen))
			goto not_ecn;

		cb->ect = ipv6_get_dsfield(ipv6_hdr(skb)) & INET_ECN_MASK;
		break;
	default:
		goto not_ecn;
	}
	return;

not_ecn:
	/* Non pullable/writable packets can only be dropped hence are
	 * classified as not ECT.
	 */
	cb->ect = INET_ECN_NOT_ECT;
}
340 
/* Select the target queue for @skb and record it in the skb control block.
 * Precedence: ECN codepoint match against q->ecn_mask, then an explicit
 * skb->priority addressed to this qdisc, then attached tc filters (with a
 * fallback to the C-queue when no filter matches or the classid is invalid).
 */
static int dualpi2_skb_classify(struct dualpi2_sched_data *q,
				struct sk_buff *skb)
{
	struct dualpi2_skb_cb *cb = dualpi2_skb_cb(skb);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	dualpi2_read_ect(skb);
	if (cb->ect & q->ecn_mask) {
		cb->classified = DUALPI2_C_L4S;
		return NET_XMIT_SUCCESS;
	}

	if (TC_H_MAJ(skb->priority) == q->sch->handle &&
	    TC_H_MIN(skb->priority) < __DUALPI2_C_MAX) {
		cb->classified = TC_H_MIN(skb->priority);
		return NET_XMIT_SUCCESS;
	}

	fl = rcu_dereference_bh(q->tcf_filters);
	if (!fl) {
		cb->classified = DUALPI2_C_CLASSIC;
		return NET_XMIT_SUCCESS;
	}

	result = tcf_classify(skb, NULL, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
		}
#endif
		cb->classified = TC_H_MIN(res.classid) < __DUALPI2_C_MAX ?
			TC_H_MIN(res.classid) : DUALPI2_C_CLASSIC;
	}
	return NET_XMIT_SUCCESS;
}
384 
/* Enqueue @skb in its previously-selected queue after limit/memory checks
 * and the optional enqueue-time (early) drop decision. Timestamps the packet
 * and keeps the overall qdisc counters consistent for L-queue packets.
 */
static int dualpi2_enqueue_skb(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct dualpi2_sched_data *q = qdisc_priv(sch);
	struct dualpi2_skb_cb *cb;

	/* Tail-drop when either the packet limit or the memory limit is hit */
	if (unlikely(qdisc_qlen(sch) >= sch->limit) ||
	    unlikely((u64)q->memory_used + skb->truesize > q->memory_limit)) {
		qdisc_qstats_overlimit(sch);
		if (skb_in_l_queue(skb))
			qdisc_qstats_overlimit(q->l_queue);
		return qdisc_drop_reason(skb, sch, to_free, QDISC_DROP_OVERLIMIT);
	}

	if (q->drop_early && must_drop(sch, q, skb)) {
		qdisc_drop_reason(skb, sch, to_free, QDISC_DROP_CONGESTED);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	cb = dualpi2_skb_cb(skb);
	cb->ts = ktime_get_ns();
	q->memory_used += skb->truesize;
	if (q->memory_used > q->max_memory_used)
		q->max_memory_used = q->memory_used;

	if (qdisc_qlen(sch) > q->maxq)
		q->maxq = qdisc_qlen(sch);

	if (skb_in_l_queue(skb)) {
		/* Apply step thresh if skb is L4S && L-queue len >= min_qlen */
		dualpi2_skb_cb(skb)->apply_step = skb_apply_step(skb, q);

		/* Keep the overall qdisc stats consistent */
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		++q->packets_in_l;
		if (!q->l_head_ts)
			q->l_head_ts = cb->ts;
		return qdisc_enqueue_tail(skb, q->l_queue);
	}
	++q->packets_in_c;
	if (!q->c_head_ts)
		q->c_head_ts = cb->ts;
	return qdisc_enqueue_tail(skb, sch);
}
430 
431 /* By default, dualpi2 will split GSO skbs into independent skbs and enqueue
432  * each of those individually. This yields the following benefits, at the
433  * expense of CPU usage:
434  * - Finer-grained AQM actions as the sub-packets of a burst no longer share the
435  *   same fate (e.g., the random mark/drop probability is applied individually)
436  * - Improved precision of the starvation protection/WRR scheduler at dequeue,
437  *   as the size of the dequeued packets will be smaller.
438  */
439 static int dualpi2_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
440 				 struct sk_buff **to_free)
441 {
442 	struct dualpi2_sched_data *q = qdisc_priv(sch);
443 	int err;
444 
445 	err = dualpi2_skb_classify(q, skb);
446 	if (err != NET_XMIT_SUCCESS) {
447 		if (err & __NET_XMIT_BYPASS)
448 			qdisc_qstats_drop(sch);
449 		__qdisc_drop(skb, to_free);
450 		return err;
451 	}
452 
453 	if (q->split_gso && skb_is_gso(skb)) {
454 		netdev_features_t features;
455 		struct sk_buff *nskb, *next;
456 		int cnt, byte_len, orig_len;
457 		int err;
458 
459 		features = netif_skb_features(skb);
460 		nskb = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
461 		if (IS_ERR_OR_NULL(nskb))
462 			return qdisc_drop(skb, sch, to_free);
463 
464 		cnt = 1;
465 		byte_len = 0;
466 		orig_len = qdisc_pkt_len(skb);
467 		skb_list_walk_safe(nskb, nskb, next) {
468 			skb_mark_not_on_list(nskb);
469 
470 			/* Iterate through GSO fragments of an skb:
471 			 * (1) Set pkt_len from the single GSO fragments
472 			 * (2) Copy classified and ect values of an skb
473 			 * (3) Enqueue fragment & set ts in dualpi2_enqueue_skb
474 			 */
475 			qdisc_skb_cb(nskb)->pkt_len = nskb->len;
476 			qdisc_skb_cb(nskb)->pkt_segs = 1;
477 			dualpi2_skb_cb(nskb)->classified =
478 				dualpi2_skb_cb(skb)->classified;
479 			dualpi2_skb_cb(nskb)->ect = dualpi2_skb_cb(skb)->ect;
480 			err = dualpi2_enqueue_skb(nskb, sch, to_free);
481 
482 			if (err == NET_XMIT_SUCCESS) {
483 				/* Compute the backlog adjustment that needs
484 				 * to be propagated in the qdisc tree to reflect
485 				 * all new skbs successfully enqueued.
486 				 */
487 				++cnt;
488 				byte_len += nskb->len;
489 			}
490 		}
491 		if (cnt > 1) {
492 			/* The caller will add the original skb stats to its
493 			 * backlog, compensate this if any nskb is enqueued.
494 			 */
495 			--cnt;
496 			byte_len -= orig_len;
497 		}
498 		qdisc_tree_reduce_backlog(sch, -cnt, -byte_len);
499 		consume_skb(skb);
500 		return err;
501 	}
502 	return dualpi2_enqueue_skb(skb, sch, to_free);
503 }
504 
505 /* Select the queue from which the next packet can be dequeued, ensuring that
506  * neither queue can starve the other with a WRR scheduler.
507  *
508  * The sign of the WRR credit determines the next queue, while the size of
509  * the dequeued packet determines the magnitude of the WRR credit change. If
510  * either queue is empty, the WRR credit is kept unchanged.
511  *
512  * As the dequeued packet can be dropped later, the caller has to perform the
513  * qdisc_bstats_update() calls.
514  */
515 static struct sk_buff *dequeue_packet(struct Qdisc *sch,
516 				      struct dualpi2_sched_data *q,
517 				      int *credit_change,
518 				      u64 now)
519 {
520 	struct sk_buff *skb = NULL;
521 	int c_len;
522 
523 	*credit_change = 0;
524 	c_len = qdisc_qlen(sch) - qdisc_qlen(q->l_queue);
525 	if (qdisc_qlen(q->l_queue) && (!c_len || q->c_protection_credit <= 0)) {
526 		skb = __qdisc_dequeue_head(&q->l_queue->q);
527 		WRITE_ONCE(q->l_head_ts, head_enqueue_time(q->l_queue));
528 		if (c_len)
529 			*credit_change = q->c_protection_wc;
530 		qdisc_qstats_backlog_dec(q->l_queue, skb);
531 
532 		/* Keep the global queue size consistent */
533 		--sch->q.qlen;
534 		q->memory_used -= skb->truesize;
535 	} else if (c_len) {
536 		skb = __qdisc_dequeue_head(&sch->q);
537 		WRITE_ONCE(q->c_head_ts, head_enqueue_time(sch));
538 		if (qdisc_qlen(q->l_queue))
539 			*credit_change = ~((s32)q->c_protection_wl) + 1;
540 		q->memory_used -= skb->truesize;
541 	} else {
542 		dualpi2_reset_c_protection(q);
543 		return NULL;
544 	}
545 	*credit_change *= qdisc_pkt_len(skb);
546 	qdisc_qstats_backlog_dec(sch, skb);
547 	return skb;
548 }
549 
/* Step AQM for the L-queue: when the queue delay (or queue length, depending
 * on q->step_in_packets) exceeds the step threshold, mark ECT packets and
 * request a drop (return 1) for non-ECT ones. Returns 0 when the packet may
 * be transmitted.
 */
static int do_step_aqm(struct dualpi2_sched_data *q, struct sk_buff *skb,
		       u64 now)
{
	u64 qdelay = 0;

	if (q->step_in_packets)
		qdelay = qdisc_qlen(q->l_queue);
	else
		qdelay = dualpi2_sojourn_time(skb, now);

	if (dualpi2_skb_cb(skb)->apply_step && qdelay > q->step_thresh) {
		if (!dualpi2_skb_cb(skb)->ect) {
			/* Drop this non-ECT packet */
			return 1;
		}

		if (dualpi2_mark(q, skb))
			++q->step_marks;
	}
	qdisc_bstats_update(q->l_queue, skb);
	return 0;
}
572 
/* Account and drop a packet already dequeued from the qdisc; the caller then
 * retries with the next packet. The qdisc-tree backlog adjustment is
 * deferred and flushed at the end of dualpi2_qdisc_dequeue().
 */
static void drop_and_retry(struct dualpi2_sched_data *q, struct sk_buff *skb,
			   struct Qdisc *sch, enum qdisc_drop_reason reason)
{
	++q->deferred_drops_cnt;
	q->deferred_drops_len += qdisc_pkt_len(skb);
	qdisc_dequeue_drop(sch, skb, reason);
	qdisc_qstats_drop(sch);
}
581 
/* Qdisc dequeue entry point: loop until a transmittable packet is found,
 * applying deferred (dequeue-time) PI2 drops and the L-queue step AQM, then
 * commit the WRR credit change for the packet returned.
 */
static struct sk_buff *dualpi2_qdisc_dequeue(struct Qdisc *sch)
{
	struct dualpi2_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int credit_change;
	u64 now;

	now = ktime_get_ns();

	while ((skb = dequeue_packet(sch, q, &credit_change, now))) {
		/* PI2 drop decision deferred to dequeue when !drop_early */
		if (!q->drop_early && must_drop(sch, q, skb)) {
			drop_and_retry(q, skb, sch, QDISC_DROP_CONGESTED);
			continue;
		}

		if (skb_in_l_queue(skb) && do_step_aqm(q, skb, now)) {
			qdisc_qstats_drop(q->l_queue);
			drop_and_retry(q, skb, sch, QDISC_DROP_L4S_STEP_NON_ECN);
			continue;
		}

		q->c_protection_credit += credit_change;
		qdisc_bstats_update(sch, skb);
		break;
	}

	/* Flush the backlog adjustment for packets dropped in this call */
	if (q->deferred_drops_cnt) {
		qdisc_tree_reduce_backlog(sch, q->deferred_drops_cnt,
					  q->deferred_drops_len);
		q->deferred_drops_cnt = 0;
		q->deferred_drops_len = 0;
	}
	return skb;
}
616 
/* Divide out the ALPHA_BETA_GRANULARITY bits of an internal alpha/beta
 * product. Callers pass the magnitude of the delta (do_div() needs an
 * unsigned dividend) and re-apply the sign themselves.
 */
static s64 __scale_delta(u64 diff)
{
	do_div(diff, 1 << ALPHA_BETA_GRANULARITY);
	return diff;
}
622 
623 static void get_queue_delays(struct dualpi2_sched_data *q, u64 *qdelay_c,
624 			     u64 *qdelay_l)
625 {
626 	u64 now, qc, ql;
627 
628 	now = ktime_get_ns();
629 	qc = READ_ONCE(q->c_head_ts);
630 	ql = READ_ONCE(q->l_head_ts);
631 
632 	*qdelay_c = qc ? now - qc : 0;
633 	*qdelay_l = ql ? now - ql : 0;
634 }
635 
/* PI2 core: update the base marking probability from the current queuing
 * delay using the integral (alpha, error vs. target) and proportional
 * (beta, delay trend) gains, saturating at [0, MAX_PROB].
 */
static u32 calculate_probability(struct Qdisc *sch)
{
	struct dualpi2_sched_data *q = qdisc_priv(sch);
	u32 new_prob;
	u64 qdelay_c;
	u64 qdelay_l;
	u64 qdelay;
	s64 delta;

	get_queue_delays(q, &qdelay_c, &qdelay_l);
	/* The controller tracks the worst of the two queue delays */
	qdelay = max(qdelay_l, qdelay_c);

	/* Alpha and beta take at most 32b, i.e, the delay difference would
	 * overflow for queuing delay differences > ~4.2sec.
	 */
	delta = ((s64)qdelay - (s64)q->pi2_target) * q->pi2_alpha;
	delta += ((s64)qdelay - (s64)q->last_qdelay) * q->pi2_beta;
	q->last_qdelay = qdelay;

	/* Bound new_prob between 0 and MAX_PROB */
	if (delta > 0) {
		new_prob = __scale_delta(delta) + q->pi2_prob;
		/* Wrap-around means the addition saturates at MAX_PROB */
		if (new_prob < q->pi2_prob)
			new_prob = MAX_PROB;
	} else {
		/* __scale_delta() needs |delta|, hence the negation */
		new_prob = q->pi2_prob - __scale_delta(~delta + 1);
		if (new_prob > q->pi2_prob)
			new_prob = 0;
	}

	/* If we do not drop on overload, ensure we cap the L4S probability to
	 * 100% to keep window fairness when overflowing.
	 */
	if (!q->drop_overload)
		return min_t(u32, new_prob, MAX_PROB / q->coupling_factor);
	return new_prob;
}
673 
674 static u32 get_memory_limit(struct Qdisc *sch, u32 limit)
675 {
676 	/* Apply rule of thumb, i.e., doubling the packet length,
677 	 * to further include per packet overhead in memory_limit.
678 	 */
679 	u64 memlim = mul_u32_u32(limit, 2 * psched_mtu(qdisc_dev(sch)));
680 
681 	if (upper_32_bits(memlim))
682 		return U32_MAX;
683 	else
684 		return lower_32_bits(memlim);
685 }
686 
687 static u32 convert_us_to_nsec(u32 us)
688 {
689 	u64 ns = mul_u32_u32(us, NSEC_PER_USEC);
690 
691 	if (upper_32_bits(ns))
692 		return U32_MAX;
693 
694 	return lower_32_bits(ns);
695 }
696 
697 static u32 convert_ns_to_usec(u64 ns)
698 {
699 	do_div(ns, NSEC_PER_USEC);
700 	if (upper_32_bits(ns))
701 		return U32_MAX;
702 
703 	return lower_32_bits(ns);
704 }
705 
/* Periodic (tupdate) hrtimer callback: recompute the PI2 probability under
 * the qdisc root lock and re-arm the timer for the next interval.
 */
static enum hrtimer_restart dualpi2_timer(struct hrtimer *timer)
{
	struct dualpi2_sched_data *q = timer_container_of(q, timer, pi2_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock; /* to lock qdisc for probability calculations */

	rcu_read_lock();
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);

	/* Pairs with the READ_ONCE() in must_drop() */
	WRITE_ONCE(q->pi2_prob, calculate_probability(sch));
	hrtimer_set_expires(&q->pi2_timer, next_pi2_timeout(q));

	spin_unlock(root_lock);
	rcu_read_unlock();
	return HRTIMER_RESTART;
}
723 
/* Valid netlink range for TCA_DUALPI2_ALPHA/BETA: zero is excluded and
 * values above ALPHA_BETA_MAX would overflow the internal scaling.
 */
static struct netlink_range_validation dualpi2_alpha_beta_range = {
	.min = 1,
	.max = ALPHA_BETA_MAX,
};
728 
/* Netlink attribute policy; the bounds keep later computations overflow-safe
 * (see ALPHA_BETA_MAX) and reject nonsensical configurations early.
 */
static const struct nla_policy dualpi2_policy[TCA_DUALPI2_MAX + 1] = {
	[TCA_DUALPI2_LIMIT]		= NLA_POLICY_MIN(NLA_U32, 1),
	[TCA_DUALPI2_MEMORY_LIMIT]	= NLA_POLICY_MIN(NLA_U32, 1),
	[TCA_DUALPI2_TARGET]		= { .type = NLA_U32 },
	[TCA_DUALPI2_TUPDATE]		= NLA_POLICY_MIN(NLA_U32, 1),
	[TCA_DUALPI2_ALPHA]		=
		NLA_POLICY_FULL_RANGE(NLA_U32, &dualpi2_alpha_beta_range),
	[TCA_DUALPI2_BETA]		=
		NLA_POLICY_FULL_RANGE(NLA_U32, &dualpi2_alpha_beta_range),
	[TCA_DUALPI2_STEP_THRESH_PKTS]	= { .type = NLA_U32 },
	[TCA_DUALPI2_STEP_THRESH_US]	= { .type = NLA_U32 },
	[TCA_DUALPI2_MIN_QLEN_STEP]	= { .type = NLA_U32 },
	[TCA_DUALPI2_COUPLING]		= NLA_POLICY_MIN(NLA_U8, 1),
	[TCA_DUALPI2_DROP_OVERLOAD]	=
		NLA_POLICY_MAX(NLA_U8, TCA_DUALPI2_DROP_OVERLOAD_MAX),
	[TCA_DUALPI2_DROP_EARLY]	=
		NLA_POLICY_MAX(NLA_U8, TCA_DUALPI2_DROP_EARLY_MAX),
	[TCA_DUALPI2_C_PROTECTION]	=
		NLA_POLICY_RANGE(NLA_U8, 0, MAX_WC),
	[TCA_DUALPI2_ECN_MASK]		=
		NLA_POLICY_RANGE(NLA_U8, TC_DUALPI2_ECN_MASK_L4S_ECT,
				 TCA_DUALPI2_ECN_MASK_MAX),
	[TCA_DUALPI2_SPLIT_GSO]		=
		NLA_POLICY_MAX(NLA_U8, TCA_DUALPI2_SPLIT_GSO_MAX),
};
754 
/* Parse and apply a netlink configuration under the qdisc tree lock, then
 * shrink the queues as needed so the (possibly lowered) packet and memory
 * limits are honored before returning.
 */
static int dualpi2_change(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_DUALPI2_MAX + 1];
	struct dualpi2_sched_data *q;
	int old_backlog;
	int old_qlen;
	int err;

	if (!opt || !nla_len(opt)) {
		NL_SET_ERR_MSG_MOD(extack, "Dualpi2 options are required");
		return -EINVAL;
	}
	err = nla_parse_nested(tb, TCA_DUALPI2_MAX, opt, dualpi2_policy,
			       extack);
	if (err < 0)
		return err;
	/* The two step threshold encodings are mutually exclusive */
	if (tb[TCA_DUALPI2_STEP_THRESH_PKTS] && tb[TCA_DUALPI2_STEP_THRESH_US]) {
		NL_SET_ERR_MSG_MOD(extack, "multiple step thresh attributes");
		return -EINVAL;
	}

	q = qdisc_priv(sch);
	sch_tree_lock(sch);

	if (tb[TCA_DUALPI2_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_DUALPI2_LIMIT]);

		WRITE_ONCE(sch->limit, limit);
		WRITE_ONCE(q->memory_limit, get_memory_limit(sch, limit));
	}

	if (tb[TCA_DUALPI2_MEMORY_LIMIT])
		WRITE_ONCE(q->memory_limit,
			   nla_get_u32(tb[TCA_DUALPI2_MEMORY_LIMIT]));

	if (tb[TCA_DUALPI2_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_DUALPI2_TARGET]);

		WRITE_ONCE(q->pi2_target, target * NSEC_PER_USEC);
	}

	if (tb[TCA_DUALPI2_TUPDATE]) {
		u64 tupdate = nla_get_u32(tb[TCA_DUALPI2_TUPDATE]);

		WRITE_ONCE(q->pi2_tupdate, convert_us_to_nsec(tupdate));
	}

	if (tb[TCA_DUALPI2_ALPHA]) {
		u32 alpha = nla_get_u32(tb[TCA_DUALPI2_ALPHA]);

		WRITE_ONCE(q->pi2_alpha, dualpi2_scale_alpha_beta(alpha));
	}

	if (tb[TCA_DUALPI2_BETA]) {
		u32 beta = nla_get_u32(tb[TCA_DUALPI2_BETA]);

		WRITE_ONCE(q->pi2_beta, dualpi2_scale_alpha_beta(beta));
	}

	if (tb[TCA_DUALPI2_STEP_THRESH_PKTS]) {
		u32 step_th = nla_get_u32(tb[TCA_DUALPI2_STEP_THRESH_PKTS]);

		WRITE_ONCE(q->step_in_packets, true);
		WRITE_ONCE(q->step_thresh, step_th);
	} else if (tb[TCA_DUALPI2_STEP_THRESH_US]) {
		u32 step_th = nla_get_u32(tb[TCA_DUALPI2_STEP_THRESH_US]);

		WRITE_ONCE(q->step_in_packets, false);
		WRITE_ONCE(q->step_thresh, convert_us_to_nsec(step_th));
	}

	if (tb[TCA_DUALPI2_MIN_QLEN_STEP])
		WRITE_ONCE(q->min_qlen_step,
			   nla_get_u32(tb[TCA_DUALPI2_MIN_QLEN_STEP]));

	if (tb[TCA_DUALPI2_COUPLING]) {
		u8 coupling = nla_get_u8(tb[TCA_DUALPI2_COUPLING]);

		WRITE_ONCE(q->coupling_factor, coupling);
	}

	if (tb[TCA_DUALPI2_DROP_OVERLOAD]) {
		u8 drop_overload = nla_get_u8(tb[TCA_DUALPI2_DROP_OVERLOAD]);

		WRITE_ONCE(q->drop_overload, (bool)drop_overload);
	}

	if (tb[TCA_DUALPI2_DROP_EARLY]) {
		u8 drop_early = nla_get_u8(tb[TCA_DUALPI2_DROP_EARLY]);

		WRITE_ONCE(q->drop_early, (bool)drop_early);
	}

	if (tb[TCA_DUALPI2_C_PROTECTION]) {
		u8 wc = nla_get_u8(tb[TCA_DUALPI2_C_PROTECTION]);

		dualpi2_calculate_c_protection(sch, q, wc);
	}

	if (tb[TCA_DUALPI2_ECN_MASK]) {
		u8 ecn_mask = nla_get_u8(tb[TCA_DUALPI2_ECN_MASK]);

		WRITE_ONCE(q->ecn_mask, ecn_mask);
	}

	if (tb[TCA_DUALPI2_SPLIT_GSO]) {
		u8 split_gso = nla_get_u8(tb[TCA_DUALPI2_SPLIT_GSO]);

		WRITE_ONCE(q->split_gso, (bool)split_gso);
	}

	/* Drop packets until both the packet and memory limits are met,
	 * preferring the C-queue while it is longer than the L-queue.
	 */
	old_qlen = qdisc_qlen(sch);
	old_backlog = sch->qstats.backlog;
	while (qdisc_qlen(sch) > sch->limit ||
	       q->memory_used > q->memory_limit) {
		struct sk_buff *skb = NULL;

		if (qdisc_qlen(sch) > qdisc_qlen(q->l_queue)) {
			skb = qdisc_dequeue_internal(sch, true);
			if (unlikely(!skb)) {
				WARN_ON_ONCE(1);
				break;
			}
			q->memory_used -= skb->truesize;
			rtnl_qdisc_drop(skb, sch);
		} else if (qdisc_qlen(q->l_queue)) {
			skb = qdisc_dequeue_internal(q->l_queue, true);
			if (unlikely(!skb)) {
				WARN_ON_ONCE(1);
				break;
			}
			/* L-queue packets are counted in both sch and
			 * l_queue on enqueue; qdisc_dequeue_internal()
			 * handled l_queue, so we further account for sch.
			 */
			--sch->q.qlen;
			qdisc_qstats_backlog_dec(sch, skb);
			q->memory_used -= skb->truesize;
			rtnl_qdisc_drop(skb, q->l_queue);
			qdisc_qstats_drop(sch);
		} else {
			WARN_ON_ONCE(1);
			break;
		}
	}
	qdisc_tree_reduce_backlog(sch, old_qlen - qdisc_qlen(sch),
				  old_backlog - sch->qstats.backlog);

	sch_tree_unlock(sch);
	return 0;
}
907 
/* Default alpha/beta values give a 10dB stability margin with max_rtt=100ms. */
static void dualpi2_reset_default(struct Qdisc *sch)
{
	struct dualpi2_sched_data *q = qdisc_priv(sch);

	q->sch->limit = 10000;				/* Max 125ms at 1Gbps */
	q->memory_limit = get_memory_limit(sch, q->sch->limit);

	q->pi2_target = 15 * NSEC_PER_MSEC;
	q->pi2_tupdate = 16 * NSEC_PER_MSEC;
	q->pi2_alpha = dualpi2_scale_alpha_beta(41);	/* ~0.16 Hz * 256 */
	q->pi2_beta = dualpi2_scale_alpha_beta(819);	/* ~3.20 Hz * 256 */

	q->step_thresh = 1 * NSEC_PER_MSEC;
	q->step_in_packets = false;

	dualpi2_calculate_c_protection(q->sch, q, 10);	/* wc=10%, wl=90% */

	q->ecn_mask = TC_DUALPI2_ECN_MASK_L4S_ECT;	/* INET_ECN_ECT_1 */
	q->min_qlen_step = 0;		/* Always apply step mark in L-queue */
	q->coupling_factor = 2;		/* window fairness for equal RTTs */
	q->drop_overload = TC_DUALPI2_DROP_OVERLOAD_DROP; /* Drop overload */
	q->drop_early = TC_DUALPI2_DROP_EARLY_DROP_DEQUEUE; /* Drop dequeue */
	q->split_gso = TC_DUALPI2_SPLIT_GSO_SPLIT_GSO;	/* Split GSO */
}
933 
/* Qdisc init: create the internal L-queue, obtain a tc filter block, apply
 * defaults and any netlink overrides, then start the PI2 update timer.
 * Error paths rely on the qdisc teardown callbacks for cleanup.
 */
static int dualpi2_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct dualpi2_sched_data *q = qdisc_priv(sch);
	int err;

	sch->flags |= TCQ_F_DEQUEUE_DROPS;

	q->l_queue = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				       TC_H_MAKE(sch->handle, 1), extack);
	if (!q->l_queue)
		return -ENOMEM;

	err = tcf_block_get(&q->tcf_block, &q->tcf_filters, sch, extack);
	if (err)
		return err;

	q->sch = sch;
	dualpi2_reset_default(sch);
	hrtimer_setup(&q->pi2_timer, dualpi2_timer, CLOCK_MONOTONIC,
		      HRTIMER_MODE_ABS_PINNED_SOFT);

	if (opt && nla_len(opt)) {
		err = dualpi2_change(sch, opt, extack);

		if (err)
			return err;
	}

	hrtimer_start(&q->pi2_timer, next_pi2_timeout(q),
		      HRTIMER_MODE_ABS_PINNED_SOFT);
	return 0;
}
967 
968 static int dualpi2_dump(struct Qdisc *sch, struct sk_buff *skb)
969 {
970 	struct dualpi2_sched_data *q = qdisc_priv(sch);
971 	struct nlattr *opts;
972 	bool step_in_pkts;
973 	u32 step_th;
974 
975 	step_in_pkts = READ_ONCE(q->step_in_packets);
976 	step_th = READ_ONCE(q->step_thresh);
977 
978 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
979 	if (!opts)
980 		goto nla_put_failure;
981 
982 	if (step_in_pkts &&
983 	    (nla_put_u32(skb, TCA_DUALPI2_LIMIT, READ_ONCE(sch->limit)) ||
984 	    nla_put_u32(skb, TCA_DUALPI2_MEMORY_LIMIT,
985 			READ_ONCE(q->memory_limit)) ||
986 	    nla_put_u32(skb, TCA_DUALPI2_TARGET,
987 			convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
988 	    nla_put_u32(skb, TCA_DUALPI2_TUPDATE,
989 			convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
990 	    nla_put_u32(skb, TCA_DUALPI2_ALPHA,
991 			dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
992 	    nla_put_u32(skb, TCA_DUALPI2_BETA,
993 			dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
994 	    nla_put_u32(skb, TCA_DUALPI2_STEP_THRESH_PKTS, step_th) ||
995 	    nla_put_u32(skb, TCA_DUALPI2_MIN_QLEN_STEP,
996 			READ_ONCE(q->min_qlen_step)) ||
997 	    nla_put_u8(skb, TCA_DUALPI2_COUPLING,
998 		       READ_ONCE(q->coupling_factor)) ||
999 	    nla_put_u8(skb, TCA_DUALPI2_DROP_OVERLOAD,
1000 		       READ_ONCE(q->drop_overload)) ||
1001 	    nla_put_u8(skb, TCA_DUALPI2_DROP_EARLY,
1002 		       READ_ONCE(q->drop_early)) ||
1003 	    nla_put_u8(skb, TCA_DUALPI2_C_PROTECTION,
1004 		       READ_ONCE(q->c_protection_wc)) ||
1005 	    nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
1006 	    nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
1007 		goto nla_put_failure;
1008 
1009 	if (!step_in_pkts &&
1010 	    (nla_put_u32(skb, TCA_DUALPI2_LIMIT, READ_ONCE(sch->limit)) ||
1011 	    nla_put_u32(skb, TCA_DUALPI2_MEMORY_LIMIT,
1012 			READ_ONCE(q->memory_limit)) ||
1013 	    nla_put_u32(skb, TCA_DUALPI2_TARGET,
1014 			convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
1015 	    nla_put_u32(skb, TCA_DUALPI2_TUPDATE,
1016 			convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
1017 	    nla_put_u32(skb, TCA_DUALPI2_ALPHA,
1018 			dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
1019 	    nla_put_u32(skb, TCA_DUALPI2_BETA,
1020 			dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
1021 	    nla_put_u32(skb, TCA_DUALPI2_STEP_THRESH_US,
1022 			convert_ns_to_usec(step_th)) ||
1023 	    nla_put_u32(skb, TCA_DUALPI2_MIN_QLEN_STEP,
1024 			READ_ONCE(q->min_qlen_step)) ||
1025 	    nla_put_u8(skb, TCA_DUALPI2_COUPLING,
1026 		       READ_ONCE(q->coupling_factor)) ||
1027 	    nla_put_u8(skb, TCA_DUALPI2_DROP_OVERLOAD,
1028 		       READ_ONCE(q->drop_overload)) ||
1029 	    nla_put_u8(skb, TCA_DUALPI2_DROP_EARLY,
1030 		       READ_ONCE(q->drop_early)) ||
1031 	    nla_put_u8(skb, TCA_DUALPI2_C_PROTECTION,
1032 		       READ_ONCE(q->c_protection_wc)) ||
1033 	    nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
1034 	    nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
1035 		goto nla_put_failure;
1036 
1037 	return nla_nest_end(skb, opts);
1038 
1039 nla_put_failure:
1040 	nla_nest_cancel(skb, opts);
1041 	return -1;
1042 }
1043 
1044 static int dualpi2_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
1045 {
1046 	struct dualpi2_sched_data *q = qdisc_priv(sch);
1047 	struct tc_dualpi2_xstats st = {
1048 		.prob			= READ_ONCE(q->pi2_prob),
1049 		.packets_in_c		= q->packets_in_c,
1050 		.packets_in_l		= q->packets_in_l,
1051 		.maxq			= q->maxq,
1052 		.ecn_mark		= q->ecn_mark,
1053 		.credit			= q->c_protection_credit,
1054 		.step_marks		= q->step_marks,
1055 		.memory_used		= q->memory_used,
1056 		.max_memory_used	= q->max_memory_used,
1057 		.memory_limit		= q->memory_limit,
1058 	};
1059 	u64 qc, ql;
1060 
1061 	get_queue_delays(q, &qc, &ql);
1062 	st.delay_l = convert_ns_to_usec(ql);
1063 	st.delay_c = convert_ns_to_usec(qc);
1064 	return gnet_stats_copy_app(d, &st, sizeof(st));
1065 }
1066 
1067 /* Reset both L-queue and C-queue, internal packet counters, PI probability,
1068  * C-queue protection credit, and timestamps, while preserving current
1069  * configuration of DUALPI2.
1070  */
1071 static void dualpi2_reset(struct Qdisc *sch)
1072 {
1073 	struct dualpi2_sched_data *q = qdisc_priv(sch);
1074 
1075 	qdisc_reset_queue(sch);
1076 	qdisc_reset_queue(q->l_queue);
1077 	q->c_head_ts = 0;
1078 	q->l_head_ts = 0;
1079 	q->pi2_prob = 0;
1080 	q->packets_in_c = 0;
1081 	q->packets_in_l = 0;
1082 	q->maxq = 0;
1083 	q->ecn_mark = 0;
1084 	q->step_marks = 0;
1085 	q->memory_used = 0;
1086 	q->max_memory_used = 0;
1087 	dualpi2_reset_c_protection(q);
1088 }
1089 
/* Qdisc ->destroy(): stop the PI2 timer and release the L-queue and filter
 * block. Also called after a failed ->init(), so l_queue may be NULL.
 */
static void dualpi2_destroy(struct Qdisc *sch)
{
	struct dualpi2_sched_data *q = qdisc_priv(sch);

	/* Clear tupdate before cancelling — presumably the timer callback
	 * uses pi2_tupdate to decide whether to re-arm itself, so this
	 * prevents a concurrent expiry from restarting the timer we are
	 * about to cancel. TODO(review): confirm against dualpi2_timer().
	 */
	q->pi2_tupdate = 0;
	hrtimer_cancel(&q->pi2_timer);
	if (q->l_queue)
		qdisc_put(q->l_queue);
	/* tcf_block_put() tolerates NULL, so no guard is needed here. */
	tcf_block_put(q->tcf_block);
}
1100 
/* ->leaf() stub: classes expose no attachable child qdisc. */
static struct Qdisc *dualpi2_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}
1105 
/* ->find() stub: no dynamically created classes to look up. */
static unsigned long dualpi2_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}
1110 
/* ->bind_tcf() stub: filters bind at qdisc level, no per-class refcount. */
static unsigned long dualpi2_bind(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	return 0;
}
1116 
/* ->unbind_tcf() stub: nothing to release, see dualpi2_bind(). */
static void dualpi2_unbind(struct Qdisc *q, unsigned long cl)
{
}
1120 
1121 static struct tcf_block *dualpi2_tcf_block(struct Qdisc *sch, unsigned long cl,
1122 					   struct netlink_ext_ack *extack)
1123 {
1124 	struct dualpi2_sched_data *q = qdisc_priv(sch);
1125 
1126 	if (cl)
1127 		return NULL;
1128 	return q->tcf_block;
1129 }
1130 
1131 static void dualpi2_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1132 {
1133 	unsigned int i;
1134 
1135 	if (arg->stop)
1136 		return;
1137 
1138 	/* We statically define only 2 queues */
1139 	for (i = 0; i < 2; i++) {
1140 		if (arg->count < arg->skip) {
1141 			arg->count++;
1142 			continue;
1143 		}
1144 		if (arg->fn(sch, i + 1, arg) < 0) {
1145 			arg->stop = 1;
1146 			break;
1147 		}
1148 		arg->count++;
1149 	}
1150 }
1151 
/* Minimal class support to handle tc filters: only tcf_block and walk do
 * real work; leaf/find/bind/unbind are stubs required by the class API.
 */
static const struct Qdisc_class_ops dualpi2_class_ops = {
	.leaf		= dualpi2_leaf,
	.find		= dualpi2_find,
	.tcf_block	= dualpi2_tcf_block,
	.bind_tcf	= dualpi2_bind,
	.unbind_tcf	= dualpi2_unbind,
	.walk		= dualpi2_walk,
};
1161 
/* Qdisc operations table registered under the "dualpi2" id. */
static struct Qdisc_ops dualpi2_qdisc_ops __read_mostly = {
	.id		= "dualpi2",
	.cl_ops		= &dualpi2_class_ops,
	.priv_size	= sizeof(struct dualpi2_sched_data),
	.enqueue	= dualpi2_qdisc_enqueue,
	.dequeue	= dualpi2_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= dualpi2_init,
	.destroy	= dualpi2_destroy,
	.reset		= dualpi2_reset,
	.change		= dualpi2_change,
	.dump		= dualpi2_dump,
	.dump_stats	= dualpi2_dump_stats,
	.owner		= THIS_MODULE,
};
1177 
/* Module entry: register the dualpi2 qdisc with the traffic-control core. */
static int __init dualpi2_module_init(void)
{
	return register_qdisc(&dualpi2_qdisc_ops);
}
1182 
/* Module exit: unregister the dualpi2 qdisc. */
static void __exit dualpi2_module_exit(void)
{
	unregister_qdisc(&dualpi2_qdisc_ops);
}
1187 
1188 module_init(dualpi2_module_init);
1189 module_exit(dualpi2_module_exit);
1190 
1191 MODULE_DESCRIPTION("Dual Queue with Proportional Integral controller Improved with a Square (dualpi2) scheduler");
1192 MODULE_AUTHOR("Koen De Schepper <koen.de_schepper@nokia-bell-labs.com>");
1193 MODULE_AUTHOR("Chia-Yu Chang <chia-yu.chang@nokia-bell-labs.com>");
1194 MODULE_AUTHOR("Olga Albisser <olga@albisser.org>");
1195 MODULE_AUTHOR("Henrik Steen <henrist@henrist.net>");
1196 MODULE_AUTHOR("Olivier Tilmans <olivier.tilmans@nokia.com>");
1197 
1198 MODULE_LICENSE("Dual BSD/GPL");
1199 MODULE_VERSION("1.0");
1200