// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
/* Copyright (C) 2024 Nokia
 *
 * Author: Koen De Schepper <koen.de_schepper@nokia-bell-labs.com>
 * Author: Olga Albisser <olga@albisser.org>
 * Author: Henrik Steen <henrist@henrist.net>
 * Author: Olivier Tilmans <olivier.tilmans@nokia.com>
 * Author: Chia-Yu Chang <chia-yu.chang@nokia-bell-labs.com>
 *
 * DualPI Improved with a Square (dualpi2):
 * - Supports congestion controls that comply with the Prague requirements
 *   in RFC9331 (e.g. TCP-Prague)
 * - Supports coupled dual-queue with PI2 as defined in RFC9332
 * - Supports ECN L4S-identifier (IP.ECN==0b*1)
 *
 * Note: Although DCTCP and BBRv3 can use shallow-threshold ECN marks,
 * they do not meet the 'Prague L4S Requirements' listed in RFC 9331
 * Section 4, so they can only be used with DualPI2 in a datacenter
 * context.
 *
 * References:
 * - RFC9332: https://datatracker.ietf.org/doc/html/rfc9332
 * - De Schepper, Koen, et al. "PI2: A linearized AQM for both classic and
 *   scalable TCP." in proc. ACM CoNEXT'16, 2016.
 */

#include <linux/errno.h>
#include <linux/hrtimer.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/types.h>

#include <net/gso.h>
#include <net/inet_ecn.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

/* Using 32b probabilities enables support for flows with windows up to
 * ~8.6 * 1e9 packets, i.e., twice the maximal snd_cwnd.
 * MAX_PROB must be consistent with the RNG in dualpi2_roll().
 */
#define MAX_PROB U32_MAX
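
/* Worked example (editorial note, not from the original authors): the
 * smallest non-zero marking probability encodable in 32b is 1/2^32. A
 * scalable sender needing ~2 marks per RTT at probability p runs at a
 * window of W = 2 / p packets, so the lowest probability supports windows
 * up to 2 * 2^32 ~= 8.6e9 packets, i.e., twice the largest 32b snd_cwnd.
 */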

/* alpha/beta values exchanged over netlink are in units of 256ns */
#define ALPHA_BETA_SHIFT 8

/* Scaled values of alpha/beta must fit in 32b to avoid overflow in later
 * computations. Consequently (see dualpi2_scale_alpha_beta()), their
 * netlink-provided values can use at most 31b, i.e., be at most (2^31)-1,
 * which corresponds to ~8.4MHz as those are given in 1/256th. This enables
 * tuning alpha/beta to control flows whose maximal RTTs range from usec to
 * a few seconds.
 */
#define ALPHA_BETA_MAX ((1U << 31) - 1)

/* Internal alpha/beta are in units of 64ns.
 * This enables using all alpha/beta values in the allowed range without loss
 * of precision due to rounding when scaling them internally, e.g.,
 * scale_alpha_beta(1) will not round down to 0.
 */
#define ALPHA_BETA_GRANULARITY 6

#define ALPHA_BETA_SCALING (ALPHA_BETA_SHIFT - ALPHA_BETA_GRANULARITY)
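
/* Worked example (editorial note, not from the original authors): with the
 * shifts above, dualpi2_scale_alpha_beta(x) ~= x * 2^32 / 4 / 1e9, i.e.,
 * roughly x * 1.07. The default alpha of 41 over netlink (41/256 ~= 0.16Hz)
 * becomes an internal gain of ~44, and even x = 1 stays non-zero after
 * scaling, as promised by the comment above.
 */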

/* We express the weights (wc, wl) in %, i.e., wc + wl = 100 */
#define MAX_WC 100

struct dualpi2_sched_data {
        struct Qdisc *l_queue;  /* The L4S Low latency queue (L-queue) */
        struct Qdisc *sch;      /* The Classic queue (C-queue) */

        /* Registered tc filters */
        struct tcf_proto __rcu *tcf_filters;
        struct tcf_block *tcf_block;

        /* PI2 parameters */
        u64 pi2_target;         /* Target delay in nanoseconds */
        u32 pi2_tupdate;        /* Timer frequency in nanoseconds */
        u32 pi2_prob;           /* Base PI probability */
        u32 pi2_alpha;          /* Gain factor for the integral rate response */
        u32 pi2_beta;           /* Gain factor for the proportional response */
        struct hrtimer pi2_timer; /* prob update timer */

        /* Step AQM (L-queue only) parameters */
        u32 step_thresh;        /* Step threshold */
        bool step_in_packets;   /* Step thresh in packets (1) or time (0) */

        /* C-queue starvation protection */
        s32 c_protection_credit; /* Credit (sign indicates which queue) */
        s32 c_protection_init;  /* Reset value of the credit */
        u8 c_protection_wc;     /* C-queue weight (between 0 and MAX_WC) */
        u8 c_protection_wl;     /* L-queue weight (MAX_WC - wc) */

        /* General dualQ parameters */
        u32 memory_limit;       /* Memory limit of both queues */
        u8 coupling_factor;     /* Coupling factor (k) between both queues */
        u8 ecn_mask;            /* Mask to match packets into L-queue */
        u32 min_qlen_step;      /* Minimum queue length to apply step thresh */
        bool drop_early;        /* Drop at enqueue (1) instead of dequeue (0) */
        bool drop_overload;     /* Drop (1) on overload, or overflow (0) */
        bool split_gso;         /* Split aggregated skb (1) or leave as is (0) */

        /* Statistics */
        u64 c_head_ts;          /* Enqueue timestamp of the C-queue head */
        u64 l_head_ts;          /* Enqueue timestamp of the L-queue head */
        u64 last_qdelay;        /* Q delay val at the last probability update */
        u32 packets_in_c;       /* Enqueue packet counter of the C-queue */
        u32 packets_in_l;       /* Enqueue packet counter of the L-queue */
        u32 maxq;               /* Maximum queue size of the C-queue */
        u32 ecn_mark;           /* ECN mark pkt counter due to PI probability */
        u32 step_marks;         /* ECN mark pkt counter due to step AQM */
        u32 memory_used;        /* Memory used of both queues */
        u32 max_memory_used;    /* Maximum used memory */

        /* Deferred drop statistics */
        u32 deferred_drops_cnt; /* Packets dropped */
        u32 deferred_drops_len; /* Bytes dropped */
};

struct dualpi2_skb_cb {
        u64 ts;                 /* Timestamp at enqueue */
        u8 apply_step:1,        /* Can we apply the step threshold */
           classified:2,        /* Packet classification results */
           ect:2;               /* Packet ECT codepoint */
};

enum dualpi2_classification_results {
        DUALPI2_C_CLASSIC = 0,  /* C-queue */
        DUALPI2_C_L4S = 1,      /* L-queue (scale mark/classic drop) */
        DUALPI2_C_LLLL = 2,     /* L-queue (no drops/marks) */
        __DUALPI2_C_MAX         /* Keep last */
};

static struct dualpi2_skb_cb *dualpi2_skb_cb(struct sk_buff *skb)
{
        qdisc_cb_private_validate(skb, sizeof(struct dualpi2_skb_cb));
        return (struct dualpi2_skb_cb *)qdisc_skb_cb(skb)->data;
}

static u64 dualpi2_sojourn_time(struct sk_buff *skb, u64 reference)
{
        return reference - dualpi2_skb_cb(skb)->ts;
}

static u64 head_enqueue_time(struct Qdisc *q)
{
        struct sk_buff *skb = qdisc_peek_head(q);

        return skb ? dualpi2_skb_cb(skb)->ts : 0;
}

static u32 dualpi2_scale_alpha_beta(u32 param)
{
        u64 tmp = ((u64)param * MAX_PROB >> ALPHA_BETA_SCALING);

        do_div(tmp, NSEC_PER_SEC);
        return tmp;
}

static u32 dualpi2_unscale_alpha_beta(u32 param)
{
        u64 tmp = ((u64)param * NSEC_PER_SEC << ALPHA_BETA_SCALING);

        do_div(tmp, MAX_PROB);
        return tmp;
}

static ktime_t next_pi2_timeout(struct dualpi2_sched_data *q)
{
        return ktime_add_ns(ktime_get_ns(), q->pi2_tupdate);
}

static bool skb_is_l4s(struct sk_buff *skb)
{
        return dualpi2_skb_cb(skb)->classified == DUALPI2_C_L4S;
}

static bool skb_in_l_queue(struct sk_buff *skb)
{
        return dualpi2_skb_cb(skb)->classified != DUALPI2_C_CLASSIC;
}

static bool skb_apply_step(struct sk_buff *skb, struct dualpi2_sched_data *q)
{
        return skb_is_l4s(skb) && qdisc_qlen(q->l_queue) >= q->min_qlen_step;
}

static bool dualpi2_mark(struct dualpi2_sched_data *q, struct sk_buff *skb)
{
        if (INET_ECN_set_ce(skb)) {
                q->ecn_mark++;
                return true;
        }
        return false;
}

static void dualpi2_reset_c_protection(struct dualpi2_sched_data *q)
{
        q->c_protection_credit = q->c_protection_init;
}

/* This computes the initial credit value and WRR weight for the L queue (wl)
 * from the weight of the C queue (wc).
 * If wl > wc, the scheduler will start with the L queue when reset.
 */
static void dualpi2_calculate_c_protection(struct Qdisc *sch,
                                           struct dualpi2_sched_data *q, u32 wc)
{
        q->c_protection_wc = wc;
        q->c_protection_wl = MAX_WC - wc;
        q->c_protection_init = (s32)psched_mtu(qdisc_dev(sch)) *
                ((int)q->c_protection_wc - (int)q->c_protection_wl);
        dualpi2_reset_c_protection(q);
}
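
/* Worked example (editorial note, not from the original authors): with the
 * default wc = 10 (hence wl = 90) and a 1514B MTU, the reset credit is
 * 1514 * (10 - 90) = -121120. Since dequeue_packet() serves the L-queue
 * whenever the credit is <= 0, a negative initial credit makes the
 * scheduler start with the L queue, matching the note above.
 */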

static bool dualpi2_roll(u32 prob)
{
        return get_random_u32() <= prob;
}

/* Packets in the C-queue are subject to a marking probability pC, which is the
 * square of the internal PI probability (i.e., they have an overall lower
 * mark/drop probability). If the qdisc is overloaded, ignore ECT values and
 * only drop.
 *
 * Note that this marking scheme is also applied to L4S packets during overload.
 * Return true if packet dropping is required in the C queue.
 */
static bool dualpi2_classic_marking(struct dualpi2_sched_data *q,
                                    struct sk_buff *skb, u32 prob,
                                    bool overload)
{
        if (dualpi2_roll(prob) && dualpi2_roll(prob)) {
                if (overload || dualpi2_skb_cb(skb)->ect == INET_ECN_NOT_ECT)
                        return true;
                dualpi2_mark(q, skb);
        }
        return false;
}
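
/* Worked example (editorial note, not from the original authors): the two
 * independent rolls above square the internal probability, e.g., an
 * internal PI probability of 30% yields a classic mark/drop probability of
 * 0.3 * 0.3 = 9%. This squaring is what linearizes the controller for
 * Reno/Cubic-style flows whose rate scales as 1/sqrt(p).
 */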

/* Packets in the L-queue are subject to a marking probability pL given by the
 * internal PI probability scaled by the coupling factor.
 *
 * On overload (i.e., @local_l_prob is >= 100%):
 * - if the qdisc is configured to trade losses to preserve latency (i.e.,
 *   @q->drop_overload), apply classic drops first before marking.
 * - otherwise, preserve the "no loss" property of ECN at the cost of queueing
 *   delay, eventually resulting in taildrop behavior once sch->limit is
 *   reached.
 * Return true if packet dropping is required in the L queue.
 */
static bool dualpi2_scalable_marking(struct dualpi2_sched_data *q,
                                     struct sk_buff *skb,
                                     u64 local_l_prob, u32 prob,
                                     bool overload)
{
        if (overload) {
                /* Apply classic drop */
                if (!q->drop_overload ||
                    !(dualpi2_roll(prob) && dualpi2_roll(prob)))
                        goto mark;
                return true;
        }

        /* We can safely cut the upper 32b as overload==false */
        if (dualpi2_roll(local_l_prob)) {
                /* Non-ECT packets could have been classified as L4S by
                 * filters.
                 */
                if (dualpi2_skb_cb(skb)->ect == INET_ECN_NOT_ECT)
                        return true;
mark:
                dualpi2_mark(q, skb);
        }
        return false;
}
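
/* Worked example (editorial note, not from the original authors): with the
 * default coupling factor k = 2, an internal PI probability of 30% gives an
 * L-queue marking probability of 60% while the C-queue sees 9% (0.3
 * squared). Overload begins once prob * k exceeds MAX_PROB, i.e., above an
 * internal probability of 50% for k = 2.
 */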

/* Decide whether a given packet must be dropped (or marked if ECT), according
 * to the PI2 probability.
 *
 * Never mark/drop if we have a standing queue of less than 2 MTUs.
 */
static bool must_drop(struct Qdisc *sch, struct dualpi2_sched_data *q,
                      struct sk_buff *skb)
{
        u64 local_l_prob;
        bool overload;
        u32 prob;

        if (sch->qstats.backlog < 2 * psched_mtu(qdisc_dev(sch)))
                return false;

        prob = READ_ONCE(q->pi2_prob);
        local_l_prob = (u64)prob * q->coupling_factor;
        overload = local_l_prob > MAX_PROB;

        switch (dualpi2_skb_cb(skb)->classified) {
        case DUALPI2_C_CLASSIC:
                return dualpi2_classic_marking(q, skb, prob, overload);
        case DUALPI2_C_L4S:
                return dualpi2_scalable_marking(q, skb, local_l_prob, prob,
                                                overload);
        default: /* DUALPI2_C_LLLL */
                return false;
        }
}

static void dualpi2_read_ect(struct sk_buff *skb)
{
        struct dualpi2_skb_cb *cb = dualpi2_skb_cb(skb);
        int wlen = skb_network_offset(skb);

        switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                wlen += sizeof(struct iphdr);
                if (!pskb_may_pull(skb, wlen) ||
                    skb_try_make_writable(skb, wlen))
                        goto not_ecn;

                cb->ect = ipv4_get_dsfield(ip_hdr(skb)) & INET_ECN_MASK;
                break;
        case htons(ETH_P_IPV6):
                wlen += sizeof(struct ipv6hdr);
                if (!pskb_may_pull(skb, wlen) ||
                    skb_try_make_writable(skb, wlen))
                        goto not_ecn;

                cb->ect = ipv6_get_dsfield(ipv6_hdr(skb)) & INET_ECN_MASK;
                break;
        default:
                goto not_ecn;
        }
        return;

not_ecn:
        /* Non-pullable/writable packets can only be dropped hence are
         * classified as not ECT.
         */
        cb->ect = INET_ECN_NOT_ECT;
}
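
/* For reference (RFC 3168/RFC 9331 codepoints): Not-ECT = 0b00,
 * ECT(1) = 0b01, ECT(0) = 0b10, CE = 0b11. With the default ecn_mask of
 * TC_DUALPI2_ECN_MASK_L4S_ECT (0b01), the classifier below sends ECT(1)
 * and CE traffic to the L-queue, while Not-ECT and ECT(0) fall through to
 * the C-queue.
 */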

static int dualpi2_skb_classify(struct dualpi2_sched_data *q,
                                struct sk_buff *skb)
{
        struct dualpi2_skb_cb *cb = dualpi2_skb_cb(skb);
        struct tcf_result res;
        struct tcf_proto *fl;
        int result;

        dualpi2_read_ect(skb);
        if (cb->ect & q->ecn_mask) {
                cb->classified = DUALPI2_C_L4S;
                return NET_XMIT_SUCCESS;
        }

        if (TC_H_MAJ(skb->priority) == q->sch->handle &&
            TC_H_MIN(skb->priority) < __DUALPI2_C_MAX) {
                cb->classified = TC_H_MIN(skb->priority);
                return NET_XMIT_SUCCESS;
        }

        fl = rcu_dereference_bh(q->tcf_filters);
        if (!fl) {
                cb->classified = DUALPI2_C_CLASSIC;
                return NET_XMIT_SUCCESS;
        }

        result = tcf_classify(skb, NULL, fl, &res, false);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
                }
#endif
                cb->classified = TC_H_MIN(res.classid) < __DUALPI2_C_MAX ?
                        TC_H_MIN(res.classid) : DUALPI2_C_CLASSIC;
        }
        return NET_XMIT_SUCCESS;
}

static int dualpi2_enqueue_skb(struct sk_buff *skb, struct Qdisc *sch,
                               struct sk_buff **to_free)
{
        struct dualpi2_sched_data *q = qdisc_priv(sch);
        struct dualpi2_skb_cb *cb;

        if (unlikely(qdisc_qlen(sch) >= sch->limit) ||
            unlikely((u64)q->memory_used + skb->truesize > q->memory_limit)) {
                qdisc_qstats_overlimit(sch);
                if (skb_in_l_queue(skb))
                        qdisc_qstats_overlimit(q->l_queue);
                return qdisc_drop_reason(skb, sch, to_free,
                                         SKB_DROP_REASON_QDISC_OVERLIMIT);
        }

        if (q->drop_early && must_drop(sch, q, skb)) {
                qdisc_drop_reason(skb, sch, to_free,
                                  SKB_DROP_REASON_QDISC_CONGESTED);
                return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        }

        cb = dualpi2_skb_cb(skb);
        cb->ts = ktime_get_ns();
        q->memory_used += skb->truesize;
        if (q->memory_used > q->max_memory_used)
                q->max_memory_used = q->memory_used;

        if (qdisc_qlen(sch) > q->maxq)
                q->maxq = qdisc_qlen(sch);

        if (skb_in_l_queue(skb)) {
                /* Apply step thresh if skb is L4S && L-queue len >= min_qlen */
                dualpi2_skb_cb(skb)->apply_step = skb_apply_step(skb, q);

                /* Keep the overall qdisc stats consistent */
                ++sch->q.qlen;
                qdisc_qstats_backlog_inc(sch, skb);
                ++q->packets_in_l;
                if (!q->l_head_ts)
                        q->l_head_ts = cb->ts;
                return qdisc_enqueue_tail(skb, q->l_queue);
        }
        ++q->packets_in_c;
        if (!q->c_head_ts)
                q->c_head_ts = cb->ts;
        return qdisc_enqueue_tail(skb, sch);
}

/* By default, dualpi2 will split GSO skbs into independent skbs and enqueue
 * each of those individually. This yields the following benefits, at the
 * expense of CPU usage:
 * - Finer-grained AQM actions as the sub-packets of a burst no longer share
 *   the same fate (e.g., the random mark/drop probability is applied
 *   individually)
 * - Improved precision of the starvation protection/WRR scheduler at dequeue,
 *   as the size of the dequeued packets will be smaller.
 */
static int dualpi2_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                 struct sk_buff **to_free)
{
        struct dualpi2_sched_data *q = qdisc_priv(sch);
        int err;

        err = dualpi2_skb_classify(q, skb);
        if (err != NET_XMIT_SUCCESS) {
                if (err & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return err;
        }

        if (q->split_gso && skb_is_gso(skb)) {
                netdev_features_t features;
                struct sk_buff *nskb, *next;
                int cnt, byte_len, orig_len;
                int err;

                features = netif_skb_features(skb);
                nskb = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
                if (IS_ERR_OR_NULL(nskb))
                        return qdisc_drop(skb, sch, to_free);

                cnt = 1;
                byte_len = 0;
                orig_len = qdisc_pkt_len(skb);
                skb_list_walk_safe(nskb, nskb, next) {
                        skb_mark_not_on_list(nskb);

                        /* Iterate through GSO fragments of an skb:
                         * (1) Set pkt_len from the single GSO fragments
                         * (2) Copy classified and ect values of an skb
                         * (3) Enqueue fragment & set ts in dualpi2_enqueue_skb
                         */
                        qdisc_skb_cb(nskb)->pkt_len = nskb->len;
                        dualpi2_skb_cb(nskb)->classified =
                                dualpi2_skb_cb(skb)->classified;
                        dualpi2_skb_cb(nskb)->ect = dualpi2_skb_cb(skb)->ect;
                        err = dualpi2_enqueue_skb(nskb, sch, to_free);

                        if (err == NET_XMIT_SUCCESS) {
                                /* Compute the backlog adjustment that needs
                                 * to be propagated in the qdisc tree to
                                 * reflect all new skbs successfully enqueued.
                                 */
                                ++cnt;
                                byte_len += nskb->len;
                        }
                }
                if (cnt > 1) {
                        /* The caller will add the original skb stats to its
                         * backlog; compensate for this if any nskb is
                         * enqueued.
                         */
                        --cnt;
                        byte_len -= orig_len;
                }
                qdisc_tree_reduce_backlog(sch, -cnt, -byte_len);
                consume_skb(skb);
                return err;
        }
        return dualpi2_enqueue_skb(skb, sch, to_free);
}

/* Select the queue from which the next packet can be dequeued, ensuring that
 * neither queue can starve the other with a WRR scheduler.
 *
 * The sign of the WRR credit determines the next queue, while the size of
 * the dequeued packet determines the magnitude of the WRR credit change. If
 * either queue is empty, the WRR credit is kept unchanged.
 *
 * As the dequeued packet can be dropped later, the caller has to perform the
 * qdisc_bstats_update() calls.
 */
static struct sk_buff *dequeue_packet(struct Qdisc *sch,
                                      struct dualpi2_sched_data *q,
                                      int *credit_change,
                                      u64 now)
{
        struct sk_buff *skb = NULL;
        int c_len;

        *credit_change = 0;
        c_len = qdisc_qlen(sch) - qdisc_qlen(q->l_queue);
        if (qdisc_qlen(q->l_queue) && (!c_len || q->c_protection_credit <= 0)) {
                skb = __qdisc_dequeue_head(&q->l_queue->q);
                WRITE_ONCE(q->l_head_ts, head_enqueue_time(q->l_queue));
                if (c_len)
                        *credit_change = q->c_protection_wc;
                qdisc_qstats_backlog_dec(q->l_queue, skb);

                /* Keep the global queue size consistent */
                --sch->q.qlen;
                q->memory_used -= skb->truesize;
        } else if (c_len) {
                skb = __qdisc_dequeue_head(&sch->q);
                WRITE_ONCE(q->c_head_ts, head_enqueue_time(sch));
                if (qdisc_qlen(q->l_queue))
                        *credit_change = ~((s32)q->c_protection_wl) + 1;
                q->memory_used -= skb->truesize;
        } else {
                dualpi2_reset_c_protection(q);
                return NULL;
        }
        *credit_change *= qdisc_pkt_len(skb);
        qdisc_qstats_backlog_dec(sch, skb);
        return skb;
}
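
/* Worked example (editorial note, not from the original authors): serving a
 * 1500B packet from the L-queue adds wc * 1500 to the credit, while serving
 * one from the C-queue subtracts wl * 1500. With both queues backlogged and
 * the default wc = 10 / wl = 90, the credit hovers around zero when the
 * L-queue receives ~90% of the bytes, i.e., the weights act as byte-share
 * targets rather than packet-share targets.
 */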

static int do_step_aqm(struct dualpi2_sched_data *q, struct sk_buff *skb,
                       u64 now)
{
        u64 qdelay = 0;

        if (q->step_in_packets)
                qdelay = qdisc_qlen(q->l_queue);
        else
                qdelay = dualpi2_sojourn_time(skb, now);

        if (dualpi2_skb_cb(skb)->apply_step && qdelay > q->step_thresh) {
                if (!dualpi2_skb_cb(skb)->ect) {
                        /* Drop this non-ECT packet */
                        return 1;
                }

                if (dualpi2_mark(q, skb))
                        ++q->step_marks;
        }
        qdisc_bstats_update(q->l_queue, skb);
        return 0;
}

static void drop_and_retry(struct dualpi2_sched_data *q, struct sk_buff *skb,
                           struct Qdisc *sch, enum skb_drop_reason reason)
{
        ++q->deferred_drops_cnt;
        q->deferred_drops_len += qdisc_pkt_len(skb);
        kfree_skb_reason(skb, reason);
        qdisc_qstats_drop(sch);
}

static struct sk_buff *dualpi2_qdisc_dequeue(struct Qdisc *sch)
{
        struct dualpi2_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        int credit_change;
        u64 now;

        now = ktime_get_ns();

        while ((skb = dequeue_packet(sch, q, &credit_change, now))) {
                if (!q->drop_early && must_drop(sch, q, skb)) {
                        drop_and_retry(q, skb, sch,
                                       SKB_DROP_REASON_QDISC_CONGESTED);
                        continue;
                }

                if (skb_in_l_queue(skb) && do_step_aqm(q, skb, now)) {
                        qdisc_qstats_drop(q->l_queue);
                        drop_and_retry(q, skb, sch,
                                       SKB_DROP_REASON_DUALPI2_STEP_DROP);
                        continue;
                }

                q->c_protection_credit += credit_change;
                qdisc_bstats_update(sch, skb);
                break;
        }

        if (q->deferred_drops_cnt) {
                qdisc_tree_reduce_backlog(sch, q->deferred_drops_cnt,
                                          q->deferred_drops_len);
                q->deferred_drops_cnt = 0;
                q->deferred_drops_len = 0;
        }
        return skb;
}

static s64 __scale_delta(u64 diff)
{
        do_div(diff, 1 << ALPHA_BETA_GRANULARITY);
        return diff;
}

static void get_queue_delays(struct dualpi2_sched_data *q, u64 *qdelay_c,
                             u64 *qdelay_l)
{
        u64 now, qc, ql;

        now = ktime_get_ns();
        qc = READ_ONCE(q->c_head_ts);
        ql = READ_ONCE(q->l_head_ts);

        *qdelay_c = qc ? now - qc : 0;
        *qdelay_l = ql ? now - ql : 0;
}

static u32 calculate_probability(struct Qdisc *sch)
{
        struct dualpi2_sched_data *q = qdisc_priv(sch);
        u32 new_prob;
        u64 qdelay_c;
        u64 qdelay_l;
        u64 qdelay;
        s64 delta;

        get_queue_delays(q, &qdelay_c, &qdelay_l);
        qdelay = max(qdelay_l, qdelay_c);

        /* Alpha and beta take at most 32b, i.e., the delay difference would
         * overflow for queuing delay differences > ~4.2sec.
         */
        delta = ((s64)qdelay - (s64)q->pi2_target) * q->pi2_alpha;
        delta += ((s64)qdelay - (s64)q->last_qdelay) * q->pi2_beta;
        q->last_qdelay = qdelay;

        /* Bound new_prob between 0 and MAX_PROB */
        if (delta > 0) {
                new_prob = __scale_delta(delta) + q->pi2_prob;
                if (new_prob < q->pi2_prob)
                        new_prob = MAX_PROB;
        } else {
                new_prob = q->pi2_prob - __scale_delta(~delta + 1);
                if (new_prob > q->pi2_prob)
                        new_prob = 0;
        }

        /* If we do not drop on overload, ensure we cap the L4S probability to
         * 100% to keep window fairness when overflowing.
         */
        if (!q->drop_overload)
                return min_t(u32, new_prob, MAX_PROB / q->coupling_factor);
        return new_prob;
}
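
/* Worked example (editorial note, not from the original authors): with the
 * default 15ms target and the scaled default gains (alpha ~= 44,
 * beta ~= 879), a 20ms queue delay that grew from 18ms since the last
 * update gives delta = 5e6 * 44 + 2e6 * 879 ~= 1.98e9. __scale_delta()
 * divides this by 2^6, raising the probability by ~3.1e7, i.e., ~0.7% of
 * MAX_PROB per tupdate interval.
 */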

static u32 get_memory_limit(struct Qdisc *sch, u32 limit)
{
        /* Apply rule of thumb, i.e., doubling the packet length,
         * to further include per packet overhead in memory_limit.
         */
        u64 memlim = mul_u32_u32(limit, 2 * psched_mtu(qdisc_dev(sch)));

        if (upper_32_bits(memlim))
                return U32_MAX;
        else
                return lower_32_bits(memlim);
}
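
/* Worked example (editorial note, not from the original authors): for the
 * default limit of 10000 packets and a 1514B MTU, the derived memory limit
 * is 10000 * 2 * 1514 ~= 30.3MB, leaving headroom for per-skb truesize
 * overhead on top of the raw packet bytes.
 */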

static u32 convert_us_to_nsec(u32 us)
{
        u64 ns = mul_u32_u32(us, NSEC_PER_USEC);

        if (upper_32_bits(ns))
                return U32_MAX;

        return lower_32_bits(ns);
}

static u32 convert_ns_to_usec(u64 ns)
{
        do_div(ns, NSEC_PER_USEC);
        if (upper_32_bits(ns))
                return U32_MAX;

        return lower_32_bits(ns);
}

static enum hrtimer_restart dualpi2_timer(struct hrtimer *timer)
{
        struct dualpi2_sched_data *q = timer_container_of(q, timer, pi2_timer);
        struct Qdisc *sch = q->sch;
        spinlock_t *root_lock; /* to lock qdisc for probability calculations */

        rcu_read_lock();
        root_lock = qdisc_lock(qdisc_root_sleeping(sch));
        spin_lock(root_lock);

        WRITE_ONCE(q->pi2_prob, calculate_probability(sch));
        hrtimer_set_expires(&q->pi2_timer, next_pi2_timeout(q));

        spin_unlock(root_lock);
        rcu_read_unlock();
        return HRTIMER_RESTART;
}

static struct netlink_range_validation dualpi2_alpha_beta_range = {
        .min = 1,
        .max = ALPHA_BETA_MAX,
};

static const struct nla_policy dualpi2_policy[TCA_DUALPI2_MAX + 1] = {
        [TCA_DUALPI2_LIMIT]             = NLA_POLICY_MIN(NLA_U32, 1),
        [TCA_DUALPI2_MEMORY_LIMIT]      = NLA_POLICY_MIN(NLA_U32, 1),
        [TCA_DUALPI2_TARGET]            = { .type = NLA_U32 },
        [TCA_DUALPI2_TUPDATE]           = NLA_POLICY_MIN(NLA_U32, 1),
        [TCA_DUALPI2_ALPHA]             =
                NLA_POLICY_FULL_RANGE(NLA_U32, &dualpi2_alpha_beta_range),
        [TCA_DUALPI2_BETA]              =
                NLA_POLICY_FULL_RANGE(NLA_U32, &dualpi2_alpha_beta_range),
        [TCA_DUALPI2_STEP_THRESH_PKTS]  = { .type = NLA_U32 },
        [TCA_DUALPI2_STEP_THRESH_US]    = { .type = NLA_U32 },
        [TCA_DUALPI2_MIN_QLEN_STEP]     = { .type = NLA_U32 },
        [TCA_DUALPI2_COUPLING]          = NLA_POLICY_MIN(NLA_U8, 1),
        [TCA_DUALPI2_DROP_OVERLOAD]     =
                NLA_POLICY_MAX(NLA_U8, TCA_DUALPI2_DROP_OVERLOAD_MAX),
        [TCA_DUALPI2_DROP_EARLY]        =
                NLA_POLICY_MAX(NLA_U8, TCA_DUALPI2_DROP_EARLY_MAX),
        [TCA_DUALPI2_C_PROTECTION]      =
                NLA_POLICY_RANGE(NLA_U8, 0, MAX_WC),
        [TCA_DUALPI2_ECN_MASK]          =
                NLA_POLICY_RANGE(NLA_U8, TC_DUALPI2_ECN_MASK_L4S_ECT,
                                 TCA_DUALPI2_ECN_MASK_MAX),
        [TCA_DUALPI2_SPLIT_GSO]         =
                NLA_POLICY_MAX(NLA_U8, TCA_DUALPI2_SPLIT_GSO_MAX),
};

static int dualpi2_change(struct Qdisc *sch, struct nlattr *opt,
                          struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_DUALPI2_MAX + 1];
        struct dualpi2_sched_data *q;
        int old_backlog;
        int old_qlen;
        int err;

        if (!opt || !nla_len(opt)) {
                NL_SET_ERR_MSG_MOD(extack, "Dualpi2 options are required");
                return -EINVAL;
        }
        err = nla_parse_nested(tb, TCA_DUALPI2_MAX, opt, dualpi2_policy,
                               extack);
        if (err < 0)
                return err;
        if (tb[TCA_DUALPI2_STEP_THRESH_PKTS] &&
            tb[TCA_DUALPI2_STEP_THRESH_US]) {
                NL_SET_ERR_MSG_MOD(extack, "multiple step thresh attributes");
                return -EINVAL;
        }

        q = qdisc_priv(sch);
        sch_tree_lock(sch);

        if (tb[TCA_DUALPI2_LIMIT]) {
                u32 limit = nla_get_u32(tb[TCA_DUALPI2_LIMIT]);

                WRITE_ONCE(sch->limit, limit);
                WRITE_ONCE(q->memory_limit, get_memory_limit(sch, limit));
        }

        if (tb[TCA_DUALPI2_MEMORY_LIMIT])
                WRITE_ONCE(q->memory_limit,
                           nla_get_u32(tb[TCA_DUALPI2_MEMORY_LIMIT]));

        if (tb[TCA_DUALPI2_TARGET]) {
                u64 target = nla_get_u32(tb[TCA_DUALPI2_TARGET]);

                WRITE_ONCE(q->pi2_target, target * NSEC_PER_USEC);
        }

        if (tb[TCA_DUALPI2_TUPDATE]) {
                u64 tupdate = nla_get_u32(tb[TCA_DUALPI2_TUPDATE]);

                WRITE_ONCE(q->pi2_tupdate, convert_us_to_nsec(tupdate));
        }

        if (tb[TCA_DUALPI2_ALPHA]) {
                u32 alpha = nla_get_u32(tb[TCA_DUALPI2_ALPHA]);

                WRITE_ONCE(q->pi2_alpha, dualpi2_scale_alpha_beta(alpha));
        }

        if (tb[TCA_DUALPI2_BETA]) {
                u32 beta = nla_get_u32(tb[TCA_DUALPI2_BETA]);

                WRITE_ONCE(q->pi2_beta, dualpi2_scale_alpha_beta(beta));
        }

        if (tb[TCA_DUALPI2_STEP_THRESH_PKTS]) {
                u32 step_th = nla_get_u32(tb[TCA_DUALPI2_STEP_THRESH_PKTS]);

                WRITE_ONCE(q->step_in_packets, true);
                WRITE_ONCE(q->step_thresh, step_th);
        } else if (tb[TCA_DUALPI2_STEP_THRESH_US]) {
                u32 step_th = nla_get_u32(tb[TCA_DUALPI2_STEP_THRESH_US]);

                WRITE_ONCE(q->step_in_packets, false);
                WRITE_ONCE(q->step_thresh, convert_us_to_nsec(step_th));
        }

        if (tb[TCA_DUALPI2_MIN_QLEN_STEP])
                WRITE_ONCE(q->min_qlen_step,
                           nla_get_u32(tb[TCA_DUALPI2_MIN_QLEN_STEP]));

        if (tb[TCA_DUALPI2_COUPLING]) {
                u8 coupling = nla_get_u8(tb[TCA_DUALPI2_COUPLING]);

                WRITE_ONCE(q->coupling_factor, coupling);
        }

        if (tb[TCA_DUALPI2_DROP_OVERLOAD]) {
                u8 drop_overload = nla_get_u8(tb[TCA_DUALPI2_DROP_OVERLOAD]);

                WRITE_ONCE(q->drop_overload, (bool)drop_overload);
        }

        if (tb[TCA_DUALPI2_DROP_EARLY]) {
                u8 drop_early = nla_get_u8(tb[TCA_DUALPI2_DROP_EARLY]);

                WRITE_ONCE(q->drop_early, (bool)drop_early);
        }

        if (tb[TCA_DUALPI2_C_PROTECTION]) {
                u8 wc = nla_get_u8(tb[TCA_DUALPI2_C_PROTECTION]);

                dualpi2_calculate_c_protection(sch, q, wc);
        }

        if (tb[TCA_DUALPI2_ECN_MASK]) {
                u8 ecn_mask = nla_get_u8(tb[TCA_DUALPI2_ECN_MASK]);

                WRITE_ONCE(q->ecn_mask, ecn_mask);
        }

        if (tb[TCA_DUALPI2_SPLIT_GSO]) {
                u8 split_gso = nla_get_u8(tb[TCA_DUALPI2_SPLIT_GSO]);

                WRITE_ONCE(q->split_gso, (bool)split_gso);
        }

        old_qlen = qdisc_qlen(sch);
        old_backlog = sch->qstats.backlog;
        while (qdisc_qlen(sch) > sch->limit ||
               q->memory_used > q->memory_limit) {
                struct sk_buff *skb = qdisc_dequeue_internal(sch, true);

                q->memory_used -= skb->truesize;
                qdisc_qstats_backlog_dec(sch, skb);
                rtnl_qdisc_drop(skb, sch);
        }
        qdisc_tree_reduce_backlog(sch, old_qlen - qdisc_qlen(sch),
                                  old_backlog - sch->qstats.backlog);

        sch_tree_unlock(sch);
        return 0;
}

/* Default alpha/beta values give a 10dB stability margin with max_rtt=100ms. */
static void dualpi2_reset_default(struct Qdisc *sch)
{
        struct dualpi2_sched_data *q = qdisc_priv(sch);

        q->sch->limit = 10000;                          /* Max 125ms at 1Gbps */
        q->memory_limit = get_memory_limit(sch, q->sch->limit);

        q->pi2_target = 15 * NSEC_PER_MSEC;
        q->pi2_tupdate = 16 * NSEC_PER_MSEC;
        q->pi2_alpha = dualpi2_scale_alpha_beta(41);    /* ~0.16 Hz * 256 */
        q->pi2_beta = dualpi2_scale_alpha_beta(819);    /* ~3.20 Hz * 256 */

        q->step_thresh = 1 * NSEC_PER_MSEC;
        q->step_in_packets = false;

        dualpi2_calculate_c_protection(q->sch, q, 10);  /* wc=10%, wl=90% */

        q->ecn_mask = TC_DUALPI2_ECN_MASK_L4S_ECT;      /* INET_ECN_ECT_1 */
        q->min_qlen_step = 0;          /* Always apply step mark in L-queue */
        q->coupling_factor = 2;        /* window fairness for equal RTTs */
        q->drop_overload = TC_DUALPI2_DROP_OVERLOAD_DROP;   /* Drop overload */
        q->drop_early = TC_DUALPI2_DROP_EARLY_DROP_DEQUEUE; /* Drop dequeue */
        q->split_gso = TC_DUALPI2_SPLIT_GSO_SPLIT_GSO;      /* Split GSO */
}
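
/* Worked example (editorial note, not from the original authors): the
 * 10000-packet default limit holds 10000 * 1500B * 8b ~= 120Mb of standing
 * data, i.e., on the order of the 125ms at 1Gbps quoted above once
 * per-frame overhead is included.
 */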

static int dualpi2_init(struct Qdisc *sch, struct nlattr *opt,
                        struct netlink_ext_ack *extack)
{
        struct dualpi2_sched_data *q = qdisc_priv(sch);
        int err;

        q->l_queue = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                       TC_H_MAKE(sch->handle, 1), extack);
        if (!q->l_queue)
                return -ENOMEM;

        err = tcf_block_get(&q->tcf_block, &q->tcf_filters, sch, extack);
        if (err)
                return err;

        q->sch = sch;
        dualpi2_reset_default(sch);
        hrtimer_setup(&q->pi2_timer, dualpi2_timer, CLOCK_MONOTONIC,
                      HRTIMER_MODE_ABS_PINNED_SOFT);

        if (opt && nla_len(opt)) {
                err = dualpi2_change(sch, opt, extack);

                if (err)
                        return err;
        }

        hrtimer_start(&q->pi2_timer, next_pi2_timeout(q),
                      HRTIMER_MODE_ABS_PINNED_SOFT);
        return 0;
}

static int dualpi2_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct dualpi2_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;
        bool step_in_pkts;
        u32 step_th;

        step_in_pkts = READ_ONCE(q->step_in_packets);
        step_th = READ_ONCE(q->step_thresh);

        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (!opts)
                goto nla_put_failure;

        if (step_in_pkts &&
            (nla_put_u32(skb, TCA_DUALPI2_LIMIT, READ_ONCE(sch->limit)) ||
             nla_put_u32(skb, TCA_DUALPI2_MEMORY_LIMIT,
                         READ_ONCE(q->memory_limit)) ||
             nla_put_u32(skb, TCA_DUALPI2_TARGET,
                         convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
             nla_put_u32(skb, TCA_DUALPI2_TUPDATE,
                         convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
             nla_put_u32(skb, TCA_DUALPI2_ALPHA,
                         dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
             nla_put_u32(skb, TCA_DUALPI2_BETA,
                         dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
             nla_put_u32(skb, TCA_DUALPI2_STEP_THRESH_PKTS, step_th) ||
             nla_put_u32(skb, TCA_DUALPI2_MIN_QLEN_STEP,
                         READ_ONCE(q->min_qlen_step)) ||
             nla_put_u8(skb, TCA_DUALPI2_COUPLING,
                        READ_ONCE(q->coupling_factor)) ||
             nla_put_u8(skb, TCA_DUALPI2_DROP_OVERLOAD,
                        READ_ONCE(q->drop_overload)) ||
             nla_put_u8(skb, TCA_DUALPI2_DROP_EARLY,
                        READ_ONCE(q->drop_early)) ||
             nla_put_u8(skb, TCA_DUALPI2_C_PROTECTION,
                        READ_ONCE(q->c_protection_wc)) ||
             nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
             nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
                goto nla_put_failure;

        if (!step_in_pkts &&
            (nla_put_u32(skb, TCA_DUALPI2_LIMIT, READ_ONCE(sch->limit)) ||
             nla_put_u32(skb, TCA_DUALPI2_MEMORY_LIMIT,
                         READ_ONCE(q->memory_limit)) ||
             nla_put_u32(skb, TCA_DUALPI2_TARGET,
                         convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
             nla_put_u32(skb, TCA_DUALPI2_TUPDATE,
                         convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
             nla_put_u32(skb, TCA_DUALPI2_ALPHA,
                         dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
             nla_put_u32(skb, TCA_DUALPI2_BETA,
                         dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
             nla_put_u32(skb, TCA_DUALPI2_STEP_THRESH_US,
                         convert_ns_to_usec(step_th)) ||
             nla_put_u32(skb, TCA_DUALPI2_MIN_QLEN_STEP,
                         READ_ONCE(q->min_qlen_step)) ||
             nla_put_u8(skb, TCA_DUALPI2_COUPLING,
                        READ_ONCE(q->coupling_factor)) ||
             nla_put_u8(skb, TCA_DUALPI2_DROP_OVERLOAD,
                        READ_ONCE(q->drop_overload)) ||
             nla_put_u8(skb, TCA_DUALPI2_DROP_EARLY,
                        READ_ONCE(q->drop_early)) ||
             nla_put_u8(skb, TCA_DUALPI2_C_PROTECTION,
                        READ_ONCE(q->c_protection_wc)) ||
             nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
             nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -1;
}

static int dualpi2_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct dualpi2_sched_data *q = qdisc_priv(sch);
        struct tc_dualpi2_xstats st = {
                .prob            = READ_ONCE(q->pi2_prob),
                .packets_in_c    = q->packets_in_c,
                .packets_in_l    = q->packets_in_l,
                .maxq            = q->maxq,
                .ecn_mark        = q->ecn_mark,
                .credit          = q->c_protection_credit,
                .step_marks      = q->step_marks,
                .memory_used     = q->memory_used,
                .max_memory_used = q->max_memory_used,
                .memory_limit    = q->memory_limit,
        };
        u64 qc, ql;

        get_queue_delays(q, &qc, &ql);
        st.delay_l = convert_ns_to_usec(ql);
        st.delay_c = convert_ns_to_usec(qc);
        return gnet_stats_copy_app(d, &st, sizeof(st));
}

/* Reset both L-queue and C-queue, internal packet counters, PI probability,
 * C-queue protection credit, and timestamps, while preserving current
 * configuration of DUALPI2.
 */
static void dualpi2_reset(struct Qdisc *sch)
{
        struct dualpi2_sched_data *q = qdisc_priv(sch);

        qdisc_reset_queue(sch);
        qdisc_reset_queue(q->l_queue);
        q->c_head_ts = 0;
        q->l_head_ts = 0;
        q->pi2_prob = 0;
        q->packets_in_c = 0;
        q->packets_in_l = 0;
        q->maxq = 0;
        q->ecn_mark = 0;
        q->step_marks = 0;
        q->memory_used = 0;
        q->max_memory_used = 0;
        dualpi2_reset_c_protection(q);
}

static void dualpi2_destroy(struct Qdisc *sch)
{
        struct dualpi2_sched_data *q = qdisc_priv(sch);

        q->pi2_tupdate = 0;
        hrtimer_cancel(&q->pi2_timer);
        if (q->l_queue)
                qdisc_put(q->l_queue);
        tcf_block_put(q->tcf_block);
}

static struct Qdisc *dualpi2_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long dualpi2_find(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static unsigned long dualpi2_bind(struct Qdisc *sch, unsigned long parent,
                                  u32 classid)
{
        return 0;
}

static void dualpi2_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *dualpi2_tcf_block(struct Qdisc *sch, unsigned long cl,
                                           struct netlink_ext_ack *extack)
{
        struct dualpi2_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return q->tcf_block;
}

static void dualpi2_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        unsigned int i;

        if (arg->stop)
                return;

        /* We statically define only 2 queues */
        for (i = 0; i < 2; i++) {
                if (arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                if (arg->fn(sch, i + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

/* Minimal class support to handle tc filters */
static const struct Qdisc_class_ops dualpi2_class_ops = {
        .leaf           = dualpi2_leaf,
        .find           = dualpi2_find,
        .tcf_block      = dualpi2_tcf_block,
        .bind_tcf       = dualpi2_bind,
        .unbind_tcf     = dualpi2_unbind,
        .walk           = dualpi2_walk,
};

static struct Qdisc_ops dualpi2_qdisc_ops __read_mostly = {
        .id             = "dualpi2",
        .cl_ops         = &dualpi2_class_ops,
        .priv_size      = sizeof(struct dualpi2_sched_data),
        .enqueue        = dualpi2_qdisc_enqueue,
        .dequeue        = dualpi2_qdisc_dequeue,
        .peek           = qdisc_peek_dequeued,
        .init           = dualpi2_init,
        .destroy        = dualpi2_destroy,
        .reset          = dualpi2_reset,
        .change         = dualpi2_change,
        .dump           = dualpi2_dump,
        .dump_stats     = dualpi2_dump_stats,
        .owner          = THIS_MODULE,
};

static int __init dualpi2_module_init(void)
{
        return register_qdisc(&dualpi2_qdisc_ops);
}

static void __exit dualpi2_module_exit(void)
{
        unregister_qdisc(&dualpi2_qdisc_ops);
}

module_init(dualpi2_module_init);
module_exit(dualpi2_module_exit);

MODULE_DESCRIPTION("Dual Queue with Proportional Integral controller Improved with a Square (dualpi2) scheduler");
MODULE_AUTHOR("Koen De Schepper <koen.de_schepper@nokia-bell-labs.com>");
MODULE_AUTHOR("Chia-Yu Chang <chia-yu.chang@nokia-bell-labs.com>");
MODULE_AUTHOR("Olga Albisser <olga@albisser.org>");
MODULE_AUTHOR("Henrik Steen <henrist@henrist.net>");
MODULE_AUTHOR("Olivier Tilmans <olivier.tilmans@nokia.com>");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION("1.0");