// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
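
/* For instance, a packet hashing to 0x9af3c42d visits bin 0xd at level 0,
 * bin 0x2 at level 1, ..., bin 0x9 at level 7: level i uses
 * (hash >> (i * SFB_BUCKET_SHIFT)) & SFB_BUCKET_MASK as its bin index.
 */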

/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue */
	u16		p_mark; /* marking probability */
};

/* We use double buffering right before a hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	siphash_key_t	perturbation; /* siphash key */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
#define SFB_MAX_PROB 0xFFFF

static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}
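
/* e.g. 0x8000 encodes p = 0.5; prob_plus(0xF000, 0x2000) saturates to
 * SFB_MAX_PROB, and prob_minus(0x1000, 0x2000) clamps to 0.
 */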

static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			WRITE_ONCE(b[hash].qlen, b[hash].qlen + 1);
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = cb->hashes[0];
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = cb->hashes[1];
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}
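
/* During the double buffering window a packet carries a non-zero hash for
 * both slots, so its arrival and departure are charged to the virtual
 * queues of both bin sets; a zero hash marks a slot that was not used.
 */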

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			WRITE_ONCE(b[hash].qlen, b[hash].qlen - 1);
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	WRITE_ONCE(b->p_mark, prob_minus(b->p_mark, q->decrement));
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	WRITE_ONCE(b->p_mark, prob_plus(b->p_mark, q->increment));
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		u32 b_qlen = READ_ONCE(b->qlen);
		u32 b_mark = READ_ONCE(b->p_mark);

		if (qlen < b_qlen)
			qlen = b_qlen;
		totalpm += b_mark;
		if (prob < b_mark)
			prob = b_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	get_random_bytes(&q->bins[slot].perturbation,
			 sizeof(q->bins[slot].perturbation));
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}
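
/* Note: sfb_swap_slot() re-keys the slot being vacated and makes the other
 * slot, whose bins were populated while double_buffering was set, the
 * active one.
 */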

/* Non-elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}
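
/* This is a lazily refilled token bucket: tokens accrue at penalty_rate per
 * second (with age capped at 10 s, so a long idle period cannot bank more
 * than that), are clamped to penalty_burst, and one token is consumed per
 * admitted packet.  Returning true means "drop this packet".
 */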

static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tcf_classify(skb, NULL, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	enum qdisc_drop_reason reason = QDISC_DROP_OVERLIMIT;
	struct sfb_sched_data *q = qdisc_priv(sch);
	unsigned int len = qdisc_pkt_len(skb);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	struct sfb_skb_cb cb;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		WRITE_ONCE(q->stats.queuedrop,
			   q->stats.queuedrop + 1);
		goto drop;
	}

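	/* Rehash timeline within one interval (t0 = rehash_time):
	 *   [t0, t0 + interval - warmup)		: single buffering
	 *   [t0 + interval - warmup, t0 + interval)	: double buffering,
	 *						  warming up the
	 *						  inactive slot
	 *   at t0 + interval				: swap slots, re-key
	 */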
	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
	}

	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

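	/* Blue update rule, applied independently at each of the 8 levels:
	 * an empty bin suggests spare capacity, so decrease its marking
	 * probability by "decrement" (d2); a bin at or beyond bin_size
	 * signals sustained congestion, so increase it by "increment" (d1).
	 * The minima of p_mark and qlen across levels estimate this flow's
	 * own state, since other flows are unlikely to collide with it in
	 * every level at once.
	 */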
	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		WRITE_ONCE(q->stats.bucketdrop,
			   q->stats.bucketdrop + 1);
		goto drop;
	}
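
	/* Every bin this flow hashes to is saturated (p_min == 1): the flow
	 * does not respond to marking, so treat it as non-elastic.  Keep
	 * feeding the inactive slot while double buffering, then admit it
	 * only within the penalty_rate/penalty_burst budget.
	 */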
	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
						       &q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			WRITE_ONCE(q->stats.penaltydrop,
				   q->stats.penaltydrop + 1);
			goto drop;
		}
		goto enqueue;
	}

	r = get_random_u16() & SFB_MAX_PROB;
	reason = QDISC_DROP_CONGESTED;

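	/* Mark (or drop) with probability p_min.  Once p_min exceeds 1/2,
	 * marking alone is clearly not working, so a growing share of
	 * packets is dropped instead: the overall drop probability
	 * (p_min - 1/2) * 2 ramps linearly from 0 at p_min = 1/2 up to 1
	 * at p_min = 1.
	 */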
	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				WRITE_ONCE(q->stats.earlydrop,
					   q->stats.earlydrop + 1);
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			WRITE_ONCE(q->stats.marked,
				   q->stats.marked + 1);
		} else {
			WRITE_ONCE(q->stats.earlydrop,
				   q->stats.earlydrop + 1);
			goto drop;
		}
	}

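	/* The skb may be freed or modified by the child qdisc, so snapshot
	 * the hashes from its cb before qdisc_enqueue(), and only charge
	 * the bins (increment_qlen) once the child has accepted the packet.
	 */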
enqueue:
	memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
		increment_qlen(&cb, q);
	} else if (net_xmit_drop_count(ret)) {
		WRITE_ONCE(q->stats.childdrop,
			   q->stats.childdrop + 1);
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop_reason(skb, sch, to_free, reason);
	return NET_XMIT_CN;

other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = qdisc_dequeue_peeked(child);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}
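
/* qdisc_dequeue_peeked() transparently consumes an skb that a previous peek
 * cached in the child (qdisc_peek_dequeued()), falling back to the child's
 * regular ->dequeue() otherwise; decrement_qlen() then releases the bins
 * recorded in the skb cb at enqueue time.
 */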

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (likely(q->qdisc))
		qdisc_reset(q->qdisc);
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	qdisc_put(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000,
	.penalty_rate = 10,
	.penalty_burst = 20,
};
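
/* With SFB_MAX_PROB == 0xFFFF these defaults give increment = 66 (~0.1 % of
 * full scale) and decrement = 11 (~0.017 %), so p_mark rises roughly six
 * times faster than it decays.  sfb_change() below falls back to this table
 * whenever no TCA_SFB_PARMS attribute is supplied, e.g. from a bare
 * "tc qdisc add dev eth0 root sfb" (assuming tc sends no parameters).
 */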

static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child, *old;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested_deprecated(tb, TCA_SFB_MAX, opt,
						  sfb_policy, NULL);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = qdisc_dev(sch)->tx_queue_len;

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
	if (IS_ERR(child))
		return PTR_ERR(child);

	if (child != &noop_qdisc)
		qdisc_hash_add(child, true);
	sch_tree_lock(sch);

	qdisc_purge_queue(q->qdisc);
	old = q->qdisc;
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);
	qdisc_put(old);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt, extack);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = READ_ONCE(q->stats.earlydrop),
		.penaltydrop = READ_ONCE(q->stats.penaltydrop),
		.bucketdrop = READ_ONCE(q->stats.bucketdrop),
		.queuedrop = READ_ONCE(q->stats.queuedrop),
		.childdrop = READ_ONCE(q->stats.childdrop),
		.marked = READ_ONCE(q->stats.marked),
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl,
		      struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop)
		tc_qdisc_stats_dump(sch, 1, walker);
}

static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		= sfb_graft,
	.leaf		= sfb_leaf,
	.find		= sfb_find,
	.change		= sfb_change_class,
	.delete		= sfb_delete,
	.walk		= sfb_walk,
	.tcf_block	= sfb_tcf_block,
	.bind_tcf	= sfb_bind,
	.unbind_tcf	= sfb_unbind,
	.dump		= sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		= "sfb",
	.priv_size	= sizeof(struct sfb_sched_data),
	.cl_ops		= &sfb_class_ops,
	.enqueue	= sfb_enqueue,
	.dequeue	= sfb_dequeue,
	.peek		= sfb_peek,
	.init		= sfb_init,
	.reset		= sfb_reset,
	.destroy	= sfb_destroy,
	.change		= sfb_change,
	.dump		= sfb_dump,
	.dump_stats	= sfb_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("sfb");

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");