// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32-bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
#define SFB_MAX_PROB	0xFFFF

/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16	qlen; /* length of virtual queue */
	u16	p_mark; /* marking probability */
};

/* We use double buffering right before a hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	siphash_key_t	  perturbation; /* siphash key */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}
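/* Worked example (illustrative, not part of the original source):
 * with SFB_MAX_PROB = 0xFFFF, a p_mark of 0x8000 encodes a marking
 * probability of 32768/65536 = 0.5.  prob_plus(0xFF00, 0x0200)
 * saturates at 0xFFFF instead of wrapping around, and
 * prob_minus(0x0100, 0x0200) clamps to 0 instead of underflowing,
 * so p_mark always stays a valid Q0.16 probability.
 */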
static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = cb->hashes[0];
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = cb->hashes[1];
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	get_random_bytes(&q->bins[slot].perturbation,
			 sizeof(q->bins[slot].perturbation));
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}

/* Non-elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}
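/* Illustrative numbers (not part of the original source): with the
 * defaults penalty_rate = 10 and penalty_burst = 20, inelastic traffic
 * as a whole (the token bucket is per-qdisc, not per-flow) earns
 * 10 tokens per second, capped at 20.  After 2 idle seconds the full
 * burst of 20 packets is available; beyond that, inelastic packets
 * pass at roughly 10 per second and the rest become penaltydrop.
 */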
static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tcf_classify(skb, NULL, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
	struct sfb_sched_data *q = qdisc_priv(sch);
	unsigned int len = qdisc_pkt_len(skb);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	struct sfb_skb_cb cb;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
	}

	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
						       &q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = get_random_u16() & SFB_MAX_PROB;
	reason = SKB_DROP_REASON_QDISC_CONGESTED;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
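			/* Illustrative numbers (not part of the original
			 * source): with p_min = 0xC000 (~0.75), r < p_min
			 * fires for ~75% of packets; of those, the test
			 * below, r < (0xC000 - 0x8000) * 2 = 0x8000, turns
			 * ~50% of all packets into early drops, leaving
			 * ~25% to be ECN-marked further down.
			 */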
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
		increment_qlen(&cb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop_reason(skb, sch, to_free, reason);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (likely(q->qdisc))
		qdisc_reset(q->qdisc);
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	qdisc_put(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000,
	.penalty_rate = 10,
	.penalty_burst = 20,
};
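/* Usage sketch (an assumption, based on the iproute2 tc-sfb(8) front
 * end rather than anything in this file; in particular the mapping of
 * "target" to bin_size is believed, not confirmed here):
 *
 *	tc qdisc add dev eth0 root handle 1: sfb \
 *		rehash 600sec db 60sec limit 0 max 25 target 20 \
 *		penalty_rate 10 penalty_burst 20
 *
 * which would mirror sfb_default_ops above: "rehash" -> rehash_interval,
 * "db" -> warmup_time, "target" -> bin_size.
 */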
static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child, *old;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested_deprecated(tb, TCA_SFB_MAX, opt,
						  sfb_policy, NULL);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = qdisc_dev(sch)->tx_queue_len;

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
	if (IS_ERR(child))
		return PTR_ERR(child);

	if (child != &noop_qdisc)
		qdisc_hash_add(child, true);
	sch_tree_lock(sch);

	qdisc_purge_queue(q->qdisc);
	old = q->qdisc;
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);
	qdisc_put(old);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt, extack);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl,
		      struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		tc_qdisc_stats_dump(sch, 1, walker);
	}
}
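/* SFB exposes a single pseudo-class (sfb_find() always returns 1) so
 * that filters can be attached; the class operations above are stubs.
 * Sketch of attaching an external classifier (an assumption based on
 * standard tc u32 filter syntax, with the qdisc at handle 1: as in the
 * usage sketch earlier, not on anything in this file):
 *
 *	tc filter add dev eth0 parent 1: protocol ip \
 *		u32 match ip dport 443 0xffff flowid 1:7
 *
 * sfb_classify() would then use TC_H_MIN(classid) == 7 as the siphash
 * salt in place of the skb flow hash.
 */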
static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		=	sfb_graft,
	.leaf		=	sfb_leaf,
	.find		=	sfb_find,
	.change		=	sfb_change_class,
	.delete		=	sfb_delete,
	.walk		=	sfb_walk,
	.tcf_block	=	sfb_tcf_block,
	.bind_tcf	=	sfb_bind,
	.unbind_tcf	=	sfb_unbind,
	.dump		=	sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		=	"sfb",
	.priv_size	=	sizeof(struct sfb_sched_data),
	.cl_ops		=	&sfb_class_ops,
	.enqueue	=	sfb_enqueue,
	.dequeue	=	sfb_dequeue,
	.peek		=	sfb_peek,
	.init		=	sfb_init,
	.reset		=	sfb_reset,
	.destroy	=	sfb_destroy,
	.change		=	sfb_change,
	.dump		=	sfb_dump,
	.dump_stats	=	sfb_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("sfb");

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");