// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 * Copyright (C) 2013-2023 Eric Dumazet <edumazet@google.com>
 *
 * Meant to be mostly used for locally generated traffic:
 * fast classification depends on skb->sk being set before reaching us.
 * If not (router workload), we use rxhash as a fallback, with a 32-bit wide hash.
 * All packets belonging to a socket are considered as a 'flow'.
 *
 * Flows are dynamically allocated and stored in a hash table of RB trees.
 * They are also part of one Round Robin list ('new' or 'old' flows).
 *
 * Burst avoidance (aka pacing) capability:
 *
 * Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
 * bunch of packets, and this packet scheduler adds delay between
 * packets to respect rate limitation.
 *
 * enqueue():
 * - lookup one RB tree (out of 1024 or more) to find the flow.
 *   If the flow does not exist, create it and add it to the tree.
 *   Add the skb to the per-flow list of skbs (FIFO).
 * - Use a special FIFO for high priority packets.
 *
 * dequeue(): serves flows in Round Robin.
 * Note: when a flow becomes empty, we do not immediately remove it from
 * RB trees, for performance reasons (it is expected to send additional
 * packets, or the SLAB cache will reuse the socket for another flow).
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

struct fq_skb_cb {
	u64 time_to_send;
	u8 band;
};

static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
	return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
}
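/* Illustrative setup, not part of the original source: a typical root
 * qdisc installation from user space could look like
 *
 *	tc qdisc replace dev eth0 root fq limit 10000 flow_limit 100 \
 *		quantum 3028 initial_quantum 15140
 *
 * (the values shown simply restate this module's defaults for a 1500 byte
 * MTU device). Pacing itself needs no extra knob: a transport only has to
 * set sk->sk_pacing_rate and this qdisc spaces the flow's packets out.
 */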
/*
 * Per flow structure, dynamically allocated.
 * If packets have monotonically increasing time_to_send, they are placed
 * in O(1) in a linear list (head, tail), otherwise they are placed in a
 * rbtree (t_root).
 */
struct fq_flow {
/* First cache line : used in fq_gc(), fq_enqueue(), fq_dequeue() */
	struct rb_root	t_root;
	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
	union {
		struct sk_buff *tail;	/* last skb in the list */
		unsigned long  age;	/* (jiffies | 1UL) when flow was emptied, for gc */
	};
	union {
		struct rb_node	fq_node;	/* anchor in fq_root[] trees */
		/* Following field is only used for q->internal,
		 * because q->internal is not hashed in fq_root[]
		 */
		u64		stat_fastpath_packets;
	};
	struct sock	*sk;
	u32		socket_hash;	/* sk_hash */
	int		qlen;		/* number of packets in flow queue */

/* Second cache line */
	int		credit;
	int		band;
	struct fq_flow *next;		/* next pointer in RR lists */

	struct rb_node	rate_node;	/* anchor in q->delayed tree */
	u64		time_next_packet;
};

struct fq_flow_head {
	struct fq_flow *first;
	struct fq_flow *last;
};

struct fq_perband_flows {
	struct fq_flow_head new_flows;
	struct fq_flow_head old_flows;
	int		    credit;
	int		    quantum; /* based on band nr : 576KB, 192KB, 64KB */
};

#define FQ_PRIO2BAND_CRUMB_SIZE ((TC_PRIO_MAX + 1) >> 2)

struct fq_sched_data {
/* Read mostly cache line */

	u64		offload_horizon;
	u32		quantum;
	u32		initial_quantum;
	u32		flow_refill_delay;
	u32		flow_plimit;	/* max packets per flow */
	unsigned long	flow_max_rate;	/* optional max rate per flow */
	u64		ce_threshold;
	u64		horizon;	/* horizon in ns */
	u32		orphan_mask;	/* mask for orphaned skb */
	u32		low_rate_threshold;
	struct rb_root	*fq_root;
	u8		rate_enable;
	u8		fq_trees_log;
	u8		horizon_drop;
	u8		prio2band[FQ_PRIO2BAND_CRUMB_SIZE];
	u32		timer_slack; /* hrtimer slack in ns */

/* Read/Write fields. */

	unsigned int band_nr; /* band being serviced in fq_dequeue() */

	struct fq_perband_flows band_flows[FQ_BANDS];

	struct fq_flow	internal;	/* fastpath queue. */
	struct rb_root	delayed;	/* for rate limited flows */
	u64		time_next_delayed_flow;
	unsigned long	unthrottle_latency_ns;

	u32		band_pkt_count[FQ_BANDS];
	u32		flows;
	u32		inactive_flows; /* Flows with no packet to send. */
	u32		throttled_flows;

	u64		stat_throttled;
	struct qdisc_watchdog watchdog;
	u64		stat_gc_flows;

/* Seldom used fields. */

	u64		stat_band_drops[FQ_BANDS];
	u64		stat_ce_mark;
	u64		stat_horizon_drops;
	u64		stat_horizon_caps;
	u64		stat_flows_plimit;
	u64		stat_pkts_too_long;
	u64		stat_allocation_errors;
};

/* return the i-th 2-bit value ("crumb") */
static u8 fq_prio2band(const u8 *prio2band, unsigned int prio)
{
	return (READ_ONCE(prio2band[prio / 4]) >> (2 * (prio & 0x3))) & 0x3;
}
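/* Worked example for fq_prio2band() above (illustration only): for
 * prio == 5, prio / 4 == 1 selects prio2band[1], and 2 * (5 & 0x3) == 2
 * shifts the wanted crumb down, so the band is (prio2band[1] >> 2) & 0x3.
 * All TC_PRIO_MAX + 1 == 16 priorities therefore fit in
 * FQ_PRIO2BAND_CRUMB_SIZE == 4 bytes.
 */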
/*
 * f->tail and f->age share the same location.
 * We can use the low order bit to differentiate if this location points
 * to a sk_buff or contains a jiffies value, if we force this value to be odd.
 * This works because f->tail's low order bit is always 0, since
 * alignof(struct sk_buff) >= 2.
 */
static void fq_flow_set_detached(struct fq_flow *f)
{
	f->age = jiffies | 1UL;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
	return !!(f->age & 1UL);
}

/* special value to mark a throttled flow (not on old/new list) */
static struct fq_flow throttled;

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
	return f->next == &throttled;
}

enum new_flow {
	NEW_FLOW,
	OLD_FLOW
};

static void fq_flow_add_tail(struct fq_sched_data *q, struct fq_flow *flow,
			     enum new_flow list_sel)
{
	struct fq_perband_flows *pband = &q->band_flows[flow->band];
	struct fq_flow_head *head = (list_sel == NEW_FLOW) ?
					&pband->new_flows :
					&pband->old_flows;

	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	rb_erase(&f->rate_node, &q->delayed);
	q->throttled_flows--;
	fq_flow_add_tail(q, f, OLD_FLOW);
}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = rb_entry(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
	q->stat_throttled++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;


/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
	return fq_flow_is_detached(f) &&
	       time_after(jiffies, f->age + FQ_GC_AGE);
}

static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
		  struct sock *sk)
{
	struct rb_node **p, *parent;
	void *tofree[FQ_GC_MAX];
	struct fq_flow *f;
	int i, fcnt = 0;

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk)
			break;

		if (fq_gc_candidate(f)) {
			tofree[fcnt++] = f;
			if (fcnt == FQ_GC_MAX)
				break;
		}

		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	if (!fcnt)
		return;

	for (i = fcnt; i > 0; ) {
		f = tofree[--i];
		rb_erase(&f->fq_node, root);
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;

	kmem_cache_free_bulk(fq_flow_cachep, fcnt, tofree);
}
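/* Illustration of the encoding consumed by fq_gc_candidate() above
 * (example values only): a flow emptied at jiffies J stores J | 1UL in
 * f->age; with bit 0 set, this can never be a valid f->tail pointer.
 * Since FQ_GC_AGE == 3 * HZ, such a flow becomes a garbage collection
 * candidate roughly three seconds later, whatever the HZ setting.
 */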
/* Fast path can be used if :
 * 1) Packet tstamp is in the past, or within the pacing offload horizon.
 * 2) FQ qlen == 0 OR
 *    (no flow is currently eligible for transmit,
 *     AND fast path queue has less than 8 packets)
 * 3) No SO_MAX_PACING_RATE on the socket (if any).
 * 4) No @maxrate attribute on this qdisc.
 *
 * FQ cannot use the generic TCQ_F_CAN_BYPASS infrastructure.
 */
static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
			      u64 now)
{
	const struct fq_sched_data *q = qdisc_priv(sch);
	const struct sock *sk;

	if (fq_skb_cb(skb)->time_to_send > now + q->offload_horizon)
		return false;

	if (sch->q.qlen != 0) {
		/* Even if some packets are stored in this qdisc,
		 * we can still enable fast path if all of them are
		 * scheduled in the future (ie no flows are eligible)
		 * or in the fast path queue.
		 */
		if (q->flows != q->inactive_flows + q->throttled_flows)
			return false;

		/* Do not allow fast path queue to explode, we want Fair Queue mode
		 * under pressure.
		 */
		if (q->internal.qlen >= 8)
			return false;
	}

	sk = skb->sk;
	if (sk && sk_fullsock(sk) && !sk_is_tcp(sk) &&
	    sk->sk_max_pacing_rate != ~0UL)
		return false;

	if (q->flow_max_rate != ~0UL)
		return false;

	return true;
}
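/* Note on the fake socket pointers built in fq_classify() below
 * (hypothetical hash value, for illustration): an skb hash of 0x1234,
 * after orphan_mask is applied, yields the lookup key
 * (0x1234 << 1) | 1UL == 0x2469. Real struct sock pointers are at least
 * word aligned, so their low order bit is 0 and a collision with a
 * local flow is impossible.
 */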
static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb,
				   u64 now)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;

	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
	 * or a listener (SYNCOOKIE mode)
	 * 1) request sockets are not full blown,
	 *    they do not contain sk_pacing_rate
	 * 2) They are not part of a 'flow' yet
	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
	 *    especially if the listener set SO_MAX_PACING_RATE
	 * 4) We pretend they are orphaned
	 */
	if (!sk || sk_listener(sk)) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
		skb_orphan(skb);
	} else if (sk->sk_state == TCP_CLOSE) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
		/*
		 * Sockets in TCP_CLOSE are non connected.
		 * Typical use case is UDP sockets, they can send packets
		 * with sendto() to many different destinations.
		 * We probably could use a generic bit advertising
		 * non connected sockets, instead of sk_state == TCP_CLOSE,
		 * if we care enough.
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
	}

	if (fq_fastpath_check(sch, skb, now)) {
		q->internal.stat_fastpath_packets++;
		if (skb->sk == sk && q->rate_enable &&
		    READ_ONCE(sk->sk_pacing_status) != SK_PACING_FQ)
			smp_store_release(&sk->sk_pacing_status,
					  SK_PACING_FQ);
		return &q->internal;
	}

	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

	fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit with the
			 * initial quantum.
			 */
			if (unlikely(skb->sk == sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
				if (q->rate_enable)
					smp_store_release(&sk->sk_pacing_status,
							  SK_PACING_FQ);
				if (fq_flow_is_throttled(f))
					fq_flow_unset_throttled(q, f);
				f->time_next_packet = 0ULL;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	/* f->t_root is already zeroed after kmem_cache_zalloc() */

	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk == sk) {
		f->socket_hash = sk->sk_hash;
		if (q->rate_enable)
			smp_store_release(&sk->sk_pacing_status,
					  SK_PACING_FQ);
	}
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}

static struct sk_buff *fq_peek(struct fq_flow *flow)
{
	struct sk_buff *skb = skb_rb_first(&flow->t_root);
	struct sk_buff *head = flow->head;

	if (!skb)
		return head;

	if (!head)
		return skb;

	if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)
		return skb;
	return head;
}

static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
			  struct sk_buff *skb)
{
	if (skb == flow->head) {
		flow->head = skb->next;
	} else {
		rb_erase(&skb->rbnode, &flow->t_root);
		skb->dev = qdisc_dev(sch);
	}
}
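/* Example for fq_peek() above (illustrative timestamps): if the linear
 * list head carries time_to_send == T + 10us while the smallest t_root
 * entry carries T, the rbtree entry is returned; on equal timestamps the
 * linear list head wins, preserving FIFO order.
 */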
/* Remove one skb from flow queue.
 * This skb must be the return value of a prior fq_peek().
 */
static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
			   struct sk_buff *skb)
{
	fq_erase_head(sch, flow, skb);
	skb_mark_not_on_list(skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
}

static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
	struct rb_node **p, *parent;
	struct sk_buff *head, *aux;

	head = flow->head;
	if (!head ||
	    fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
		if (!head)
			flow->head = skb;
		else
			flow->tail->next = skb;
		flow->tail = skb;
		skb->next = NULL;
		return;
	}

	p = &flow->t_root.rb_node;
	parent = NULL;

	while (*p) {
		parent = *p;
		aux = rb_to_skb(parent);
		if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, &flow->t_root);
}

static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
				     const struct fq_sched_data *q, u64 now)
{
	return unlikely((s64)skb->tstamp > (s64)(now + q->horizon));
}

static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;
	u64 now;
	u8 band;

	band = fq_prio2band(q->prio2band, skb->priority & TC_PRIO_MAX);
	if (unlikely(q->band_pkt_count[band] >= sch->limit)) {
		q->stat_band_drops[band]++;
		return qdisc_drop(skb, sch, to_free);
	}

	now = ktime_get_ns();
	if (!skb->tstamp) {
		fq_skb_cb(skb)->time_to_send = now;
	} else {
		/* Check if packet timestamp is too far in the future. */
		if (fq_packet_beyond_horizon(skb, q, now)) {
			if (q->horizon_drop) {
				q->stat_horizon_drops++;
				return qdisc_drop(skb, sch, to_free);
			}
			q->stat_horizon_caps++;
			skb->tstamp = now + q->horizon;
		}
		fq_skb_cb(skb)->time_to_send = skb->tstamp;
	}

	f = fq_classify(sch, skb, now);

	if (f != &q->internal) {
		if (unlikely(f->qlen >= q->flow_plimit)) {
			q->stat_flows_plimit++;
			return qdisc_drop(skb, sch, to_free);
		}

		if (fq_flow_is_detached(f)) {
			fq_flow_add_tail(q, f, NEW_FLOW);
			if (time_after(jiffies, f->age + q->flow_refill_delay))
				f->credit = max_t(u32, f->credit, q->quantum);
		}

		f->band = band;
		q->band_pkt_count[band]++;
		fq_skb_cb(skb)->band = band;
		if (f->qlen == 0)
			q->inactive_flows--;
	}

	f->qlen++;
	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
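/* Horizon example (illustrative numbers): with the default 10 second
 * q->horizon, a packet whose skb->tstamp lies 15 seconds in the future is
 * either dropped (horizon_drop == 1, the default, bumping
 * stat_horizon_drops) or has its timestamp capped to now + 10 seconds
 * (bumping stat_horizon_caps).
 */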
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
	unsigned long sample;
	struct rb_node *p;

	if (q->time_next_delayed_flow > now + q->offload_horizon)
		return;

	/* Update unthrottle latency EWMA.
	 * This is cheap and can help diagnosing timer/latency problems.
	 */
	sample = (unsigned long)(now - q->time_next_delayed_flow);
	if ((long)sample > 0) {
		q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
		q->unthrottle_latency_ns += sample >> 3;
	}
	now += q->offload_horizon;

	q->time_next_delayed_flow = ~0ULL;
	while ((p = rb_first(&q->delayed)) != NULL) {
		struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

		if (f->time_next_packet > now) {
			q->time_next_delayed_flow = f->time_next_packet;
			break;
		}
		fq_flow_unset_throttled(q, f);
	}
}

static struct fq_flow_head *fq_pband_head_select(struct fq_perband_flows *pband)
{
	if (pband->credit <= 0)
		return NULL;

	if (pband->new_flows.first)
		return &pband->new_flows;

	return pband->old_flows.first ? &pband->old_flows : NULL;
}
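/* Band weighting example (default quanta assumed): fq_dequeue() below
 * refills pband->credit from quanta of 9 << 16, 3 << 16 and 1 << 16 bytes
 * (576KB, 192KB and 64KB), so under sustained load the three bands are
 * served in a 9:3:1 byte ratio.
 */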
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_perband_flows *pband;
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	unsigned long rate;
	int retry;
	u32 plen;
	u64 now;

	if (!sch->q.qlen)
		return NULL;

	skb = fq_peek(&q->internal);
	if (unlikely(skb)) {
		q->internal.qlen--;
		fq_dequeue_skb(sch, &q->internal, skb);
		goto out;
	}

	now = ktime_get_ns();
	fq_check_throttled(q, now);
	retry = 0;
	pband = &q->band_flows[q->band_nr];
begin:
	head = fq_pband_head_select(pband);
	if (!head) {
		while (++retry <= FQ_BANDS) {
			if (++q->band_nr == FQ_BANDS)
				q->band_nr = 0;
			pband = &q->band_flows[q->band_nr];
			pband->credit = min(pband->credit + pband->quantum,
					    pband->quantum);
			if (pband->credit > 0)
				goto begin;
			retry = 0;
		}
		if (q->time_next_delayed_flow != ~0ULL)
			qdisc_watchdog_schedule_range_ns(&q->watchdog,
							 q->time_next_delayed_flow,
							 q->timer_slack);
		return NULL;
	}
	f = head->first;
	retry = 0;
	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(q, f, OLD_FLOW);
		goto begin;
	}

	skb = fq_peek(f);
	if (skb) {
		u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
					     f->time_next_packet);

		if (now + q->offload_horizon < time_next_packet) {
			head->first = f->next;
			f->time_next_packet = time_next_packet;
			fq_flow_set_throttled(q, f);
			goto begin;
		}
		prefetch(&skb->end);
		if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
			INET_ECN_set_ce(skb);
			q->stat_ce_mark++;
		}
		if (--f->qlen == 0)
			q->inactive_flows++;
		q->band_pkt_count[fq_skb_cb(skb)->band]--;
		fq_dequeue_skb(sch, f, skb);
	} else {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if (head == &pband->new_flows) {
			fq_flow_add_tail(q, f, OLD_FLOW);
		} else {
			fq_flow_set_detached(f);
		}
		goto begin;
	}
	plen = qdisc_pkt_len(skb);
	f->credit -= plen;
	pband->credit -= plen;

	if (!q->rate_enable)
		goto out;

	rate = q->flow_max_rate;

	/* If EDT time was provided for this skb, we need to
	 * update f->time_next_packet only if this qdisc enforces
	 * a flow max rate.
	 */
	if (!skb->tstamp) {
		if (skb->sk)
			rate = min(READ_ONCE(skb->sk->sk_pacing_rate), rate);

		if (rate <= q->low_rate_threshold) {
			f->credit = 0;
		} else {
			plen = max(plen, q->quantum);
			if (f->credit > 0)
				goto out;
		}
	}
	if (rate != ~0UL) {
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			len = div64_ul(len, rate);
		/* Since socket rate can change later,
		 * clamp the delay to 1 second.
		 * Really, providers of too big packets should be fixed!
		 */
		if (unlikely(len > NSEC_PER_SEC)) {
			len = NSEC_PER_SEC;
			q->stat_pkts_too_long++;
		}
		/* Account for schedule/timers drifts.
		 * f->time_next_packet was set when prior packet was sent,
		 * and current time (@now) can be too late by tens of us.
		 */
		if (f->time_next_packet)
			len -= min(len/2, now - f->time_next_packet);
		f->time_next_packet = now + len;
	}
out:
	qdisc_bstats_update(sch, skb);
	return skb;
}
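/* Pacing arithmetic example for the rate handling above (illustrative
 * numbers, quantum clamp ignored): a plen of 1514 bytes at
 * rate == 125,000,000 bytes/sec (1 Gbit/s) gives
 * len = 1514 * NSEC_PER_SEC / rate = 12112 ns, so the flow's next packet
 * is released roughly 12.1 usec later; any lateness of this dequeue is
 * credited back, capped at len/2.
 */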
static void fq_flow_purge(struct fq_flow *flow)
{
	struct rb_node *p = rb_first(&flow->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &flow->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
	flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *root;
	struct rb_node *p;
	struct fq_flow *f;
	unsigned int idx;

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;

	fq_flow_purge(&q->internal);

	if (!q->fq_root)
		return;

	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
		root = &q->fq_root[idx];
		while ((p = rb_first(root)) != NULL) {
			f = rb_entry(p, struct fq_flow, fq_node);
			rb_erase(p, root);

			fq_flow_purge(f);

			kmem_cache_free(fq_flow_cachep, f);
		}
	}
	for (idx = 0; idx < FQ_BANDS; idx++) {
		q->band_flows[idx].new_flows.first = NULL;
		q->band_flows[idx].old_flows.first = NULL;
	}
	q->delayed = RB_ROOT;
	q->flows = 0;
	q->inactive_flows = 0;
	q->throttled_flows = 0;
}

static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
{
	struct rb_node *op, **np, *parent;
	struct rb_root *oroot, *nroot;
	struct fq_flow *of, *nf;
	int fcnt = 0;
	u32 idx;

	for (idx = 0; idx < (1U << old_log); idx++) {
		oroot = &old_array[idx];
		while ((op = rb_first(oroot)) != NULL) {
			rb_erase(op, oroot);
			of = rb_entry(op, struct fq_flow, fq_node);
			if (fq_gc_candidate(of)) {
				fcnt++;
				kmem_cache_free(fq_flow_cachep, of);
				continue;
			}
			nroot = &new_array[hash_ptr(of->sk, new_log)];

			np = &nroot->rb_node;
			parent = NULL;
			while (*np) {
				parent = *np;

				nf = rb_entry(parent, struct fq_flow, fq_node);
				BUG_ON(nf->sk == of->sk);

				if (nf->sk > of->sk)
					np = &parent->rb_right;
				else
					np = &parent->rb_left;
			}

			rb_link_node(&of->fq_node, parent, np);
			rb_insert_color(&of->fq_node, nroot);
		}
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
	kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;
	void *old_fq_root;
	u32 idx;

	if (q->fq_root && log == q->fq_trees_log)
		return 0;

	/* If XPS was setup, we can allocate memory on right NUMA node */
	array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
			      netdev_queue_numa_node_read(sch->dev_queue));
	if (!array)
		return -ENOMEM;

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	sch_tree_lock(sch);

	old_fq_root = q->fq_root;
	if (old_fq_root)
		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_root = array;
	WRITE_ONCE(q->fq_trees_log, log);

	sch_tree_unlock(sch);

	fq_free(old_fq_root);

	return 0;
}

static const struct netlink_range_validation iq_range = {
	.max = INT_MAX,
};

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_UNSPEC]			= { .strict_start_type = TCA_FQ_TIMER_SLACK },

	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM]	= NLA_POLICY_FULL_RANGE(NLA_U32, &iq_range),
	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
	[TCA_FQ_ORPHAN_MASK]		= { .type = NLA_U32 },
	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
	[TCA_FQ_CE_THRESHOLD]		= { .type = NLA_U32 },
	[TCA_FQ_TIMER_SLACK]		= { .type = NLA_U32 },
	[TCA_FQ_HORIZON]		= { .type = NLA_U32 },
	[TCA_FQ_HORIZON_DROP]		= { .type = NLA_U8 },
	[TCA_FQ_PRIOMAP]		= NLA_POLICY_EXACT_LEN(sizeof(struct tc_prio_qopt)),
	[TCA_FQ_WEIGHTS]		= NLA_POLICY_EXACT_LEN(FQ_BANDS * sizeof(s32)),
	[TCA_FQ_OFFLOAD_HORIZON]	= { .type = NLA_U32 },
};

/* compress a u8 array with all elems <= 3 to an array of 2-bit fields */
static void fq_prio2band_compress_crumb(const u8 *in, u8 *out)
{
	const int num_elems = TC_PRIO_MAX + 1;
	u8 tmp[FQ_PRIO2BAND_CRUMB_SIZE];
	int i;

	memset(tmp, 0, sizeof(tmp));
	for (i = 0; i < num_elems; i++)
		tmp[i / 4] |= in[i] << (2 * (i & 0x3));

	for (i = 0; i < FQ_PRIO2BAND_CRUMB_SIZE; i++)
		WRITE_ONCE(out[i], tmp[i]);
}

static void fq_prio2band_decompress_crumb(const u8 *in, u8 *out)
{
	const int num_elems = TC_PRIO_MAX + 1;
	int i;

	for (i = 0; i < num_elems; i++)
		out[i] = fq_prio2band(in, i);
}

static int fq_load_weights(struct fq_sched_data *q,
			   const struct nlattr *attr,
			   struct netlink_ext_ack *extack)
{
	s32 *weights = nla_data(attr);
	int i;

	for (i = 0; i < FQ_BANDS; i++) {
		if (weights[i] < FQ_MIN_WEIGHT) {
			NL_SET_ERR_MSG_FMT_MOD(extack, "Weight %d less than minimum allowed %d",
					       weights[i], FQ_MIN_WEIGHT);
			return -EINVAL;
		}
	}
	for (i = 0; i < FQ_BANDS; i++)
		WRITE_ONCE(q->band_flows[i].quantum, weights[i]);
	return 0;
}
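/* Compression example (assuming a pfifo_fast style map where priorities
 * 0..3 map to bands 1, 2, 2, 2): fq_prio2band_compress_crumb() above packs
 * them into tmp[0] = 1 | 2 << 2 | 2 << 4 | 2 << 6 = 0xa9, and
 * fq_prio2band_decompress_crumb() recovers the original sixteen bytes.
 */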
static int fq_load_priomap(struct fq_sched_data *q,
			   const struct nlattr *attr,
			   struct netlink_ext_ack *extack)
{
	const struct tc_prio_qopt *map = nla_data(attr);
	int i;

	if (map->bands != FQ_BANDS) {
		NL_SET_ERR_MSG_MOD(extack, "FQ only supports 3 bands");
		return -EINVAL;
	}
	for (i = 0; i < TC_PRIO_MAX + 1; i++) {
		if (map->priomap[i] >= FQ_BANDS) {
			NL_SET_ERR_MSG_FMT_MOD(extack, "FQ priomap field %d maps to a too high band %d",
					       i, map->priomap[i]);
			return -EINVAL;
		}
	}
	fq_prio2band_compress_crumb(map->priomap, q->prio2band);
	return 0;
}
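/* Validation example for fq_load_priomap() above: a TCA_FQ_PRIOMAP
 * attribute with bands != 3 is rejected outright, as is any priomap entry
 * >= FQ_BANDS; with FQ_BANDS == 3 the only accepted band values are
 * 0, 1 and 2.
 */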
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	unsigned drop_len = 0;
	u32 fq_log;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		WRITE_ONCE(sch->limit,
			   nla_get_u32(tb[TCA_FQ_PLIMIT]));

	if (tb[TCA_FQ_FLOW_PLIMIT])
		WRITE_ONCE(q->flow_plimit,
			   nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]));

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0 && quantum <= (1 << 20)) {
			WRITE_ONCE(q->quantum, quantum);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
			err = -EINVAL;
		}
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		WRITE_ONCE(q->initial_quantum,
			   nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]));

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE]) {
		u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

		WRITE_ONCE(q->flow_max_rate,
			   (rate == ~0U) ? ~0UL : rate);
	}
	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		WRITE_ONCE(q->low_rate_threshold,
			   nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]));

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			WRITE_ONCE(q->rate_enable,
				   enable);
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		WRITE_ONCE(q->flow_refill_delay,
			   usecs_to_jiffies(usecs_delay));
	}

	if (!err && tb[TCA_FQ_PRIOMAP])
		err = fq_load_priomap(q, tb[TCA_FQ_PRIOMAP], extack);

	if (!err && tb[TCA_FQ_WEIGHTS])
		err = fq_load_weights(q, tb[TCA_FQ_WEIGHTS], extack);

	if (tb[TCA_FQ_ORPHAN_MASK])
		WRITE_ONCE(q->orphan_mask,
			   nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]));

	if (tb[TCA_FQ_CE_THRESHOLD])
		WRITE_ONCE(q->ce_threshold,
			   (u64)NSEC_PER_USEC *
			   nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]));

	if (tb[TCA_FQ_TIMER_SLACK])
		WRITE_ONCE(q->timer_slack,
			   nla_get_u32(tb[TCA_FQ_TIMER_SLACK]));

	if (tb[TCA_FQ_HORIZON])
		WRITE_ONCE(q->horizon,
			   (u64)NSEC_PER_USEC *
			   nla_get_u32(tb[TCA_FQ_HORIZON]));

	if (tb[TCA_FQ_HORIZON_DROP])
		WRITE_ONCE(q->horizon_drop,
			   nla_get_u8(tb[TCA_FQ_HORIZON_DROP]));

	if (tb[TCA_FQ_OFFLOAD_HORIZON]) {
		u64 offload_horizon = (u64)NSEC_PER_USEC *
				      nla_get_u32(tb[TCA_FQ_OFFLOAD_HORIZON]);

		if (offload_horizon <= qdisc_dev(sch)->max_pacing_offload_horizon) {
			WRITE_ONCE(q->offload_horizon, offload_horizon);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "invalid offload_horizon");
			err = -EINVAL;
		}
	}
	if (!err) {

		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

	sch_tree_unlock(sch);
	return err;
}

static void fq_destroy(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);

	fq_reset(sch);
	fq_free(q->fq_root);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	int i, err;

	sch->limit		= 10000;
	q->flow_plimit		= 100;
	q->quantum		= 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum	= 10 * psched_mtu(qdisc_dev(sch));
	q->flow_refill_delay	= msecs_to_jiffies(40);
	q->flow_max_rate	= ~0UL;
	q->time_next_delayed_flow = ~0ULL;
	q->rate_enable		= 1;
	for (i = 0; i < FQ_BANDS; i++) {
		q->band_flows[i].new_flows.first = NULL;
		q->band_flows[i].old_flows.first = NULL;
	}
	q->band_flows[0].quantum = 9 << 16;
	q->band_flows[1].quantum = 3 << 16;
	q->band_flows[2].quantum = 1 << 16;
	q->delayed		= RB_ROOT;
	q->fq_root		= NULL;
	q->fq_trees_log		= ilog2(1024);
	q->orphan_mask		= 1024 - 1;
	q->low_rate_threshold	= 550000 / 8;

	q->timer_slack = 10 * NSEC_PER_USEC; /* 10 usec of hrtimer slack */

	q->horizon = 10ULL * NSEC_PER_SEC; /* 10 seconds */
	q->horizon_drop = 1; /* by default, drop packets beyond horizon */

	/* Default ce_threshold of 4294 seconds */
	q->ce_threshold		= (u64)NSEC_PER_USEC * ~0U;

	fq_prio2band_compress_crumb(sch_default_prio2band, q->prio2band);
	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

	if (opt)
		err = fq_change(sch, opt, extack);
	else
		err = fq_resize(sch, q->fq_trees_log);

	return err;
}
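/* Default sizing example for fq_init() above: psched_mtu() is
 * dev->mtu + dev->hard_header_len, so on a standard 1500 byte MTU Ethernet
 * device the defaults come out as quantum = 2 * 1514 = 3028 bytes and
 * initial_quantum = 15140 bytes, i.e. roughly two and ten full-size
 * frames respectively.
 */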
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt prio = {
		.bands = FQ_BANDS,
	};
	struct nlattr *opts;
	u64 offload_horizon;
	u64 ce_threshold;
	s32 weights[3];
	u64 horizon;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	ce_threshold = READ_ONCE(q->ce_threshold);
	do_div(ce_threshold, NSEC_PER_USEC);

	horizon = READ_ONCE(q->horizon);
	do_div(horizon, NSEC_PER_USEC);

	offload_horizon = READ_ONCE(q->offload_horizon);
	do_div(offload_horizon, NSEC_PER_USEC);

	if (nla_put_u32(skb, TCA_FQ_PLIMIT,
			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT,
			READ_ONCE(q->flow_plimit)) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM,
			READ_ONCE(q->quantum)) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM,
			READ_ONCE(q->initial_quantum)) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE,
			READ_ONCE(q->rate_enable)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
			min_t(unsigned long,
			      READ_ONCE(q->flow_max_rate), ~0U)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(READ_ONCE(q->flow_refill_delay))) ||
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK,
			READ_ONCE(q->orphan_mask)) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
			READ_ONCE(q->low_rate_threshold)) ||
	    nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG,
			READ_ONCE(q->fq_trees_log)) ||
	    nla_put_u32(skb, TCA_FQ_TIMER_SLACK,
			READ_ONCE(q->timer_slack)) ||
	    nla_put_u32(skb, TCA_FQ_HORIZON, (u32)horizon) ||
	    nla_put_u32(skb, TCA_FQ_OFFLOAD_HORIZON, (u32)offload_horizon) ||
	    nla_put_u8(skb, TCA_FQ_HORIZON_DROP,
		       READ_ONCE(q->horizon_drop)))
		goto nla_put_failure;

	fq_prio2band_decompress_crumb(q->prio2band, prio.priomap);
	if (nla_put(skb, TCA_FQ_PRIOMAP, sizeof(prio), &prio))
		goto nla_put_failure;

	weights[0] = READ_ONCE(q->band_flows[0].quantum);
	weights[1] = READ_ONCE(q->band_flows[1].quantum);
	weights[2] = READ_ONCE(q->band_flows[2].quantum);
	if (nla_put(skb, TCA_FQ_WEIGHTS, sizeof(weights), &weights))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_fq_qd_stats st;
	int i;

	st.pad = 0;

	sch_tree_lock(sch);

	st.gc_flows		  = q->stat_gc_flows;
	st.highprio_packets	  = 0;
	st.fastpath_packets	  = q->internal.stat_fastpath_packets;
	st.tcp_retrans		  = 0;
	st.throttled		  = q->stat_throttled;
	st.flows_plimit		  = q->stat_flows_plimit;
	st.pkts_too_long	  = q->stat_pkts_too_long;
	st.allocation_errors	  = q->stat_allocation_errors;
	st.time_next_delayed_flow = q->time_next_delayed_flow + q->timer_slack -
				    ktime_get_ns();
	st.flows		  = q->flows;
	st.inactive_flows	  = q->inactive_flows;
	st.throttled_flows	  = q->throttled_flows;
	st.unthrottle_latency_ns  = min_t(unsigned long,
					  q->unthrottle_latency_ns, ~0U);
	st.ce_mark		  = q->stat_ce_mark;
	st.horizon_drops	  = q->stat_horizon_drops;
	st.horizon_caps		  = q->stat_horizon_caps;
	for (i = 0; i < FQ_BANDS; i++) {
		st.band_drops[i]     = q->stat_band_drops[i];
		st.band_pkt_count[i] = q->band_pkt_count[i];
	}
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		=	"fq",
	.priv_size	=	sizeof(struct fq_sched_data),

	.enqueue	=	fq_enqueue,
	.dequeue	=	fq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_init,
	.reset		=	fq_reset,
	.destroy	=	fq_destroy,
	.change		=	fq_change,
	.dump		=	fq_dump,
	.dump_stats	=	fq_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("fq");

static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}

static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue Packet Scheduler");